Merge branch 'develop' of https://github.com/nicolargo/glances into develop

This commit is contained in:
Drakarah 2025-11-23 10:09:11 +00:00
commit 2146673841
210 changed files with 17254 additions and 10414 deletions

View File

@ -8,10 +8,8 @@
!/glances/outputs/static
# Include Requirements files
!/requirements.txt
!/all-requirements.txt
!/docker-requirements.txt
!/webui-requirements.txt
!/optional-requirements.txt
# Include Config file
!/docker-compose/glances.conf
@ -19,3 +17,6 @@
# Include Binary file
!/docker-bin.sh
# Include TOML file
!/pyproject.toml

View File

@ -12,9 +12,9 @@ jobs:
if: github.event_name == 'push'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: "3.13"
- name: Install pypa/build
@ -45,7 +45,7 @@ jobs:
id-token: write
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/
@ -54,6 +54,7 @@ jobs:
with:
skip-existing: true
attestations: false
print-hash: true
pypi_test:
name: Publish Python 🐍 distribution 📦 to TestPyPI
@ -69,7 +70,7 @@ jobs:
id-token: write
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/

View File

@ -4,7 +4,7 @@ name: build_docker
env:
DEFAULT_DOCKER_IMAGE: nicolargo/glances
PUSH_BRANCH: ${{ 'refs/heads/develop' == github.ref || 'refs/heads/master' == github.ref || startsWith(github.ref, 'refs/tags/v') }}
PUSH_BRANCH: ${{ 'refs/heads/develop' == github.ref || startsWith(github.ref, 'refs/tags/v') }}
# Alpine image platform: https://hub.docker.com/_/alpine
# linux/arm/v6,linux/arm/v7 do not work (timeout during the build)
DOCKER_PLATFORMS: linux/amd64,linux/arm64/v8
@ -36,15 +36,11 @@ jobs:
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/v}
TAG_ARRAY="[{ \"target\": \"minimal\", \"tag\": \"${VERSION}\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"${VERSION}-full\" }]"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"minimal\", \"tag\": \"latest\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"${VERSION}-full\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"latest-full\" }]"
elif [[ $GITHUB_REF == refs/heads/develop ]]; then
TAG_ARRAY="[{ \"target\": \"dev\", \"tag\": \"dev\" }]"
elif [[ $GITHUB_REF == refs/heads/master ]]; then
TAG_ARRAY="[{ \"target\": \"minimal\", \"tag\": \"latest\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"latest-full\" }]"
elif [[ $GITHUB_REF == refs/heads/main ]]; then
TAG_ARRAY="[{ \"target\": \"minimal\", \"tag\": \"latest\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"latest-full\" }]"
else
TAG_ARRAY="[]"
fi
@ -63,7 +59,7 @@ jobs:
tag: ${{ fromJson(needs.create_docker_images_list.outputs.tags) }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Retrieve Repository Docker metadata
id: docker_meta

View File

@ -11,7 +11,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Run Trivy vulnerability scanner in repo mode
uses: aquasecurity/trivy-action@master

View File

@ -10,7 +10,7 @@ jobs:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
- uses: actions/stale@v10
with:
days-before-issue-stale: 90
days-before-issue-close: -1

View File

@ -22,7 +22,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL

View File

@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Check formatting with Ruff
uses: chartboost/ruff-action@v1
@ -37,14 +37,14 @@ jobs:
runs-on: ubuntu-24.04
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
@ -58,11 +58,6 @@ jobs:
run: |
python -m pytest ./tests/test_core.py
# Error appear with h11, not related to Glances
# Should be tested if correction is done
# Installed c:\hostedtoolcache\windows\python\3.9.13\x64\lib\site-packages\exceptiongroup-1.2.1-py3.9.egg
# error: h11 0.14.0 is installed but h11<0.13,>=0.11 is required by {'httpcore'}
# Error: Process completed with exit code 1.
test-windows:
needs: source-code-checks
@ -70,14 +65,15 @@ jobs:
runs-on: windows-2025
strategy:
matrix:
# Windows-curses not available for Python 3.13 for the moment
python-version: ["3.9", "3.10", "3.11", "3.12"]
# Windows-curses not available for Python 3.14 for the moment
# See https://github.com/zephyrproject-rtos/windows-curses/issues/76
python-version: ["3.13"]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
@ -96,18 +92,18 @@ jobs:
needs: source-code-checks
# https://github.com/actions/runner-images?tab=readme-ov-file#available-images
runs-on: macos-14
runs-on: macos-15
strategy:
matrix:
# Only test the latest stable version
python-version: ["3.13"]
python-version: ["3.14"]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'

View File

@ -14,9 +14,9 @@ jobs:
# See supported Node.js release schedule at https://nodejs.org/en/about/releases/
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Glances will be built with Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v4
uses: actions/setup-node@v5
with:
node-version: ${{ matrix.node-version }}
cache: 'npm'

10
.gitignore vendored
View File

@ -23,6 +23,7 @@ local.properties
.classpath
.settings/
.loadpath
.ipynb_checkpoints/
# External tool builders
.externalToolBuilders/
@ -63,7 +64,14 @@ bower_components/
/*_source.tar.bz2
# Virtual env
/venv*/
.venv-uv/
.venv/
uv.lock
.python-version
# Test
.coverage
tests-data/issues/*/config/
# Local SSL certificates
glances.local*.pem

View File

@ -1,22 +1,106 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
- repo: https://github.com/gitleaks/gitleaks
rev: v8.24.2
hooks:
- id: check-ast
- id: check-docstring-first
- id: check-json
- id: check-merge-conflict
- id: check-shebang-scripts-are-executable
- id: check-toml
- id: check-yaml
- id: debug-statements
- id: detect-private-key
- id: mixed-line-ending
- id: requirements-txt-fixer
- id: gitleaks
name: "🔒 security · Detect hardcoded secrets"
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.5
rev: v0.14.1
hooks:
- id: ruff-format
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
- id: ruff-check
name: "🐍 python · Linter with Ruff"
types_or: [ python, pyi ]
args: [ --fix, --exit-non-zero-on-fix ]
- id: ruff-format
name: "🐍 python · Format with Ruff"
types_or: [ python, pyi ]
# - repo: https://github.com/RobertCraigie/pyright-python
# rev: v1.1.391
# hooks:
# - id: pyright
# name: "🐍 python · Check types"
# - repo: https://github.com/biomejs/pre-commit
# rev: "v2.3.7"
# hooks:
# - id: biome-check
# name: "🟨 javascript · Lint, format, and safe fixes with Biome"
- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.35.0
hooks:
- id: check-github-workflows
name: "🐙 github-actions · Validate gh workflow files"
args: ["--verbose"]
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.11.0.1
hooks:
- id: shellcheck
name: "🐚 shell · Lint shell scripts"
- repo: https://github.com/openstack/bashate
rev: 2.1.1
hooks:
- id: bashate
name: "🐚 shell · Check shell script code style"
entry: bashate --error . --ignore=E006
- repo: https://github.com/mrtazz/checkmake.git
rev: 0.2.2
hooks:
- id: checkmake
name: "🐮 Makefile · Lint Makefile"
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: check-executables-have-shebangs
name: "📁 filesystem/⚙️ exec · Verify shebang presence"
- id: check-shebang-scripts-are-executable
name: "📁 filesystem/⚙️ exec · Verify script permissions"
- id: check-case-conflict
name: "📁 filesystem/📝 names · Check case sensitivity"
- id: destroyed-symlinks
name: "📁 filesystem/🔗 symlink · Detect broken symlinks"
- id: check-merge-conflict
name: "🌳 git · Detect conflict markers"
- id: forbid-new-submodules
name: "🌳 git · Prevent submodule creation"
- id: no-commit-to-branch
name: "🌳 git · Protect main branches"
args: ["--branch", "main", "--branch", "master"]
- id: check-added-large-files
name: "🌳 git · Block large file commits"
args: ['--maxkb=5000']
- id: check-ast
name: "🐍 python/🔍 quality · Validate Python AST"
- id: check-docstring-first
name: "🐍 python/📝 style · Enforce docstring at top"
- id: check-json
name: "📄 formats/json · Validate JSON files"
- id: check-shebang-scripts-are-executable
name: "📁 filesystem/⚙️ exec · Ensure scripts are executable"
- id: check-toml
name: "📄 formats/toml · Validate TOML files"
- id: check-yaml
name: "📄 formats/yaml · Validate YAML syntax"
- id: debug-statements
name: "🐍 python/🪲 debug · Detect debug statements"
- id: detect-private-key
name: "🔐 security · Detect private keys"
- id: mixed-line-ending
name: "📄 text/↩️ newline · Normalize line endings"
- id: requirements-txt-fixer
name: "🐍 python/📦 deps · Sort requirements.txt"
- repo: local
hooks:
- id: find-duplicate-lines
name: "❗local script · Find duplicate lines at the end of file"
entry: bash tests-data/tools/find-duplicate-lines.sh
language: system
types: [python]
pass_filenames: false

View File

@ -31,4 +31,4 @@ sphinx:
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
install:
- requirements: doc-requirements.txt
- requirements: dev-requirements.txt

View File

@ -13,6 +13,9 @@ PGP Public key: gpg --keyserver pgp.mit.edu --recv-keys 0xba43c11f2c8b4347
RazCrimson (maintainer of the Glances project)
https://github.com/RazCrimson
Ariel Otilibili (aka) ariel-anieli (for the huge work on code quality)
https://github.com/ariel-anieli
Alessio Sergi (aka) Al3hex (thank you for the great job on this project)
https://twitter.com/al3hex
https://github.com/asergi

View File

@ -3,9 +3,12 @@ include CONTRIBUTING.md
include COPYING
include NEWS.rst
include README.rst
include README-pypi.rst
include SECURITY.md
include conf/glances.conf
include conf/fetch-templates/*.jinja
include requirements.txt
include all-requirements.txt
recursive-include docs *
recursive-include glances *.py
recursive-include glances/outputs/static *

238
Makefile
View File

@ -1,17 +1,6 @@
PORT ?= 8008
venv_full:= venv/bin
venv_min := venv-min/bin
CONF := conf/glances.conf
PIP := $(venv_full)/pip
PYTHON := $(venv_full)/python
PYTEST := $(venv_full)/python -m pytest
LASTTAG = $(shell git describe --tags --abbrev=0)
VENV_TYPES := full min
VENV_PYTHON := $(VENV_TYPES:%=venv-%-python)
VENV_UPG := $(VENV_TYPES:%=venv-%-upgrade)
VENV_DEPS := $(VENV_TYPES:%=venv-%)
VENV_INST_UPG := $(VENV_DEPS) $(VENV_UPG)
PORT ?= 8008
CONF := conf/glances.conf
LASTTAG = $(shell git describe --tags --abbrev=0)
IMAGES_TYPES := full minimal
DISTROS := alpine ubuntu
@ -26,94 +15,110 @@ PODMAN_SOCK ?= /run/user/$(shell id -u)/podman/podman.sock
DOCKER_SOCK ?= /var/run/docker.sock
DOCKER_SOCKS := -v $(PODMAN_SOCK):$(PODMAN_SOCK):ro -v $(DOCKER_SOCK):$(DOCKER_SOCK):ro
DOCKER_OPTS := --rm -e TZ="${TZ}" -e GLANCES_OPT="" --pid host --network host
UV_RUN := .venv-uv/bin/uv
# if the command is only `make`, the default tasks will be the printing of the help.
.DEFAULT_GOAL := help
.PHONY: help test docs docs-server venv venv-min
.PHONY: help test docs docs-server venv requirements profiling docker all clean
help: ## List all make commands available
@grep -E '^[\.a-zA-Z_%-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk -F ":" '{print $1}' | \
grep -v % | \
sed 's/\\//g' | \
sort | \
grep -v % | sed 's/\\//g' | sort | \
awk 'BEGIN {FS = ":[^:]*?##"}; {printf "\033[1;34mmake %-50s\033[0m %s\n", $$1, $$2}'
# ===================================================================
# Virtualenv
# ===================================================================
venv-%-upgrade: UPGRADE = --upgrade
# install-uv: ## Instructions to install the UV tool
# @echo "Install the UV tool (https://astral.sh/uv/)"
# @echo "Please install the UV tool manually"
# @echo "For example with: curl -LsSf https://astral.sh/uv/install.sh | sh"
# @echo "Or via a package manager of your distribution"
# @echo "For example for Snap: snap install astral-uv"
define DEFINE_VARS_FOR_TYPE
venv-$(TYPE) venv-$(TYPE)-upgrade: VIRTUAL_ENV = $(venv_$(TYPE))
endef
install-uv: ## Install UV tool in a specific virtualenv
python3 -m venv .venv-uv
.venv-uv/bin/pip install uv
$(foreach TYPE,$(VENV_TYPES),$(eval $(DEFINE_VARS_FOR_TYPE)))
upgrade-uv: ## Upgrade the UV tool
.venv-uv/bin/pip install --upgrade pip
.venv-uv/bin/pip install --upgrade uv
$(VENV_PYTHON): venv-%-python:
virtualenv -p python3 $(if $(filter full,$*),venv,venv-$*)
venv: ## Create the virtualenv with all dependencies
$(UV_RUN) sync --all-extras --no-group dev
$(VENV_INST_UPG): venv-%:
$(if $(UPGRADE),$(VIRTUAL_ENV)/pip install --upgrade pip,)
$(foreach REQ,$(REQS), $(VIRTUAL_ENV)/pip install $(UPGRADE) -r $(REQ);)
$(if $(PRE_COMMIT),$(VIRTUAL_ENV)/pre-commit install --hook-type pre-commit,)
venv-upgrade venv-switch-to-full: ## Upgrade the virtualenv with all dependencies
$(UV_RUN) sync --upgrade --all-extras
venv-python: $(VENV_PYTHON) ## Install all Python 3 venv
venv: $(VENV_DEPS) ## Install all Python 3 dependencies
venv-upgrade: $(VENV_UPG) ## Upgrade all Python 3 dependencies
venv-min: ## Create the virtualenv with minimal dependencies
$(UV_RUN) sync
# For full installation (with optional dependencies)
venv-upgrade-min venv-switch-to-min: ## Upgrade the virtualenv with minimal dependencies
$(UV_RUN) sync --upgrade
venv-full venv-full-upgrade: REQS = requirements.txt optional-requirements.txt dev-requirements.txt doc-requirements.txt
venv-clean: ## Remove the virtualenv
rm -rf .venv
venv-full-python: ## Install Python 3 venv
venv-full: venv-python ## Install Python 3 run-time
venv-full-upgrade: ## Upgrade Python 3 run-time dependencies
venv-full: PRE_COMMIT = 1
venv-dev: ## Create the virtualenv with dev dependencies
$(UV_RUN) sync --dev --all-extras
$(UV_RUN) run pre-commit install --hook-type pre-commit
# For minimal installation (without optional dependencies)
# ===================================================================
# Requirements
#
# Note: the --no-hashes option should be used because pip (in CI) has
# issues with hashes.
# ===================================================================
venv-min venv-min-upgrade: REQS = requirements.txt dev-requirements.txt doc-requirements.txt
requirements-min: ## Generate the requirements.txt files (minimal dependencies)
$(UV_RUN) export --no-emit-workspace --no-hashes --no-group dev --output-file requirements.txt
venv-min-python: ## Install Python 3 venv minimal
venv-min: venv-min-python ## Install Python 3 minimal run-time dependencies
venv-min-upgrade: ## Upgrade Python 3 minimal run-time dependencies
requirements-all: ## Generate the all-requirements.txt files (all dependencies)
$(UV_RUN) export --no-emit-workspace --no-hashes --all-extras --no-group dev --output-file all-requirements.txt
requirements-docker: ## Generate the docker-requirements.txt files (Docker specific dependencies)
$(UV_RUN) export --no-emit-workspace --no-hashes --no-group dev --extra containers --extra web --output-file docker-requirements.txt
requirements-dev: ## Generate the dev-requirements.txt files (dev dependencies)
$(UV_RUN) export --no-hashes --only-dev --output-file dev-requirements.txt
requirements: requirements-min requirements-all requirements-dev requirements-docker ## Generate all the requirements files
requirements-upgrade: venv-upgrade requirements ## Upgrade the virtualenv and regenerate all the requirements files
# ===================================================================
# Tests
# ===================================================================
test: ## Run All unit tests
$(PYTEST)
$(UV_RUN) run pytest
test-core: ## Run Core unit tests
$(PYTEST) tests/test_core.py
$(UV_RUN) run pytest tests/test_core.py
test-api: ## Run API unit tests
$(UV_RUN) run pytest tests/test_api.py
test-memoryleak: ## Run Memory-leak unit tests
$(PYTEST) tests/test_memoryleak.py
$(UV_RUN) run pytest tests/test_memoryleak.py
test-perf: ## Run Perf unit tests
$(PYTEST) tests/test_perf.py
$(UV_RUN) run pytest tests/test_perf.py
test-restful: ## Run Restful API unit tests
$(PYTEST) tests/test_restful.py
$(UV_RUN) run pytest tests/test_restful.py
test-webui: ## Run WebUI unit tests
$(PYTEST) tests/test_webui.py
$(UV_RUN) run pytest tests/test_webui.py
test-xmlrpc: ## Run XMLRPC API unit tests
$(PYTEST) tests/test_xmlrpc.py
$(UV_RUN) run pytest tests/test_xmlrpc.py
test-with-upgrade: venv-upgrade test ## Upgrade deps and run unit tests
test-min: ## Run core unit tests in minimal environment
$(venv_min)/python -m pytest tests/test_core.py
test-min-with-upgrade: venv-min-upgrade ## Upgrade deps and run unit tests in minimal environment
$(venv_min)/python -m pytest tests/test_core.py
test-export-csv: ## Run interface tests with CSV
/bin/bash ./tests/test_export_csv.sh
@ -129,26 +134,33 @@ test-export-influxdb-v3: ## Run interface tests with InfluxDB version 3 (Core)
test-export-timescaledb: ## Run interface tests with TimescaleDB
/bin/bash ./tests/test_export_timescaledb.sh
test-export: test-export-csv test-export-json test-export-influxdb-v1 test-export-influxdb-v3 test-export-timescaledb## Tests all exports
test-exports: test-export-csv test-export-json test-export-influxdb-v1 test-export-influxdb-v3 test-export-timescaledb ## Tests all exports
# ===================================================================
# Linters, profilers and cyber security
# ===================================================================
pre-commit: ## Run pre-commit hooks
$(UV_RUN) run pre-commit run --all-files
find-duplicate-lines: ## Search for duplicate lines in files
/bin/bash tests-data/tools/find-duplicate-lines.sh
format: ## Format the code
$(venv_full)/python -m ruff format .
$(UV_RUN) run ruff format .
lint: ## Lint the code.
$(venv_full)/python -m ruff check . --fix
$(UV_RUN) run ruff check . --fix
lint-readme: ## Lint the main README.rst file
$(venv_full)/python -m rstcheck README.rst
$(UV_RUN) run rstcheck README.rst
$(UV_RUN) run rstcheck README-pypi.rst
codespell: ## Run codespell to fix common misspellings in text files
$(venv_full)/codespell -S .git,./docs/_build,./Glances.egg-info,./venv*,./glances/outputs,*.svg -L hart,bu,te,statics -w
$(UV_RUN) run codespell -S .git,./docs/_build,./Glances.egg-info,./venv*,./glances/outputs,*.svg -L hart,bu,te,statics -w
semgrep: ## Run semgrep to find bugs and enforce code standards
$(venv_full)/semgrep scan --config=auto
$(UV_RUN) run semgrep scan --config=auto
profiling-%: SLEEP = 3
profiling-%: TIMES = 30
@ -162,27 +174,27 @@ endef
profiling-gprof: CPROF = glances.cprof
profiling-gprof: ## Callgraph profiling (need "apt install graphviz")
$(DISPLAY-BANNER)
$(PYTHON) -m cProfile -o $(CPROF) run-venv.py -C $(CONF) --stop-after $(TIMES)
$(venv_full)/gprof2dot -f pstats $(CPROF) | dot -Tsvg -o $(OUT_DIR)/glances-cgraph.svg
$(UV_RUN) run python -m cProfile -o $(CPROF) run-venv.py -C $(CONF) --stop-after $(TIMES)
$(UV_RUN) run gprof2dot -f pstats $(CPROF) | dot -Tsvg -o $(OUT_DIR)/glances-cgraph.svg
rm -f $(CPROF)
profiling-pyinstrument: ## PyInstrument profiling
$(DISPLAY-BANNER)
$(PIP) install pyinstrument
$(PYTHON) -m pyinstrument -r html -o $(OUT_DIR)/glances-pyinstrument.html -m glances -C $(CONF) --stop-after $(TIMES)
$(UV_RUN) add pyinstrument
$(UV_RUN) run pyinstrument -r html -o $(OUT_DIR)/glances-pyinstrument.html -m glances -C $(CONF) --stop-after $(TIMES)
profiling-pyspy: ## Flame profiling
$(DISPLAY-BANNER)
$(venv_full)/py-spy record -o $(OUT_DIR)/glances-flame.svg -d 60 -s -- $(PYTHON) run-venv.py -C $(CONF) --stop-after $(TIMES)
$(UV_RUN) run py-spy record -o $(OUT_DIR)/glances-flame.svg -d 60 -s -- .venv-uv/bin/uv run python run-venv.py -C $(CONF) --stop-after $(TIMES)
profiling: profiling-gprof profiling-pyinstrument profiling-pyspy ## Profiling of the Glances software
trace-malloc: ## Trace the malloc() calls
@echo "Malloc test is running, please wait ~30 secondes..."
$(PYTHON) -m glances -C $(CONF) --trace-malloc --stop-after 15 --quiet
$(UV_RUN) run python -m glances -C $(CONF) --trace-malloc --stop-after 15 --quiet
memory-leak: ## Profile memory leaks
$(PYTHON) -m glances -C $(CONF) --memory-leak
$(UV_RUN) run python -m glances -C $(CONF) --memory-leak
memory-profiling: TIMES = 2400
memory-profiling: PROFILE = mprofile_*.dat
@ -191,30 +203,37 @@ memory-profiling: ## Profile memory usage
@echo "It's a very long test (~4 hours)..."
rm -f $(PROFILE)
@echo "1/2 - Start memory profiling with the history option enable"
$(venv_full)/mprof run -T 1 -C run-venv.py -C $(CONF) --stop-after $(TIMES) --quiet
$(venv_full)/mprof plot --output $(OUT_DIR)/glances-memory-profiling-with-history.png
$(UV_RUN) run mprof run -T 1 -C run-venv.py -C $(CONF) --stop-after $(TIMES) --quiet
$(UV_RUN) run mprof plot --output $(OUT_DIR)/glances-memory-profiling-with-history.png
rm -f $(PROFILE)
@echo "2/2 - Start memory profiling with the history option disable"
$(venv_full)/mprof run -T 1 -C run-venv.py -C $(CONF) --disable-history --stop-after $(TIMES) --quiet
$(venv_full)/mprof plot --output $(OUT_DIR)/glances-memory-profiling-without-history.png
$(UV_RUN) run mprof run -T 1 -C run-venv.py -C $(CONF) --disable-history --stop-after $(TIMES) --quiet
$(UV_RUN) run mprof plot --output $(OUT_DIR)/glances-memory-profiling-without-history.png
rm -f $(PROFILE)
# Trivy installation: https://aquasecurity.github.io/trivy/latest/getting-started/installation/
trivy: ## Run Trivy to find vulnerabilities in container images
trivy fs .
trivy: ## Run Trivy to find vulnerabilities
$(UV_RUN) run trivy fs ./glances/
bandit: ## Run Bandit to find vulnerabilities
$(UV_RUN) run bandit glances -r
# ===================================================================
# Docs
# ===================================================================
docs: ## Create the documentation
$(PYTHON) ./generate_openapi.py
$(PYTHON) -m glances -C $(CONF) --api-doc > ./docs/api.rst
$(UV_RUN) run python -m glances -C $(CONF) --api-doc > ./docs/api/python.rst
$(UV_RUN) run python ./generate_openapi.py
$(UV_RUN) run python -m glances -C $(CONF) --api-restful-doc > ./docs/api/restful.rst
cd docs && ./build.sh && cd ..
docs-server: docs ## Start a Web server to serve the documentation
(sleep 2 && sensible-browser "http://localhost:$(PORT)") &
cd docs/_build/html/ && ../../../venv/bin/python -m http.server $(PORT)
cd docs/_build/html/ && .venv-uv/bin/uv run python -m http.server $(PORT)
docs-jupyter: ## Start Jupyter Notebook
$(UV_RUN) run --with jupyter jupyter lab
release-note: ## Generate release note
git --no-pager log $(LASTTAG)..HEAD --first-parent --pretty=format:"* %s"
@ -231,17 +250,19 @@ install: ## Open a Web Browser to the installation procedure
webui webui%: DIR = glances/outputs/static/
webui: ## Build the Web UI
$(PYTHON) -c 'import json; from glances.outputs.glances_curses import _GlancesCurses; print(json.dumps({ "leftMenu": [p for p in _GlancesCurses._left_sidebar if p != "now"]}, indent=4))' > ./glances/outputs/static/js/uiconfig.json
webui-gen-config: ## Generate the Web UI config file
$(UV_RUN) run python ./generate_webui_conf.py > ./glances/outputs/static/js/uiconfig.json
webui: webui-gen-config ## Build the Web UI
cd $(DIR) && npm ci && npm run build
webui-audit: ## Audit the Web UI
cd $(DIR) && npm audit
webui-audit-fix: ## Fix audit the Web UI
webui-audit-fix: webui-gen-config ## Fix audit the Web UI
cd $(DIR) && npm audit fix && npm ci && npm run build
webui-update: ## Update JS dependencies
webui-update: webui-gen-config ## Update JS dependencies
cd $(DIR) && npm update --save && npm ci && npm run build
# ===================================================================
@ -250,7 +271,7 @@ webui-update: ## Update JS dependencies
flatpak: venv-upgrade ## Generate FlatPack JSON file
git clone https://github.com/flatpak/flatpak-builder-tools.git
$(PYTHON) ./flatpak-builder-tools/pip/flatpak-pip-generator glances
$(UV_RUN) run python ./flatpak-builder-tools/pip/flatpak-pip-generator glances
rm -rf ./flatpak-builder-tools
@echo "Now follow: https://github.com/flathub/flathub/wiki/App-Submission"
@ -284,33 +305,33 @@ docker-ubuntu-full: ## Generate local docker image (Ubuntu full)
docker-ubuntu-minimal: ## Generate local docker image (Ubuntu minimal)
docker-ubuntu-dev: ## Generate local docker image (Ubuntu dev)
trivy-docker: ## Run Trivy to find vulnerabilities in Docker images
$(UV_RUN) run trivy image glances:local-alpine-full
$(UV_RUN) run trivy image glances:local-alpine-minimal
$(UV_RUN) run trivy image glances:local-ubuntu-full
$(UV_RUN) run trivy image glances:local-ubuntu-minimal
# ===================================================================
# Run
# ===================================================================
run: ## Start Glances in console mode (also called standalone)
$(PYTHON) -m glances -C $(CONF)
$(UV_RUN) run python -m glances -C $(CONF)
run-debug: ## Start Glances in debug console mode (also called standalone)
$(PYTHON) -m glances -C $(CONF) -d
$(UV_RUN) run python -m glances -C $(CONF) -d
run-local-conf: ## Start Glances in console mode with the system conf file
$(PYTHON) -m glances
$(UV_RUN) run python -m glances
run-local-conf-hide-public: ## Start Glances in console mode with the system conf file and hide public information
$(PYTHON) -m glances --hide-public-info
run-min: ## Start minimal Glances in console mode (also called standalone)
$(venv_min)/python -m glances -C $(CONF)
run-min-debug: ## Start minimal Glances in debug console mode (also called standalone)
$(venv_min)/python -m glances -C $(CONF) -d
run-min-local-conf: ## Start minimal Glances in console mode with the system conf file
$(venv_min)/python -m glances
$(UV_RUN) run python -m glances --hide-public-info
run-like-htop: ## Start Glances with the same features than Htop
$(venv_min)/python -m glances --disable-plugin network,ports,wifi,connections,diskio,fs,irq,folders,raid,smart,sensors,vms,containers,ip,amps --disable-left-sidebar
$(UV_RUN) run python -m glances --disable-plugin network,ports,wifi,connections,diskio,fs,irq,folders,raid,smart,sensors,vms,containers,ip,amps --disable-left-sidebar
run-fetch: ## Start Glances in fetch mode
$(UV_RUN) run python -m glances --fetch
$(DOCKER_RUNTIMES): run-docker-%:
$(DOCKER_RUN) $(DOCKER_OPTS) $(DOCKER_SOCKS) -it glances:local-$*
@ -322,32 +343,35 @@ run-docker-ubuntu-minimal: ## Start Glances Ubuntu Docker minimal in console mod
run-docker-ubuntu-full: ## Start Glances Ubuntu Docker full in console mode
run-docker-ubuntu-dev: ## Start Glances Ubuntu Docker dev in console mode
generate-ssl: ## Generate local and self-signed SSL certificates for dev (needs mkcert)
mkcert glances.local localhost 127.0.0.1 0.0.0.0
run-webserver: ## Start Glances in Web server mode
$(PYTHON) -m glances -C $(CONF) -w
$(UV_RUN) run python -m glances -C $(CONF) -w
run-webserver-local-conf: ## Start Glances in Web server mode with the system conf file
$(PYTHON) -m glances -w
$(UV_RUN) run python -m glances -w
run-webserver-local-conf-hide-public: ## Start Glances in Web server mode with the system conf file and hide public info
$(PYTHON) -m glances -w --hide-public-info
$(UV_RUN) run python -m glances -w --hide-public-info
run-restapiserver: ## Start Glances in REST API server mode
$(PYTHON) -m glances -C $(CONF) -w --disable-webui
$(UV_RUN) run python -m glances -C $(CONF) -w --disable-webui
run-server: ## Start Glances in server mode (RPC)
$(PYTHON) -m glances -C $(CONF) -s
$(UV_RUN) run python -m glances -C $(CONF) -s
run-client: ## Start Glances in client mode (RPC)
$(PYTHON) -m glances -C $(CONF) -c localhost
$(UV_RUN) run python -m glances -C $(CONF) -c localhost
run-browser: ## Start Glances in browser mode (RPC)
$(PYTHON) -m glances -C $(CONF) --browser
$(UV_RUN) run python -m glances -C $(CONF) --browser
run-web-browser: ## Start Web Central Browser
$(PYTHON) -m glances -C $(CONF) -w --browser
$(UV_RUN) run python -m glances -C $(CONF) -w --browser
run-issue: ## Start Glances in issue mode
$(PYTHON) -m glances -C $(CONF) --issue
$(UV_RUN) run python -m glances -C $(CONF) --issue
run-multipass: ## Install and start Glances in a VM (only available on Ubuntu with multipass already installed)
multipass launch -n glances-on-lts lts
@ -357,4 +381,4 @@ run-multipass: ## Install and start Glances in a VM (only available on Ubuntu wi
multipass delete glances-on-lts
show-version: ## Show Glances version number
$(PYTHON) -m glances -C $(CONF) -V
$(UV_RUN) run python -m glances -C $(CONF) -V

162
NEWS.rst
View File

@ -1,6 +1,150 @@
==============================================================================
Glances ChangeLog
============================================================================
==============================================================================
=============
Version 4.4.1
=============
Bug corrected:
* Restful API issue after a while (stats are no more updated) #3333
=============
Version 4.4.0
=============
Breaking changes:
* A new Python API is now available to use Glances as a Python lib in your own development #3237
* In the process list, the long command line is now truncated by default. Use the arrow keys to show the full command line. SHIFT + arrow keys are used to switch between column sorts (TUI).
* Prometheus export format is now more user friendly (see detail in #3283)
Enhancements:
* Make a Glances API in order to use Glances as a Python lib #3237
* Add a new --fetch (neofetch like) option to display a snapshot of the current system status #3281
* Show used port in container section #2054
* Show long command line with arrow key #1553
* Sensors plugin refresh by default every 10 seconds
* Do not call update if a call is done to a specific plugin through the API #3033
* [UI] Process virtual memory display can be disabled by configuration #3299
* Choose between used or available in the mem plugin #3288
* [Experimental] Add export to DuckDB database #3205
* Add Disk I/O Latency stats #1070
* Filter fields to export #3258
* Remove .keys() from loops over dicts #3253
* Remove iterator helpers #3252
Bug corrected:
* [MACOS] Glances not showing Processes on MacOS #3100
* Last dev build broke Homepage API calls ? only 1 widget still working #3322
* Cloud plugin always generate communication with 169.254.169.254, even if the plugin is disabled #3316
* API response delay (3+ minutes) when VMs are running #3317
* [WINDOWS] Glances do not display CPU stat correctly #3155
* Glances hangs if network device (NFS) is not available #3290
* Fix prometheus export format #3283
* Issue #3279 zfs cache and memory math issues #3289
* [MACOS] Glances crashes when I try to filter #3266
* Glances hangs when killing a process with multiple CTRL-C #3264
* Issues after disabling system and processcount plugins #3248
* Headers missing from predefined fields in TUI browser machine list #3250
* Add another check for the famous Netifaces issue - Related to #3219
* Key error 'type' in server_list_static.py (load_server_list) #3247
Continuous integration and documentation:
* Glances now use uv for the dev environment #3025
* Glances is compatible with Python 3.14 #3319
* Glances provides requirements files with specific versions for each release
* Requirements files are now generated dynamically with the make requirements or requirements-upgrade target
* Add duplicate line check in pre-commit (strange behavior with some VSCode extensions)
* Solve issue with multiprocessing exception with Snap package
* Add a test script to identify CPU consumption of the sensors plugin
* Refactor port to take into account netifaces2
* Correct issue with Chrome driver in WebUI unit test
* Upgrade export test with InfluxDB 1.12
* Fix typo of --export-process-filter help message #3314
* In the outdated feature, catch error message if Pypi server not reachable
* Add unit test for auto_unit
* Label error in docs #3286
* Put WebUI conf generator in a dedicated script
* Refactor the Makefile to generate WebUI config file for all webui targets
* Update sensors documentation #3275
* Update docker compose env quote #3273
* Update docker-compose.yml #3249
* Update API doc generation
* Update README with nice icons #3236
* Add documentation for WebUI test
Thanks to all contributors and bug reporters !
Special thanks to:
- Adi
- Bennett Kanuka
- Tim Potter
- Ariel Otilibili
- Boris Okassa
- Lawrence
- Shohei YOSHIDA
- jmwallach
- korn3r
=============
Version 4.3.3
=============
Bug corrected:
* Something in 4.3.2 broke the home assistant add-on for Glances #3238
Thanks to the FastAPI and Home Assistant community for the support.
=============
Version 4.3.2
=============
Enhancements:
* Add stats about running VMS (qemu/libvirt/kvm support through virsh) #1531
* Add support for InfluxDB 3 Core #3182
* (postgre)SQL export support / TimeScaleDB #2814
* CSV column name now include the plugin name - Related to #2394
* Make all results from amps plugins exportable #2394
* Make --stdout (csv and json) compliant with client/server mode #3235
* API history endpoints shows times without timezone #3218
* FR: Sort Sensors by name in proper number order #3132
* In the FS module, do not display threshold for volume mounted in 'ro' (read-only) #3143
* Add a new field in the process list to identify zombie processes #3178
* Update plugin containers display and order #3186
* Implement a basic memory cache with TTL for API call (set to ~1 second) #3202
* Add container inactive_file & limit to InfluxDB2 export #3206
Bug corrected:
* [GPU] AMD Plugin: Operation not permitted #3125
* Container memory stats not displayed #3142
* [WEBUI] Irix mode (per core instead of per CPU percentage) not togglable #3158
* Related to iteritems, itervalues, and iterkeys are no longer needed in Python 3 #3181
* Glances Central Browser should use name instead of IP address for redirection #3103
* Glances breaks if Podman container is started while it is running #3199
Continuous integration and documentation:
* Add a new option --print-completion to generate shell tab completion - #3111
* Improve Restful API documentation embedded in FastAPI #2632
* Upgrade JS libs #3147
* Improve unittest for CSV export #3150
* Improve unittest for InfluxDB plugin #3149
* Code refactoring - Rename plugin class to <Plugin name>Plugin instead of PluginModel #3169
* Refactor code to limit the complexity of update_views method in plugins #3171
Thanks to all contributors and bug reporters !
Special thanks to:
- Ariel Otilibili
- kenrmayfield
=============
Version 4.3.1
@ -369,7 +513,7 @@ See release note in Wiki format: https://github.com/nicolargo/glances/wiki/Glanc
**BREAKING CHANGES:**
* The minimal Python version is 3.8
* The Glances API version 3 is replaced by the version 4. So Restfull API URL is now /api/4/ #2610
* The Glances API version 3 is replaced by the version 4. So Restful API URL is now /api/4/ #2610
* Alias definition change in the configuration file #1735
Glances version 3.x and lower:
@ -394,9 +538,9 @@ Minimal requirements for Glances version 4 are:
* packaging
* ujson
* pydantic
* fastapi (for WebUI / RestFull API)
* uvicorn (for WebUI / RestFull API)
* jinja2 (for WebUI / RestFull API)
* fastapi (for WebUI / RestFul API)
* uvicorn (for WebUI / RestFul API)
* jinja2 (for WebUI / RestFul API)
Majors changes between Glances version 3 and version 4:
@ -456,7 +600,7 @@ Bug corrected:
CI and documentation:
* New logo for Glances version 4.0 #2713
* Update api.rst documentation #2496
* Update api-restful.rst documentation #2496
* Change Renovate config #2729
* Docker compose password unrecognized arguments when applying docs #2698
* Docker includes OS Release Volume mount info #2473
@ -834,7 +978,7 @@ Bugs corrected:
* Threading.Event.isSet is deprecated in Python 3.10 #2017
* Fix code scanning alert - Clear-text logging of sensitive information security #2006
* The gpu temperature unit are displayed incorrectly in web ui bug #2002
* Doc for 'alert' Restfull/JSON API response documentation #1994
* Doc for 'alert' Restful/JSON API response documentation #1994
* Show the spinning state of a disk documentation #1993
* Web server status check endpoint enhancement #1988
* --time parameter being ignored for client/server mode bug #1978
@ -929,7 +1073,7 @@ Bugs corrected:
* [3.2.0/3.2.1] keybinding not working anymore #1904
* InfluxDB/InfluxDB2 Export object has no attribute hostname #1899
Documentation: The "make docs" generate RestFull/API documentation file.
Documentation: The "make docs" generate RestFul/API documentation file.
===============
Version 3.2.1
@ -1956,7 +2100,7 @@ Version 2.1
* Add Glances log message (in the /tmp/glances.log file)
The default log level is INFO, you can switch to the DEBUG mode using the -d option on the command line.
* Add RESTful API to the Web server mode
RESTful API doc: https://github.com/nicolargo/glances/wiki/The-Glances-RESTFULL-JSON-API
RESTful API doc: https://github.com/nicolargo/glances/wiki/The-Glances-RESTFUL-JSON-API
* Improve SNMP fallback mode for Cisco IOS, VMware ESXi
* Add --theme-white feature to optimize display for white background
* Experimental history feature (--enable-history option on the command line)

386
README-pypi.rst Normal file
View File

@ -0,0 +1,386 @@
Glances 🌟
==========
**Glances** is an open-source system cross-platform monitoring tool.
It allows real-time monitoring of various aspects of your system such as
CPU, memory, disk, network usage etc. It also allows monitoring of running processes,
logged in users, temperatures, voltages, fan speeds etc.
It also supports container monitoring, with support for different container management
systems such as Docker and LXC. The information is presented in an easy-to-read dashboard
and can also be used for remote monitoring of systems via a web interface or command
line interface. It is easy to install and use and can be customized to show only
the information that you are interested in.
In client/server mode, remote monitoring could be done via terminal,
Web interface or API (XML-RPC and RESTful).
Stats can also be exported to files or external time/value databases, CSV or direct
output to STDOUT.
Glances is written in Python and uses libraries to grab information from
your system. It is based on an open architecture where developers can
add new plugins or exports modules.
Usage 👋
========
For the standalone mode, just run:
.. code-block:: console
$ glances
.. image:: https://github.com/nicolargo/glances/raw/refs/heads/master/docs/_static/glances-responsive-webdesign.png
For the Web server mode, run:
.. code-block:: console
$ glances -w
and enter the URL ``http://<ip>:61208`` in your favorite web browser.
In this mode, an HTTP/RESTful API is exposed; see the `RestfulApi`_ document for more details.
.. image:: https://github.com/nicolargo/glances/raw/refs/heads/master/docs/_static/screenshot-web.png
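Each plugin is then reachable over HTTP. Below is a minimal sketch, assuming the default port ``61208``, the version 4 endpoint prefix ``/api/4/`` and a per-plugin path such as ``/api/4/cpu`` (the exact paths are documented in `RestfulApi`_); it uses the generic ``requests`` library:
.. code-block:: python
>>> import requests
>>> # Assumed endpoint layout: /api/4/<plugin> on a local Glances web server
>>> requests.get("http://localhost:61208/api/4/cpu").json()["total"]
7.3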
For the client/server mode (remote monitoring through XML-RPC), run the following command on the server:
.. code-block:: console
$ glances -s
and this one on the client:
.. code-block:: console
$ glances -c <ip>
You can also detect and display all Glances servers available on your
network (or defined in the configuration file) in TUI:
.. code-block:: console
$ glances --browser
or WebUI:
.. code-block:: console
$ glances -w --browser
It is possible to display raw stats on stdout:
.. code-block:: console
$ glances --stdout cpu.user,mem.used,load
cpu.user: 30.7
mem.used: 3278204928
load: {'cpucore': 4, 'min1': 0.21, 'min5': 0.4, 'min15': 0.27}
cpu.user: 3.4
mem.used: 3275251712
load: {'cpucore': 4, 'min1': 0.19, 'min5': 0.39, 'min15': 0.27}
...
or in a CSV format thanks to the stdout-csv option:
.. code-block:: console
$ glances --stdout-csv now,cpu.user,mem.used,load
now,cpu.user,mem.used,load.cpucore,load.min1,load.min5,load.min15
2018-12-08 22:04:20 CEST,7.3,5948149760,4,1.04,0.99,1.04
2018-12-08 22:04:23 CEST,5.4,5949136896,4,1.04,0.99,1.04
...
or in a JSON format thanks to the stdout-json option (attributes are not supported in this mode, in order to produce a real JSON object as output):
.. code-block:: console
$ glances --stdout-json cpu,mem
cpu: {"total": 29.0, "user": 24.7, "nice": 0.0, "system": 3.8, "idle": 71.4, "iowait": 0.0, "irq": 0.0, "softirq": 0.0, "steal": 0.0, "guest": 0.0, "guest_nice": 0.0, "time_since_update": 1, "cpucore": 4, "ctx_switches": 0, "interrupts": 0, "soft_interrupts": 0, "syscalls": 0}
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
Last but not least, you can use the fetch mode to get a quick look at a machine:
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: https://github.com/nicolargo/glances/raw/refs/heads/master/docs/_static/screenshot-fetch.png
Use Glances as a Python library 📚
==================================
You can access the Glances API by importing the `glances.api` module and creating an
instance of the `GlancesAPI` class. This instance provides access to all Glances plugins
and their fields. For example, to access the CPU plugin and its total field, you can
use the following code:
.. code-block:: python
>>> from glances import api
>>> gl = api.GlancesAPI()
>>> gl.cpu
{'cpucore': 16,
'ctx_switches': 1214157811,
'guest': 0.0,
'idle': 91.4,
'interrupts': 991768733,
'iowait': 0.3,
'irq': 0.0,
'nice': 0.0,
'soft_interrupts': 423297898,
'steal': 0.0,
'syscalls': 0,
'system': 5.4,
'total': 7.3,
'user': 3.0}
>>> gl.cpu["total"]
7.3
>>> gl.mem["used"]
12498582144
>>> gl.auto_unit(gl.mem["used"])
11.6G
If the stats return a list of items (like network interfaces or processes), you can
access them by their name:
.. code-block:: python
>>> gl.network.keys()
['wlp0s20f3', 'veth33b370c', 'veth19c7711']
>>> gl.network["wlp0s20f3"]
{'alias': None,
'bytes_all': 362,
'bytes_all_gauge': 9242285709,
'bytes_all_rate_per_sec': 1032.0,
'bytes_recv': 210,
'bytes_recv_gauge': 7420522678,
'bytes_recv_rate_per_sec': 599.0,
'bytes_sent': 152,
'bytes_sent_gauge': 1821763031,
'bytes_sent_rate_per_sec': 433.0,
'interface_name': 'wlp0s20f3',
'key': 'interface_name',
'speed': 0,
'time_since_update': 0.3504955768585205}
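As a further illustration, here is a minimal sketch (assuming only the ``GlancesAPI`` attributes and fields shown above) that prints a short, fetch-like summary of the host:
.. code-block:: python
from glances import api
gl = api.GlancesAPI()
# CPU and memory, using the fields shown in the examples above
print(f"CPU total: {gl.cpu['total']} %")
print(f"Memory used: {gl.auto_unit(gl.mem['used'])}")
# Per-interface throughput, iterating over the interface names
for name in gl.network.keys():
    print(f"{name}: {gl.network[name]['bytes_all_rate_per_sec']} B/s")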
For a complete example of how to use Glances as a library, have a look at the `PythonApi`_.
Documentation 📜
================
For complete documentation have a look at the readthedocs_ website.
If you have any question (after RTFM! and the `FAQ`_), please post it on the official Reddit `forum`_ or in GitHub `Discussions`_.
Gateway to other services 🌐
============================
Glances can export stats to:
- ``CSV`` file
- ``JSON`` file
- ``InfluxDB`` server
- ``Cassandra`` server
- ``CouchDB`` server
- ``OpenTSDB`` server
- ``Prometheus`` server
- ``StatsD`` server
- ``ElasticSearch`` server
- ``PostgreSQL/TimeScale`` server
- ``RabbitMQ/ActiveMQ`` broker
- ``ZeroMQ`` broker
- ``Kafka`` broker
- ``Riemann`` server
- ``Graphite`` server
- ``RESTful`` endpoint
Installation 🚀
===============
There are several methods to test/install Glances on your system. Choose your weapon!
PyPI: Pip, the standard way
---------------------------
Glances is on ``PyPI``. By using PyPI, you will be using the latest stable version.
To install Glances, simply use the ``pip`` command line.
Warning: on modern Linux operating systems, you may have an externally-managed-environment
error message when you try to use ``pip``. In this case, go to the PipX section below.
.. code-block:: console
pip install --user glances
*Note*: Python headers are required to install `psutil`_, a Glances
dependency. For example, on Debian/Ubuntu **the simplest** is
``apt install python3-psutil``; alternatively, you first need to install
the *python-dev* package and gcc (*python-devel* on Fedora/CentOS/RHEL).
For Windows, just install psutil from the binary installation file.
By default, Glances is installed **without** the Web interface dependencies.
To install it, use the following command:
.. code-block:: console
pip install --user 'glances[web]'
For a full installation (with all features, see the features list below):
.. code-block:: console
pip install --user 'glances[all]'
Features list:
- all: install dependencies for all features
- action: install dependencies for action feature
- browser: install dependencies for the Glances central browser
- cloud: install dependencies for cloud plugin
- containers: install dependencies for container plugin
- export: install dependencies for all exports modules
- gpu: install dependencies for GPU plugin
- graph: install dependencies for graph export
- ip: install dependencies for IP public option
- raid: install dependencies for RAID plugin
- sensors: install dependencies for sensors plugin
- smart: install dependencies for smart plugin
- snmp: install dependencies for SNMP
- sparklines: install dependencies for sparklines option
- web: install dependencies for Webserver (WebUI) and Web API
- wifi: install dependencies for Wifi plugin
To upgrade Glances to the latest version:
.. code-block:: console
pip install --user --upgrade glances
The current develop branch is published to the test.pypi.org package index.
If you want to test the develop version (which could be unstable), enter:
.. code-block:: console
pip install --user -i https://test.pypi.org/simple/ Glances
PyPI: PipX, the alternative way
-------------------------------
Install PipX on your system (apt install pipx on Ubuntu).
Install Glances (with all features):
.. code-block:: console
pipx install 'glances[all]'
The glances script will be installed in the ~/.local/bin folder.
Shell tab completion 🔍
=======================
Glances 4.3.2 and higher includes shell tab autocompletion thanks to the --print-completion option.
For example, on a Linux operating system with bash shell:
.. code-block:: console
$ mkdir -p ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion
$ glances --print-completion bash > ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion/glances
$ source ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion/glances
Following shells are supported: bash, zsh and tcsh.
Requirements 🧩
===============
Glances is developed in Python. Python version 3.9 or higher
must be installed on your system.
*Note for Python 2 users*
Glances version 4 or higher does not support Python 2 (and Python 3 < 3.9).
Please use Glances version 3.4.x if you need Python 2 support.
Dependencies:
- ``psutil`` (better with latest version)
- ``defusedxml`` (in order to monkey patch xmlrpc)
- ``packaging`` (for the version comparison)
- ``windows-curses`` (Windows Curses implementation) [Windows-only]
- ``shtab`` (Shell autocompletion) [All but Windows]
- ``jinja2`` (for fetch mode and templating)
Extra dependencies:
- ``batinfo`` (for battery monitoring)
- ``bernhard`` (for the Riemann export module)
- ``cassandra-driver`` (for the Cassandra export module)
- ``chevron`` (for the action script feature)
- ``docker`` (for the Containers Docker monitoring support)
- ``elasticsearch`` (for the Elastic Search export module)
- ``FastAPI`` and ``Uvicorn`` (for Web server mode)
- ``graphitesender`` (For the Graphite export module)
- ``hddtemp`` (for HDD temperature monitoring support) [Linux-only]
- ``influxdb`` (for the InfluxDB version 1 export module)
- ``influxdb-client`` (for the InfluxDB version 2 export module)
- ``kafka-python`` (for the Kafka export module)
- ``netifaces2`` (for the IP plugin)
- ``nvidia-ml-py`` (for the GPU plugin)
- ``pycouchdb`` (for the CouchDB export module)
- ``pika`` (for the RabbitMQ/ActiveMQ export module)
- ``podman`` (for the Containers Podman monitoring support)
- ``potsdb`` (for the OpenTSDB export module)
- ``prometheus_client`` (for the Prometheus export module)
- ``psycopg[binary]`` (for the PostgreSQL/TimeScale export module)
- ``pygal`` (for the graph export module)
- ``pymdstat`` (for RAID support) [Linux-only]
- ``pymongo`` (for the MongoDB export module)
- ``pysnmp-lextudio`` (for SNMP support)
- ``pySMART.smartx`` (for HDD Smart support) [Linux-only]
- ``pyzmq`` (for the ZeroMQ export module)
- ``requests`` (for the Ports, Cloud plugins and RESTful export module)
- ``sparklines`` (for the Quick Plugin sparklines option)
- ``statsd`` (for the StatsD export module)
- ``wifi`` (for the wifi plugin) [Linux-only]
- ``zeroconf`` (for the autodiscover mode)
Project sponsorship 🙌
======================
You can help me to achieve my goals of improving this open-source project
or just say "thank you" by:
- sponsor me using one-time or monthly tier Github sponsors_ page
- send me some pieces of bitcoin: 185KN9FCix3svJYp7JQM7hRMfSKyeaJR4X
- buy me a gift on my wishlist_ page
Any and all contributions are greatly appreciated.
Authors and Contributors 🔥
===========================
Nicolas Hennion (@nicolargo) <nicolas@nicolargo.com>
.. image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
:target: https://twitter.com/nicolargo
License 📜
==========
Glances is distributed under the LGPL version 3 license. See ``COPYING`` for more details.
.. _psutil: https://github.com/giampaolo/psutil
.. _readthedocs: https://glances.readthedocs.io/
.. _forum: https://www.reddit.com/r/glances/
.. _sponsors: https://github.com/sponsors/nicolargo
.. _wishlist: https://www.amazon.fr/hz/wishlist/ls/BWAAQKWFR3FI?ref_=wl_share
.. _PythonApi: https://glances.readthedocs.io/en/develop/api/python.html
.. _RestfulApi: https://glances.readthedocs.io/en/develop/api/restful.html
.. _FAQ: https://github.com/nicolargo/glances/blob/develop/docs/faq.rst
.. _Discussions: https://github.com/nicolargo/glances/discussions

View File

@ -1,10 +1,18 @@
===============================
Glances - An Eye on your System
===============================
.. raw:: html
<div align="center">
.. image:: ./docs/_static/glances-responsive-webdesign.png
.. raw:: html
<h1>Glances</h1>
An Eye on your System
| |pypi| |test| |contributors| |quality|
| |starts| |docker| |pypistat| |ossrank|
| |sponsors| |twitter|
| |starts| |docker| |pypistat| |sponsors|
| |reddit|
.. |pypi| image:: https://img.shields.io/pypi/v/glances.svg
:target: https://pypi.python.org/pypi/Glances
@ -21,10 +29,6 @@ Glances - An Eye on your System
:target: https://pepy.tech/project/glances
:alt: Pypi downloads
.. |ossrank| image:: https://shields.io/endpoint?url=https://ossrank.com/shield/3689
:target: https://ossrank.com/p/3689
:alt: OSSRank
.. |test| image:: https://github.com/nicolargo/glances/actions/workflows/ci.yml/badge.svg?branch=develop
:target: https://github.com/nicolargo/glances/actions
:alt: Linux tests (GitHub Actions)
@ -41,12 +45,20 @@ Glances - An Eye on your System
:target: https://github.com/sponsors/nicolargo
:alt: Sponsors
.. |twitter| image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
.. |twitter| image:: https://img.shields.io/badge/X-000000?style=for-the-badge&logo=x&logoColor=white
:target: https://twitter.com/nicolargo
:alt: @nicolargo
Summary
=======
.. |reddit| image:: https://img.shields.io/badge/Reddit-FF4500?style=for-the-badge&logo=reddit&logoColor=white
:target: https://www.reddit.com/r/glances/
:alt: @reddit
.. raw:: html
</div>
Summary 🌟
==========
**Glances** is an open-source system cross-platform monitoring tool.
It allows real-time monitoring of various aspects of your system such as
@ -58,95 +70,190 @@ and can also be used for remote monitoring of systems via a web interface or com
line interface. It is easy to install and use and can be customized to show only
the information that you are interested in.
.. image:: https://raw.githubusercontent.com/nicolargo/glances/develop/docs/_static/glances-summary.png
In client/server mode, remote monitoring could be done via terminal,
Web interface or API (XML-RPC and RESTful).
Stats can also be exported to files or external time/value databases, CSV or direct
output to STDOUT.
.. image:: https://raw.githubusercontent.com/nicolargo/glances/develop/docs/_static/glances-responsive-webdesign.png
Glances is written in Python and uses libraries to grab information from
your system. It is based on an open architecture where developers can
add new plugins or exports modules.
Project sponsorship
===================
Usage 👋
========
You can help me to achieve my goals of improving this open-source project
or just say "thank you" by:
For the standalone mode, just run:
- sponsor me using one-time or monthly tier Github sponsors_ page
- send me some pieces of bitcoin: 185KN9FCix3svJYp7JQM7hRMfSKyeaJR4X
- buy me a gift on my wishlist_ page
.. code-block:: console
Any and all contributions are greatly appreciated.
$ glances
Requirements
============
.. image:: ./docs/_static/glances-summary.png
Glances is developed in Python. A minimal Python version 3.9 or higher
should be installed on your system.
For the Web server mode, run:
*Note for Python 2 users*
.. code-block:: console
Glances version 4 or higher do not support Python 2 (and Python 3 < 3.9).
Please uses Glances version 3.4.x if you need Python 2 support.
$ glances -w
Dependencies:
and enter the URL ``http://<ip>:61208`` in your favorite web browser.
- ``psutil`` (better with latest version)
- ``defusedxml`` (in order to monkey patch xmlrpc)
- ``packaging`` (for the version comparison)
- ``windows-curses`` (Windows Curses implementation) [Windows-only]
- ``shtab`` (Shell autocompletion) [All but Windows]
In this mode, an HTTP/RESTful API is exposed; see the `RestfulApi`_ document for more details.
Optional dependencies:
.. image:: ./docs/_static/screenshot-web.png
- ``batinfo`` (for battery monitoring)
- ``bernhard`` (for the Riemann export module)
- ``cassandra-driver`` (for the Cassandra export module)
- ``chevron`` (for the action script feature)
- ``docker`` (for the Containers Docker monitoring support)
- ``elasticsearch`` (for the Elastic Search export module)
- ``FastAPI`` and ``Uvicorn`` (for Web server mode)
- ``graphitesender`` (For the Graphite export module)
- ``hddtemp`` (for HDD temperature monitoring support) [Linux-only]
- ``influxdb`` (for the InfluxDB version 1 export module)
- ``influxdb-client`` (for the InfluxDB version 2 export module)
- ``jinja2`` (for templating, used under the hood by FastAPI)
- ``kafka-python`` (for the Kafka export module)
- ``netifaces2`` (for the IP plugin)
- ``nvidia-ml-py`` (for the GPU plugin)
- ``pycouchdb`` (for the CouchDB export module)
- ``pika`` (for the RabbitMQ/ActiveMQ export module)
- ``podman`` (for the Containers Podman monitoring support)
- ``potsdb`` (for the OpenTSDB export module)
- ``prometheus_client`` (for the Prometheus export module)
- ``psycopg[binary]`` (for the PostgreSQL/TimeScale export module)
- ``pygal`` (for the graph export module)
- ``pymdstat`` (for RAID support) [Linux-only]
- ``pymongo`` (for the MongoDB export module)
- ``pysnmp-lextudio`` (for SNMP support)
- ``pySMART.smartx`` (for HDD Smart support) [Linux-only]
- ``pyzmq`` (for the ZeroMQ export module)
- ``requests`` (for the Ports, Cloud plugins and RESTful export module)
- ``sparklines`` (for the Quick Plugin sparklines option)
- ``statsd`` (for the StatsD export module)
- ``wifi`` (for the wifi plugin) [Linux-only]
- ``zeroconf`` (for the autodiscover mode)
For the client/server mode (remote monitoring through XML-RPC), run the following command on the server:
Installation
============
.. code-block:: console
$ glances -s
and this one on the client:
.. code-block:: console
$ glances -c <ip>
You can also detect and display all Glances servers available on your
network (or defined in the configuration file) in TUI:
.. code-block:: console
$ glances --browser
or WebUI:
.. code-block:: console
$ glances -w --browser
It is possible to display raw stats on stdout:
.. code-block:: console
$ glances --stdout cpu.user,mem.used,load
cpu.user: 30.7
mem.used: 3278204928
load: {'cpucore': 4, 'min1': 0.21, 'min5': 0.4, 'min15': 0.27}
cpu.user: 3.4
mem.used: 3275251712
load: {'cpucore': 4, 'min1': 0.19, 'min5': 0.39, 'min15': 0.27}
...
or in a CSV format thanks to the stdout-csv option:
.. code-block:: console
$ glances --stdout-csv now,cpu.user,mem.used,load
now,cpu.user,mem.used,load.cpucore,load.min1,load.min5,load.min15
2018-12-08 22:04:20 CEST,7.3,5948149760,4,1.04,0.99,1.04
2018-12-08 22:04:23 CEST,5.4,5949136896,4,1.04,0.99,1.04
...
or in a JSON format thanks to the stdout-json option (individual attributes are not supported in this mode, so that the output is a valid JSON object):
.. code-block:: console
$ glances --stdout-json cpu,mem
cpu: {"total": 29.0, "user": 24.7, "nice": 0.0, "system": 3.8, "idle": 71.4, "iowait": 0.0, "irq": 0.0, "softirq": 0.0, "steal": 0.0, "guest": 0.0, "guest_nice": 0.0, "time_since_update": 1, "cpucore": 4, "ctx_switches": 0, "interrupts": 0, "soft_interrupts": 0, "syscalls": 0}
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
Last but not least, you can use the fetch mode to get a quick look at a machine:
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: ./docs/_static/screenshot-fetch.png
Use Glances as a Python library 📚
==================================
You can access the Glances API by importing the `glances.api` module and creating an
instance of the `GlancesAPI` class. This instance provides access to all Glances plugins
and their fields. For example, to access the CPU plugin and its total field, you can
use the following code:
.. code-block:: python
>>> from glances import api
>>> gl = api.GlancesAPI()
>>> gl.cpu
{'cpucore': 16,
'ctx_switches': 1214157811,
'guest': 0.0,
'idle': 91.4,
'interrupts': 991768733,
'iowait': 0.3,
'irq': 0.0,
'nice': 0.0,
'soft_interrupts': 423297898,
'steal': 0.0,
'syscalls': 0,
'system': 5.4,
'total': 7.3,
'user': 3.0}
>>> gl.cpu.get("total")
7.3
>>> gl.mem.get("used")
12498582144
>>> gl.auto_unit(gl.mem.get("used"))
11.6G
If the stats return a list of items (like network interfaces or processes), you can
access them by their name:
.. code-block:: python
>>> gl.network.keys()
['wlp0s20f3', 'veth33b370c', 'veth19c7711']
>>> gl.network.get("wlp0s20f3")
{'alias': None,
'bytes_all': 362,
'bytes_all_gauge': 9242285709,
'bytes_all_rate_per_sec': 1032.0,
'bytes_recv': 210,
'bytes_recv_gauge': 7420522678,
'bytes_recv_rate_per_sec': 599.0,
'bytes_sent': 152,
'bytes_sent_gauge': 1821763031,
'bytes_sent_rate_per_sec': 433.0,
'interface_name': 'wlp0s20f3',
'key': 'interface_name',
'speed': 0,
'time_since_update': 0.3504955768585205}
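As a minimal sketch built only from the calls shown above (the interface and field names are the ones returned on this example machine and may differ on yours), you could print the current bandwidth of every detected interface:

.. code-block:: python

    from glances import api

    gl = api.GlancesAPI()

    # Iterate over the detected network interfaces and print their rates.
    # Note: the rate fields may be None on the very first refresh.
    for name in gl.network.keys():
        stats = gl.network.get(name)
        down = gl.auto_unit(stats["bytes_recv_rate_per_sec"])
        up = gl.auto_unit(stats["bytes_sent_rate_per_sec"])
        print(f"{name}: down {down}b/s - up {up}b/s")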
For a complete example of how to use Glances as a library, have a look at the `PythonApi`_.
Documentation 📜
================
For complete documentation, have a look at the readthedocs_ website.
If you have any questions (after RTFM! and the `FAQ`_), please post them on the official Reddit `forum`_ or in GitHub `Discussions`_.
Gateway to other services 🌐
============================
Glances can export stats to:
- files: ``CSV`` and ``JSON``
- databases: ``InfluxDB``, ``ElasticSearch``, ``PostgreSQL/TimeScale``, ``Cassandra``, ``CouchDB``, ``OpenTSDB``, ``Prometheus``, ``StatsD``, ``Riemann`` and ``Graphite``
- brokers: ``RabbitMQ/ActiveMQ``, ``ZeroMQ`` and ``Kafka``
- others: ``RESTful`` endpoint
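For example, exporting the stats to a CSV file while the TUI is running looks like this (a sketch; check ``glances --help`` for the exact export options available in your version):

.. code-block:: console

    $ glances --export csv --export-csv-file /tmp/glances.csv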
Installation 🚀
===============
There are several methods to test/install Glances on your system. Choose your weapon!
PyPI: Pip, the standard way
---------------------------
Glances is on ``PyPI``. By using PyPI, you will be using the latest
stable version.
Glances is on ``PyPI``. By using PyPI, you will be using the latest stable version.
To install Glances, simply use the ``pip`` command line.
@ -221,6 +328,15 @@ Install Glances (with all features):
The glances script will be installed in the ~/.local/bin folder.
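As a minimal sketch of a user-level install (adapt it if you prefer ``pipx`` or a virtual environment):

.. code-block:: console

    $ pip install --user glances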
Brew: The missing package manager
---------------------------------
For Linux and Mac OS, it is also possible to install Glances with `Brew`_:
.. code-block:: console
brew install glances
Docker: the cloudy way
----------------------
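A typical run of the published image looks like this (a sketch, not the full documentation; adjust the tag and the mounted sockets to your setup and see the `Docker`_ page for the supported options):

.. code-block:: console

    $ docker run --rm -e TZ="${TZ}" -v /var/run/docker.sock:/var/run/docker.sock:ro \
        --pid host --network host -it nicolargo/glances:latest-full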
@ -406,8 +522,8 @@ Ansible
A Glances ``Ansible`` role is available: https://galaxy.ansible.com/zaxos/glances-ansible-role/
Shell tab completion
====================
Shell tab completion 🔍
=======================
Glances 4.3.2 and higher includes shell tab autocompletion thanks to the --print-completion option.
@ -421,109 +537,62 @@ For example, on a Linux operating system with bash shell:
Following shells are supported: bash, zsh and tcsh.
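As a sketch for bash (the completion directory depends on your distribution):

.. code-block:: console

    $ glances --print-completion bash | sudo tee /etc/bash_completion.d/glances > /dev/null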
Requirements 🧩
===============

Glances is developed in Python. A minimal Python version 3.9 or higher
should be installed on your system.

*Note for Python 2 users*

Glances version 4 or higher does not support Python 2 (and Python 3 < 3.9).
Please use Glances version 3.4.x if you need Python 2 support.

Dependencies:

- ``psutil`` (better with latest version)
- ``defusedxml`` (in order to monkey patch xmlrpc)
- ``packaging`` (for the version comparison)
- ``windows-curses`` (Windows Curses implementation) [Windows-only]
- ``shtab`` (Shell autocompletion) [All but Windows]
- ``jinja2`` (for fetch mode and templating)

Extra dependencies:

- ``batinfo`` (for battery monitoring)
- ``bernhard`` (for the Riemann export module)
- ``cassandra-driver`` (for the Cassandra export module)
- ``chevron`` (for the action script feature)
- ``docker`` (for the Containers Docker monitoring support)
- ``elasticsearch`` (for the Elastic Search export module)
- ``FastAPI`` and ``Uvicorn`` (for Web server mode)
- ``graphitesender`` (for the Graphite export module)
- ``hddtemp`` (for HDD temperature monitoring support) [Linux-only]
- ``influxdb`` (for the InfluxDB version 1 export module)
- ``influxdb-client`` (for the InfluxDB version 2 export module)
- ``kafka-python`` (for the Kafka export module)
- ``netifaces2`` (for the IP plugin)
- ``nvidia-ml-py`` (for the GPU plugin)
- ``pycouchdb`` (for the CouchDB export module)
- ``pika`` (for the RabbitMQ/ActiveMQ export module)
- ``podman`` (for the Containers Podman monitoring support)
- ``potsdb`` (for the OpenTSDB export module)
- ``prometheus_client`` (for the Prometheus export module)
- ``psycopg[binary]`` (for the PostgreSQL/TimeScale export module)
- ``pygal`` (for the graph export module)
- ``pymdstat`` (for RAID support) [Linux-only]
- ``pymongo`` (for the MongoDB export module)
- ``pysnmp-lextudio`` (for SNMP support)
- ``pySMART.smartx`` (for HDD Smart support) [Linux-only]
- ``pyzmq`` (for the ZeroMQ export module)
- ``requests`` (for the Ports, Cloud plugins and RESTful export module)
- ``sparklines`` (for the Quick Plugin sparklines option)
- ``statsd`` (for the StatsD export module)
- ``wifi`` (for the wifi plugin) [Linux-only]
- ``zeroconf`` (for the autodiscover mode)

Usage
=====

For the standalone mode, just run:

.. code-block:: console

    $ glances

For the Web server mode, run:

.. code-block:: console

    $ glances -w

and enter the URL ``http://<ip>:61208`` in your favorite web browser.
For the client/server mode, run:
.. code-block:: console
$ glances -s
on the server side and run:
.. code-block:: console
$ glances -c <ip>
on the client one.
You can also detect and display all Glances servers available on your
network or defined in the configuration file:
.. code-block:: console
$ glances --browser
You can also display raw stats on stdout:
.. code-block:: console
$ glances --stdout cpu.user,mem.used,load
cpu.user: 30.7
mem.used: 3278204928
load: {'cpucore': 4, 'min1': 0.21, 'min5': 0.4, 'min15': 0.27}
cpu.user: 3.4
mem.used: 3275251712
load: {'cpucore': 4, 'min1': 0.19, 'min5': 0.39, 'min15': 0.27}
...
or in a CSV format thanks to the stdout-csv option:
.. code-block:: console
$ glances --stdout-csv now,cpu.user,mem.used,load
now,cpu.user,mem.used,load.cpucore,load.min1,load.min5,load.min15
2018-12-08 22:04:20 CEST,7.3,5948149760,4,1.04,0.99,1.04
2018-12-08 22:04:23 CEST,5.4,5949136896,4,1.04,0.99,1.04
...
or in a JSON format thanks to the stdout-json option (attribute not supported in this mode in order to have a real JSON object in output):
.. code-block:: console
$ glances --stdout-json cpu,mem
cpu: {"total": 29.0, "user": 24.7, "nice": 0.0, "system": 3.8, "idle": 71.4, "iowait": 0.0, "irq": 0.0, "softirq": 0.0, "steal": 0.0, "guest": 0.0, "guest_nice": 0.0, "time_since_update": 1, "cpucore": 4, "ctx_switches": 0, "interrupts": 0, "soft_interrupts": 0, "syscalls": 0}
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
and RTFM, always.
Documentation
=============
For complete documentation have a look at the readthedocs_ website.
If you have any question (after RTFM!), please post it on the official Q&A `forum`_.
Gateway to other services
=========================
Glances can export stats to:
- ``CSV`` file
- ``JSON`` file
- ``InfluxDB`` server
- ``Cassandra`` server
- ``CouchDB`` server
- ``OpenTSDB`` server
- ``Prometheus`` server
- ``StatsD`` server
- ``ElasticSearch`` server
- ``PostgreSQL/TimeScale`` server
- ``RabbitMQ/ActiveMQ`` broker
- ``ZeroMQ`` broker
- ``Kafka`` broker
- ``Riemann`` server
- ``Graphite`` server
- ``RESTful`` endpoint
How to contribute ?
===================
How to contribute ? 🤝
======================
If you want to contribute to the Glances project, read this `wiki`_ page.
@ -532,21 +601,33 @@ There is also a chat dedicated to the Glances developers:
.. image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/nicolargo/glances?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
Author
======
Project sponsorship 🙌
======================
You can help me to achieve my goals of improving this open-source project
or just say "thank you" by:
- sponsor me using one-time or monthly tier Github sponsors_ page
- send me some pieces of bitcoin: 185KN9FCix3svJYp7JQM7hRMfSKyeaJR4X
- buy me a gift on my wishlist_ page
Any and all contributions are greatly appreciated.
Authors and Contributors 🔥
===========================
Nicolas Hennion (@nicolargo) <nicolas@nicolargo.com>
.. image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
:target: https://twitter.com/nicolargo
License
=======
License 📜
==========
Glances is distributed under the LGPL version 3 license. See ``COPYING`` for more details.
More stars !
============
More stars ! 🌟
===============
Please give us a star on `GitHub`_ if you like this project.
@ -555,13 +636,18 @@ Please give us a star on `GitHub`_ if you like this project.
:alt: Star history
.. _psutil: https://github.com/giampaolo/psutil
.. _Brew: https://formulae.brew.sh/formula/glances
.. _Python: https://www.python.org/getit/
.. _Termux: https://play.google.com/store/apps/details?id=com.termux
.. _readthedocs: https://glances.readthedocs.io/
.. _forum: https://groups.google.com/forum/?hl=en#!forum/glances-users
.. _forum: https://www.reddit.com/r/glances/
.. _wiki: https://github.com/nicolargo/glances/wiki/How-to-contribute-to-Glances-%3F
.. _package: https://repology.org/project/glances/versions
.. _sponsors: https://github.com/sponsors/nicolargo
.. _wishlist: https://www.amazon.fr/hz/wishlist/ls/BWAAQKWFR3FI?ref_=wl_share
.. _Docker: https://github.com/nicolargo/glances/blob/develop/docs/docker.rst
.. _Docker: https://github.com/nicolargo/glances/blob/master/docs/docker.rst
.. _GitHub: https://github.com/nicolargo/glances
.. _PythonApi: https://glances.readthedocs.io/en/develop/api/python.html
.. _RestfulApi: https://glances.readthedocs.io/en/develop/api/restful.html
.. _FAQ: https://github.com/nicolargo/glances/blob/develop/docs/faq.rst
.. _Discussions: https://github.com/nicolargo/glances/discussions

275
all-requirements.txt Normal file
View File

@ -0,0 +1,275 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-workspace --no-hashes --all-extras --no-group dev --output-file all-requirements.txt
annotated-doc==0.0.3
# via fastapi
annotated-types==0.7.0
# via pydantic
anyio==4.11.0
# via
# elasticsearch
# starlette
batinfo==0.4.2 ; sys_platform == 'linux'
# via glances
bernhard==0.2.6
# via glances
cassandra-driver==3.29.3
# via glances
certifi==2025.10.5
# via
# elastic-transport
# influxdb-client
# influxdb3-python
# requests
cffi==2.0.0 ; implementation_name == 'pypy' or platform_python_implementation != 'PyPy'
# via
# cryptography
# pyzmq
charset-normalizer==3.4.4
# via requests
chevron==0.14.0
# via glances
click==8.1.8
# via
# geomet
# uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
# via
# click
# pytest
coverage==7.10.7 ; python_full_version < '3.10'
# via pytest-cov
coverage==7.11.3 ; python_full_version >= '3.10'
# via pytest-cov
cryptography==46.0.3
# via pysnmpcrypto
defusedxml==0.7.1
# via glances
dnspython==2.7.0 ; python_full_version < '3.10'
# via pymongo
dnspython==2.8.0 ; python_full_version >= '3.10'
# via pymongo
docker==7.1.0
# via glances
elastic-transport==9.1.0 ; python_full_version < '3.10'
# via elasticsearch
elastic-transport==9.2.0 ; python_full_version >= '3.10'
# via elasticsearch
elasticsearch==9.1.2 ; python_full_version < '3.10'
# via glances
elasticsearch==9.2.0 ; python_full_version >= '3.10'
# via glances
exceptiongroup==1.2.2 ; python_full_version < '3.11'
# via
# anyio
# pytest
fastapi==0.121.1
# via glances
geomet==1.1.0
# via cassandra-driver
graphitesender==0.11.2
# via glances
h11==0.16.0
# via uvicorn
ibm-cloud-sdk-core==3.24.2
# via ibmcloudant
ibmcloudant==0.11.0 ; python_full_version < '3.10'
# via glances
ibmcloudant==0.11.1 ; python_full_version >= '3.10'
# via glances
idna==3.11
# via
# anyio
# requests
ifaddr==0.2.0
# via zeroconf
importlib-metadata==7.1.0 ; python_full_version < '3.10'
# via pygal
importlib-metadata==8.7.0 ; python_full_version >= '3.10'
# via pygal
influxdb==5.3.2
# via glances
influxdb-client==1.49.0
# via glances
influxdb3-python==0.16.0
# via glances
iniconfig==2.1.0 ; python_full_version < '3.10'
# via pytest
iniconfig==2.3.0 ; python_full_version >= '3.10'
# via pytest
jinja2==3.1.6
# via
# glances
# pysmi-lextudio
kafka-python==2.2.15
# via glances
markupsafe==3.0.3
# via jinja2
msgpack==1.1.2
# via influxdb
netifaces2==0.0.22
# via glances
nvidia-ml-py==13.580.82
# via glances
packaging==25.0
# via
# glances
# pytest
paho-mqtt==2.1.0
# via glances
pbkdf2==1.3
# via wifi
pika==1.3.2
# via glances
pluggy==1.6.0
# via pytest
ply==3.11
# via pysmi-lextudio
podman==5.6.0
# via glances
potsdb==1.0.3
# via glances
prometheus-client==0.23.1
# via glances
protobuf==4.25.8 ; python_full_version < '3.10'
# via bernhard
protobuf==6.33.0 ; python_full_version >= '3.10'
# via bernhard
psutil==7.1.3
# via glances
psycopg==3.2.12
# via glances
psycopg-binary==3.2.12 ; implementation_name != 'pypy'
# via psycopg
pyarrow==21.0.0 ; python_full_version < '3.10'
# via influxdb3-python
pyarrow==22.0.0 ; python_full_version >= '3.10'
# via influxdb3-python
pyasn1==0.6.0
# via pysnmp-lextudio
pycparser==2.23 ; (implementation_name != 'PyPy' and platform_python_implementation != 'PyPy') or (implementation_name == 'pypy' and platform_python_implementation == 'PyPy')
# via cffi
pydantic==2.12.4
# via fastapi
pydantic-core==2.41.5
# via pydantic
pygal==3.0.5
# via glances
pygments==2.19.2
# via pytest
pyjwt==2.10.1
# via
# ibm-cloud-sdk-core
# ibmcloudant
pymdstat==0.4.3
# via glances
pymongo==4.15.3
# via glances
pysmart-smartx==0.3.10
# via glances
pysmi-lextudio==1.4.3
# via pysnmp-lextudio
pysnmp-lextudio==6.3.0
# via glances
pysnmpcrypto==0.0.4
# via pysnmp-lextudio
pytest==8.4.2 ; python_full_version < '3.10'
# via pytest-cov
pytest==9.0.0 ; python_full_version >= '3.10'
# via pytest-cov
pytest-cov==4.1.0
# via pysnmp-lextudio
python-dateutil==2.9.0.post0
# via
# elasticsearch
# glances
# ibm-cloud-sdk-core
# ibmcloudant
# influxdb
# influxdb-client
# influxdb3-python
pytz==2025.2
# via influxdb
pywin32==311 ; sys_platform == 'win32'
# via docker
pyzmq==27.1.0
# via glances
reactivex==4.1.0
# via
# influxdb-client
# influxdb3-python
requests==2.32.5
# via
# docker
# glances
# ibm-cloud-sdk-core
# ibmcloudant
# influxdb
# podman
# pysmi-lextudio
setuptools==80.9.0
# via
# influxdb-client
# wifi
shtab==1.7.2 ; sys_platform != 'win32'
# via glances
six==1.17.0
# via
# glances
# influxdb
# python-dateutil
sniffio==1.3.1
# via
# anyio
# elastic-transport
# elasticsearch
sparklines==0.7.0
# via glances
starlette==0.49.3
# via fastapi
statsd==4.0.1
# via glances
termcolor==3.1.0 ; python_full_version < '3.10'
# via sparklines
termcolor==3.2.0 ; python_full_version >= '3.10'
# via sparklines
tomli==2.0.2 ; python_full_version <= '3.11'
# via
# coverage
# podman
# pytest
typing-extensions==4.15.0
# via
# anyio
# cryptography
# elasticsearch
# fastapi
# psycopg
# pydantic
# pydantic-core
# reactivex
# starlette
# typing-inspection
# uvicorn
typing-inspection==0.4.2
# via pydantic
tzdata==2025.2 ; sys_platform == 'win32'
# via psycopg
urllib3==2.5.0
# via
# docker
# elastic-transport
# ibm-cloud-sdk-core
# influxdb-client
# influxdb3-python
# podman
# requests
uvicorn==0.38.0
# via glances
wifi==0.3.8
# via glances
windows-curses==2.4.1 ; sys_platform == 'win32'
# via glances
zeroconf==0.148.0
# via glances
zipp==3.23.0
# via importlib-metadata

View File

@ -0,0 +1,9 @@
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} {{ '%0.2f'| format(gl.load['min5']) }} {{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} {{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} {{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}{% for net in gl.network.keys() %}📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s ↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}

View File

@ -0,0 +1,23 @@
  _____ _
 / ____| |
| |  __| | __ _ _ __   ___ ___  ___
| | |_ | |/ _` | '_ \ / __/ _ \/ __|
| |__| | | (_| | | | | (_|  __/\__
 \_____|_|\__,_|_| |_|\___\___||___/
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} {{ '%0.2f'| format(gl.load['min5']) }} {{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} {{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} {{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}{% for net in gl.network.keys() %}📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s ↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} ⚡ {{ process['cpu_percent'] }}% CPU{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }} ⚡ {{ process['cpu_percent'] }}% CPU
{% endfor %}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -49,7 +49,7 @@ history_size=1200
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restfull_api.py is hosted
# Default is folder where glances_restful_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -64,6 +64,10 @@ history_size=1200
# Comma separated list of HTTP request headers that should be supported for cross-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile_password=kfp
#ssl_keyfile=./glances.local+3-key.pem
#ssl_certfile=./glances.local+3.pem
##############################################################################
# Plugins
@ -127,7 +131,7 @@ user_careful=50
user_warning=70
user_critical=90
user_log=False
#user_critical_action=echo {{user}} {{value}} {{max}} > /tmp/cpu.alert
#user_critical_action=echo "{{time}} User CPU {{user}} higher than {{critical}}" > /tmp/cpu.alert
#
system_careful=50
system_warning=70
@ -181,12 +185,14 @@ temperature_critical=80
[mem]
disable=False
# Display available memory instead of used memory
#available=True
# Define RAM thresholds in %
# Default values if not defined: 50/70/90
careful=50
#careful_action_repeat=echo {{percent}} >> /tmp/memory.alert
warning=70
critical=90
#critical_action_repeat=echo "{{time}} {{percent}} higher than {{critical}}" >> /tmp/memory.alert
[memswap]
disable=False
@ -195,6 +201,7 @@ disable=False
careful=50
warning=70
critical=90
#warning_action=echo "{{time}} {{percent}} higher than {{warning}}" > /tmp/memory.alert
[load]
disable=False
@ -241,8 +248,9 @@ hide_zero=False
#wlan0_tx_warning=900000
#wlan0_tx_critical=1000000
#wlan0_tx_log=True
#wlan0_rx_critical_action=echo "{{time}} {{interface_name}} RX {{bytes_recv_rate_per_sec}}Bps" > /tmp/network.alert
# Alias for network interface name
#alias=wlp2s0:WIFI
#alias=wlp0s20f3:WIFI
[ip]
# Disable display of private IP address
@ -300,15 +308,32 @@ hide_zero=False
#show=sda.*
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set latency thresholds (latency in ms) for a given disk name (rx = read / tx = write)
# dm-0_rx_latency_careful=10
# dm-0_rx_latency_warning=20
# dm-0_rx_latency_critical=50
# dm-0_rx_latency_log=False
# dm-0_tx_latency_careful=10
# dm-0_tx_latency_warning=20
# dm-0_tx_latency_critical=50
# dm-0_tx_latency_log=False
# There are no default bitrate thresholds for disks (because it is not possible to know the disk speed)
# Set bitrate thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
#dm-0_rx_careful=4000000000
#dm-0_rx_warning=5000000000
#dm-0_rx_critical=6000000000
#dm-0_rx_log=True
#dm-0_rx_log=False
#dm-0_tx_careful=700000000
#dm-0_tx_warning=900000000
#dm-0_tx_critical=1000000000
#dm-0_tx_log=True
#dm-0_tx_log=False
[fs]
disable=False
@ -318,15 +343,19 @@ hide=/boot.*,.*/snap.*
#show=/,/srv
# Define filesystem space thresholds in %
# Default values if not defined: 50/70/90
# It is also possible to define per mount point value
# Example: /_careful=40
careful=50
warning=70
critical=90
# It is also possible to define per mount point value
# Example: /_careful=40
#/_careful=1
#/_warning=5
#/_critical=10
#/_critical_action=echo "{{time}} {{mnt_point}} filesystem space {{percent}}% higher than {{critical}}%" > /tmp/fs.alert
# Allow additional file system types (comma-separated FS type)
#allow=shm
# Alias for root file system
#alias=/:Root,/zsfpool:ZSF
alias=/:Root,/zfspool:ZFS
[irq]
# Documentation: https://glances.readthedocs.io/en/latest/aoa/irq.html
@ -384,8 +413,8 @@ port=7634
# Documentation: https://glances.readthedocs.io/en/latest/aoa/sensors.html
disable=False
# Set the refresh multiplicator for the sensors
# By default refresh every Glances refresh * 3 (increase to reduce CPU consumption)
#refresh=3
# By default refresh every Glances refresh * 5 (increase to reduce CPU consumption)
#refresh=5
# Hide some sensors (comma separated list of regexp)
hide=unknown.*
# Show only the following sensors (comma separated list of regexp)
@ -393,10 +422,11 @@ hide=unknown.*
# Sensors core thresholds (in Celsius...)
# By default values are grabbed from the system
# Overwrite thresholds for a specific sensor
#temperature_core_Ambient_careful=45
#temperature_core_Ambient_warning=65
#temperature_core_Ambient_critical=80
#temperature_core_Ambient_log=False
# temperature_core_Ambient_careful=40
# temperature_core_Ambient_warning=60
# temperature_core_Ambient_critical=85
# temperature_core_Ambient_log=True
# temperature_core_Ambient_critical_action=echo "{{time}} {{label}} temperature {{value}}{{unit}} higher than {{critical}}{{unit}}" > /tmp/temperature.alert
# Overwrite thresholds for a specific type of sensor
#temperature_core_careful=45
#temperature_core_warning=65
@ -431,6 +461,8 @@ disable=False
# Stats that can be disabled: cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Stats that can not be disable: pid,name
#disable_stats=cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Disable display of virtual memory
#disable_virtual_memory=True
# Define CPU/MEM (per process) thresholds in %
# Default values if not defined: 50/70/90
cpu_careful=50
@ -459,6 +491,8 @@ status_critical=Z,D
# Define the list of processes to export using:
# a comma-separated list of Glances filter
#export=.*firefox.*,pid:1234
# Define a list of process to focus on (comma-separated list of Glances filter)
#focus=.*firefox.*,.*python.*
[ports]
disable=False
@ -526,8 +560,8 @@ disable=False
# Define the maximum docker size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,command
; disable_stats=diskio,networkio
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Thresholds for CPU and MEM (in %)
; cpu_careful=50
; cpu_warning=70
@ -605,6 +639,11 @@ disable=False
# Exports
##############################################################################
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
#exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
[graph]
# Configuration for the --export graph option
# Set the path where the graph (.svg files) will be created

View File

@ -1,20 +1,485 @@
codespell
coverage
fonttools>=4.43.0 # not directly required, pinned by Snyk to avoid a vulnerability
gprof2dot
matplotlib
memory-profiler
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
pre-commit
py-spy
pyright
pytest
requirements-parser
rstcheck
ruff
selenium
semgrep; platform_system == 'Linux'
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
webdriver-manager
h11>=0.16.0 # not directly required, pinned by Snyk to avoid a vulnerability
# This file was autogenerated by uv via the following command:
# uv export --no-hashes --only-dev --output-file dev-requirements.txt
alabaster==0.7.16 ; python_full_version < '3.10'
# via sphinx
alabaster==1.0.0 ; python_full_version >= '3.10'
# via sphinx
annotated-types==0.7.0
# via pydantic
anyio==4.11.0 ; python_full_version >= '3.10'
# via
# httpx
# mcp
# sse-starlette
# starlette
attrs==25.4.0
# via
# glom
# jsonschema
# outcome
# referencing
# reuse
# semgrep
# trio
babel==2.17.0
# via sphinx
binaryornot==0.4.4 ; python_full_version < '3.10'
# via reuse
boltons==21.0.0
# via
# face
# glom
# semgrep
boolean-py==5.0
# via
# license-expression
# reuse
bracex==2.6
# via wcmatch
certifi==2025.10.5
# via
# httpcore
# httpx
# requests
# selenium
cffi==2.0.0 ; implementation_name != 'pypy' and os_name == 'nt'
# via trio
cfgv==3.4.0
# via pre-commit
chardet==5.2.0 ; python_full_version < '3.10'
# via binaryornot
charset-normalizer==3.4.4
# via
# python-debian
# requests
click==8.1.8
# via
# click-option-group
# reuse
# semgrep
# typer
# uvicorn
click-option-group==0.5.9
# via semgrep
codespell==2.4.1
colorama==0.4.6
# via
# click
# pytest
# semgrep
# sphinx
contourpy==1.3.0 ; python_full_version < '3.10'
# via matplotlib
contourpy==1.3.2 ; python_full_version == '3.10.*'
# via matplotlib
contourpy==1.3.3 ; python_full_version >= '3.11'
# via matplotlib
cycler==0.12.1
# via matplotlib
defusedxml==0.7.1 ; python_full_version < '3.10'
# via semgrep
deprecated==1.3.1 ; python_full_version < '3.10'
# via
# opentelemetry-api
# opentelemetry-exporter-otlp-proto-http
distlib==0.4.0
# via virtualenv
docutils==0.21.2
# via
# rstcheck-core
# sphinx
# sphinx-rtd-theme
exceptiongroup==1.2.2
# via
# anyio
# pytest
# semgrep
# trio
# trio-websocket
face==24.0.0
# via glom
filelock==3.19.1 ; python_full_version < '3.10'
# via virtualenv
filelock==3.20.0 ; python_full_version >= '3.10'
# via virtualenv
fonttools==4.60.1
# via matplotlib
glom==22.1.0
# via semgrep
googleapis-common-protos==1.72.0
# via opentelemetry-exporter-otlp-proto-http
gprof2dot==2025.4.14
h11==0.16.0
# via
# httpcore
# uvicorn
# wsproto
httpcore==1.0.9 ; python_full_version >= '3.10'
# via httpx
httpx==0.28.1 ; python_full_version >= '3.10'
# via mcp
httpx-sse==0.4.3 ; python_full_version >= '3.10'
# via mcp
identify==2.6.15
# via pre-commit
idna==3.11
# via
# anyio
# httpx
# requests
# trio
imagesize==1.4.1
# via sphinx
importlib-metadata==7.1.0 ; python_full_version < '3.10'
# via
# opentelemetry-api
# sphinx
importlib-metadata==8.7.0 ; python_full_version >= '3.10'
# via opentelemetry-api
importlib-resources==6.5.2 ; python_full_version < '3.10'
# via matplotlib
iniconfig==2.1.0 ; python_full_version < '3.10'
# via pytest
iniconfig==2.3.0 ; python_full_version >= '3.10'
# via pytest
jinja2==3.1.6
# via
# reuse
# sphinx
jsonschema==4.25.1
# via
# mcp
# semgrep
jsonschema-specifications==2025.9.1
# via jsonschema
kiwisolver==1.4.7 ; python_full_version < '3.10'
# via matplotlib
kiwisolver==1.4.9 ; python_full_version >= '3.10'
# via matplotlib
license-expression==30.4.4
# via reuse
markdown-it-py==3.0.0 ; python_full_version < '3.10'
# via rich
markdown-it-py==4.0.0 ; python_full_version >= '3.10'
# via rich
markupsafe==3.0.3
# via jinja2
matplotlib==3.9.4 ; python_full_version < '3.10'
matplotlib==3.10.7 ; python_full_version >= '3.10'
mcp==1.16.0 ; python_full_version >= '3.10'
# via semgrep
mdurl==0.1.2
# via markdown-it-py
memory-profiler==0.61.0
nodeenv==1.9.1
# via
# pre-commit
# pyright
numpy==2.0.2 ; python_full_version < '3.10'
# via
# contourpy
# matplotlib
numpy==2.2.6 ; python_full_version == '3.10.*'
# via
# contourpy
# matplotlib
numpy==2.3.4 ; python_full_version >= '3.11'
# via
# contourpy
# matplotlib
opentelemetry-api==1.25.0 ; python_full_version < '3.10'
# via
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# semgrep
opentelemetry-api==1.37.0 ; python_full_version >= '3.10'
# via
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# semgrep
opentelemetry-exporter-otlp-proto-common==1.25.0 ; python_full_version < '3.10'
# via opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-common==1.37.0 ; python_full_version >= '3.10'
# via opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-http==1.25.0 ; python_full_version < '3.10'
# via semgrep
opentelemetry-exporter-otlp-proto-http==1.37.0 ; python_full_version >= '3.10'
# via semgrep
opentelemetry-instrumentation==0.46b0 ; python_full_version < '3.10'
# via opentelemetry-instrumentation-requests
opentelemetry-instrumentation==0.58b0 ; python_full_version >= '3.10'
# via opentelemetry-instrumentation-requests
opentelemetry-instrumentation-requests==0.46b0 ; python_full_version < '3.10'
# via semgrep
opentelemetry-instrumentation-requests==0.58b0 ; python_full_version >= '3.10'
# via semgrep
opentelemetry-proto==1.25.0 ; python_full_version < '3.10'
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-http
opentelemetry-proto==1.37.0 ; python_full_version >= '3.10'
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-http
opentelemetry-sdk==1.25.0 ; python_full_version < '3.10'
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
opentelemetry-sdk==1.37.0 ; python_full_version >= '3.10'
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
opentelemetry-semantic-conventions==0.46b0 ; python_full_version < '3.10'
# via
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
opentelemetry-semantic-conventions==0.58b0 ; python_full_version >= '3.10'
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
opentelemetry-util-http==0.46b0 ; python_full_version < '3.10'
# via opentelemetry-instrumentation-requests
opentelemetry-util-http==0.58b0 ; python_full_version >= '3.10'
# via opentelemetry-instrumentation-requests
outcome==1.3.0.post0
# via
# trio
# trio-websocket
packaging==25.0
# via
# matplotlib
# opentelemetry-instrumentation
# pytest
# requirements-parser
# semgrep
# sphinx
# webdriver-manager
peewee==3.18.3
# via semgrep
pillow==11.3.0 ; python_full_version < '3.10'
# via matplotlib
pillow==12.0.0 ; python_full_version >= '3.10'
# via matplotlib
platformdirs==4.4.0 ; python_full_version < '3.10'
# via virtualenv
platformdirs==4.5.0 ; python_full_version >= '3.10'
# via virtualenv
pluggy==1.6.0
# via pytest
pre-commit==4.3.0 ; python_full_version < '3.10'
pre-commit==4.4.0 ; python_full_version >= '3.10'
# via
# googleapis-common-protos
# opentelemetry-proto
protobuf==6.33.0 ; python_full_version >= '3.10'
protobuf==4.25.8 ; python_full_version < '3.10'
# via
# googleapis-common-protos
# opentelemetry-proto
psutil==7.1.3
# via memory-profiler
py-spy==0.4.1
pycparser==2.23 ; implementation_name != 'PyPy' and implementation_name != 'pypy' and os_name == 'nt'
# via cffi
pydantic==2.12.4
# via
# mcp
# pydantic-settings
# rstcheck-core
pydantic-core==2.41.5
# via pydantic
pydantic-settings==2.11.0 ; python_full_version >= '3.10'
# via mcp
pygments==2.19.2
# via
# pytest
# rich
# sphinx
pyinstrument==5.1.1
pyparsing==3.2.5
# via matplotlib
pyright==1.1.407
pysocks==1.7.1
# via urllib3
pytest==8.4.2 ; python_full_version < '3.10'
pytest==9.0.0 ; python_full_version >= '3.10'
python-dateutil==2.9.0.post0
# via matplotlib
python-debian==1.0.1
# via reuse
python-dotenv==1.2.1
# via
# pydantic-settings
# webdriver-manager
python-magic==0.4.27 ; python_full_version >= '3.10'
# via reuse
python-multipart==0.0.20 ; python_full_version >= '3.10'
# via mcp
pywin32==311 ; python_full_version >= '3.10' and sys_platform == 'win32'
# via
# mcp
# semgrep
pyyaml==6.0.3
# via pre-commit
referencing==0.36.2 ; python_full_version < '3.10'
# via
# jsonschema
# jsonschema-specifications
referencing==0.37.0 ; python_full_version >= '3.10'
# via
# jsonschema
# jsonschema-specifications
requests==2.32.5
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
# sphinx
# webdriver-manager
requirements-parser==0.13.0
reuse==5.1.1 ; python_full_version < '3.10'
reuse==6.2.0 ; python_full_version >= '3.10'
rich==13.5.3
# via
# semgrep
# typer
roman-numerals-py==3.1.0 ; python_full_version >= '3.11'
# via sphinx
rpds-py==0.27.1 ; python_full_version < '3.10'
# via
# jsonschema
# referencing
rpds-py==0.28.0 ; python_full_version >= '3.10'
# via
# jsonschema
# referencing
rstcheck==6.2.5
rstcheck-core==1.2.2
# via rstcheck
ruamel-yaml==0.18.16
# via semgrep
ruamel-yaml-clib==0.2.14 ; python_full_version >= '3.10' or platform_python_implementation == 'CPython'
# via
# ruamel-yaml
# semgrep
ruff==0.14.4
selenium==4.36.0 ; python_full_version < '3.10'
selenium==4.38.0 ; python_full_version >= '3.10'
semgrep==1.136.0 ; python_full_version < '3.10'
semgrep==1.142.1 ; python_full_version >= '3.10'
setuptools==80.9.0
# via opentelemetry-instrumentation
shellingham==1.5.4
# via typer
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via
# anyio
# trio
snowballstemmer==3.0.1
# via sphinx
sortedcontainers==2.4.0
# via trio
sphinx==7.4.7 ; python_full_version < '3.10'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx==8.1.3 ; python_full_version == '3.10.*'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx==8.2.3 ; python_full_version >= '3.11'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
# via sphinx
sphinxcontrib-devhelp==2.0.0
# via sphinx
sphinxcontrib-htmlhelp==2.1.0
# via sphinx
sphinxcontrib-jquery==4.1
# via sphinx-rtd-theme
sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
sse-starlette==3.0.3 ; python_full_version >= '3.10'
# via mcp
starlette==0.49.3 ; python_full_version >= '3.10'
# via mcp
tomli==2.0.2
# via
# pytest
# semgrep
# sphinx
tomlkit==0.13.3
# via reuse
trio==0.31.0 ; python_full_version < '3.10'
# via
# selenium
# trio-websocket
trio==0.32.0 ; python_full_version >= '3.10'
# via
# selenium
# trio-websocket
trio-websocket==0.12.2
# via selenium
typer==0.20.0
# via rstcheck
typing-extensions==4.15.0
# via
# anyio
# opentelemetry-api
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# pydantic
# pydantic-core
# pyright
# referencing
# selenium
# semgrep
# starlette
# typer
# typing-inspection
# uvicorn
# virtualenv
typing-inspection==0.4.2
# via
# pydantic
# pydantic-settings
urllib3==2.5.0
# via
# requests
# selenium
# semgrep
uvicorn==0.38.0 ; python_full_version >= '3.10' and sys_platform != 'emscripten'
# via mcp
virtualenv==20.35.4
# via pre-commit
wcmatch==8.5.2
# via semgrep
webdriver-manager==4.0.2
websocket-client==1.9.0
# via selenium
wrapt==1.17.3
# via
# deprecated
# opentelemetry-instrumentation
wsproto==1.2.0
# via trio-websocket
zipp==3.23.0
# via
# importlib-metadata
# importlib-resources

View File

@ -1,7 +0,0 @@
psutil
defusedxml
orjson
reuse
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
sphinx
sphinx_rtd_theme

View File

@ -12,6 +12,9 @@ services:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/run/user/1000/podman/podman.sock:/run/user/1000/podman/podman.sock:ro"
- "./glances.conf:/glances/conf/glances.conf"
# # Uncomment for proper distro information in upper panel.
# # Works only for distros that do have this file (most distros do).
# - "/etc/os-release:/etc/os-release:ro"
environment:
- TZ=${TZ}
- GLANCES_OPT=-C /glances/conf/glances.conf -w

View File

@ -49,7 +49,7 @@ max_processes_display=25
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restfull_api.py is hosted
# Default is folder where glances_restful_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -127,7 +127,7 @@ user_careful=50
user_warning=70
user_critical=90
user_log=False
#user_critical_action=echo {{user}} {{value}} {{max}} > /tmp/cpu.alert
#user_critical_action=echo "{{time}} User CPU {{user}} higher than {{critical}}" > /tmp/cpu.alert
#
system_careful=50
system_warning=70
@ -181,12 +181,14 @@ temperature_critical=80
[mem]
disable=False
# Display available memory instead of used memory
#available=True
# Define RAM thresholds in %
# Default values if not defined: 50/70/90
careful=50
#careful_action_repeat=echo {{percent}} >> /tmp/memory.alert
warning=70
critical=90
#critical_action_repeat=echo "{{time}} {{percent}} higher than {{critical}}" >> /tmp/memory.alert
[memswap]
disable=False
@ -195,6 +197,7 @@ disable=False
careful=50
warning=70
critical=90
#warning_action=echo "{{time}} {{percent}} higher than {{warning}}" > /tmp/memory.alert
[load]
disable=False
@ -241,8 +244,9 @@ hide_zero=False
#wlan0_tx_warning=900000
#wlan0_tx_critical=1000000
#wlan0_tx_log=True
#wlan0_rx_critical_action=echo "{{time}} {{interface_name}} RX {{bytes_recv_rate_per_sec}}Bps" > /tmp/network.alert
# Alias for network interface name
#alias=wlp2s0:WIFI
#alias=wlp0s20f3:WIFI
[ip]
# Disable display of private IP address
@ -300,15 +304,32 @@ hide_zero=False
#show=sda.*
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set latency thresholds (latency in ms) for a given disk name (rx = read / tx = write)
# dm-0_rx_latency_careful=10
# dm-0_rx_latency_warning=20
# dm-0_rx_latency_critical=50
# dm-0_rx_latency_log=False
# dm-0_tx_latency_careful=10
# dm-0_tx_latency_warning=20
# dm-0_tx_latency_critical=50
# dm-0_tx_latency_log=False
# There are no default bitrate thresholds for disks (because it is not possible to know the disk speed)
# Set bitrate thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
#dm-0_rx_careful=4000000000
#dm-0_rx_warning=5000000000
#dm-0_rx_critical=6000000000
#dm-0_rx_log=True
#dm-0_rx_log=False
#dm-0_tx_careful=700000000
#dm-0_tx_warning=900000000
#dm-0_tx_critical=1000000000
#dm-0_tx_log=True
#dm-0_tx_log=False
[fs]
disable=False
@ -318,11 +339,15 @@ hide=/boot.*,.*/snap.*
#show=/,/srv
# Define filesystem space thresholds in %
# Default values if not defined: 50/70/90
# It is also possible to define per mount point value
# Example: /_careful=40
careful=50
warning=70
critical=90
# It is also possible to define per mount point value
# Example: /_careful=40
#/_careful=1
#/_warning=5
#/_critical=10
#/_critical_action=echo "{{time}} {{mnt_point}} filesystem space {{percent}}% higher than {{critical}}%" > /tmp/fs.alert
# Allow additional file system types (comma-separated FS type)
#allow=shm
# Alias for root file system
@ -384,8 +409,8 @@ port=7634
# Documentation: https://glances.readthedocs.io/en/latest/aoa/sensors.html
disable=False
# Set the refresh multiplicator for the sensors
# By default refresh every Glances refresh * 3 (increase to reduce CPU consumption)
#refresh=3
# By default refresh every Glances refresh * 5 (increase to reduce CPU consumption)
#refresh=5
# Hide some sensors (comma separated list of regexp)
hide=unknown.*
# Show only the following sensors (comma separated list of regexp)
@ -393,10 +418,11 @@ hide=unknown.*
# Sensors core thresholds (in Celsius...)
# By default values are grabbed from the system
# Overwrite thresholds for a specific sensor
#temperature_core_Ambient_careful=45
#temperature_core_Ambient_warning=65
#temperature_core_Ambient_critical=80
#temperature_core_Ambient_log=False
# temperature_core_Ambient_careful=40
# temperature_core_Ambient_warning=60
# temperature_core_Ambient_critical=85
# temperature_core_Ambient_log=True
# temperature_core_Ambient_critical_action=echo "{{time}} {{label}} temperature {{value}}{{unit}} higher than {{critical}}{{unit}}" > /tmp/temperature.alert
# Overwrite thresholds for a specific type of sensor
#temperature_core_careful=45
#temperature_core_warning=65
@ -431,6 +457,8 @@ disable=False
# Stats that can be disabled: cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Stats that can not be disable: pid,name
#disable_stats=cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Disable display of virtual memory
#disable_virtual_memory=True
# Define CPU/MEM (per process) thresholds in %
# Default values if not defined: 50/70/90
cpu_careful=50
@ -510,7 +538,8 @@ port_default_gateway=False
disable=True
# Define the maximum VMs size name (default is 20 chars)
max_name_size=20
# By default, Glances only display running VMs with states: 'Running', 'Starting' or 'Restarting'
# By default, Glances only display running VMs with states:
# 'Running', 'Paused', 'Starting' or 'Restarting'
# Set the following key to True to display all VMs regarding their states
all=False
@ -525,8 +554,8 @@ disable=False
# Define the maximum docker size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,command
; disable_stats=diskio,networkio
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Thresholds for CPU and MEM (in %)
; cpu_careful=50
; cpu_warning=70
@ -604,6 +633,11 @@ disable=False
# Exports
##############################################################################
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
#exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
[graph]
# Configuration for the --export graph option
# Set the path where the graph (.svg files) will be created
@ -622,7 +656,7 @@ style=DarkStyle
[influxdb]
# !!!
# Will be DEPRECATED in future release.
# Please have a look on the new influxdb2 export module (compatible with InfluxDB 1.8.x and 2.x)
# Please have a look on the new influxdb3 export module
# !!!
# Configuration for the --export influxdb option
# https://influxdb.com/

View File

@ -66,7 +66,7 @@ RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --upgrade pip
RUN python${PYTHON_VERSION} -m venv --without-pip venv
COPY requirements.txt docker-requirements.txt webui-requirements.txt optional-requirements.txt ./
COPY pyproject.toml docker-requirements.txt all-requirements.txt ./
##############################################################################
# BUILD: Install the minimal image deps
@ -74,9 +74,7 @@ FROM build AS buildminimal
ARG PYTHON_VERSION
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r requirements.txt \
-r docker-requirements.txt \
-r webui-requirements.txt
-r docker-requirements.txt
##############################################################################
# BUILD: Install all the deps
@ -89,8 +87,7 @@ ARG CASS_DRIVER_NO_CYTHON=1
ARG CARGO_NET_GIT_FETCH_WITH_CLI=true
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r requirements.txt \
-r optional-requirements.txt
-r all-requirements.txt
##############################################################################
# RELEASE Stages

View File

@ -1,22 +1,24 @@
{
"version": 1,
"disable_existing_loggers": "False",
"root": {"level": "INFO", "handlers": ["console"]},
"formatters": {
"standard": {"format": "%(asctime)s -- %(levelname)s -- %(message)s"},
"short": {"format": "%(levelname)s -- %(message)s"},
"long": {"format": "%(asctime)s -- %(levelname)s -- %(message)s (%(funcName)s in %(filename)s)"},
"free": {"format": "%(message)s"}
},
"handlers": {
"console": {"class": "logging.StreamHandler", "formatter": "standard"}
},
"loggers": {
"debug": {"handlers": ["console"], "level": "DEBUG"},
"verbose": {"handlers": ["console"], "level": "INFO"},
"standard": {"handlers": ["console"], "level": "INFO"},
"requests": {"handlers": ["console"], "level": "ERROR"},
"elasticsearch": {"handlers": ["console"], "level": "ERROR"},
"elasticsearch.trace": {"handlers": ["console"], "level": "ERROR"}
}
}
"version": 1,
"disable_existing_loggers": "False",
"root": { "level": "INFO", "handlers": ["console"] },
"formatters": {
"standard": { "format": "%(asctime)s -- %(levelname)s -- %(message)s" },
"short": { "format": "%(levelname)s -- %(message)s" },
"long": {
"format": "%(asctime)s -- %(levelname)s -- %(message)s (%(funcName)s in %(filename)s)"
},
"free": { "format": "%(message)s" }
},
"handlers": {
"console": { "class": "logging.StreamHandler", "formatter": "standard" }
},
"loggers": {
"debug": { "handlers": ["console"], "level": "DEBUG" },
"verbose": { "handlers": ["console"], "level": "INFO" },
"standard": { "handlers": ["console"], "level": "INFO" },
"requests": { "handlers": ["console"], "level": "ERROR" },
"elasticsearch": { "handlers": ["console"], "level": "ERROR" },
"elasticsearch.trace": { "handlers": ["console"], "level": "ERROR" }
}
}

View File

@ -55,7 +55,7 @@ RUN apt-get clean \
RUN python3 -m venv --without-pip venv
COPY requirements.txt docker-requirements.txt webui-requirements.txt optional-requirements.txt ./
COPY pyproject.toml docker-requirements.txt all-requirements.txt ./
##############################################################################
# BUILD: Install the minimal image deps
@ -63,9 +63,7 @@ FROM build AS buildminimal
ARG PYTHON_VERSION
RUN python3 -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r requirements.txt \
-r docker-requirements.txt \
-r webui-requirements.txt
-r docker-requirements.txt
##############################################################################
# BUILD: Install all the deps
@ -73,8 +71,7 @@ FROM build AS buildfull
ARG PYTHON_VERSION
RUN python3 -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r requirements.txt \
-r optional-requirements.txt
-r all-requirements.txt
##############################################################################
# RELEASE Stages

View File

@ -1,10 +1,85 @@
# install with base requirements file
-r requirements.txt
docker>=6.1.1
orjson # JSON Serialization speedup
podman
python-dateutil
requests
six
urllib3
# This file was autogenerated by uv via the following command:
# uv export --no-emit-workspace --no-hashes --no-group dev --extra containers --extra web --output-file docker-requirements.txt
annotated-doc==0.0.3
# via fastapi
annotated-types==0.7.0
# via pydantic
anyio==4.11.0
# via starlette
certifi==2025.10.5
# via requests
charset-normalizer==3.4.4
# via requests
click==8.1.8
# via uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
# via click
defusedxml==0.7.1
# via glances
docker==7.1.0
# via glances
exceptiongroup==1.2.2 ; python_full_version < '3.11'
# via anyio
fastapi==0.121.1
# via glances
h11==0.16.0
# via uvicorn
idna==3.11
# via
# anyio
# requests
jinja2==3.1.6
# via glances
markupsafe==3.0.3
# via jinja2
packaging==25.0
# via glances
podman==5.6.0
# via glances
psutil==7.1.3
# via glances
pydantic==2.12.4
# via fastapi
pydantic-core==2.41.5
# via pydantic
python-dateutil==2.9.0.post0
# via glances
pywin32==311 ; sys_platform == 'win32'
# via docker
requests==2.32.5
# via
# docker
# glances
# podman
shtab==1.7.2 ; sys_platform != 'win32'
# via glances
six==1.17.0
# via
# glances
# python-dateutil
sniffio==1.3.1
# via anyio
starlette==0.49.3
# via fastapi
tomli==2.0.2 ; python_full_version < '3.11'
# via podman
typing-extensions==4.15.0
# via
# anyio
# fastapi
# pydantic
# pydantic-core
# starlette
# typing-inspection
# uvicorn
typing-inspection==0.4.2
# via pydantic
urllib3==2.5.0
# via
# docker
# podman
# requests
uvicorn==0.38.0
# via glances
windows-curses==2.4.1 ; sys_platform == 'win32'
# via glances

View File

@ -3,7 +3,7 @@
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = ../venv/bin/sphinx-build
SPHINXBUILD = ../.venv/bin/sphinx-build
PAPER =
BUILDDIR = _build

File diff suppressed because it is too large Load Diff

Image updated (before: 87 KiB, after: 76 KiB).

File diff suppressed because one or more lines are too long

Image updated (before: 292 KiB, after: 203 KiB).

Binary file not shown.

Image updated (before: 47 KiB, after: 34 KiB).

Binary file not shown.

Image updated (before: 51 KiB, after: 33 KiB).

File diff suppressed because one or more lines are too long

BIN
docs/_static/screenshot-fetch.png vendored Normal file

Binary file not shown.

Image added (83 KiB).

View File

@ -3,7 +3,7 @@
Actions
=======
Glances can trigger actions on events.
Glances can trigger actions on events for warning and critical thresholds.
By ``action``, we mean all shell command line. For example, if you want
to execute the ``foo.py`` script if the last 5 minutes load are critical
@ -18,6 +18,13 @@ then add the ``_action`` line to the Glances configuration file:
All the stats are available in the command line through the use of the
`Mustache`_ syntax. `Chevron`_ is required to render the mustache's template syntax.
In addition to the stats of the current plugin, the following variables are
also available:
- ``{{time}}``: current time in ISO format
- ``{{critical}}``: critical threshold value
- ``{{warning}}``: warning threshold value
- ``{{careful}}``: careful threshold value
Another example would be to create a log file
containing used vs total disk space if a space trigger warning is
reached:
@ -26,7 +33,7 @@ reached:
[fs]
warning=70
warning_action=echo {{mnt_point}} {{used}}/{{size}} > /tmp/fs.alert
warning_action=echo "{{time}} {{mnt_point}} {{used}}/{{size}}" > /tmp/fs.alert
A last example would be to create a log file containing the total user disk
space usage for a device and notify by email each time a space trigger
@ -36,13 +43,11 @@ critical is reached:
[fs]
critical=90
critical_action_repeat=echo {{device_name}} {{percent}} > /tmp/fs.alert && python /etc/glances/actions.d/fs-critical.py
critical_action_repeat=echo "{{time}} {{device_name}} {{percent}}" > /tmp/fs.alert && python /etc/glances/actions.d/fs-critical.py
.. note::
Use ``&&`` as the separator for multiple commands
Within ``/etc/glances/actions.d/fs-critical.py``:
.. code-block:: python
@ -63,7 +68,7 @@ Within ``/etc/glances/actions.d/fs-critical.py``:
.. note::
You can use all the stats for the current plugin. See
https://github.com/nicolargo/glances/wiki/The-Glances-RESTFULL-JSON-API
https://github.com/nicolargo/glances/wiki/The-Glances-RESTFUL-JSON-API
for the stats list.
It is also possible to repeat action until the end of the alert.

View File

@ -32,8 +32,8 @@ under the ``[containers]`` section:
# Define the maximum containers size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,command
disable_stats=diskio,networkio
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Global containers' thresholds for CPU and MEM (in %)
cpu_careful=50
cpu_warning=70

View File

@ -5,21 +5,16 @@ Disk I/O
.. image:: ../_static/diskio.png
Glances displays the disk I/O throughput. The unit is adapted
dynamically.
You can display:
Glances displays the disk I/O throughput, count and mean latency:
- bytes per second (default behavior / Bytes/s, KBytes/s, MBytes/s, etc)
- requests per second (using --diskio-iops option or *B* hotkey)
- mean latency (using --diskio-latency option or *L* hotkey)
There is no alert on this information.
It's possible to define:
It's also possible to define:
- a list of disk to show (white list)
- a list of disks to hide
- aliases for disk name
- aliases for disk name (use \ to escape special characters)
under the ``[diskio]`` section in the configuration file.
@ -42,13 +37,20 @@ Filtering is based on regular expression. Please be sure that your regular
expression works as expected. You can use an online tool like `regex101`_ in
order to test your regular expression.
It is also possible to define thresholds for bytes read and write per second:
It is also possible to define thresholds for latency and bytes read and write per second:
.. code-block:: ini
[diskio]
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
dm-0_rx_careful=4000000000
dm-0_rx_warning=5000000000

View File

@ -35,6 +35,11 @@ system:
[fs]
allow=shm
With the above configuration key, it is also possible to monitor NFS
mount points (allow=nfs). Be aware that this can slow down the
performance of the plugin if the NFS server is not reachable. In this
case, the plugin will wait for a 2-second timeout.
Also, you can hide mount points using regular expressions.
To hide all mount points starting with /boot and /snap:

View File

@ -27,7 +27,7 @@ Stats description:
is in RAM.
- **inactive**: (UNIX): memory that is marked as not used.
- **buffers**: (Linux, BSD): cache for things like file system metadata.
- **cached**: (Linux, BSD): cache for various things.
- **cached**: (Linux, BSD): cache for various things (including ZFS cache).
Additional stats available through the API:
@ -41,6 +41,10 @@ Additional stats available in through the API:
- **shared**: (BSD): memory that may be simultaneously accessed by multiple
processes.
It is possible to display the available memory instead of the used memory
by setting the ``available`` option to ``True`` in the configuration file
under the ``[mem]`` section.
A character is also displayed just after the MEM header and shows the
trend value:

View File

@ -20,7 +20,7 @@ Additionally, you can define:
- automatically hide interfaces not up
- automatically hide interfaces without IP address
- per-interface limit values
- aliases for interface name
- aliases for interface name (use \ to escape special characters)
The configuration should be done in the ``[network]`` section of the
Glances configuration file.
@ -72,7 +72,7 @@ can also be used to set a threshold higher than zero.
.. code-block:: ini
[diskio]
[network]
hide_zero=True
hide_threshold_bytes=0

View File

@ -149,12 +149,24 @@ Columns display
pressing on the ``'/'`` key
========================= ==============================================
Disable display of virtual memory
---------------------------------
It's possible to disable the display of the VIRT column (virtual memory) by adding the
``disable_virtual_memory=True`` option in the ``[processlist]`` section of the configuration
file (glances.conf):
.. code-block:: ini
[processlist]
disable_virtual_memory=True
Process filtering
-----------------
It's possible to filter the processes list using the ``ENTER`` key.
Filter syntax is the following (examples):
Glances filter syntax is the following (examples):
- ``python``: Filter processes name or command line starting with
*python* (regexp)
@ -163,6 +175,25 @@ Filter syntax is the following (examples):
- ``username:nicolargo``: Processes of nicolargo user (key:regexp)
- ``cmdline:\/usr\/bin.*``: Processes starting by */usr/bin*
Process focus
-------------
It's also possible to select a list of processes to focus on.
A list of Glances filters (see above) can be defined from the command line:
.. code-block:: bash
glances --process-focus .*python.*,.*firefox.*
or the glances.conf file:
.. code-block:: ini
[processlist]
focus=.*python.*,.*firefox.*
Extended info
-------------

View File

@ -33,6 +33,7 @@ thresholds (default behavor).
#temperature_core_careful=45
#temperature_core_warning=65
#temperature_core_critical=80
#alias=temp1:Motherboard 0,core 0:CPU Core 0
.. note 1::
The support for multiple batteries is only available if

923
docs/api/openapi.json Normal file
View File

@ -0,0 +1,923 @@
{
"openapi": "3.0.2",
"info": { "title": "FastAPI", "version": "0.1.0" },
"paths": {
"/api/4/status": {
"get": {
"summary": " Api Status",
"description": "Glances API RESTful implementation.\n\nReturn a 200 status code.\nThis entry point should be used to check the API health.\n\nSee related issue: Web server health check endpoint #1988",
"operationId": "_api_status_api_4_status_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
},
"head": {
"summary": " Api Status",
"description": "Glances API RESTful implementation.\n\nReturn a 200 status code.\nThis entry point should be used to check the API health.\n\nSee related issue: Web server health check endpoint #1988",
"operationId": "_api_status_api_4_status_head",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/events/clear/warning": {
"post": {
"summary": " Events Clear Warning",
"description": "Glances API RESTful implementation.\n\nReturn a 200 status code.\n\nIt's a post message to clean warning events",
"operationId": "_events_clear_warning_api_4_events_clear_warning_post",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/events/clear/all": {
"post": {
"summary": " Events Clear All",
"description": "Glances API RESTful implementation.\n\nReturn a 200 status code.\n\nIt's a post message to clean all events",
"operationId": "_events_clear_all_api_4_events_clear_all_post",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/processes/extended/disable": {
"post": {
"summary": " Api Disable Extended Processes",
"description": "Glances API RESTful implementation.\n\nDisable extended process stats\nHTTP/200 if OK\nHTTP/400 if PID is not found\nHTTP/404 if others error",
"operationId": "_api_disable_extended_processes_api_4_processes_extended_disable_post",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/processes/extended/{pid}": {
"post": {
"summary": " Api Set Extended Processes",
"description": "Glances API RESTful implementation.\n\nSet the extended process stats for the given PID\nHTTP/200 if OK\nHTTP/400 if PID is not found\nHTTP/404 if others error",
"operationId": "_api_set_extended_processes_api_4_processes_extended__pid__post",
"parameters": [
{
"name": "pid",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Pid" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/config": {
"get": {
"summary": " Api Config",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the Glances configuration file\nHTTP/200 if OK\nHTTP/404 if others error",
"operationId": "_api_config_api_4_config_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/config/{section}": {
"get": {
"summary": " Api Config Section",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the Glances configuration section\nHTTP/200 if OK\nHTTP/400 if item is not found\nHTTP/404 if others error",
"operationId": "_api_config_section_api_4_config__section__get",
"parameters": [
{
"name": "section",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Section" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/config/{section}/{item}": {
"get": {
"summary": " Api Config Section Item",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the Glances configuration section/item\nHTTP/200 if OK\nHTTP/400 if item is not found\nHTTP/404 if others error",
"operationId": "_api_config_section_item_api_4_config__section___item__get",
"parameters": [
{
"name": "section",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Section" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/args": {
"get": {
"summary": " Api Args",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the Glances command line arguments\nHTTP/200 if OK\nHTTP/404 if others error",
"operationId": "_api_args_api_4_args_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/args/{item}": {
"get": {
"summary": " Api Args Item",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the Glances command line arguments item\nHTTP/200 if OK\nHTTP/400 if item is not found\nHTTP/404 if others error",
"operationId": "_api_args_item_api_4_args__item__get",
"parameters": [
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/help": {
"get": {
"summary": " Api Help",
"description": "Glances API RESTful implementation.\n\nReturn the help data or 404 error.",
"operationId": "_api_help_api_4_help_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/all": {
"get": {
"summary": " Api All",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of all the plugins\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_all_api_4_all_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/all/limits": {
"get": {
"summary": " Api All Limits",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of all the plugins limits\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_all_limits_api_4_all_limits_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/all/views": {
"get": {
"summary": " Api All Views",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of all the plugins views\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_all_views_api_4_all_views_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/pluginslist": {
"get": {
"summary": " Api Plugins",
"description": "Glances API RESTFul implementation.\n\n@api {get} /api/%s/pluginslist Get plugins list\n@apiVersion 2.0\n@apiName pluginslist\n@apiGroup plugin\n\n@apiSuccess {String[]} Plugins list.\n\n@apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n [\n \"load\",\n \"help\",\n \"ip\",\n \"memswap\",\n \"processlist\",\n ...\n ]\n\n @apiError Cannot get plugin list.\n\n @apiErrorExample Error-Response:\n HTTP/1.1 404 Not Found",
"operationId": "_api_plugins_api_4_pluginslist_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/serverslist": {
"get": {
"summary": " Api Servers List",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the servers list (for browser mode)\nHTTP/200 if OK",
"operationId": "_api_servers_list_api_4_serverslist_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/processes/extended": {
"get": {
"summary": " Api Get Extended Processes",
"description": "Glances API RESTful implementation.\n\nGet the extended process stats (if set before)\nHTTP/200 if OK\nHTTP/400 if PID is not found\nHTTP/404 if others error",
"operationId": "_api_get_extended_processes_api_4_processes_extended_get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
}
}
}
},
"/api/4/processes/{pid}": {
"get": {
"summary": " Api Get Processes",
"description": "Glances API RESTful implementation.\n\nGet the process stats for the given PID\nHTTP/200 if OK\nHTTP/400 if PID is not found\nHTTP/404 if others error",
"operationId": "_api_get_processes_api_4_processes__pid__get",
"parameters": [
{
"name": "pid",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Pid" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}": {
"get": {
"summary": " Api",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of a given plugin\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_api_4__plugin__get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/history": {
"get": {
"summary": " Api History",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of a given plugin history\nLimit to the last nb items (all if nb=0)\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_history_api_4__plugin__history_get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "nb",
"in": "query",
"required": false,
"schema": { "type": "integer", "default": 0, "title": "Nb" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/history/{nb}": {
"get": {
"summary": " Api History",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of a given plugin history\nLimit to the last nb items (all if nb=0)\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_history_api_4__plugin__history__nb__get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "nb",
"in": "path",
"required": true,
"schema": { "type": "integer", "title": "Nb" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/top/{nb}": {
"get": {
"summary": " Api Top",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of a given plugin limited to the top nb items.\nIt is used to reduce the payload of the HTTP response (example: processlist).\n\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_top_api_4__plugin__top__nb__get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "nb",
"in": "path",
"required": true,
"schema": { "type": "integer", "title": "Nb" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/limits": {
"get": {
"summary": " Api Limits",
"description": "Glances API RESTful implementation.\n\nReturn the JSON limits of a given plugin\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_limits_api_4__plugin__limits_get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/views": {
"get": {
"summary": " Api Views",
"description": "Glances API RESTful implementation.\n\nReturn the JSON views of a given plugin\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_views_api_4__plugin__views_get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/{item}": {
"get": {
"summary": " Api Item",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the couple plugin/item\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_item_api_4__plugin___item__get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/{item}/views": {
"get": {
"summary": " Api Item Views",
"description": "Glances API RESTful implementation.\n\nReturn the JSON view representation of the couple plugin/item\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_item_views_api_4__plugin___item__views_get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/{item}/history": {
"get": {
"summary": " Api Item History",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the couple plugin/history of item\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_item_history_api_4__plugin___item__history_get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
},
{
"name": "nb",
"in": "query",
"required": false,
"schema": { "type": "integer", "default": 0, "title": "Nb" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/{item}/history/{nb}": {
"get": {
"summary": " Api Item History",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the couple plugin/history of item\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_item_history_api_4__plugin___item__history__nb__get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
},
{
"name": "nb",
"in": "path",
"required": true,
"schema": { "type": "integer", "title": "Nb" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/{item}/description": {
"get": {
"summary": " Api Item Description",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the couple plugin/item description\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_item_description_api_4__plugin___item__description_get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/{item}/unit": {
"get": {
"summary": " Api Item Unit",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of the couple plugin/item unit\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_item_unit_api_4__plugin___item__unit_get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/{item}/value/{value}": {
"get": {
"summary": " Api Value",
"description": "Glances API RESTful implementation.\n\nReturn the process stats (dict) for the given item=value\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_value_api_4__plugin___item__value__value__get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
},
{
"name": "value",
"in": "path",
"required": true,
"schema": {
"anyOf": [
{ "type": "string" },
{ "type": "integer" },
{ "type": "number" }
],
"title": "Value"
}
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/{item}/{key}": {
"get": {
"summary": " Api Key",
"description": "Glances API RESTful implementation.\n\nReturn the JSON representation of plugin/item/key\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_key_api_4__plugin___item___key__get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
},
{
"name": "key",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Key" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/api/4/{plugin}/{item}/{key}/views": {
"get": {
"summary": " Api Key Views",
"description": "Glances API RESTful implementation.\n\nReturn the JSON view representation of plugin/item/key\nHTTP/200 if OK\nHTTP/400 if plugin is not found\nHTTP/404 if others error",
"operationId": "_api_key_views_api_4__plugin___item___key__views_get",
"parameters": [
{
"name": "plugin",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Plugin" }
},
{
"name": "item",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Item" }
},
{
"name": "key",
"in": "path",
"required": true,
"schema": { "type": "string", "title": "Key" }
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": { "application/json": { "schema": {} } }
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": { "$ref": "#/components/schemas/HTTPValidationError" }
}
}
}
}
}
},
"/": {
"get": {
"summary": " Index",
"description": "Return main index.html (/) file.\n\nParameters are available through the request object.\nExample: http://localhost:61208/?refresh=5\n\nNote: This function is only called the first time the page is loaded.",
"operationId": "_index__get",
"responses": {
"200": {
"description": "Successful Response",
"content": { "text/html": { "schema": { "type": "string" } } }
}
}
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": { "$ref": "#/components/schemas/ValidationError" },
"type": "array",
"title": "Detail"
}
},
"type": "object",
"title": "HTTPValidationError"
},
"ValidationError": {
"properties": {
"loc": {
"items": { "anyOf": [{ "type": "string" }, { "type": "integer" }] },
"type": "array",
"title": "Location"
},
"msg": { "type": "string", "title": "Message" },
"type": { "type": "string", "title": "Error Type" }
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError"
}
}
}
}
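A hedged usage sketch for two of the endpoints described in the specification above (not part of the generated file): the ``/api/4/status`` health check and the ``/api/4/{plugin}/top/{nb}`` payload-limiting endpoint. It assumes a Glances web server running locally on the default port 61208 (``glances -w``) and only uses the Python standard library.

.. code-block:: python

    # Sketch: call two documented endpoints of a local Glances web server.
    import json
    from urllib.request import urlopen

    BASE = "http://localhost:61208/api/4"

    # Health check: /api/4/status returns HTTP 200 when the API is up.
    with urlopen(f"{BASE}/status") as response:
        print("API status:", response.status)

    # Limit the processlist payload to the top 3 items.
    with urlopen(f"{BASE}/processlist/top/3") as response:
        for process in json.load(response):
            print(process.get("name"), process.get("cpu_percent"))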

1552
docs/api/python.rst Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,2 +1,5 @@
#!/bin/sh
make clean
make html
LC_ALL=C make man

View File

@ -416,12 +416,18 @@ The following commands (key pressed) are supported while in Glances:
``F5`` or ``CTRL-R``
Refresh user interface
``LEFT``
``SHIFT-LEFT``
Navigation left through the process sort
``RIGHT``
``SHIFT-RIGHT``
Navigation right through the process sort
``LEFT``
Navigation left through the process name
``RIGHT``
Navigation right through the process name
``UP``
Up in the processes list

View File

@ -80,7 +80,7 @@ than a second one concerning the user interface:
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restfull_api.py is hosted
# Default is folder where glances_restful_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -95,6 +95,10 @@ than a second one concerning the user interface:
# Comma separated list of HTTP request headers that should be supported for cross-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile=./glances.local+3-key.pem
#ssl_keyfile_password=kfp
#ssl_certfile=./glances.local+3.pem
Each plugin, export module, and application monitoring process (AMP) can
have a section. Below is an example for the CPU plugin:

View File

@ -187,7 +187,7 @@ and make it visible to your container by adding it to ``docker-compose.yml`` as
image: nicolargo/glances:latest
restart: always
environment:
- GLANCES_OPT="-w --password"
- "GLANCES_OPT=-w --password"
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
# Uncomment the below line if you want glances to display host OS detail instead of container's

View File

@ -18,3 +18,34 @@ On ARM64, Docker needs to be configured to allow access to the memory stats.
Edit the /boot/firmware/cmdline.txt and add the following configuration key:
cgroup_enable=memory
Netifaces issue?
-----------------
Previously, Glances used Netifaces to get network interface information.
Now, Glances uses Netifaces2.
Please uninstall Netifaces and install Netifaces2 instead.
On Debian/Ubuntu systems, the Web server displays a blank screen?
----------------------------------------------------------------------
For some reason, the Glances Debian/Ubuntu packages do not include the Web UI static files.
Please read: https://github.com/nicolargo/glances/issues/2021 for a workaround and more information.
Glances says my computer has no free memory, is that normal?
----------------------------------------------------------------
On Linux, Glances shows the free memory by default.
Free memory can be low; this is normal behavior because Linux uses free memory for disk caching
to improve performance. More information can be found here: https://linuxatemyram.com/.
If you want to display the "available" memory instead of the "free" memory, you can use
the following configuration key in the Glances configuration file:
[mem]
# Display available memory instead of used memory
available=True

46
docs/fetch.rst Normal file
View File

@ -0,0 +1,46 @@
.. _fetch:
Fetch
=====
The fetch mode is used to get and share a quick look at a machine using the
``--fetch`` option. In this mode, current stats are displayed on the console in
a fancy way.
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: _static/screenshot-fetch.png
It is also possible to use a custom template with the ``--fetch-template </path/to/template.jinja>`` option.
Some examples are provided in the ``conf/fetch-templates/`` directory. Please feel free to
customize them or create your own template (contributions via PR are welcome).
Templates are based on the Jinja2 templating engine and can use all the stats
available in Glances through the ``gl`` variable (an instance of the :ref:`Glances Python API<api>`).
For example, the default template is defined as:
.. code-block:: jinja
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} {{ '%0.2f'| format(gl.load['min5']) }} {{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} {{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} {{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}{% for net in gl.network.keys() %}📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s ↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} ⚡ {{ process['cpu_percent'] }}% CPU{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }} ⚡ {{ process['cpu_percent'] }}% CPU
{% endfor %}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
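To make the relation between fetch templates and the Glances Python API explicit, here is a hedged sketch (not part of the official documentation) that renders a one-line custom template from Python. It assumes the ``jinja2`` package is installed and reuses only expressions already shown above (``gl.system['hostname']``, ``gl.cpu['total']``, ``gl.mem`` and ``gl.auto_unit``); whether Glances renders fetch templates exactly this way internally is an assumption.

.. code-block:: python

    # Sketch: render a tiny custom template with the same `gl` object that
    # fetch templates receive. Assumes jinja2 is installed.
    from jinja2 import Template

    from glances import api

    gl = api.GlancesAPI()

    template = Template(
        "{{ gl.system['hostname'] }}: CPU {{ gl.cpu['total'] }}% | "
        "MEM {{ gl.auto_unit(gl.mem['used']) }}/{{ gl.auto_unit(gl.mem['total']) }}"
    )
    print(template.render(gl=gl))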

110
docs/gw/duckdb.rst Normal file
View File

@ -0,0 +1,110 @@
.. _duckdb:
DuckDB
===========
DuckDB is an in-process SQL OLAP database management system.
You can export statistics to a ``DuckDB`` database file (or an in-memory database).
The connection should be defined in the Glances configuration file as
follows:
.. code-block:: ini
[duckdb]
# database defines where data are stored, can be one of:
# /path/to/glances.db (see https://duckdb.org/docs/stable/clients/python/dbapi#file-based-connection)
# :memory:glances (see https://duckdb.org/docs/stable/clients/python/dbapi#in-memory-connection)
# Or anyone else supported by the API (see https://duckdb.org/docs/stable/clients/python/dbapi)
database=/tmp/glances.db
and run Glances with:
.. code-block:: console
$ glances --export duckdb
Data model
-----------
The data model is composed of one table per Glances plugin.
Example:
.. code-block:: python
>>> import duckdb
>>> db = duckdb.connect(database='/tmp/glances.db', read_only=True)
>>> db.sql("SELECT * from cpu")
┌─────────────────────┬─────────────────┬────────┬────────┬────────┬───┬────────────────────┬─────────────────────┬──────────────────────┬──────────────────────┬──────────────────────┐
│ time │ hostname_id │ total │ user │ nice │ … │ cpu_iowait_warning │ cpu_iowait_critical │ cpu_ctx_switches_c… │ cpu_ctx_switches_w… │ cpu_ctx_switches_c… │
│ time with time zone │ varchar │ double │ double │ double │ │ double │ double │ double │ double │ double │
├─────────────────────┼─────────────────┼────────┼────────┼────────┼───┼────────────────────┼─────────────────────┼──────────────────────┼──────────────────────┼──────────────────────┤
│ 11:50:25+00 │ nicolargo-xps15 │ 8.0 │ 5.6 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:27+00 │ nicolargo-xps15 │ 4.3 │ 3.2 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:29+00 │ nicolargo-xps15 │ 4.3 │ 3.2 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:31+00 │ nicolargo-xps15 │ 14.9 │ 15.7 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:33+00 │ nicolargo-xps15 │ 14.9 │ 15.7 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:35+00 │ nicolargo-xps15 │ 8.2 │ 7.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:37+00 │ nicolargo-xps15 │ 8.2 │ 7.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:39+00 │ nicolargo-xps15 │ 12.7 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:41+00 │ nicolargo-xps15 │ 12.7 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:43+00 │ nicolargo-xps15 │ 12.2 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ 11:51:29+00 │ nicolargo-xps15 │ 10.1 │ 7.4 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:32+00 │ nicolargo-xps15 │ 10.1 │ 7.4 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:34+00 │ nicolargo-xps15 │ 6.6 │ 4.9 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:36+00 │ nicolargo-xps15 │ 6.6 │ 4.9 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:38+00 │ nicolargo-xps15 │ 9.9 │ 7.5 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:40+00 │ nicolargo-xps15 │ 9.9 │ 7.5 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:42+00 │ nicolargo-xps15 │ 4.0 │ 3.1 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:44+00 │ nicolargo-xps15 │ 4.0 │ 3.1 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:46+00 │ nicolargo-xps15 │ 11.1 │ 8.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:48+00 │ nicolargo-xps15 │ 11.1 │ 8.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
├─────────────────────┴─────────────────┴────────┴────────┴────────┴───┴────────────────────┴─────────────────────┴──────────────────────┴──────────────────────┴──────────────────────┤
│ 41 rows (20 shown) 47 columns (10 shown) │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
>>> db.sql("SELECT * from cpu").fetchall()[0]
(datetime.time(11, 50, 25, tzinfo=datetime.timezone.utc), 'nicolargo-xps15', 8.0, 5.6, 0.0, 2.3, 91.9, 0.1, 0.0, 0.0, 0.0, 0, 0, 0, 0, 16, 2.4103684425354004, 90724823, 0, 63323797, 0, 30704572, 0, 0, 0, 1200.0, 65.0, 75.0, 85.0, True, 50.0, 70.0, 90.0, True, 50.0, 70.0, 90.0, True, 50.0, 70.0, 90.0, 5.0, 5.625, 6.25, 640000.0, 720000.0, 800000.0)
>>> db.sql("SELECT * from network")
┌─────────────────────┬─────────────────┬────────────────┬────────────┬────────────┬───┬─────────────────────┬────────────────┬────────────────────┬────────────────────┬───────────────────┐
│ time │ hostname_id │ key_id │ bytes_sent │ bytes_recv │ … │ network_tx_critical │ network_hide │ network_hide_no_up │ network_hide_no_ip │ network_hide_zero │
│ time with time zone │ varchar │ varchar │ int64 │ int64 │ │ double │ varchar │ boolean │ boolean │ boolean │
├─────────────────────┼─────────────────┼────────────────┼────────────┼────────────┼───┼─────────────────────┼────────────────┼────────────────────┼────────────────────┼───────────────────┤
│ 11:50:25+00 │ nicolargo-xps15 │ interface_name │ 407761 │ 32730 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:27+00 │ nicolargo-xps15 │ interface_name │ 2877 │ 4857 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:29+00 │ nicolargo-xps15 │ interface_name │ 44504 │ 32555 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:31+00 │ nicolargo-xps15 │ interface_name │ 1092285 │ 48600 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:33+00 │ nicolargo-xps15 │ interface_name │ 150119 │ 43805 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:35+00 │ nicolargo-xps15 │ interface_name │ 34424 │ 14825 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:37+00 │ nicolargo-xps15 │ interface_name │ 19382 │ 33614 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:39+00 │ nicolargo-xps15 │ interface_name │ 53060 │ 39780 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:41+00 │ nicolargo-xps15 │ interface_name │ 371914 │ 78626 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:43+00 │ nicolargo-xps15 │ interface_name │ 82356 │ 60612 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ 11:51:29+00 │ nicolargo-xps15 │ interface_name │ 3766 │ 9977 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:32+00 │ nicolargo-xps15 │ interface_name │ 188036 │ 18668 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:34+00 │ nicolargo-xps15 │ interface_name │ 543 │ 2451 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:36+00 │ nicolargo-xps15 │ interface_name │ 8247 │ 7275 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:38+00 │ nicolargo-xps15 │ interface_name │ 7252 │ 986 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:40+00 │ nicolargo-xps15 │ interface_name │ 172 │ 132 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:42+00 │ nicolargo-xps15 │ interface_name │ 8080 │ 6640 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:44+00 │ nicolargo-xps15 │ interface_name │ 19660 │ 17830 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:46+00 │ nicolargo-xps15 │ interface_name │ 1007030 │ 84170 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:48+00 │ nicolargo-xps15 │ interface_name │ 128947 │ 18087 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
├─────────────────────┴─────────────────┴────────────────┴────────────┴────────────┴───┴─────────────────────┴────────────────┴────────────────────┴────────────────────┴───────────────────┤
│ 41 rows (20 shown) 28 columns (10 shown) │
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
.. _duckdb: https://duckdb.org/
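Because each plugin is stored as a plain SQL table, the exported data can be aggregated directly. A small follow-up sketch (assuming the same ``/tmp/glances.db`` file and the column names shown above):

.. code-block:: python

    # Sketch: average CPU load per host from the exported data.
    # Assumes the /tmp/glances.db file produced by `glances --export duckdb`.
    import duckdb

    db = duckdb.connect(database='/tmp/glances.db', read_only=True)
    print(db.sql("SELECT hostname_id, avg(total) AS avg_total_cpu FROM cpu GROUP BY hostname_id"))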

View File

@ -3,8 +3,24 @@
Gateway To Other Services
=========================
Glances can exports stats to a CSV file. Also, it can act as a gateway
to providing stats to multiple services (see list below).
Glances can export stats to files or to other services such as databases, message queues, etc.
Each exporter has its own configuration options, which can be set in the Glances
configuration file (`glances.conf`).
A common ``[export]`` options section is also available. For example, the
``exclude_fields`` option allows you to specify fields that should not be exported:
.. code-block:: ini
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
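For illustration only (this is not Glances code), the sketch below shows how these default regexes relate to typical field names; the exact matching rule used internally (search vs. full match) is an assumption here.

.. code-block:: python

    # Illustration: which field names the default exclude_fields regexes catch.
    import re

    exclude_fields = [r".*_critical", r".*_careful", r".*_warning", r".*\.key$"]

    for field in ["cpu_user_critical", "total", "mem_careful", "network.key"]:
        excluded = any(re.fullmatch(pattern, field) for pattern in exclude_fields)
        print(field, "excluded" if excluded else "exported")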
This section describes the available exporters and how to configure them:
.. toctree::
:maxdepth: 2
@ -14,6 +30,7 @@ to providing stats to multiple services (see list below).
couchdb
elastic
graph
graphite
influxdb
json
kafka

View File

@ -40,7 +40,7 @@ be added as a column in the table (named key_id) and added to the timescaledb.se
Current limitations
-------------------
Sensors and Fs plugins are not supported by the TimescaleDB exporter.
Sensors, Fs and DiskIO plugins are not supported by the TimescaleDB exporter.
In the cpu plugin, the user field is exported as user_cpu (user_percpu in the percpu plugin)
because user is a reserved keyword in PostgreSQL.

View File

@ -11,12 +11,11 @@ information depending on the terminal size.
It can also work in client/server mode. Remote monitoring can be
done via terminal, Web interface, or API (XMLRPC and RESTful).
Glances is written in Python and uses the `psutil`_ library to get
information from your system.
Stats can also be exported to :ref:`files or external databases<gw>`.
Stats can also be exported to external time/value databases.
.. _psutil: https://github.com/giampaolo/psutil
It is also possible to use it in your own Python scripts thanks to
the :ref:`Glances API<api>` or in any other application through
the :ref:`RESTful API<api_restful>`.
Table of Contents
=================
@ -30,7 +29,11 @@ Table of Contents
config
aoa/index
gw/index
api
api/python
api/restful
docker
faq
support
.. _psutil: https://github.com/giampaolo/psutil

View File

@ -28,7 +28,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
.TH "GLANCES" "1" "Jun 30, 2025" "4.3.2_rc01" "Glances"
.TH "GLANCES" "1" "Nov 21, 2025" "4.4.2_dev1" "Glances"
.SH NAME
glances \- An eye on your system
.SH SYNOPSIS
@ -522,12 +522,18 @@ Switch between process command line or command name
.B \fBF5\fP or \fBCTRL\-R\fP
Refresh user interface
.TP
.B \fBLEFT\fP
.B \fBSHIFT\-LEFT\fP
Navigation left through the process sort
.TP
.B \fBRIGHT\fP
.B \fBSHIFT\-RIGHT\fP
Navigation right through the process sort
.TP
.B \fBLEFT\fP
Navigation left through the process name
.TP
.B \fBRIGHT\fP
Navigation right through the process name
.TP
.B \fBUP\fP
Up in the processes list
.TP
@ -666,7 +672,7 @@ max_processes_display=25
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restfull_api.py is hosted
# Default is folder where glances_restful_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross\-origin requests.
@ -681,6 +687,10 @@ max_processes_display=25
# Comma separated list of HTTP request headers that should be supported for cross\-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile=./glances.local+3\-key.pem
#ssl_keyfile_password=kfp
#ssl_certfile=./glances.local+3.pem
.EE
.UNINDENT
.UNINDENT

File diff suppressed because one or more lines are too long

View File

@ -4,11 +4,12 @@ Quickstart
==========
This page gives a good introduction to how to get started with Glances.
Glances offers three modes:
Glances offers multiple modes:
- Standalone
- Client/Server
- Web server
- Fetch
Standalone Mode
---------------
@ -196,7 +197,7 @@ Here's a screenshot from Chrome on Android:
.. image:: _static/screenshot-web2.png
How do you protect your server (or Web server) with a login/password ?
------------------------------------------------------------------
----------------------------------------------------------------------
You can set a password to access the server using the ``--password``.
By default, the login is ``glances`` but you can change it with
@ -222,3 +223,22 @@ file:
# Additionally (and optionally) a default password could be defined
localhost=mylocalhostpassword
default=mydefaultpassword
Fetch mode
----------
It is also possible to get and share a quick look at a machine using the
``fetch`` mode. In this mode, current stats are displayed on the console in
a fancy way.
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: _static/screenshot-fetch.png
It is also possible to use a custom template with the ``--fetch-template </path/to/template.jinja>`` option.
Have a look to the :ref:`fetch documentation page<fetch>` to learn how to create your own template.

View File

@ -1,4 +1,5 @@
import json
from unittest.mock import patch
from fastapi.openapi.utils import get_openapi
@ -8,13 +9,15 @@ from glances.main import GlancesMain
from glances.outputs.glances_restful_api import GlancesRestfulApi
# Init Glances core
core = GlancesMain(args_begin_at=2)
testargs = ["glances", "-C", "./conf/glances.conf"]
with patch('sys.argv', testargs):
core = GlancesMain()
test_config = core.get_config()
test_args = core.get_args()
app = GlancesRestfulApi(config=test_config, args=test_args)._app
with open('./docs/openapi.json', 'w') as f:
with open('./docs/api/openapi.json', 'w') as f:
json.dump(
get_openapi(
title=app.title,

13
generate_webui_conf.py Normal file
View File

@ -0,0 +1,13 @@
import json
from glances.outputs.glances_curses import _GlancesCurses
print(
json.dumps(
{
"topMenu": list(_GlancesCurses._top),
"leftMenu": [p for p in _GlancesCurses._left_sidebar if p != "now"],
},
indent=4,
)
)

523
glances.ipynb Normal file
View File

@ -0,0 +1,523 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "592b8135-c06b-41b7-895e-9dd70787f6ac",
"metadata": {},
"source": [
"# Use Glances API in your Python code"
]
},
{
"cell_type": "markdown",
"id": "e5ec86ae-ce2b-452f-b715-54e746026a96",
"metadata": {},
"source": [
"## Init the Glances API"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "ba9b3546-65a0-4eec-942b-1855ff5c5d32",
"metadata": {},
"outputs": [],
"source": [
"from glances import api"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e81ad928-3b61-4654-8589-13cb29e7f292",
"metadata": {},
"outputs": [],
"source": [
"gl = api.GlancesAPI()"
]
},
{
"cell_type": "markdown",
"id": "6ec912a3-0875-4cdb-8539-e84ffb27768a",
"metadata": {},
"source": [
"## Get plugins list"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1ce57a13-a90d-4d65-b4a4-2bc45112697e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['alert',\n",
" 'ports',\n",
" 'diskio',\n",
" 'containers',\n",
" 'processcount',\n",
" 'programlist',\n",
" 'gpu',\n",
" 'percpu',\n",
" 'vms',\n",
" 'system',\n",
" 'network',\n",
" 'cpu',\n",
" 'amps',\n",
" 'processlist',\n",
" 'load',\n",
" 'sensors',\n",
" 'uptime',\n",
" 'now',\n",
" 'connections',\n",
" 'fs',\n",
" 'wifi',\n",
" 'ip',\n",
" 'help',\n",
" 'version',\n",
" 'psutilversion',\n",
" 'core',\n",
" 'mem',\n",
" 'folders',\n",
" 'quicklook',\n",
" 'memswap',\n",
" 'raid']"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.plugins()"
]
},
{
"cell_type": "markdown",
"id": "d5be2964-7a28-4b93-9dd0-1481afd2ee50",
"metadata": {},
"source": [
"## Get CPU stats"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "0d1636d2-3f3e-44d4-bb67-45487384f79f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'total': 3.8, 'user': 3.0, 'nice': 0.0, 'system': 0.8, 'idle': 96.1, 'iowait': 0.1, 'irq': 0.0, 'steal': 0.0, 'guest': 0.0, 'ctx_switches': 0, 'interrupts': 0, 'soft_interrupts': 0, 'syscalls': 0, 'cpucore': 16, 'time_since_update': 141.46278643608093, 'ctx_switches_gauge': 12830371, 'ctx_switches_rate_per_sec': 0, 'interrupts_gauge': 9800040, 'interrupts_rate_per_sec': 0, 'soft_interrupts_gauge': 3875931, 'soft_interrupts_rate_per_sec': 0, 'syscalls_gauge': 0, 'syscalls_rate_per_sec': 0}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.cpu"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "99681a33-045e-43bf-927d-88b15872fad0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3.1"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.cpu.get('total')"
]
},
{
"cell_type": "markdown",
"id": "07e30de4-8f2a-4110-9c43-2a87d91dbf24",
"metadata": {},
"source": [
"## Get MEMORY stats"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "33502d93-acf9-49c5-8bcd-0a0404b47829",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'total': 16422858752, 'available': 6726169136, 'percent': 59.0, 'used': 9696689616, 'free': 541847552, 'active': 8672595968, 'inactive': 5456875520, 'buffers': 354791424, 'cached': 6520318384, 'shared': 729960448}"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.mem"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "404cd8d6-ac38-4830-8ead-4b747e0ca7b1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"6779998768"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.mem.get('available')"
]
},
{
"cell_type": "markdown",
"id": "74e27e9f-3240-4827-a754-3538b7d68119",
"metadata": {},
"source": [
"Display it in a user friendly way:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "fa83b40a-51e8-45fa-b478-d0fcc9de4639",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'6.28G'"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.auto_unit(gl.mem.get('available'))"
]
},
{
"cell_type": "markdown",
"id": "bfaf5b94-7c9c-4fdc-8a91-71f543cafa4b",
"metadata": {},
"source": [
"## Get NETWORK stats"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "a0ab2ce7-e9bd-4a60-9b90-095a9023dac7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'wlp0s20f3': {'bytes_sent': 1130903, 'bytes_recv': 2213272, 'speed': 0, 'key': 'interface_name', 'interface_name': 'wlp0s20f3', 'alias': 'WIFI', 'bytes_all': 3344175, 'time_since_update': 354.35748958587646, 'bytes_recv_gauge': 1108380679, 'bytes_recv_rate_per_sec': 6245.0, 'bytes_sent_gauge': 21062113, 'bytes_sent_rate_per_sec': 3191.0, 'bytes_all_gauge': 1129442792, 'bytes_all_rate_per_sec': 9437.0}}"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network"
]
},
{
"cell_type": "markdown",
"id": "b65f7280-d9f0-4719-9e10-8b78dc414bae",
"metadata": {},
"source": [
"Get the list of networks interfaces:"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "1a55d32a-bd7d-4dfa-b239-8875c01f205e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['wlp0s20f3']"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network.keys()"
]
},
{
"cell_type": "markdown",
"id": "8c7e0215-e96a-4f7e-a187-9b7bee1abcf9",
"metadata": {},
"source": [
"Get stats for a specific network interface:"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "9aacfb32-c0e3-4fc7-b1d2-d216e46088cd",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'bytes_sent': 118799,\n",
" 'bytes_recv': 275052,\n",
" 'speed': 0,\n",
" 'key': 'interface_name',\n",
" 'interface_name': 'wlp0s20f3',\n",
" 'alias': 'WIFI',\n",
" 'bytes_all': 393851,\n",
" 'time_since_update': 46.24822926521301,\n",
" 'bytes_recv_gauge': 1108795793,\n",
" 'bytes_recv_rate_per_sec': 5947.0,\n",
" 'bytes_sent_gauge': 21268464,\n",
" 'bytes_sent_rate_per_sec': 2568.0,\n",
" 'bytes_all_gauge': 1130064257,\n",
" 'bytes_all_rate_per_sec': 8516.0}"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network.get('wlp0s20f3')"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "4f5ae513-6022-4a52-8d6c-e8b62afacc24",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5105.0"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network.get('wlp0s20f3').get('bytes_recv_rate_per_sec')"
]
},
{
"cell_type": "markdown",
"id": "8b0bdbf4-e386-44aa-9585-1d042f0ded5d",
"metadata": {},
"source": [
"## Additional information"
]
},
{
"cell_type": "markdown",
"id": "5c52a0c7-06fb-432a-bdb7-9921f432d5a6",
"metadata": {},
"source": [
"Example for the LOAD plugin."
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "99303a2b-52a3-440f-a896-ad4951a9de34",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'min1': 1.01123046875, 'min5': 0.83447265625, 'min15': 0.76171875, 'cpucore': 16}"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load"
]
},
{
"cell_type": "markdown",
"id": "7a560824-2787-4436-b39b-63de0c455536",
"metadata": {},
"source": [
"Get the limit configured in the glances.conf:"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "cbbc6a81-623f-4eff-9d08-e6a8b5981660",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'min1': {'description': 'Average sum of the number of processes waiting in the run-queue plus the number currently executing over 1 minute.',\n",
" 'unit': 'float'},\n",
" 'min5': {'description': 'Average sum of the number of processes waiting in the run-queue plus the number currently executing over 5 minutes.',\n",
" 'unit': 'float'},\n",
" 'min15': {'description': 'Average sum of the number of processes waiting in the run-queue plus the number currently executing over 15 minutes.',\n",
" 'unit': 'float'},\n",
" 'cpucore': {'description': 'Total number of CPU core.', 'unit': 'number'}}"
]
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load.fields_description"
]
},
{
"cell_type": "markdown",
"id": "2bd51d13-77e3-48f0-aa53-af86df6425f8",
"metadata": {},
"source": [
"Get field description and unit:"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "8682edcf-a8b9-424c-976f-2a301a05be6a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history_size': 1200.0,\n",
" 'load_disable': ['False'],\n",
" 'load_careful': 0.7,\n",
" 'load_warning': 1.0,\n",
" 'load_critical': 5.0}"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load.get_limits()"
]
},
{
"cell_type": "raw",
"id": "3c671ff8-3a0c-48d3-8247-6081c69c19a9",
"metadata": {},
"source": [
"Get current stats views regarding limits:"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "45e03e9b-233c-4359-bcbc-7d2f06aca1c6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'min1': {'decoration': 'DEFAULT',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False},\n",
" 'min5': {'decoration': 'OK',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False},\n",
" 'min15': {'decoration': 'OK_LOG',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False},\n",
" 'cpucore': {'decoration': 'DEFAULT',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False}}"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load.get_views()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.14.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -19,7 +19,8 @@ import tracemalloc
# Global name
# Version should start and end with a numerical char
# See https://packaging.python.org/specifications/core-metadata/#version
__version__ = "4.3.2_rc01"
# Examples: 1.0.0, 1.0.0rc1, 1.1.0_dev1
__version__ = "4.4.2_dev1"
__apiversion__ = '4'
__author__ = 'Nicolas Hennion <nicolas@nicolargo.com>'
__license__ = 'LGPLv3'
@ -52,10 +53,10 @@ if psutil_version_info < psutil_min_version:
# Trac malloc is only available on Python 3.4 or higher
def __signal_handler(signal, frame):
logger.debug(f"Signal {signal} caught")
def __signal_handler(sig, frame):
logger.debug(f"Signal {sig} caught")
# Avoid Glances hanging when killing the process with multiple CTRL-C. See #3264
signal.signal(signal.SIGINT, signal.SIG_IGN)
end()
@ -95,8 +96,8 @@ def check_memleak(args, mode):
def setup_server_mode(args, mode):
if args.stdout_issue or args.stdout_apidoc:
# Serve once for issue/test mode
if args.stdout_issue or args.stdout_api_restful_doc or args.stdout_api_doc:
# Serve once for issue and API documentation modes
mode.serve_issue()
else:
# Serve forever
@ -104,18 +105,18 @@ def setup_server_mode(args, mode):
def maybe_trace_memleak(args, snapshot_begin):
if args.memory_leak:
if args.trace_malloc or args.memory_leak:
snapshot_end = tracemalloc.take_snapshot()
if args.memory_leak:
snapshot_diff = snapshot_end.compare_to(snapshot_begin, 'filename')
memory_leak = sum([s.size_diff for s in snapshot_diff])
print(f"Memory consumption: {memory_leak / 1000:.1f}KB (see log for details)")
logger.info("Memory consumption (top 5):")
for stat in snapshot_diff[:5]:
logger.info(stat)
elif args.trace_malloc:
if args.trace_malloc:
# See more options here: https://docs.python.org/3/library/tracemalloc.html
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics("filename")
top_stats = snapshot_end.statistics("filename")
print("[ Trace malloc - Top 10 ]")
for stat in top_stats[:10]:
print(stat)
@ -188,3 +189,6 @@ def main():
# Glances can be ran in standalone, client or server mode
start(config=core.get_config(), args=core.get_args())
# End of glances/__init__.py

117
glances/api.py Normal file
View File

@ -0,0 +1,117 @@
#
# Glances - An eye on your system
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
from glances import __version__ as glances_version
from glances.globals import auto_unit, weak_lru_cache
from glances.main import GlancesMain
from glances.outputs.glances_bars import Bar
from glances.processes import sort_stats
from glances.stats import GlancesStats
plugin_dependencies_tree = {
'processlist': ['processcount'],
}
class GlancesAPI:
ttl = 2.0 # Default cache TTL in seconds
def __init__(self, config=None, args=None):
self.__version__ = glances_version.split('.')[0] # Get the major version
core = GlancesMain()
self.args = args if args is not None else core.get_args()
self.config = config if config is not None else core.get_config()
self._stats = GlancesStats(config=self.config, args=self.args)
# Set the cache TTL for the API
self.ttl = self.args.time if self.args.time is not None else self.ttl
# Init the stats of all plugins in order to ensure that rate are computed
self._stats.update()
@weak_lru_cache(maxsize=1, ttl=ttl)
def __getattr__(self, item):
"""Fallback to the stats object for any missing attributes."""
if item in self._stats.getPluginsList():
if item in plugin_dependencies_tree:
# Ensure dependencies are updated before accessing the plugin
for dependency in plugin_dependencies_tree[item]:
self._stats.get_plugin(dependency).update()
# Update the plugin stats
self._stats.get_plugin(item).update()
return self._stats.get_plugin(item)
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{item}'")
def plugins(self):
"""Return the list of available plugins."""
return self._stats.getPluginsList()
def auto_unit(self, number, low_precision=False, min_symbol='K', none_symbol='-'):
"""
Converts a numeric value into a human-readable string with appropriate units.
Args:
number (float or int): The numeric value to be converted.
low_precision (bool, optional): If True, use lower precision for the output. Defaults to False.
min_symbol (str, optional): The minimum unit symbol to use (e.g., 'K' for kilo). Defaults to 'K'.
none_symbol (str, optional): The symbol to display if the number is None. Defaults to '-'.
Returns:
str: A human-readable string representation of the number with units.
"""
return auto_unit(number, low_precision, min_symbol, none_symbol)
def bar(self, value, size=18, bar_char='', empty_char='', pre_char='', post_char=''):
"""
Generate a progress bar representation for a given value.
Args:
value (float): The percentage value to represent in the bar (typically between 0 and 100).
size (int, optional): The total length of the bar in characters. Defaults to 18.
bar_char (str, optional): The character used to represent the filled portion of the bar. Defaults to ''.
empty_char (str, optional): The character used to represent the empty portion of the bar. Defaults to ''.
pre_char (str, optional): A string to prepend to the bar. Defaults to ''.
post_char (str, optional): A string to append to the bar. Defaults to ''.
Returns:
str: A string representing the progress bar.
"""
b = Bar(
size, bar_char=bar_char, empty_char=empty_char, pre_char=pre_char, post_char=post_char, display_value=False
)
b.percent = value
return b.get()
def top_process(self, limit=3, sorted_by='cpu_percent', sorted_by_secondary='memory_percent'):
"""
Returns a list of the top processes sorted by specified criteria.
Args:
limit (int, optional): The maximum number of top processes to return. Defaults to 3.
sorted_by (str, optional): The primary key to sort processes by (e.g., 'cpu_percent').
Defaults to 'cpu_percent'.
sorted_by_secondary (str, optional): The secondary key to sort processes by if primary keys are equal
(e.g., 'memory_percent'). Defaults to 'memory_percent'.
Returns:
list: A list of dictionaries representing the top processes, excluding those with 'glances' in their
command line.
Note:
The 'glances' process is excluded from the returned list to avoid self-generated CPU load affecting
the results.
"""
# Exclude glances process from the top list
# because in fetch mode, Glances generates a CPU load
all_but_glances = [
p
for p in self._stats.get_plugin('processlist').get_raw()
if p['cmdline'] and 'glances' not in (p['cmdline'] or ())
]
return sort_stats(all_but_glances, sorted_by=sorted_by, sorted_by_secondary=sorted_by_secondary)[:limit]
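
A short usage sketch for the GlancesAPI class above (plugin and field names follow the notebook earlier in this commit; printed values are host-dependent):

from glances import api

gl = api.GlancesAPI()                       # init and compute a first set of stats
print(gl.plugins())                         # list of available plugin names
print(gl.cpu.get("total"))                  # total CPU usage in percent
print(gl.auto_unit(gl.mem.get("used")))     # e.g. '4.20G'
print(gl.bar(gl.mem.get("percent")))        # memory usage rendered as a text bar
print(gl.top_process(limit=3))              # top 3 processes by CPU, then memory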

View File

@ -10,6 +10,14 @@
from datetime import datetime
# Ugly hack until Python 3.10 support is dropped (datetime.UTC was added in Python 3.11)
try:
from datetime import UTC
except ImportError:
from datetime import timezone
UTC = timezone.utc
class GlancesAttribute:
def __init__(self, name, description='', history_max_size=None):
@ -73,7 +81,7 @@ class GlancesAttribute:
Value is a tuple: (<timestamp>, <new_value>)
"""
self._value = (datetime.now(), new_value)
self._value = (datetime.now(UTC), new_value)
self.history_add(self._value)
"""

View File

@ -17,6 +17,10 @@ from glances import __version__
from glances.globals import json_loads
from glances.logger import logger
from glances.outputs.glances_curses import GlancesCursesClient
from glances.outputs.glances_stdout import GlancesStdout
from glances.outputs.glances_stdout_csv import GlancesStdoutCsv
from glances.outputs.glances_stdout_fetch import GlancesStdoutFetch
from glances.outputs.glances_stdout_json import GlancesStdoutJson
from glances.stats_client import GlancesStatsClient
from glances.timer import Counter
@ -73,7 +77,7 @@ class GlancesClient:
def log_and_exit(self, msg=''):
"""Log and exit."""
if not self.return_to_browser:
logger.critical(msg)
logger.critical(f"Error when connecting to Glances server: {msg}")
sys.exit(2)
else:
logger.error(msg)
@ -172,6 +176,21 @@ class GlancesClient:
if self.quiet:
# In quiet mode, nothing is displayed
logger.info("Quiet mode is ON: Nothing will be displayed")
elif self.args.stdout:
logger.info(f"Stdout mode is ON, following stats will be displayed: {self.args.stdout}")
# Init screen
self.screen = GlancesStdout(config=self.config, args=self.args)
elif self.args.stdout_json:
logger.info(f"Stdout JSON mode is ON, following stats will be displayed: {self.args.stdout_json}")
# Init screen
self.screen = GlancesStdoutJson(config=self.config, args=self.args)
elif self.args.stdout_csv:
logger.info(f"Stdout CSV mode is ON, following stats will be displayed: {self.args.stdout_csv}")
# Init screen
self.screen = GlancesStdoutCsv(config=self.config, args=self.args)
elif self.args.stdout_fetch:
logger.info("Fetch mode is ON")
self.screen = GlancesStdoutFetch(config=self.config, args=self.args)
else:
self.screen = GlancesCursesClient(config=self.config, args=self.args)
@ -237,6 +256,7 @@ class GlancesClient:
return self.client_mode
exit_key = False
try:
while True and not exit_key:
# Update the stats
@ -264,8 +284,8 @@ class GlancesClient:
else:
# In quiet mode, we only wait adapted_refresh seconds
time.sleep(adapted_refresh)
except Exception as e:
logger.critical(e)
except Exception:
logger.critical("Critical error in client serve_forever loop")
self.end()
return self.client_mode

View File

@ -271,6 +271,7 @@ class GlancesEventsList:
event_time, event_index, event_state, event_type, event_value, proc_list, proc_desc, global_message
)
# logger.info(self.events_list)
return self.len()
def _create_event(self, event_time, event_state, event_type, event_value, proc_desc, global_message):

View File

@ -11,6 +11,8 @@ I am your father...
...for all Glances exports IF.
"""
import re
from glances.globals import NoOptionError, NoSectionError, json_dumps
from glances.logger import logger
from glances.timer import Counter
@ -53,6 +55,10 @@ class GlancesExport:
# Fields description
self._fields_description = None
# Load the default common export configuration
if self.config is not None:
self.load_common_conf()
def _log_result_decorator(fct):
"""Log (DEBUG) the result of the function fct."""
@ -71,6 +77,24 @@ class GlancesExport:
"""Close the export module."""
logger.debug(f"Finalise export interface {self.export_name}")
def load_common_conf(self):
"""Load the common export configuration in the Glances configuration file.
:returns: Boolean -- True if section is found
"""
# Read the common [export] section
section = "export"
opt = "exclude_fields"
try:
setattr(self, opt, self.config.get_list_value(section, opt))
except NoOptionError:
logger.debug(f"{opt} option not found in the {section} configuration section")
logger.debug(f"Load common {section} from the Glances configuration file")
return True
def load_conf(self, section, mandatories=["host", "port"], options=None):
"""Load the export <section> configuration in the Glances configuration file.
@ -101,7 +125,7 @@ class GlancesExport:
try:
setattr(self, opt, self.config.get_value(section, opt))
except NoOptionError:
pass
logger.debug(f"{opt} option not found in the {section} configuration section")
logger.debug(f"Load {section} from the Glances configuration file")
logger.debug(f"{section} parameters: { ({opt: getattr(self, opt) for opt in mandatories + options}) }")
@ -128,7 +152,7 @@ class GlancesExport:
d_tags = {}
if tags:
try:
d_tags = dict([x.split(":") for x in tags.split(",")])
d_tags = dict(x.split(":", 1) for x in tags.split(","))
except ValueError:
# one of the 'key:value' pairs was missing
logger.info("Invalid tags passed: %s", tags)
@ -199,6 +223,10 @@ class GlancesExport:
ret.append({"measurement": name, "tags": tags, "fields": fields})
return ret
def is_excluded(self, field):
"""Return true if the field is excluded."""
return any(re.fullmatch(i, field, re.I) for i in (getattr(self, 'exclude_fields') or ()))
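# A matching sketch for is_excluded(): exclude_fields (read from the [export]
# section of glances.conf by load_common_conf above) is a list of regular
# expressions matched case-insensitively against the exported field name,
# including its item prefix (e.g. 'eth0.bytes_recv'). The patterns below are
# examples only:
#   exclude_fields = ['.*_rate_per_sec', 'cpu_hz.*']
#   is_excluded('bytes_recv_rate_per_sec')       -> True
#   is_excluded('eth0.bytes_recv_rate_per_sec')  -> True
#   is_excluded('eth0.bytes_recv')               -> False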
def plugins_to_export(self, stats):
"""Return the list of plugins to export.
@ -266,7 +294,7 @@ class GlancesExport:
if isinstance(stats, dict):
# Stats is a dict
# Is there a key ?
if "key" in stats.keys() and stats["key"] in stats.keys():
if "key" in stats and stats["key"] in stats:
pre_key = "{}.".format(stats[stats["key"]])
else:
pre_key = ""
@ -285,6 +313,8 @@ class GlancesExport:
export_values += item_values
else:
# We are on a simple value
if self.is_excluded(pre_key + key.lower()):
continue
export_names.append(pre_key + key.lower())
export_values.append(value)
elif isinstance(stats, list):

View File

@ -12,7 +12,7 @@
# How to test ?
#
# 1) docker run -d -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=admin -p 5984:5984 --name my-couchdb couchdb
# 2) ./venv/bin/python -m glances -C ./conf/glances.conf --export couchdb --quiet
# 2) .venv/bin/python -m glances -C ./conf/glances.conf --export couchdb --quiet
# 3) Result can be seen at: http://127.0.0.1:5984/_utils
#

View File

@ -0,0 +1,195 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""DuckDB interface class."""
import sys
import time
from datetime import datetime
from platform import node
import duckdb
from glances.exports.export import GlancesExport
from glances.logger import logger
# Define the type conversions for DuckDB
# https://duckdb.org/docs/stable/clients/python/conversion
convert_types = {
'bool': 'BOOLEAN',
'int': 'BIGINT',
'float': 'DOUBLE',
'str': 'VARCHAR',
'tuple': 'VARCHAR', # Store tuples as VARCHAR (comma-separated)
'list': 'VARCHAR', # Store lists as VARCHAR (comma-separated)
'NoneType': 'VARCHAR',
}
class Export(GlancesExport):
"""This class manages the DuckDB export module."""
def __init__(self, config=None, args=None):
"""Init the DuckDB export IF."""
super().__init__(config=config, args=args)
# Mandatory configuration keys (additional to host and port)
self.db = None
# Optional configuration keys
self.user = None
self.password = None
self.hostname = None
# Load the configuration file
self.export_enable = self.load_conf(
'duckdb', mandatories=['database'], options=['user', 'password', 'hostname']
)
if not self.export_enable:
exit('Missing DuckDB config')
# The hostname is always added as an identifier in the DuckDB table
# so we can filter the stats by hostname
self.hostname = self.hostname or node().split(".")[0]
# Init the DuckDB client
self.client = self.init()
def init(self):
"""Init the connection to the DuckDB server."""
if not self.export_enable:
return None
try:
db = duckdb.connect(database=self.database)
except Exception as e:
logger.critical(f"Cannot connect to DuckDB {self.database} ({e})")
sys.exit(2)
else:
logger.info(f"Stats will be exported to DuckDB: {self.database}")
return db
def normalize(self, value):
# Nothing to do...
if isinstance(value, list) and len(value) == 1 and value[0] in ['True', 'False']:
return bool(value[0])
return value
def update(self, stats):
"""Update the DuckDB export module."""
if not self.export_enable:
return False
# Get all the stats & limits
# Current limitation with sensors and fs plugins because fields list is not the same
self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs']]
all_stats = stats.getAllExportsAsDict(plugin_list=self.last_exported_list())
all_limits = stats.getAllLimitsAsDict(plugin_list=self.last_exported_list())
# Loop over plugins to export
for plugin in self.last_exported_list():
# Remove some fields
if isinstance(all_stats[plugin], dict):
all_stats[plugin].update(all_limits[plugin])
# Remove the <plugin>_disable field
all_stats[plugin].pop(f"{plugin}_disable", None)
elif isinstance(all_stats[plugin], list):
for i in all_stats[plugin]:
i.update(all_limits[plugin])
# Remove the <plugin>_disable field
i.pop(f"{plugin}_disable", None)
else:
continue
plugin_stats = all_stats[plugin]
creation_list = [] # List used to create the DuckDB table
values_list = [] # List of values to insert (list of lists, one list per row)
if isinstance(plugin_stats, dict):
# Create the list to create the table
creation_list.append('time TIMETZ')
creation_list.append('hostname_id VARCHAR')
for key, value in plugin_stats.items():
creation_list.append(f"{key} {convert_types[type(self.normalize(value)).__name__]}")
# Create the list of values to insert
item_list = []
item_list.append(self.normalize(datetime.now().replace(microsecond=0)))
item_list.append(self.normalize(f"{self.hostname}"))
item_list.extend([self.normalize(value) for value in plugin_stats.values()])
values_list = [item_list]
elif isinstance(plugin_stats, list) and len(plugin_stats) > 0 and 'key' in plugin_stats[0]:
# Create the list to create the table
creation_list.append('time TIMETZ')
creation_list.append('hostname_id VARCHAR')
creation_list.append('key_id VARCHAR')
for key, value in plugin_stats[0].items():
creation_list.append(f"{key} {convert_types[type(self.normalize(value)).__name__]}")
# Create the list of values to insert
for plugin_item in plugin_stats:
item_list = []
item_list.append(self.normalize(datetime.now().replace(microsecond=0)))
item_list.append(self.normalize(f"{self.hostname}"))
item_list.append(self.normalize(f"{plugin_item.get('key')}"))
item_list.extend([self.normalize(value) for value in plugin_item.values()])
values_list.append(item_list)
else:
continue
# Export stats to DuckDB
self.export(plugin, creation_list, values_list)
return True
def export(self, plugin, creation_list, values_list):
"""Export the stats to the DuckDB server."""
logger.debug(f"Export {plugin} stats to DuckDB")
# Create the table if it does not exist
table_list = [t[0] for t in self.client.sql("SHOW TABLES").fetchall()]
if plugin not in table_list:
# Execute the create table query
create_query = f"""
CREATE TABLE {plugin} (
{', '.join(creation_list)}
);"""
logger.debug(f"Create table: {create_query}")
try:
self.client.execute(create_query)
except Exception as e:
logger.error(f"Cannot create table {plugin}: {e}")
return
# Commit the changes
self.client.commit()
# Insert values into the table
for values in values_list:
insert_query = f"""
INSERT INTO {plugin} VALUES (
{', '.join(['?' for _ in values])}
);"""
logger.debug(f"Insert values into table {plugin}: {values}")
try:
self.client.execute(insert_query, values)
except Exception as e:
logger.error(f"Cannot insert data into table {plugin}: {e}")
# Commit the changes
self.client.commit()
def exit(self):
"""Close the DuckDB export module."""
# Force last write
self.client.commit()
# Close the DuckDB client
time.sleep(3) # Wait a bit to ensure all data is written
self.client.close()
# Call the father method
super().exit()
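
A read-back sketch for the export above, assuming the stats were written to a local database file and that the cpu plugin was exported (one table is created per plugin, starting with the time and hostname_id columns):

import duckdb

con = duckdb.connect(database="glances.duckdb")  # value of the [duckdb] database option in glances.conf
print([t[0] for t in con.sql("SHOW TABLES").fetchall()])  # one table per exported plugin
print(con.sql("SELECT time, hostname_id, total FROM cpu ORDER BY time DESC LIMIT 5").fetchall())
con.close()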

View File

@ -43,6 +43,9 @@ class Export(GlancesExport):
# Perhaps a better method is possible...
self._metric_dict = {}
# Keys name (compute in update() method)
self.keys_name = {}
# Init the Prometheus Exporter
self.init()
@ -56,29 +59,41 @@ class Export(GlancesExport):
else:
logger.info(f"Start Prometheus exporter on {self.host}:{self.port}")
def update(self, stats):
self.keys_name = {k: stats.get_plugin(k).get_key() for k in stats.getPluginsList()}
super().update(stats)
def export(self, name, columns, points):
"""Write the points to the Prometheus exporter using Gauge."""
logger.debug(f"Export {name} stats to Prometheus exporter")
# Remove non number stats and convert all to float (for Boolean)
data = {k: float(v) for k, v in zip(columns, points) if isinstance(v, Number)}
data = {str(k): float(v) for k, v in zip(columns, points) if isinstance(v, Number)}
# Write metrics to the Prometheus exporter
for k, v in data.items():
# Prometheus metric name: prefix_<glances stats name>
metric_name = self.prefix + self.METRIC_SEPARATOR + str(name) + self.METRIC_SEPARATOR + str(k)
for metric, value in data.items():
labels = self.labels
metric_name = self.prefix + self.METRIC_SEPARATOR + name + self.METRIC_SEPARATOR
try:
obj, stat = metric.split('.')
metric_name += stat
labels += f",{self.keys_name.get(name)}:{obj}"
except ValueError:
metric_name += metric
# Prometheus is very sensitive to the metric name
# See: https://prometheus.io/docs/practices/naming/
for c in ' .-/:[]':
metric_name = metric_name.replace(c, self.METRIC_SEPARATOR)
# Get the labels
labels = self.parse_tags(self.labels)
labels = self.parse_tags(labels)
# Manage an internal dict between metric name and Gauge
if metric_name not in self._metric_dict:
self._metric_dict[metric_name] = Gauge(metric_name, k, labelnames=listkeys(labels))
self._metric_dict[metric_name] = Gauge(metric_name, "", labelnames=listkeys(labels))
# Write the value
if hasattr(self._metric_dict[metric_name], 'labels'):
# Add the labels (see issue #1255)
self._metric_dict[metric_name].labels(**labels).set(v)
self._metric_dict[metric_name].labels(**labels).set(value)
else:
self._metric_dict[metric_name].set(v)
self._metric_dict[metric_name].set(value)

View File

@ -83,6 +83,9 @@ class Export(GlancesExport):
if isinstance(value, bool):
return str(value).upper()
if isinstance(value, (list, tuple)):
# Special case for list of one boolean
if len(value) == 1 and isinstance(value[0], bool):
return str(value[0]).upper()
return ', '.join([f"'{v}'" for v in value])
if isinstance(value, str):
return f"'{value}'"
@ -95,8 +98,8 @@ class Export(GlancesExport):
return False
# Get all the stats & limits
# Current limitation with sensors and fs plugins because fields list is not the same
self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs']]
# @TODO: Current limitation with sensors, fs and diskio plugins because fields list is not the same
self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs', 'diskio']]
all_stats = stats.getAllExportsAsDict(plugin_list=self.last_exported_list())
all_limits = stats.getAllLimitsAsDict(plugin_list=self.last_exported_list())
@ -159,6 +162,9 @@ class Export(GlancesExport):
continue
# Export stats to TimescaleDB
# logger.info(plugin)
# logger.info(f"Segmented by: {segmented_by}")
# logger.info(list(zip(creation_list, values_list[0])))
self.export(plugin, creation_list, segmented_by, values_list)
return True

View File

@ -17,6 +17,7 @@ import base64
import errno
import functools
import importlib
import multiprocessing
import os
import platform
import queue
@ -96,6 +97,11 @@ viewkeys = methodcaller('keys')
viewvalues = methodcaller('values')
viewitems = methodcaller('items')
# Multiprocessing start method (on POSIX system)
if LINUX or BSD or SUNOS or MACOS:
ctx_mp_fork = multiprocessing.get_context('fork')
else:
ctx_mp_fork = multiprocessing.get_context()
###################
# GLOBALS FUNCTIONS
@ -127,18 +133,6 @@ def listvalues(d):
return list(d.values())
def iteritems(d):
return iter(d.items())
def iterkeys(d):
return iter(d.keys())
def itervalues(d):
return iter(d.values())
def u(s, errors='replace'):
if isinstance(s, text_type):
return s
@ -375,6 +369,13 @@ def json_loads(data: Union[str, bytes, bytearray]) -> Union[dict, list]:
return json.loads(data)
def list_to_dict(data):
"""Convert a list of dict (with key in 'key') to a dict with key as key and value as value."""
if not isinstance(data, list):
return None
return {item[item['key']]: item for item in data if 'key' in item}
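# Example: a "list of dict" plugin output becomes a dict indexed by each item's
# own key field (the interface item below is illustrative only):
#   >>> list_to_dict([{'key': 'interface_name', 'interface_name': 'eth0', 'bytes_recv': 1024}])
#   {'eth0': {'key': 'interface_name', 'interface_name': 'eth0', 'bytes_recv': 1024}}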
def dictlist(data, item):
if isinstance(data, dict):
try:
@ -409,6 +410,65 @@ def dictlist_first_key_value(data: list[dict], key, value) -> Optional[dict]:
return ret
def auto_unit(number, low_precision=False, min_symbol='K', none_symbol='-'):
"""Make a nice human-readable string out of number.
Number of decimal places increases as quantity approaches 1.
CASE: 613421788 RESULT: 585M low_precision: 585M
CASE: 5307033647 RESULT: 4.94G low_precision: 4.9G
CASE: 44968414685 RESULT: 41.9G low_precision: 41.9G
CASE: 838471403472 RESULT: 781G low_precision: 781G
CASE: 9683209690677 RESULT: 8.81T low_precision: 8.8T
CASE: 1073741824 RESULT: 1024M low_precision: 1024M
CASE: 1181116006 RESULT: 1.10G low_precision: 1.1G
:low_precision: potentially returns fewer decimal places (default is False),
sacrificing precision for more readability.
:min_symbol: Do not approximate if number < min_symbol (default is K)
:decimal_count: if set, force the number of decimal places (default is None)
"""
if number is None:
return none_symbol
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
if min_symbol in symbols:
symbols = symbols[symbols.index(min_symbol) :]
prefix = {
'Y': 1208925819614629174706176,
'Z': 1180591620717411303424,
'E': 1152921504606846976,
'P': 1125899906842624,
'T': 1099511627776,
'G': 1073741824,
'M': 1048576,
'K': 1024,
}
if number == 0:
# Avoid 0.0
return '0'
# If a value is a float, decimal_precision is 2 else 0
decimal_precision = 2 if isinstance(number, float) else 0
for symbol in reversed(symbols):
value = float(number) / prefix[symbol]
if value > 1:
decimal_precision = 0
if value < 10:
decimal_precision = 2
elif value < 100:
decimal_precision = 1
if low_precision:
if symbol in 'MK':
decimal_precision = 0
else:
decimal_precision = min(1, decimal_precision)
elif symbol in 'K':
decimal_precision = 0
return '{:.{decimal}f}{symbol}'.format(value, decimal=decimal_precision, symbol=symbol)
return f'{number:.{decimal_precision}f}'
def string_value_to_float(s):
"""Convert a string with a value and an unit to a float.
Example:
@ -531,3 +591,124 @@ def atoi(text):
def natural_keys(text):
"""Return a text in a natural/human readable format."""
return [atoi(c) for c in re.split(r'(\d+)', text)]
def exit_after(seconds, default=None):
"""Exit the function if it takes more than 'seconds' seconds to complete.
In this case, return the value of 'default' (default: None)."""
def handler(q, func, args, kwargs):
q.put(func(*args, **kwargs))
def decorator(func):
if not LINUX:
return func
def wraps(*args, **kwargs):
try:
q = ctx_mp_fork.Queue()
except PermissionError:
# Manage an exception in Snap packages on Linux
# The strict mode prevents the use of multiprocessing.Queue()
# There is a "dirty" hack:
# https://forum.snapcraft.io/t/python-multiprocessing-permission-denied-in-strictly-confined-snap/15518/2
# But I prefer to just disable the timeout feature in this case
func(*args, **kwargs)
else:
p = ctx_mp_fork.Process(target=handler, args=(q, func, args, kwargs))
p.start()
p.join(timeout=seconds)
if not p.is_alive():
return q.get()
p.terminate()
p.join(timeout=0.1)
if p.is_alive():
# Kill in case the process doesn't terminate
# Happens in cases like broken NFS connections
p.kill()
return default
return wraps
return decorator
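# Usage sketch for exit_after(): on Linux the wrapped call runs in a forked
# process and is abandoned after `seconds`, returning `default` instead
# (useful for calls that can hang, e.g. on a broken NFS mount point).
# The function name and timeout below are illustrative only:
#
#   @exit_after(2, default=None)
#   def disk_usage(path):
#       import shutil
#       return shutil.disk_usage(path)
#
#   disk_usage('/mnt/nfs')  # returns None if the call does not finish within 2 seconds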
def split_esc(input_string, sep=None, maxsplit=-1, esc='\\'):
"""
Return a list of the substrings in the input_string, using sep as the separator char
and esc as the escape character.
sep
The separator used to split the input_string.
When set to None (the default value), will split on any whitespace
character (including \n \r \t \f and spaces) unless the character is escaped
and will discard empty strings from the result.
maxsplit
Maximum number of splits.
-1 (the default value) means no limit.
esc
The character used to escape the separator.
When set to None, this behaves equivalently to `str.split`.
Defaults to '\\\\' i.e. backslash.
Splitting starts at the front of the input_string and works to the end.
Note: escape characters in the substrings returned are removed. However, if
maxsplit is reached, escape characters in the remaining, unprocessed substring
are not removed, which allows split_esc to be called on it again.
"""
# Input validation
if not isinstance(input_string, str):
raise TypeError(f'must be str, not {input_string.__class__.__name__}')
str.split('', sep=sep, maxsplit=maxsplit) # Use str.split to validate sep and maxsplit
if esc is None:
return input_string.split(
sep=sep, maxsplit=maxsplit
) # Short circuit to default implementation if the escape character is None
if not isinstance(esc, str):
raise TypeError(f'must be str or None, not {esc.__class__.__name__}')
if len(esc) == 0:
raise ValueError('empty escape character')
if len(esc) > 1:
raise ValueError('escape must be a single character')
# Set up a simple state machine keeping track of whether we have seen an escape character
ret, esc_seen, i = [''], False, 0
while i < len(input_string) and len(ret) - 1 != maxsplit:
if not esc_seen:
if input_string[i] == esc:
# Consume the escape character and transition state
esc_seen = True
i += 1
elif sep is None and input_string[i].isspace():
# Consume as much whitespace as possible
n = 1
while i + n + 1 < len(input_string) and input_string[i + n : i + n + 1].isspace():
n += 1
ret.append('')
i += n
elif sep is not None and input_string[i : i + len(sep)] == sep:
# Consume the separator
ret.append('')
i += len(sep)
else:
# Otherwise just add the current char
ret[-1] += input_string[i]
i += 1
else:
# Add the current char and transition state back
ret[-1] += input_string[i]
esc_seen = False
i += 1
# Append any remaining string if we broke early because of maxsplit
if i < len(input_string):
ret[-1] += input_string[i:]
# If splitting on whitespace, discard empty strings from result
if sep is None:
ret = [sub for sub in ret if len(sub) > 0]
return ret
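
A quick sanity check of split_esc (importable from glances.globals once this change lands); the interface names are illustrative only:

from glances.globals import split_esc

print(split_esc('eth0,eth1', sep=','))       # ['eth0', 'eth1']
print(split_esc(r'web\,srv,db', sep=','))    # ['web,srv', 'db'] -- the escaped separator is kept
print(split_esc('a b   c'))                  # ['a', 'b', 'c'] -- whitespace split, empty strings dropped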

View File

@ -103,14 +103,14 @@ Examples of use:
"""
def __init__(self, args_begin_at=1):
def __init__(self):
"""Manage the command line arguments."""
self.init_glances(args_begin_at)
self.init_glances()
def init_glances(self, args_begin_at):
def init_glances(self):
"""Main method to init Glances."""
# Read the command line arguments or parse the one given in parameter (parser)
self.args = self.parse_args(args_begin_at)
self.args = self.parse_args()
# Load the configuration file, if it exists
# This function should be called after the parse_args
@ -152,6 +152,10 @@ Examples of use:
if not self.args.process_filter and not self.is_standalone():
logger.debug("Process filter is only available in standalone mode")
# Focus filter is only available in standalone mode
if not self.args.process_focus and not self.is_standalone():
logger.debug("Process focus is only available in standalone mode")
# Cursor option is only available in standalone mode
if not self.args.disable_cursor and not self.is_standalone():
logger.debug("Cursor is only available in standalone mode")
@ -379,7 +383,7 @@ Examples of use:
default=None,
type=str,
dest='export_process_filter',
help='set the export process filter (comman separated list of regular expression)',
help='set the export process filter (comma-separated list of regular expressions)',
)
# Client/Server option
parser.add_argument(
@ -496,6 +500,14 @@ Examples of use:
dest='process_filter',
help='set the process filter pattern (regular expression)',
)
# Process will focus on some process (comma-separated list of Glances filter)
parser.add_argument(
'--process-focus',
default=None,
type=str,
dest='process_focus',
help='set a process list to focus on (comma-separated list of Glances filter)',
)
parser.add_argument(
'--process-short-name',
action='store_true',
@ -550,7 +562,18 @@ Examples of use:
help='test memory leak (python 3.4 or higher needed)',
)
parser.add_argument(
'--api-doc', default=None, action='store_true', dest='stdout_apidoc', help='display fields descriptions'
'--api-doc',
default=None,
action='store_true',
dest='stdout_api_doc',
help='display Python API documentation',
)
parser.add_argument(
'--api-restful-doc',
default=None,
action='store_true',
dest='stdout_api_restful_doc',
help='display Restful API documentation',
)
if not WINDOWS:
parser.add_argument(
@ -582,6 +605,13 @@ Examples of use:
dest='diskio_iops',
help='show IO per second in the DiskIO plugin',
)
parser.add_argument(
'--diskio-latency',
action='store_true',
default=False,
dest='diskio_latency',
help='show IO latency in the DiskIO plugin',
)
parser.add_argument(
'--fahrenheit',
action='store_true',
@ -630,6 +660,22 @@ Examples of use:
default='',
help='strftime format string for displaying current date in standalone mode',
)
# Fetch
parser.add_argument(
'--fetch',
'--stdout-fetch',
action='store_true',
default=False,
dest='stdout_fetch',
help='display a (neo)fetch-like summary and exit',
)
parser.add_argument(
'--fetch-template',
'--stdout-fetch-template',
dest='fetch_template',
default='',
help='overwrite default fetch template file',
)
return parser
@ -689,7 +735,10 @@ Examples of use:
args.network_cumul = False
# Processlist is updated in processcount
if getattr(args, 'enable_processlist', False) or getattr(args, 'enable_programlist', False):
if getattr(args, 'disable_processcount', False):
logger.warning('Processcount is disabled, so processlist (updated by processcount) is also disabled')
disable(args, 'processlist')
elif getattr(args, 'enable_processlist', False) or getattr(args, 'enable_programlist', False):
enable(args, 'processcount')
# Set a default export_process_filter (with all process) when using the stdout mode
@ -787,6 +836,10 @@ Examples of use:
disable(args, 'memswap')
disable(args, 'load')
# Unicode => No separator
if args.disable_unicode:
args.enable_separator = False
# Memory leak
if getattr(args, 'memory_leak', False):
logger.info('Memory leak detection enabled')
@ -796,15 +849,18 @@ Examples of use:
args.time = 1
args.disable_history = True
# Unicode => No separator
if args.disable_unicode:
args.enable_separator = False
# Disable history if history_size is 0
if self.config.has_section('global'):
if self.config.get_int_value('global', 'history_size', default=1200) == 0:
args.disable_history = True
def parse_args(self, args_begin_at):
"""Parse command line arguments.
Glances args start at position args_begin_at.
"""
return self.init_args().parse_args(sys.argv[args_begin_at:])
# Display an information message if history is disabled
if args.disable_history:
logger.info("Stats history is disabled")
def parse_args(self):
"""Parse command line arguments."""
return self.init_args().parse_args(sys.argv[1:])
def check_mode_compatibility(self):
"""Check mode compatibility"""

View File

@ -13,11 +13,10 @@ import os
import pickle
import threading
from datetime import datetime, timedelta
from ssl import CertificateError
from glances import __version__
from glances.config import user_cache_dir
from glances.globals import HTTPError, URLError, nativestr, safe_makedirs, urlopen
from glances.globals import nativestr, safe_makedirs, urlopen
from glances.logger import logger
try:
@ -155,7 +154,7 @@ class Outdated:
try:
res = urlopen(PYPI_API_URL, timeout=3).read()
except (HTTPError, URLError, CertificateError) as e:
except Exception as e:
logger.debug(f"Cannot get Glances version from the PyPI RESTful API ({e})")
else:
self.data['latest_version'] = json.loads(nativestr(res))['info']['version']

View File

@ -29,8 +29,8 @@ class Bar:
size,
bar_char='|',
empty_char=' ',
pre_char='[',
post_char=']',
pre_char='',
post_char='',
unit_char='%',
display_value=True,
min_value=0,
@ -42,8 +42,8 @@ class Bar:
size (_type_): Bar size
bar_char (str, optional): Bar character. Defaults to '|'.
empty_char (str, optional): Empty character. Defaults to ' '.
pre_char (str, optional): Display this char before the bar. Defaults to '['.
post_char (str, optional): Display this char after the bar. Defaults to ']'.
pre_char (str, optional): Display this char before the bar. Defaults to ''.
post_char (str, optional): Display this char after the bar. Defaults to ''.
unit_char (str, optional): Unit char to be displayed. Defaults to '%'.
display_value (bool, optional): Do i need to display the value. Defaults to True.
min_value (int, optional): Minimum value. Defaults to 0.
@ -68,12 +68,10 @@ class Bar:
@property
def size(self, with_decoration=False):
# Return the bar size, with or without decoration
if with_decoration:
return self.__size
# Return the bar size
if self.__display_value:
return self.__size - 6
return None
return self.__size
@property
def percent(self):
@ -105,6 +103,9 @@ class Bar:
whole += 1
ret += self.__empty_char * int(self.size - whole)
# Add the post and pre chars
ret = f'{self.__pre_char}{ret}{self.__post_char}'
# Add the value
if self.__display_value:
if self.percent >= self.max_value:

View File

@ -51,7 +51,7 @@ class _GlancesCurses:
'a': {'sort_key': 'auto'},
'A': {'switch': 'disable_amps'},
'b': {'switch': 'byte'},
'B': {'switch': 'diskio_iops'},
'B': {'handler': '_handle_diskio_iops'},
'c': {'sort_key': 'cpu_percent'},
'C': {'switch': 'disable_cloud'},
'd': {'switch': 'disable_diskio'},
@ -69,6 +69,7 @@ class _GlancesCurses:
# 'k' > Kill selected process
'K': {'switch': 'disable_connections'},
'l': {'switch': 'disable_alert'},
'L': {'handler': '_handle_diskio_latency'},
'm': {'sort_key': 'memory_percent'},
'M': {'switch': 'reset_minmax_tag'},
'n': {'switch': 'disable_network'},
@ -92,8 +93,10 @@ class _GlancesCurses:
'z': {'handler': '_handle_disable_process'},
'+': {'handler': '_handle_increase_nice'},
'-': {'handler': '_handle_decrease_nice'},
# "<" (left arrow) navigation through process sort
# ">" (right arrow) navigation through process sort
# "<" (shift + left arrow) navigation through process sort
# ">" (shift + right arrow) navigation through process sort
# "<" (left arrow) scroll through process name
# ">" (right arrow) scroll through process name
# 'UP' > Up in the server list
# 'DOWN' > Down in the server list
}
@ -107,7 +110,7 @@ class _GlancesCurses:
# Define left sidebar
# This variable is used in the make webui task in order to generate the
# glances/outputs/static/js/uiconfig.json file for the web interface
# This lidt can also be overwritten by the configuration file ([outputs] left_menu option)
# This list can also be overwritten by the configuration file ([outputs] left_menu option)
_left_sidebar = [
'network',
'ports',
@ -161,8 +164,8 @@ class _GlancesCurses:
# Load configuration file
self.load_config(config)
# Init cursor
self._init_cursor()
# Init Curses cursor
self._init_curses_cursor()
# Init the colors
self.colors_list = GlancesColors(args).get()
@ -183,14 +186,21 @@ class _GlancesCurses:
# Init the process min/max reset
self.args.reset_minmax_tag = False
# Init cursor
# Init Glances cursor
self.args.cursor_position = 0
self.args.cursor_process_name_position = 0
# For the moment cursor only available in standalone mode
self.args.disable_cursor = not self.args.is_standalone
# Catch key pressed with non blocking mode
self.term_window.keypad(1)
self.term_window.nodelay(1)
self.pressedkey = -1
# Is this the end ?
self.is_end = False
# History tag
self._init_history()
@ -222,7 +232,7 @@ class _GlancesCurses:
self.reset_history_tag = False
def _init_cursor(self):
def _init_curses_cursor(self):
"""Init cursors."""
if hasattr(curses, 'noecho'):
@ -245,7 +255,6 @@ class _GlancesCurses:
pass
def get_key(self, window):
# TODO: Check issue #163
return window.getch()
def catch_actions_from_hotkey(self, hotkey):
@ -261,8 +270,10 @@ class _GlancesCurses:
{
self.pressedkey in {ord('e')} and not self.args.programs: self._handle_process_extended,
self.pressedkey in {ord('k')} and not self.args.disable_cursor: self._handle_kill_process,
self.pressedkey in {curses.KEY_LEFT}: self._handle_sort_left,
self.pressedkey in {curses.KEY_RIGHT}: self._handle_sort_right,
self.pressedkey in {curses.KEY_SLEFT}: self._handle_sort_left,
self.pressedkey in {curses.KEY_SRIGHT}: self._handle_sort_right,
self.pressedkey in {curses.KEY_LEFT}: self._handle_process_name_left,
self.pressedkey in {curses.KEY_RIGHT}: self._handle_process_name_right,
self.pressedkey in {curses.KEY_UP, 65} and not self.args.disable_cursor: self._handle_cursor_up,
self.pressedkey in {curses.KEY_DOWN, 66} and not self.args.disable_cursor: self._handle_cursor_down,
self.pressedkey in {curses.KEY_F5, 18}: self._handle_refresh,
@ -348,6 +359,13 @@ class _GlancesCurses:
def _handle_kill_process(self):
self.kill_process = not self.kill_process
def _handle_process_name_left(self):
if self.args.cursor_process_name_position > 0:
self.args.cursor_process_name_position -= 1
def _handle_process_name_right(self):
self.args.cursor_process_name_position += 1
def _handle_clean_logs(self):
glances_events.clean()
@ -361,6 +379,18 @@ class _GlancesCurses:
else:
glances_processes.enable()
def _handle_diskio_iops(self):
"""Switch between bytes/s and IOPS for Disk IO."""
self.args.diskio_iops = not self.args.diskio_iops
if self.args.diskio_iops:
self.args.diskio_latency = False
def _handle_diskio_latency(self):
"""Switch between bytes/s and latency for Disk IO."""
self.args.diskio_latency = not self.args.diskio_latency
if self.args.diskio_latency:
self.args.diskio_iops = False
def _handle_sort_left(self):
next_sort = (self.loop_position() - 1) % len(self._sort_loop)
glances_processes.set_sort_key(self._sort_loop[next_sort], False)
@ -382,6 +412,10 @@ class _GlancesCurses:
logger.info("Stop Glances client and return to the browser")
else:
logger.info(f"Stop Glances (keypressed: {self.pressedkey})")
# End the curses window
self.end()
# Exit the program
sys.exit(0)
def _handle_refresh(self):
glances_processes.reset_internal_cache()
@ -428,6 +462,7 @@ class _GlancesCurses:
curses.endwin()
except Exception:
pass
self.is_end = True
def init_line_column(self):
"""Init the line and column position for the curses interface."""
@ -1126,6 +1161,11 @@ class _GlancesCurses:
while not countdown.finished() and not isexitkey:
# Getkey
pressedkey = self.__catch_key(return_to_browser=return_to_browser)
if pressedkey == -1:
self.wait()
continue
isexitkey = pressedkey == ord('\x1b') or pressedkey == ord('q')
if pressedkey == curses.KEY_F5 or self.pressedkey == 18:
@ -1133,7 +1173,7 @@ class _GlancesCurses:
self.clear()
return isexitkey
if pressedkey in (curses.KEY_UP, 65, curses.KEY_DOWN, 66):
if pressedkey in (curses.KEY_UP, 65, curses.KEY_DOWN, 66, curses.KEY_LEFT, 68, curses.KEY_RIGHT, 67):
# Up or down key pressed, reset the countdown
# Better for user experience
countdown.reset()
@ -1204,10 +1244,17 @@ class _GlancesCurses:
class GlancesCursesStandalone(_GlancesCurses):
"""Class for the Glances curse standalone."""
# Default number of processes to be displayed is set to 50
glances_processes.max_processes = 50
class GlancesCursesClient(_GlancesCurses):
"""Class for the Glances curse client."""
# Default number of processes to be displayed is set to 50
# For the moment, cursor in client/server mode is not supported see #3221
glances_processes.max_processes = 50
class GlancesTextbox(Textbox):
def __init__(self, *args, **kwargs):

View File

@ -10,6 +10,7 @@
import curses
import math
import sys
from glances.logger import logger
from glances.outputs.glances_curses import _GlancesCurses
@ -49,7 +50,6 @@ class GlancesCursesBrowser(_GlancesCurses):
self._page_max = 0
self._page_max_lines = 0
self.is_end = False
self._revesed_sorting = False
self._stats_list = None
@ -87,7 +87,7 @@ class GlancesCursesBrowser(_GlancesCurses):
counts[color] = counts.get(color, 0) + 1
result = ''
for key in counts.keys():
for key in counts:
result += key + ': ' + str(counts[key]) + ' '
return result
@ -157,8 +157,7 @@ class GlancesCursesBrowser(_GlancesCurses):
# 'ESC'|'q' > Quit
self.end()
logger.info("Stop Glances client browser")
# sys.exit(0)
self.is_end = True
sys.exit(0)
elif self.pressedkey == 10:
# 'ENTER' > Run Glances on the selected server
self.active_server = self._current_page * self._page_max_lines + self.cursor_position
@ -327,10 +326,15 @@ class GlancesCursesBrowser(_GlancesCurses):
y += 1
# Second line (for item/key)
for k, v in column_def.items():
if xc >= screen_x or y >= screen_y or v is None:
continue
k_split = k.split('_')
if xc < screen_x and y < screen_y and v is not None:
self.term_window.addnstr(y, xc, ' '.join(k_split[1:]).upper(), screen_x - x, self.colors_list['BOLD'])
xc += v + self.space_between_column
if len(k_split) == 1:
header_str = k_split[0]
else:
header_str = ' '.join(k_split[1:])
self.term_window.addnstr(y, xc, header_str.upper(), screen_x - x, self.colors_list['BOLD'])
xc += v + self.space_between_column
y += 1
# If a server has been deleted from the list...

View File

@ -6,21 +6,21 @@
# SPDX-License-Identifier: LGPL-3.0-only
#
"""RestFull API interface class."""
"""RestFul API interface class."""
import os
import socket
import sys
import tempfile
import webbrowser
from typing import Annotated, Any, Union
from urllib.parse import urljoin
from glances import __apiversion__, __version__
from glances.events_list import glances_events
from glances.globals import json_dumps, weak_lru_cache
from glances.globals import json_dumps
from glances.logger import logger
from glances.password import GlancesPassword
from glances.plugins.plugin.dag import get_plugin_dependencies
from glances.processes import glances_processes
from glances.servers_list import GlancesServersList
from glances.servers_list_dynamic import GlancesAutoDiscoverClient
@ -46,7 +46,6 @@ try:
except ImportError:
logger.critical('Uvicorn import error. Glances cannot start in web server mode.')
sys.exit(2)
import builtins
import contextlib
import threading
import time
@ -120,7 +119,7 @@ class GlancesRestfulApi:
self.load_config(config)
# Set the bind URL
self.bind_url = urljoin(f'http://{self.args.bind_address}:{self.args.port}/', self.url_prefix)
self.bind_url = urljoin(f'{self.protocol}://{self.args.bind_address}:{self.args.port}/', self.url_prefix)
# FastAPI Init
if self.args.password:
@ -182,11 +181,23 @@ class GlancesRestfulApi:
if self.url_prefix != '':
self.url_prefix = self.url_prefix.rstrip('/')
logger.debug(f'URL prefix: {self.url_prefix}')
# SSL
self.ssl_keyfile = config.get_value('outputs', 'ssl_keyfile', default=None)
self.ssl_keyfile_password = config.get_value('outputs', 'ssl_keyfile_password', default=None)
self.ssl_certfile = config.get_value('outputs', 'ssl_certfile', default=None)
self.protocol = 'https' if self.is_ssl() else 'http'
logger.debug(f"Protocol for Resful API and WebUI: {self.protocol}")
def __update_stats(self):
def is_ssl(self):
"""Return true if the Glances server use SSL."""
return self.ssl_keyfile is not None and self.ssl_certfile is not None
def __update_stats(self, plugins_list_to_update=None):
# Never update more than 1 time per cached_time
if self.timer.finished():
self.stats.update()
# Also update if specific plugins are requested
# In this case, lru_cache will handle the stat's update frequency
if self.timer.finished() or plugins_list_to_update:
self.stats.update(plugins_list_to_update=plugins_list_to_update)
self.timer = Timer(self.args.cached_time)
def __update_servers_list(self):
@ -207,6 +218,16 @@ class GlancesRestfulApi:
status.HTTP_401_UNAUTHORIZED, "Incorrect username or password", {"WWW-Authenticate": "Basic"}
)
def _logo(self):
return rf"""
_____ _
/ ____| |
| | __| | __ _ _ __ ___ ___ ___
| | |_ | |/ _` | '_ \ / __/ _ \/ __|
| |__| | | (_| | | | | (_| __/\__
\_____|_|\__,_|_| |_|\___\___||___/ {__version__}
"""
def _router(self) -> APIRouter:
"""Define a custom router for Glances path."""
base_path = f'/api/{self.API_VERSION}'
@ -266,6 +287,9 @@ class GlancesRestfulApi:
for path, endpoint in route_mapping.items():
router.add_api_route(path, endpoint)
# Logo
print(self._logo())
# Browser WEBUI
if hasattr(self.args, 'browser') and self.args.browser:
# Template for the root browser.html file
@ -322,7 +346,12 @@ class GlancesRestfulApi:
def _start_uvicorn(self):
# Run the Uvicorn Web server
uvicorn_config = uvicorn.Config(
self._app, host=self.args.bind_address, port=self.args.port, access_log=self.args.debug
self._app,
host=self.args.bind_address,
port=self.args.port,
access_log=self.args.debug,
ssl_keyfile=self.ssl_keyfile,
ssl_certfile=self.ssl_certfile,
)
try:
self.uvicorn_server = GlancesUvicornServer(config=uvicorn_config)
@ -436,7 +465,8 @@ class GlancesRestfulApi:
HTTP/1.1 404 Not Found
"""
# Update the stat
self.__update_stats()
# TODO: Why ??? Try to comment it
# self.__update_stats()
try:
plist = self.plugins_list
@ -456,7 +486,7 @@ class GlancesRestfulApi:
return GlancesJSONResponse(self.servers_list.get_servers_list() if self.servers_list else [])
@weak_lru_cache(maxsize=1, ttl=1)
# Commenting this solves an issue on Home Assistant. See #3238
def _api_all(self):
"""Glances API RESTful implementation.
@ -465,13 +495,6 @@ class GlancesRestfulApi:
HTTP/400 if plugin is not found
HTTP/404 if others error
"""
if self.args.debug:
fname = os.path.join(tempfile.gettempdir(), 'glances-debug.json')
try:
with builtins.open(fname) as f:
return f.read()
except OSError:
logger.debug(f"Debug file ({fname}) not found")
# Update the stat
self.__update_stats()
@ -485,7 +508,6 @@ class GlancesRestfulApi:
return GlancesJSONResponse(statval)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_all_limits(self):
"""Glances API RESTful implementation.
@ -502,7 +524,6 @@ class GlancesRestfulApi:
return GlancesJSONResponse(limits)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_all_views(self):
"""Glances API RESTful implementation.
@ -519,7 +540,6 @@ class GlancesRestfulApi:
return GlancesJSONResponse(limits)
@weak_lru_cache(maxsize=1, ttl=1)
def _api(self, plugin: str):
"""Glances API RESTful implementation.
@ -531,7 +551,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat ID
@ -549,7 +569,6 @@ class GlancesRestfulApi:
status.HTTP_400_BAD_REQUEST, f"Unknown plugin {plugin} (available plugins: {self.plugins_list})"
)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_top(self, plugin: str, nb: int = 0):
"""Glances API RESTful implementation.
@ -563,7 +582,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat ID
@ -577,7 +596,6 @@ class GlancesRestfulApi:
return GlancesJSONResponse(statval)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_history(self, plugin: str, nb: int = 0):
"""Glances API RESTful implementation.
@ -590,7 +608,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat ID
@ -600,7 +618,6 @@ class GlancesRestfulApi:
return statval
@weak_lru_cache(maxsize=1, ttl=1)
def _api_limits(self, plugin: str):
"""Glances API RESTful implementation.
@ -619,7 +636,6 @@ class GlancesRestfulApi:
return GlancesJSONResponse(ret)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_views(self, plugin: str):
"""Glances API RESTful implementation.
@ -652,7 +668,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat views
@ -677,7 +693,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat views
@ -702,7 +718,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat views
@ -726,7 +742,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat views
@ -751,7 +767,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat history
@ -810,7 +826,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value

View File

@ -48,7 +48,7 @@ class GlancesStdout:
def end(self):
pass
def update(self, stats, duration=3):
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
"""Display stats to stdout.
Refresh every duration second.

View File

@ -0,0 +1,286 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Generate Glances Python API documentation."""
from pprint import pformat
from glances import api
APIDOC_HEADER = """\
.. _api:
Python API documentation
========================
This documentation describes the Glances Python API.
Note: This API is only available in Glances 4.4.0 or higher.
"""
def printtab(s, indent=' '):
print(indent + s.replace('\n', '\n' + indent))
def print_tldr(gl):
"""Print the TL;DR section of the API documentation."""
sub_title = 'TL;DR'
print(sub_title)
print('-' * len(sub_title))
print('')
print('You can access the Glances API by importing the `glances.api` module and creating an')
print('instance of the `GlancesAPI` class. This instance provides access to all Glances plugins')
print('and their fields. For example, to access the CPU plugin and its total field, you can')
print('use the following code:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> from glances import api')
printtab('>>> gl = api.GlancesAPI()')
printtab('>>> gl.cpu')
printtab(f'{pformat(gl.cpu.stats)}')
printtab('>>> gl.cpu.get("total")')
printtab(f'{gl.cpu.get("total")}')
printtab('>>> gl.mem.get("used")')
printtab(f'{gl.mem.get("used")}')
printtab('>>> gl.auto_unit(gl.mem.get("used"))')
printtab(f'{gl.auto_unit(gl.mem.get("used"))}')
print('')
print('If the stats return a list of items (like network interfaces or processes), you can')
print('access them by their name:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.network.keys()')
printtab(f'{gl.network.keys()}')
printtab(f'>>> gl.network["{gl.network.keys()[0]}"]')
printtab(f'{pformat(gl.network[gl.network.keys()[0]])}')
print('')
def print_init_api(gl):
sub_title = 'Init Glances Python API'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Init the Glances API:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> from glances import api')
printtab('>>> gl = api.GlancesAPI()')
print('')
def print_plugins_list(gl):
sub_title = 'Get Glances plugins list'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Get the plugins list:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.plugins()')
printtab(f'{gl.plugins()}')
print('')
def print_plugin(gl, plugin):
"""Print the details of a single plugin."""
sub_title = f'Glances {plugin}'
print(sub_title)
print('-' * len(sub_title))
print('')
stats_obj = gl.__getattr__(plugin)
print(f'{plugin.capitalize()} stats:')
print('')
print('.. code-block:: python')
print('')
printtab(f'>>> type(gl.{plugin})')
printtab(f'{type(stats_obj)}')
if len(stats_obj.keys()) > 0 and isinstance(stats_obj[stats_obj.keys()[0]], dict):
printtab(f'>>> gl.{plugin}')
printtab(f'Return a dict of dict with key=<{stats_obj[stats_obj.keys()[0]]["key"]}>')
printtab(f'>>> gl.{plugin}.keys()')
printtab(f'{stats_obj.keys()}')
printtab(f'>>> gl.{plugin}.get("{stats_obj.keys()[0]}")')
printtab(f'{pformat(stats_obj[stats_obj.keys()[0]])}')
else:
printtab(f'>>> gl.{plugin}')
printtab(f'{pformat(stats_obj.stats)}')
if len(stats_obj.keys()) > 0:
printtab(f'>>> gl.{plugin}.keys()')
printtab(f'{stats_obj.keys()}')
printtab(f'>>> gl.{plugin}.get("{stats_obj.keys()[0]}")')
printtab(f'{pformat(stats_obj[stats_obj.keys()[0]])}')
print('')
if stats_obj.fields_description is not None:
print(f'{plugin.capitalize()} fields description:')
print('')
for field, description in stats_obj.fields_description.items():
print(f'* {field}: {description["description"]}')
print('')
print(f'{plugin.capitalize()} limits:')
print('')
print('.. code-block:: python')
print('')
printtab(f'>>> gl.{plugin}.limits')
printtab(f'{pformat(gl.__getattr__(plugin).limits)}')
print('')
def print_plugins(gl):
"""Print the details of all plugins."""
for plugin in [p for p in gl.plugins() if p not in ['help', 'programlist']]:
print_plugin(gl, plugin)
def print_auto_unit(gl):
sub_title = 'Use auto_unit to display a human-readable string with the unit'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Use auto_unit() function to generate a human-readable string with the unit:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.mem.get("used")')
printtab(f'{gl.mem.get("used")}')
print('')
printtab('>>> gl.auto_unit(gl.mem.get("used"))')
printtab(f'{gl.auto_unit(gl.mem.get("used"))}')
print('')
print("""
Args:
number (float or int): The numeric value to be converted.
low_precision (bool, optional): If True, use lower precision for the output. Defaults to False.
min_symbol (str, optional): The minimum unit symbol to use (e.g., 'K' for kilo). Defaults to 'K'.
none_symbol (str, optional): The symbol to display if the number is None. Defaults to '-'.
Returns:
str: A human-readable string representation of the number with units.
""")
def print_bar(gl):
sub_title = 'Use bar() to display a stat as a bar'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Use the bar() function to generate a bar:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.bar(gl.mem["percent"])')
printtab(f'{gl.bar(gl.mem.get("percent"))}')
print('')
print("""
Args:
value (float): The percentage value to represent in the bar (typically between 0 and 100).
size (int, optional): The total length of the bar in characters. Defaults to 18.
bar_char (str, optional): The character used to represent the filled portion of the bar. Defaults to ''.
empty_char (str, optional): The character used to represent the empty portion of the bar. Defaults to ''.
pre_char (str, optional): A string to prepend to the bar. Defaults to ''.
post_char (str, optional): A string to append to the bar. Defaults to ''.
Returns:
str: A string representing the progress bar.
""")
def print_top_process(gl):
sub_title = 'Use top_process() to display the top process list'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Use the top_process() function to generate a list of top processes sorted by CPU or MEM usage:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.top_process()')
printtab(f'{gl.top_process()}')
print('')
print("""
Args:
limit (int, optional): The maximum number of top processes to return. Defaults to 3.
sorted_by (str, optional): The primary key to sort processes by (e.g., 'cpu_percent').
Defaults to 'cpu_percent'.
sorted_by_secondary (str, optional): The secondary key to sort processes by if primary keys are equal
(e.g., 'memory_percent'). Defaults to 'memory_percent'.
Returns:
list: A list of dictionaries representing the top processes, excluding those with 'glances' in their
command line.
Note:
The 'glances' process is excluded from the returned list to avoid self-generated CPU load affecting
the results.
""")
class GlancesStdoutApiDoc:
"""This class manages the fields description display."""
def __init__(self, config=None, args=None):
# Init
self.gl = api.GlancesAPI()
def end(self):
pass
def update(self, stats, duration=1):
"""Display issue"""
# Display header
print(APIDOC_HEADER)
# Display TL;DR section
print_tldr(self.gl)
# Init the API
print_init_api(self.gl)
# Display plugins list
print_plugins_list(self.gl)
# Loop over plugins
print_plugins(self.gl)
# Others helpers
print_auto_unit(self.gl)
print_bar(self.gl)
print_top_process(self.gl)
# Return True to exit directly (no refresh)
return True

View File

@ -6,33 +6,32 @@
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Fields description interface class."""
"""Generate Glances Restful API documentation."""
import json
import time
from pprint import pformat
from glances import __apiversion__
from glances.globals import iteritems
from glances.logger import logger
API_URL = f"http://localhost:61208/api/{__apiversion__}"
APIDOC_HEADER = f"""\
.. _api:
.. _api_restful:
API (Restfull/JSON) documentation
=================================
Restful/JSON API documentation
==============================
This documentation describes the Glances API version {__apiversion__} (Restfull/JSON) interface.
This documentation describes the Glances API version {__apiversion__} (Restful/JSON) interface.
An OpenAPI specification file is available at:
``https://raw.githubusercontent.com/nicolargo/glances/refs/heads/develop/docs/openapi.json``
``https://raw.githubusercontent.com/nicolargo/glances/refs/heads/develop/docs/api/openapi.json``
Run the Glances API server
--------------------------
The Glances Restfull/API server could be ran using the following command line:
The Glances Restful/API server can be run using the following command line:
.. code-block:: bash
@ -136,7 +135,7 @@ def print_plugin_description(plugin, stat):
print('Fields descriptions:')
print('')
time_since_update = False
for field, description in iteritems(stat.fields_description):
for field, description in stat.fields_description.items():
print(
'* **{}**: {} (unit is *{}*)'.format(
field,
@ -354,7 +353,7 @@ def print_plugin_post_events():
print('')
class GlancesStdoutApiDoc:
class GlancesStdoutApiRestfulDoc:
"""This class manages the fields description display."""
def __init__(self, config=None, args=None):

View File

@ -55,12 +55,12 @@ class GlancesStdoutCsv:
line += f'{plugin}.{attribute}{self.separator}'
else:
if isinstance(stat, dict):
for k in stat.keys():
for k in stat:
line += f'{plugin}.{str(k)}{self.separator}'
elif isinstance(stat, list):
for i in stat:
if isinstance(i, dict) and 'key' in i:
for k in i.keys():
for k in i:
line += '{}.{}.{}{}'.format(plugin, str(i[i['key']]), str(k), self.separator)
else:
line += f'{plugin}{self.separator}'
@ -87,7 +87,7 @@ class GlancesStdoutCsv:
return line
def update(self, stats, duration=3):
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
"""Display stats to stdout.
Refresh every duration second.

View File

@ -0,0 +1,87 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Fetch mode interface class."""
import jinja2
from glances import api
from glances.logger import logger
DEFAULT_FETCH_TEMPLATE = """
{{ gl.system['hostname'] }}{{ ' | ' + gl.ip['address'] if gl.ip['address'] else '' }} | Uptime: {{ gl.uptime }}
{{ gl.system['hr_name'] }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }}/min1 |\
{{ '%0.2f'| format(gl.load['min5']) }}/min5 |\
{{ '%0.2f'| format(gl.load['min15']) }}/min15
CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} /\
{{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}\
💾 {% if loop.index == 1 %}DISK{% else %} {% endif %}\
{{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} /\
{{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}\
{% for net in gl.network.keys() %}\
📡 {% if loop.index == 1 %}NET{% else %} {% endif %}\
{{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s\
{{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}\
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}\
{{ loop.index }} {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }}\
{{ process['cpu_percent'] }}% CPU\
{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }}\
🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}\
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}\
{{ loop.index }} {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }}\
🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM\
{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }}\
{{ process['cpu_percent'] }}% CPU
{% endfor %}\
"""
class GlancesStdoutFetch:
"""This class manages the Stdout JSON display."""
def __init__(self, config=None, args=None):
# Init
self.config = config
self.args = args
self.gl = api.GlancesAPI(self.config, self.args)
def end(self):
pass
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
"""Display fetch from the template file to stdout."""
if self.args.fetch_template == "":
fetch_template = DEFAULT_FETCH_TEMPLATE
else:
logger.info("Using fetch template file: " + self.args.fetch_template)
# Load the template from the file given in the self.args.fetch_template argument
with open(self.args.fetch_template) as f:
fetch_template = f.read()
# Create a Jinja2 environment
jinja_env = jinja2.Environment(loader=jinja2.BaseLoader())
template = jinja_env.from_string(fetch_template)
output = template.render(gl=self.gl)
print(output)
# Return True to exit directly (no refresh)
return True
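# Hedged usage sketch: a minimal custom template rendered through the same
# Jinja2 pipeline as update() above. The command-line flags are assumptions
# inferred from self.args.fetch_template and are not confirmed by this diff.
#
#   $ cat minimal.tpl
#   {{ gl.system['hostname'] }} | CPU {{ gl.cpu['total'] }}% | MEM {{ gl.mem['percent'] }}%
#
#   $ glances --fetch --fetch-template minimal.tpl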

View File

@ -98,7 +98,7 @@ class GlancesStdoutIssue:
stat = stats.get_plugin(plugin).get_export()
# Hide private information
if plugin == 'ip':
for key in stat.keys():
for key in stat:
stat[key] = '***'
except Exception as e:
stat_error = e

View File

@ -34,19 +34,25 @@ class GlancesStdoutJson:
def end(self):
pass
def update(self, stats, duration=3):
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
"""Display stats in JSON format to stdout.
Refresh every duration second.
"""
all_in_json = '{'
plugins_list_json = []
for plugin in self.plugins_list:
# Check if the plugin exists and is enabled
if plugin in stats.getPluginsList() and stats.get_plugin(plugin).is_enabled():
stat = stats.get_plugin(plugin).get_json()
plugins_list_json.append(f'"{plugin}": {stats.get_plugin(plugin).get_json().decode("utf-8")}')
else:
continue
# Display stats
printandflush(f'{plugin}: {stat.decode()}')
# Join all plugins in a single JSON object
all_in_json += ', '.join(plugins_list_json)
all_in_json += '}'
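# Hedged illustration of the single-line payload built above (keys depend on the
# plugins enabled at runtime; values are examples only):
#   {"cpu": {"total": 12.3, ...}, "mem": {"percent": 42.0, ...}, "network": [...]}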
# Display stats
printandflush(all_in_json)
# Wait until next refresh
if duration > 0:

View File

@ -17,6 +17,7 @@ _unicode_message = {
'PROCESS_SELECTOR': ['>', '>'],
'MEDIUM_LINE': ['\u2500', ''],
'LOW_LINE': ['\u2581', '_'],
'THREE_DOTS': ['\u2026', '...'],
}

View File

@ -1,10 +1,10 @@
module.exports = {
printWidth: 100,
arrowParens: 'always',
bracketSpacing: true,
semi: true,
singleQuote: true,
tabWidth: 4,
trailingComma: 'none',
useTabs: false
printWidth: 100,
arrowParens: "always",
bracketSpacing: true,
semi: true,
singleQuote: true,
tabWidth: 4,
trailingComma: "none",
useTabs: false,
};

View File

@ -33,6 +33,7 @@ hash -r
You must run the following command from the `glances/outputs/static/` directory.
```bash
.venv/bin/python ./generate_webui_conf.py > ./glances/outputs/static/js/uiconfig.json
cd glances/outputs/static/
```

View File

@ -169,10 +169,10 @@ body {
}
.button {
color: #99CCFF; /* Bleu clair high-tech */
background: rgba(0, 0, 0, 0.4); /* Fond légèrement transparent */
border: 1px solid #99CCFF; /* Bordure discrète */
padding: 5px 10px;
color: #99CCFF;
background: rgba(0, 0, 0, 0.4);
border: 1px solid #99CCFF;
padding: 1px 5px;
border-radius: 5px;
letter-spacing: 1px;
cursor: pointer;
@ -182,14 +182,14 @@ body {
}
.button:hover {
background: rgba(153, 204, 255, 0.15); /* Légère coloration au survol */
background: rgba(183, 214, 255, 0.30);
border-color: #B0D0FF;
color: #B0D0FF;
}
.button:active {
transform: scale(0.95); /* Légère réduction pour effet de pression */
box-shadow: 0 0 8px rgba(153, 204, 255, 0.5); /* Flash léger */
transform: scale(0.95);
box-shadow: 0 0 8px rgba(153, 204, 255, 0.5);
}
.frequency {
@ -413,6 +413,8 @@ body {
#processlist {
overflow-y: auto;
height: 600px;
margin-top: 1em;
.table {
margin-bottom: 1em;
}

View File

@ -1,29 +1,29 @@
import eslint from '@eslint/js';
import eslintConfigPrettier from 'eslint-config-prettier';
import eslintPluginVue from 'eslint-plugin-vue';
import globals from 'globals';
import typescriptEslint from 'typescript-eslint';
import eslint from "@eslint/js";
import eslintConfigPrettier from "eslint-config-prettier";
import eslintPluginVue from "eslint-plugin-vue";
import globals from "globals";
import typescriptEslint from "typescript-eslint";
export default typescriptEslint.config(
{ ignores: ['*.d.ts', '**/coverage', '**/dist'] },
{
extends: [
eslint.configs.recommended,
...typescriptEslint.configs.recommended,
...eslintPluginVue.configs['flat/recommended'],
],
files: ['**/*.{ts,vue}'],
languageOptions: {
ecmaVersion: 'latest',
sourceType: 'module',
globals: globals.browser,
parserOptions: {
parser: typescriptEslint.parser,
},
},
rules: {
// your rules
},
},
eslintConfigPrettier
{ ignores: ["*.d.ts", "**/coverage", "**/dist"] },
{
extends: [
eslint.configs.recommended,
...typescriptEslint.configs.recommended,
...eslintPluginVue.configs["flat/recommended"],
],
files: ["**/*.{ts,vue}"],
languageOptions: {
ecmaVersion: "latest",
sourceType: "module",
globals: globals.browser,
parserOptions: {
parser: typescriptEslint.parser,
},
},
rules: {
// your rules
},
},
eslintConfigPrettier,
);

View File

@ -19,10 +19,10 @@
<div v-if="!args.disable_ip" class="d-none d-lg-block"><glances-plugin-ip
:data="data"></glances-plugin-ip>
</div>
<div v-if="!args.disable_now" class="d-none d-xl-block"><glances-plugin-now
:data="data"></glances-plugin-now></div>
<div v-if="!args.disable_uptime" class="d-none d-md-block"><glances-plugin-uptime
:data="data"></glances-plugin-uptime></div>
<div v-if="!args.disable_now" class="d-none d-xl-block"><glances-plugin-now
:data="data"></glances-plugin-now></div>
</div>
</div>
<div class="d-flex d-none d-sm-block">
@ -90,313 +90,335 @@
</template>
<script>
import hotkeys from 'hotkeys-js';
import { GlancesStats } from './services.js';
import { store } from './store.js';
import hotkeys from "hotkeys-js";
import GlancesHelp from "./components/help.vue";
import GlancesPluginAlert from "./components/plugin-alert.vue";
import GlancesPluginCloud from "./components/plugin-cloud.vue";
import GlancesPluginConnections from "./components/plugin-connections.vue";
import GlancesPluginContainers from "./components/plugin-containers.vue";
import GlancesPluginCpu from "./components/plugin-cpu.vue";
import GlancesPluginDiskio from "./components/plugin-diskio.vue";
import GlancesPluginFolders from "./components/plugin-folders.vue";
import GlancesPluginFs from "./components/plugin-fs.vue";
import GlancesPluginGpu from "./components/plugin-gpu.vue";
import GlancesPluginHostname from "./components/plugin-hostname.vue";
import GlancesPluginIp from "./components/plugin-ip.vue";
import GlancesPluginIrq from "./components/plugin-irq.vue";
import GlancesPluginLoad from "./components/plugin-load.vue";
import GlancesPluginMem from "./components/plugin-mem.vue";
import GlancesPluginMemswap from "./components/plugin-memswap.vue";
import GlancesPluginNetwork from "./components/plugin-network.vue";
import GlancesPluginNow from "./components/plugin-now.vue";
import GlancesPluginPercpu from "./components/plugin-percpu.vue";
import GlancesPluginPorts from "./components/plugin-ports.vue";
import GlancesPluginProcess from "./components/plugin-process.vue";
import GlancesPluginQuicklook from "./components/plugin-quicklook.vue";
import GlancesPluginRaid from "./components/plugin-raid.vue";
import GlancesPluginSensors from "./components/plugin-sensors.vue";
import GlancesPluginSmart from "./components/plugin-smart.vue";
import GlancesPluginSystem from "./components/plugin-system.vue";
import GlancesPluginUptime from "./components/plugin-uptime.vue";
import GlancesPluginVms from "./components/plugin-vms.vue";
import GlancesPluginWifi from "./components/plugin-wifi.vue";
import { GlancesStats } from "./services.js";
import { store } from "./store.js";
import GlancesHelp from './components/help.vue';
import GlancesPluginAlert from './components/plugin-alert.vue';
import GlancesPluginCloud from './components/plugin-cloud.vue';
import GlancesPluginConnections from './components/plugin-connections.vue';
import GlancesPluginCpu from './components/plugin-cpu.vue';
import GlancesPluginDiskio from './components/plugin-diskio.vue';
import GlancesPluginContainers from './components/plugin-containers.vue';
import GlancesPluginFolders from './components/plugin-folders.vue';
import GlancesPluginFs from './components/plugin-fs.vue';
import GlancesPluginGpu from './components/plugin-gpu.vue';
import GlancesPluginHostname from './components/plugin-hostname.vue';
import GlancesPluginIp from './components/plugin-ip.vue';
import GlancesPluginIrq from './components/plugin-irq.vue';
import GlancesPluginLoad from './components/plugin-load.vue';
import GlancesPluginMem from './components/plugin-mem.vue';
import GlancesPluginMemswap from './components/plugin-memswap.vue';
import GlancesPluginNetwork from './components/plugin-network.vue';
import GlancesPluginNow from './components/plugin-now.vue';
import GlancesPluginPercpu from './components/plugin-percpu.vue';
import GlancesPluginPorts from './components/plugin-ports.vue';
import GlancesPluginProcess from './components/plugin-process.vue';
import GlancesPluginQuicklook from './components/plugin-quicklook.vue';
import GlancesPluginRaid from './components/plugin-raid.vue';
import GlancesPluginSmart from './components/plugin-smart.vue';
import GlancesPluginSensors from './components/plugin-sensors.vue';
import GlancesPluginSystem from './components/plugin-system.vue';
import GlancesPluginUptime from './components/plugin-uptime.vue';
import GlancesPluginVms from './components/plugin-vms.vue';
import GlancesPluginWifi from './components/plugin-wifi.vue';
import uiconfig from './uiconfig.json';
import uiconfig from "./uiconfig.json";
export default {
components: {
GlancesHelp,
GlancesPluginAlert,
GlancesPluginCloud,
GlancesPluginConnections,
GlancesPluginCpu,
GlancesPluginDiskio,
GlancesPluginContainers,
GlancesPluginFolders,
GlancesPluginFs,
GlancesPluginGpu,
GlancesPluginHostname,
GlancesPluginIp,
GlancesPluginIrq,
GlancesPluginLoad,
GlancesPluginMem,
GlancesPluginMemswap,
GlancesPluginNetwork,
GlancesPluginNow,
GlancesPluginPercpu,
GlancesPluginPorts,
GlancesPluginProcess,
GlancesPluginQuicklook,
GlancesPluginRaid,
GlancesPluginSensors,
GlancesPluginSmart,
GlancesPluginSystem,
GlancesPluginUptime,
GlancesPluginVms,
GlancesPluginWifi
},
data() {
return {
store
};
},
computed: {
args() {
return this.store.args || {};
},
config() {
return this.store.config || {};
},
data() {
return this.store.data || {};
},
dataLoaded() {
return this.store.data !== undefined;
},
hasGpu() {
return this.store.data.stats.gpu.length > 0;
},
isLinux() {
return this.store.data.isLinux;
},
title() {
const { data } = this;
const title = (data.stats && data.stats.system && data.stats.system.hostname) || '';
return title ? `${title} - Glances` : 'Glances';
},
leftMenu() {
return this.config.outputs !== undefined && this.config.outputs.left_menu !== undefined
? this.config.outputs.left_menu.split(',')
: uiconfig.leftMenu;
}
},
watch: {
title() {
if (document) {
document.title = this.title;
}
}
},
mounted() {
const GLANCES = window.__GLANCES__ || {};
const refreshTime = isFinite(GLANCES['refresh-time'])
? parseInt(GLANCES['refresh-time'], 10)
: undefined;
GlancesStats.init(refreshTime);
this.setupHotKeys();
},
beforeUnmount() {
hotkeys.unbind();
},
methods: {
setupHotKeys() {
// a => Sort processes/containers automatically
hotkeys('a', () => {
this.store.args.sort_processes_key = null;
});
components: {
GlancesHelp,
GlancesPluginAlert,
GlancesPluginCloud,
GlancesPluginConnections,
GlancesPluginCpu,
GlancesPluginDiskio,
GlancesPluginContainers,
GlancesPluginFolders,
GlancesPluginFs,
GlancesPluginGpu,
GlancesPluginHostname,
GlancesPluginIp,
GlancesPluginIrq,
GlancesPluginLoad,
GlancesPluginMem,
GlancesPluginMemswap,
GlancesPluginNetwork,
GlancesPluginNow,
GlancesPluginPercpu,
GlancesPluginPorts,
GlancesPluginProcess,
GlancesPluginQuicklook,
GlancesPluginRaid,
GlancesPluginSensors,
GlancesPluginSmart,
GlancesPluginSystem,
GlancesPluginUptime,
GlancesPluginVms,
GlancesPluginWifi,
},
data() {
return {
store,
};
},
computed: {
args() {
return this.store.args || {};
},
config() {
return this.store.config || {};
},
data() {
return this.store.data || {};
},
dataLoaded() {
return this.store.data !== undefined;
},
hasGpu() {
return this.store.data.stats.gpu.length > 0;
},
isLinux() {
return this.store.data.isLinux;
},
title() {
const { data } = this;
const title =
(data.stats && data.stats.system && data.stats.system.hostname) || "";
return title ? `${title} - Glances` : "Glances";
},
topMenu() {
return this.config.outputs !== undefined &&
this.config.outputs.top_menu !== undefined
? this.config.outputs.top_menu.split(",")
: uiconfig.topMenu;
},
leftMenu() {
return this.config.outputs !== undefined &&
this.config.outputs.left_menu !== undefined
? this.config.outputs.left_menu.split(",")
: uiconfig.leftMenu;
},
},
watch: {
title() {
if (document) {
document.title = this.title;
}
},
},
mounted() {
const GLANCES = window.__GLANCES__ || {};
const refreshTime = isFinite(GLANCES["refresh-time"])
? parseInt(GLANCES["refresh-time"], 10)
: undefined;
GlancesStats.init(refreshTime);
this.setupHotKeys();
},
beforeUnmount() {
hotkeys.unbind();
},
methods: {
setupHotKeys() {
// a => Sort processes/containers automatically
hotkeys("a", () => {
this.store.args.sort_processes_key = null;
});
// c => Sort processes/containers by CPU%
hotkeys('c', () => {
this.store.args.sort_processes_key = 'cpu_percent';
});
// c => Sort processes/containers by CPU%
hotkeys("c", () => {
this.store.args.sort_processes_key = "cpu_percent";
});
// m => Sort processes/containers by MEM%
hotkeys('m', () => {
this.store.args.sort_processes_key = 'memory_percent';
});
// m => Sort processes/containers by MEM%
hotkeys("m", () => {
this.store.args.sort_processes_key = "memory_percent";
});
// u => Sort processes/containers by user
hotkeys('u', () => {
this.store.args.sort_processes_key = 'username';
});
// u => Sort processes/containers by user
hotkeys("u", () => {
this.store.args.sort_processes_key = "username";
});
// p => Sort processes/containers by name
hotkeys('p', () => {
this.store.args.sort_processes_key = 'name';
});
// p => Sort processes/containers by name
hotkeys("p", () => {
this.store.args.sort_processes_key = "name";
});
// i => Sort processes/containers by I/O rate
hotkeys('i', () => {
this.store.args.sort_processes_key = 'io_counters';
});
// i => Sort processes/containers by I/O rate
hotkeys("i", () => {
this.store.args.sort_processes_key = "io_counters";
});
// t => Sort processes/containers by time
hotkeys('t', () => {
this.store.args.sort_processes_key = 'timemillis';
});
// t => Sort processes/containers by time
hotkeys("t", () => {
this.store.args.sort_processes_key = "timemillis";
});
// A => Enable/disable AMPs
hotkeys('shift+A', () => {
this.store.args.disable_amps = !this.store.args.disable_amps;
});
// A => Enable/disable AMPs
hotkeys("shift+A", () => {
this.store.args.disable_amps = !this.store.args.disable_amps;
});
// d => Show/hide disk I/O stats
hotkeys('d', () => {
this.store.args.disable_diskio = !this.store.args.disable_diskio;
});
// d => Show/hide disk I/O stats
hotkeys("d", () => {
this.store.args.disable_diskio = !this.store.args.disable_diskio;
});
// Q => Show/hide IRQ
hotkeys('shift+Q', () => {
this.store.args.enable_irq = !this.store.args.enable_irq;
});
// Q => Show/hide IRQ
hotkeys("shift+Q", () => {
this.store.args.enable_irq = !this.store.args.enable_irq;
});
// f => Show/hide filesystem stats
hotkeys('f', () => {
this.store.args.disable_fs = !this.store.args.disable_fs;
});
// f => Show/hide filesystem stats
hotkeys("f", () => {
this.store.args.disable_fs = !this.store.args.disable_fs;
});
// j => Accumulate processes by program
hotkeys('j', () => {
this.store.args.programs = !this.store.args.programs;
});
// j => Accumulate processes by program
hotkeys("j", () => {
this.store.args.programs = !this.store.args.programs;
});
// k => Show/hide connections stats
hotkeys('k', () => {
this.store.args.disable_connections = !this.store.args.disable_connections;
});
// k => Show/hide connections stats
hotkeys("k", () => {
this.store.args.disable_connections =
!this.store.args.disable_connections;
});
// n => Show/hide network stats
hotkeys('n', () => {
this.store.args.disable_network = !this.store.args.disable_network;
});
// n => Show/hide network stats
hotkeys("n", () => {
this.store.args.disable_network = !this.store.args.disable_network;
});
// s => Show/hide sensors stats
hotkeys('s', () => {
this.store.args.disable_sensors = !this.store.args.disable_sensors;
});
// s => Show/hide sensors stats
hotkeys("s", () => {
this.store.args.disable_sensors = !this.store.args.disable_sensors;
});
// 2 => Show/hide left sidebar
hotkeys('2', () => {
this.store.args.disable_left_sidebar = !this.store.args.disable_left_sidebar;
});
// 2 => Show/hide left sidebar
hotkeys("2", () => {
this.store.args.disable_left_sidebar =
!this.store.args.disable_left_sidebar;
});
// z => Enable/disable processes stats
hotkeys('z', () => {
this.store.args.disable_process = !this.store.args.disable_process;
});
// z => Enable/disable processes stats
hotkeys("z", () => {
this.store.args.disable_process = !this.store.args.disable_process;
});
// S => Enable/disable short processes name
hotkeys('shift+S', () => {
this.store.args.process_short_name = !this.store.args.process_short_name;
});
// S => Enable/disable short processes name
hotkeys("shift+S", () => {
this.store.args.process_short_name =
!this.store.args.process_short_name;
});
// D => Enable/disable containers stats
hotkeys('shift+D', () => {
this.store.args.disable_containers = !this.store.args.disable_containers;
});
// D => Enable/disable containers stats
hotkeys("shift+D", () => {
this.store.args.disable_containers =
!this.store.args.disable_containers;
});
// b => Bytes or bits for network I/O
hotkeys('b', () => {
this.store.args.byte = !this.store.args.byte;
});
// b => Bytes or bits for network I/O
hotkeys("b", () => {
this.store.args.byte = !this.store.args.byte;
});
// 'B' => Switch between bit/s and IO/s for Disk IO
hotkeys('shift+B', () => {
this.store.args.diskio_iops = !this.store.args.diskio_iops;
});
// 'B' => Switch between bit/s and IO/s for Disk IO
hotkeys("shift+B", () => {
this.store.args.diskio_iops = !this.store.args.diskio_iops;
if (this.store.args.diskio_iops) {
this.store.args.diskio_latency = false;
}
});
// l => Show/hide alert logs
hotkeys('l', () => {
this.store.args.disable_alert = !this.store.args.disable_alert;
});
// 'L' => Switch to latency for Disk IO
hotkeys("shift+L", () => {
this.store.args.diskio_latency = !this.store.args.diskio_latency;
if (this.store.args.diskio_latency) {
this.store.args.diskio_iops = false;
}
});
// 1 => Global CPU or per-CPU stats
hotkeys('1', () => {
this.store.args.percpu = !this.store.args.percpu;
});
// l => Show/hide alert logs
hotkeys("l", () => {
this.store.args.disable_alert = !this.store.args.disable_alert;
});
// h => Show/hide this help screen
hotkeys('h', () => {
this.store.args.help_tag = !this.store.args.help_tag;
});
// 1 => Global CPU or per-CPU stats
hotkeys("1", () => {
this.store.args.percpu = !this.store.args.percpu;
});
// T => View network I/O as combination
hotkeys('shift+T', () => {
this.store.args.network_sum = !this.store.args.network_sum;
});
// h => Show/hide this help screen
hotkeys("h", () => {
this.store.args.help_tag = !this.store.args.help_tag;
});
// U => View cumulative network I/O
hotkeys('shift+U', () => {
this.store.args.network_cumul = !this.store.args.network_cumul;
});
// T => View network I/O as combination
hotkeys("shift+T", () => {
this.store.args.network_sum = !this.store.args.network_sum;
});
// F => Show filesystem free space
hotkeys('shift+F', () => {
this.store.args.fs_free_space = !this.store.args.fs_free_space;
});
// U => View cumulative network I/O
hotkeys("shift+U", () => {
this.store.args.network_cumul = !this.store.args.network_cumul;
});
// 3 => Enable/disable quick look plugin
hotkeys('3', () => {
this.store.args.disable_quicklook = !this.store.args.disable_quicklook;
});
// F => Show filesystem free space
hotkeys("shift+F", () => {
this.store.args.fs_free_space = !this.store.args.fs_free_space;
});
// 6 => Enable/disable mean gpu
hotkeys('6', () => {
this.store.args.meangpu = !this.store.args.meangpu;
});
// 3 => Enable/disable quick look plugin
hotkeys("3", () => {
this.store.args.disable_quicklook = !this.store.args.disable_quicklook;
});
// G => Enable/disable gpu
hotkeys('shift+G', () => {
this.store.args.disable_gpu = !this.store.args.disable_gpu;
});
// 6 => Enable/disable mean gpu
hotkeys("6", () => {
this.store.args.meangpu = !this.store.args.meangpu;
});
hotkeys('5', () => {
this.store.args.disable_quicklook = !this.store.args.disable_quicklook;
this.store.args.disable_cpu = !this.store.args.disable_cpu;
this.store.args.disable_mem = !this.store.args.disable_mem;
this.store.args.disable_memswap = !this.store.args.disable_memswap;
this.store.args.disable_load = !this.store.args.disable_load;
this.store.args.disable_gpu = !this.store.args.disable_gpu;
});
// G => Enable/disable gpu
hotkeys("shift+G", () => {
this.store.args.disable_gpu = !this.store.args.disable_gpu;
});
// I => Show/hide IP module
hotkeys('shift+I', () => {
this.store.args.disable_ip = !this.store.args.disable_ip;
});
hotkeys("5", () => {
this.store.args.disable_quicklook = !this.store.args.disable_quicklook;
this.store.args.disable_cpu = !this.store.args.disable_cpu;
this.store.args.disable_mem = !this.store.args.disable_mem;
this.store.args.disable_memswap = !this.store.args.disable_memswap;
this.store.args.disable_load = !this.store.args.disable_load;
this.store.args.disable_gpu = !this.store.args.disable_gpu;
});
// P => Enable/disable ports module
hotkeys('shift+P', () => {
this.store.args.disable_ports = !this.store.args.disable_ports;
});
// I => Show/hide IP module
hotkeys("shift+I", () => {
this.store.args.disable_ip = !this.store.args.disable_ip;
});
// V => Enable/disable VMs stats
hotkeys('shift+V', () => {
this.store.args.disable_vms = !this.store.args.disable_vms;
});
// P => Enable/disable ports module
hotkeys("shift+P", () => {
this.store.args.disable_ports = !this.store.args.disable_ports;
});
// 'W' > Enable/Disable Wifi plugin
hotkeys('shift+W', () => {
this.store.args.disable_wifi = !this.store.args.disable_wifi;
});
// V => Enable/disable VMs stats
hotkeys("shift+V", () => {
this.store.args.disable_vms = !this.store.args.disable_vms;
});
// 0 => Enable/disable IRIX mode (see issue #3158)
hotkeys('0', () => {
this.store.args.disable_irix = !this.store.args.disable_irix;
});
}
}
// 'W' > Enable/Disable Wifi plugin
hotkeys("shift+W", () => {
this.store.args.disable_wifi = !this.store.args.disable_wifi;
});
// 0 => Enable/disable IRIX mode (see issue #3158)
hotkeys("0", () => {
this.store.args.disable_irix = !this.store.args.disable_irix;
});
},
},
};
</script>

View File

@ -56,54 +56,59 @@
// import { store } from './store.js';
export default {
data() {
return {
servers: undefined,
};
},
computed: {
serversListLoaded() {
return this.servers !== undefined;
},
},
created() {
this.updateServersList();
},
mounted() {
const GLANCES = window.__GLANCES__ || {};
const refreshTime = isFinite(GLANCES['refresh-time'])
? parseInt(GLANCES['refresh-time'], 10)
: undefined;
this.interval = setInterval(this.updateServersList, refreshTime * 1000)
},
unmounted() {
clearInterval(this.interval)
},
methods: {
updateServersList() {
fetch('api/4/serverslist', { method: 'GET' })
.then((response) => response.json())
.then((response) => (this.servers = response));
},
formatNumber(value) {
if (typeof value === "number" && !isNaN(value)) {
return value.toFixed(1);
}
return value;
},
goToGlances(server) {
if (server.protocol === 'rpc') {
alert("You just click on a Glances RPC server.\nPlease open a terminal and enter the following command line:\n\nglances -c " + String(server.ip) + " -p " + String(server.port))
} else {
window.location.href = server.uri;
}
},
getDecoration(server, column) {
if (server[column + '_decoration'] === undefined) {
return;
}
return server[column + '_decoration'].replace('_LOG', '').toLowerCase();
}
}
data() {
return {
servers: undefined,
};
},
computed: {
serversListLoaded() {
return this.servers !== undefined;
},
},
created() {
this.updateServersList();
},
mounted() {
const GLANCES = window.__GLANCES__ || {};
const refreshTime = isFinite(GLANCES["refresh-time"])
? parseInt(GLANCES["refresh-time"], 10)
: undefined;
this.interval = setInterval(this.updateServersList, refreshTime * 1000);
},
unmounted() {
clearInterval(this.interval);
},
methods: {
updateServersList() {
fetch("api/4/serverslist", { method: "GET" })
.then((response) => response.json())
.then((response) => (this.servers = response));
},
formatNumber(value) {
if (typeof value === "number" && !isNaN(value)) {
return value.toFixed(1);
}
return value;
},
goToGlances(server) {
if (server.protocol === "rpc") {
alert(
"You just click on a Glances RPC server.\nPlease open a terminal and enter the following command line:\n\nglances -c " +
String(server.ip) +
" -p " +
String(server.port),
);
} else {
window.location.href = server.uri;
}
},
getDecoration(server, column) {
if (server[column + "_decoration"] === undefined) {
return;
}
return server[column + "_decoration"].replace("_LOG", "").toLowerCase();
},
},
};
</script>

View File

@ -1,17 +1,17 @@
/* global module */
if (module.hot) {
module.hot.accept();
module.hot.accept();
}
import '../css/custom.scss';
import '../css/style.scss';
import "../css/custom.scss";
import "../css/style.scss";
import * as bootstrap from 'bootstrap';
import * as bootstrap from "bootstrap";
import { createApp } from 'vue';
import App from './App.vue';
import { createApp } from "vue";
import App from "./App.vue";
import * as filters from "./filters.js";
const app = createApp(App);
app.config.globalProperties.$filters = filters;
app.mount('#app');
app.mount("#app");

View File

@ -1,17 +1,17 @@
/* global module */
if (module.hot) {
module.hot.accept();
module.hot.accept();
}
import '../css/custom.scss';
import '../css/style.scss';
import "../css/custom.scss";
import "../css/style.scss";
import * as bootstrap from 'bootstrap';
import * as bootstrap from "bootstrap";
import { createApp } from 'vue';
import App from './Browser.vue';
import { createApp } from "vue";
import App from "./Browser.vue";
import * as filters from "./filters.js";
const app = createApp(App);
app.config.globalProperties.$filters = filters;
app.mount('#browser');
app.mount("#browser");

View File

@ -163,15 +163,15 @@
<script>
export default {
data() {
return {
help: undefined
};
},
mounted() {
fetch('api/4/help', { method: 'GET' })
.then((response) => response.json())
.then((response) => (this.help = response));
}
data() {
return {
help: undefined,
};
},
mounted() {
fetch("api/4/help", { method: "GET" })
.then((response) => response.json())
.then((response) => (this.help = response));
},
};
</script>

View File

@ -29,97 +29,110 @@
</template>
<script>
import { padStart } from 'lodash';
import { GlancesFavico } from '../services.js';
import { padStart } from "lodash";
import { GlancesFavico } from "../services.js";
export default {
props: {
data: {
type: Object
}
},
computed: {
stats() {
return this.data.stats['alert'];
},
alerts() {
return (this.stats || []).map((alertStats) => {
const alert = {};
alert.state = alertStats.state;
alert.type = alertStats.type;
alert.begin = alertStats.begin * 1000;
alert.end = alertStats.end * 1000;
alert.ongoing = alertStats.end == -1;
alert.min = alertStats.min;
alert.avg = alertStats.avg;
alert.max = alertStats.max;
if (alertStats.top.length > 0) {
alert.top = ': ' + alertStats.top.join(', ');
}
props: {
data: {
type: Object,
},
},
computed: {
stats() {
return this.data.stats["alert"];
},
alerts() {
return (this.stats || []).map((alertStats) => {
const alert = {};
alert.state = alertStats.state;
alert.type = alertStats.type;
alert.begin = alertStats.begin * 1000;
alert.end = alertStats.end * 1000;
alert.ongoing = alertStats.end == -1;
alert.min = alertStats.min;
alert.avg = alertStats.avg;
alert.max = alertStats.max;
if (alertStats.top.length > 0) {
alert.top = ": " + alertStats.top.join(", ");
}
if (!alert.ongoing) {
const duration = alert.end - alert.begin;
const seconds = parseInt((duration / 1000) % 60),
minutes = parseInt((duration / (1000 * 60)) % 60),
hours = parseInt((duration / (1000 * 60 * 60)) % 24);
if (!alert.ongoing) {
const duration = alert.end - alert.begin;
const seconds = parseInt((duration / 1000) % 60),
minutes = parseInt((duration / (1000 * 60)) % 60),
hours = parseInt((duration / (1000 * 60 * 60)) % 24);
alert.duration = padStart(hours, 2, '0') +
':' + padStart(minutes, 2, '0') +
':' + padStart(seconds, 2, '0');
}
alert.duration =
padStart(hours, 2, "0") +
":" +
padStart(minutes, 2, "0") +
":" +
padStart(seconds, 2, "0");
}
return alert;
});
},
hasAlerts() {
return this.countAlerts > 0;
},
countAlerts() {
return this.alerts.length;
},
hasOngoingAlerts() {
return this.countOngoingAlerts > 0;
},
countOngoingAlerts() {
return this.alerts.filter(({ ongoing }) => ongoing).length;
}
},
watch: {
countOngoingAlerts() {
if (this.countOngoingAlerts) {
GlancesFavico.badge(this.countOngoingAlerts);
} else {
GlancesFavico.reset();
}
}
},
methods: {
formatDate(timestamp) {
const tzOffset = new Date().getTimezoneOffset();
const hours = Math.trunc(Math.abs(tzOffset) / 60);
const minutes = Math.abs(tzOffset % 60);
return alert;
});
},
hasAlerts() {
return this.countAlerts > 0;
},
countAlerts() {
return this.alerts.length;
},
hasOngoingAlerts() {
return this.countOngoingAlerts > 0;
},
countOngoingAlerts() {
return this.alerts.filter(({ ongoing }) => ongoing).length;
},
},
watch: {
countOngoingAlerts() {
if (this.countOngoingAlerts) {
GlancesFavico.badge(this.countOngoingAlerts);
} else {
GlancesFavico.reset();
}
},
},
methods: {
formatDate(timestamp) {
const tzOffset = new Date().getTimezoneOffset();
const hours = Math.trunc(Math.abs(tzOffset) / 60);
const minutes = Math.abs(tzOffset % 60);
let tzString = tzOffset <= 0 ? '+' : '-';
tzString += String(hours).padStart(2, '0') + String(minutes).padStart(2, '0');
let tzString = tzOffset <= 0 ? "+" : "-";
tzString +=
String(hours).padStart(2, "0") + String(minutes).padStart(2, "0");
const date = new Date(timestamp);
return String(date.getFullYear()) +
'-' + String(date.getMonth() + 1).padStart(2, '0') +
'-' + String(date.getDate()).padStart(2, '0') +
' ' + String(date.getHours()).padStart(2, '0') +
':' + String(date.getMinutes()).padStart(2, '0') +
':' + String(date.getSeconds()).padStart(2, '0') +
'(' + tzString + ')';
},
clear() {
const requestOptions = {
method: 'POST',
headers: { 'Content-Type': 'application/json' }
};
fetch('api/4/events/clear/all', requestOptions)
.then(response => response.json())
.then(data => product.value = data);
}
}
const date = new Date(timestamp);
return (
String(date.getFullYear()) +
"-" +
String(date.getMonth() + 1).padStart(2, "0") +
"-" +
String(date.getDate()).padStart(2, "0") +
" " +
String(date.getHours()).padStart(2, "0") +
":" +
String(date.getMinutes()).padStart(2, "0") +
":" +
String(date.getSeconds()).padStart(2, "0") +
"(" +
tzString +
")"
);
},
clear() {
const requestOptions = {
method: "POST",
headers: { "Content-Type": "application/json" },
};
fetch("api/4/events/clear/all", requestOptions)
.then((response) => response.json())
.then((data) => (product.value = data));
},
},
};
</script>

Some files were not shown because too many files have changed in this diff.