Add support for InfluxDB 3 Core #3182

This commit is contained in:
nicolargo 2025-05-27 10:46:05 +02:00
parent a5b2a97e6c
commit 7c0dee3a63
13 changed files with 901 additions and 733 deletions

View File

@ -117,10 +117,13 @@ test-min-with-upgrade: venv-min-upgrade ## Upgrade deps and run unit tests in mi
test-export-csv: ## Run interface tests with CSV
/bin/bash ./tests/test_export_csv.sh
test-export-influxdb: ## Run interface tests with InfluxDB
/bin/bash ./tests/test_export_influxdb.sh
test-export-influxdb-v1: ## Run interface tests with InfluxDB version 1 (Legacy)
/bin/bash ./tests/test_export_influxdb_v1.sh
test-export: test-export-csv test-export-influxdb ## Tests all exports
test-export-influxdb-v3: ## Run interface tests with InfluxDB version 3 (Core)
/bin/bash ./tests/test_export_influxdb_v3.sh
test-export: test-export-csv test-export-influxdb-v1 test-export-influxdb-v3 ## Tests all exports
# ===================================================================
# Linters, profilers and cyber security

View File

@ -623,7 +623,7 @@ style=DarkStyle
[influxdb]
# !!!
# Will be DEPRECATED in a future release.
# Please have a look on the new influxdb2 export module (compatible with InfluxDB 1.8.x and 2.x)
# Please have a look on the new influxdb3 export module
# !!!
# Configuration for the --export influxdb option
# https://influxdb.com/
@ -652,7 +652,28 @@ port=8086
protocol=http
org=nicolargo
bucket=glances
token=EjFUTWe8U-MIseEAkaVIgVnej_TrnbdvEcRkaB1imstW7gapSqy6_6-8XD-yd51V0zUUpDy-kAdVD1purDLuxA==
token=PUT_YOUR_INFLUXDB2_TOKEN_HERE
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
# Prefix will be added to all measurement names
# Ex: prefix=foo
# => foo.cpu
# => foo.mem
# You can also use dynamic values
#prefix=foo
# Following tags will be added for all measurements
# You can also use dynamic values.
# Note: hostname and name (for process) are always added as tags
#tags=foo:bar,spam:eggs,domain:`domainname`
[influxdb3]
# Configuration for the --export influxdb3 option
# https://influxdb.com/
host=http://localhost:8181
org=nicolargo
database=glances
token=PUT_YOUR_INFLUXDB3_TOKEN_HERE
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
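The [influxdb3] keys above map directly onto the constructor arguments of the InfluxDB 3 client used by the new export module. A minimal sketch of that mapping, assuming the influxdb3-python package (influxdb_client_3) is installed and using the placeholder values above:

# Sketch only (not part of the commit): how the [influxdb3] values are consumed.
from influxdb_client_3 import InfluxDBClient3

client = InfluxDBClient3(
    host="http://localhost:8181",           # [influxdb3] host
    org="nicolargo",                         # [influxdb3] org
    database="glances",                      # [influxdb3] database
    token="PUT_YOUR_INFLUXDB3_TOKEN_HERE",   # [influxdb3] token
)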

File diff suppressed because it is too large

View File

@ -17,19 +17,19 @@ Glances InfluxDB data model:
+---------------+-----------------------+-----------------------+
| Measurement   | Fields                | Tags                  |
+===============+=======================+=======================+
| cpu           | user                  | hostname              |
|               | system                |                       |
|               | iowait...             |                       |
+---------------+-----------------------+-----------------------+
| network       | rx                    | hostname              |
|               | tx                    | interface_name        |
|               | time_since_update...  |                       |
|               |                       |                       |
+---------------+-----------------------+-----------------------+
| diskio        | read_bytes            | hostname              |
|               | write_bytes           | disk_name             |
|               | time_since_update...  |                       |
|               |                       |                       |
+---------------+-----------------------+-----------------------+
| docker        | cpu_percent           | hostname              |
|               | memory_usage...       | name                  |
@ -78,7 +78,7 @@ configuration file (no limit on columns number).
Note: if you want to use SSL, please set 'protocol=https'.
InfluxDB v2 (from InfluxDB v1.8.x/Flux and InfluxDB v2.x)
InfluxDB v2 (from InfluxDB v1.8.x/Flux to InfluxDB <3.x)
---------------------------------------------------------
Note: The InfluxDB v2 client (https://pypi.org/project/influxdb-client/)
@ -90,12 +90,14 @@ following:
.. code-block:: ini
[influxdb2]
# Configuration for the --export influxdb2 option
# https://influxdb.com/
host=localhost
port=8086
protocol=http
org=nicolargo
bucket=glances
token=EjFUTWe8U-MIseEAkaVIgVnej_TrnbdvEcRkaB1imstW7gapSqy6_6-8XD-yd51V0zUUpDy-kAdVD1purDLuxA==
token=PUT_YOUR_INFLUXDB2_TOKEN_HERE
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
@ -107,7 +109,7 @@ following:
#prefix=foo
# Following tags will be added for all measurements
# You can also use dynamic values.
# Note: hostname is always added as a tag
# Note: hostname and name (for process) are always added as tags
#tags=foo:bar,spam:eggs,domain:`domainname`
and run Glances with:
@ -118,6 +120,46 @@ and run Glances with:
Note: if you want to use SSL, please set 'protocol=https'.
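The export can be verified by reading a few points back with the influxdb-client package; the sketch below is only an illustration (the Flux query and connection values are examples, not part of Glances):
.. code-block:: python

    from influxdb_client import InfluxDBClient

    client = InfluxDBClient(url="http://localhost:8086",
                            org="nicolargo",
                            token="PUT_YOUR_INFLUXDB2_TOKEN_HERE")
    # Flux query returning the latest CPU points written by Glances
    tables = client.query_api().query('from(bucket: "glances") |> range(start: -5m) '
                                      '|> filter(fn: (r) => r._measurement == "cpu")')
    for table in tables:
        for record in table.records:
            print(record.get_field(), record.get_value())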
InfluxDB v3 (for InfluxDB 3.x)
------------------------------
Note: The InfluxDB v3 client (https://pypi.org/project/influxdb3-python/)
is only available for Python 3.8 or higher.
The connection should be defined in the Glances configuration file as
follows:
.. code-block:: ini
[influxdb3]
# Configuration for the --export influxdb3 option
# https://influxdb.com/
host=http://localhost:8181
org=nicolargo
database=glances
token=PUT_YOUR_INFLUXDB3_TOKEN_HERE
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
# Prefix will be added to all measurement names
# Ex: prefix=foo
# => foo.cpu
# => foo.mem
# You can also use dynamic values
#prefix=foo
# Following tags will be added for all measurements
# You can also use dynamic values.
# Note: hostname and name (for process) are always added as tags
#tags=foo:bar,spam:eggs,domain:`domainname`
and run Glances with:
.. code-block:: console
$ glances --export influxdb3
Note: if you want to use SSL, please set the host with the 'https' scheme instead of 'http'.
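Once Glances is exporting, the same client library can be used to read the data back. The snippet below is only an illustration (the SQL query and connection values are examples, not part of Glances):
.. code-block:: python

    from influxdb_client_3 import InfluxDBClient3

    client = InfluxDBClient3(host="http://localhost:8181",
                             org="nicolargo",
                             database="glances",
                             token="PUT_YOUR_INFLUXDB3_TOKEN_HERE")
    # Returns a PyArrow table with the latest CPU points exported by Glances
    cpu = client.query("SELECT * FROM cpu ORDER BY time DESC LIMIT 5")
    print(cpu.to_pandas())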
Grafana
-------

View File

@ -28,7 +28,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
.TH "GLANCES" "1" "Apr 21, 2025" "4.3.2_dev01" "Glances"
.TH "GLANCES" "1" "May 27, 2025" "4.3.2_dev01" "Glances"
.SH NAME
glances \- An eye on your system
.SH SYNOPSIS

View File

@ -11,7 +11,13 @@ I am your father...
...for all Glances exports IF.
"""
from glances.globals import NoOptionError, NoSectionError, iteritems, iterkeys, json_dumps
from glances.globals import (
NoOptionError,
NoSectionError,
iteritems,
iterkeys,
json_dumps,
)
from glances.logger import logger
from glances.timer import Counter
@ -21,12 +27,12 @@ class GlancesExport:
# List of non exportable internal plugins
non_exportable_plugins = [
'alert',
'help',
'plugin',
'psutilversion',
'quicklook',
'version',
"alert",
"help",
"plugin",
"psutilversion",
"quicklook",
"version",
]
def __init__(self, config=None, args=None):
@ -71,7 +77,7 @@ class GlancesExport:
"""Close the export module."""
logger.debug(f"Finalise export interface {self.export_name}")
def load_conf(self, section, mandatories=['host', 'port'], options=None):
def load_conf(self, section, mandatories=["host", "port"], options=None):
"""Load the export <section> configuration in the Glances configuration file.
:param section: name of the export section to load
@ -112,7 +118,7 @@ class GlancesExport:
"""Return the value of the item 'key'."""
ret = None
try:
ret = item[item['key']]
ret = item[item["key"]]
except KeyError:
logger.error(f"No 'key' available in {item}")
if isinstance(ret, list):
@ -128,14 +134,77 @@ class GlancesExport:
d_tags = {}
if tags:
try:
d_tags = dict([x.split(':') for x in tags.split(',')])
d_tags = dict([x.split(":") for x in tags.split(",")])
except ValueError:
# one of the 'key:value' pairs was missing
logger.info('Invalid tags passed: %s', tags)
logger.info("Invalid tags passed: %s", tags)
d_tags = {}
return d_tags
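# Example (illustration only, not from the commit): with tags="foo:bar,spam:eggs"
# this returns {"foo": "bar", "spam": "eggs"}; a pair without a colon (e.g. "foo")
# hits the ValueError branch above and an empty dict is returned instead.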
def normalize_for_influxdb(self, name, columns, points):
"""Normalize data for the InfluxDB's data model.
:return: a list of measurements.
"""
FIELD_TO_TAG = ["name", "cmdline", "type"]
ret = []
# Build initial dict by crossing columns and point
data_dict = dict(zip(columns, points))
# issue1871 - Check if a key exists. If a key exists, the value of
# the key should be used as a tag to identify the measurement.
keys_list = [k.split(".")[0] for k in columns if k.endswith(".key")]
if not keys_list:
keys_list = [None]
for measurement in keys_list:
# Manage field
if measurement is not None:
fields = {
k.replace(f"{measurement}.", ""): data_dict[k] for k in data_dict if k.startswith(f"{measurement}.")
}
else:
fields = data_dict
# Transform to InfluxDB data model
# https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_reference/
for k in fields:
# Do not export empty (None) value
if fields[k] is None:
continue
# Convert numerical to float
try:
fields[k] = float(fields[k])
except (TypeError, ValueError):
# Convert others to string
try:
fields[k] = str(fields[k])
except (TypeError, ValueError):
pass
# Manage tags
tags = self.parse_tags(self.tags)
# Add the hostname as a tag
tags["hostname"] = self.hostname
if "hostname" in fields:
fields.pop("hostname")
# Other tags...
if "key" in fields and fields["key"] in fields:
# Create a tag from the key
# Tag should be a string (see InfluxDB data model)
tags[fields["key"]] = str(fields[fields["key"]])
# Remove it from the field list (cannot be both a field and a tag)
fields.pop(fields["key"])
# Add name as a tag (example for the process list)
for k in FIELD_TO_TAG:
if k in fields:
tags[k] = str(fields[k])
# Remove it from the field list (cannot be both a field and a tag)
fields.pop(k)
# Add the measurement to the list
ret.append({"measurement": name, "tags": tags, "fields": fields})
return ret
def plugins_to_export(self, stats):
"""Return the list of plugins to export.
@ -177,7 +246,7 @@ class GlancesExport:
if isinstance(all_stats[plugin], dict):
all_stats[plugin].update(all_limits[plugin])
# Remove the <plugin>_disable field
all_stats[plugin].pop(f'{plugin}_disable', None)
all_stats[plugin].pop(f"{plugin}_disable", None)
elif isinstance(all_stats[plugin], list):
# TypeError: string indices must be integers (Network plugin) #1054
for i in all_stats[plugin]:
@ -197,17 +266,17 @@ class GlancesExport:
if isinstance(stats, dict):
# Stats is a dict
# Is there a key ?
if 'key' in iterkeys(stats) and stats['key'] in iterkeys(stats):
pre_key = '{}.'.format(stats[stats['key']])
if "key" in iterkeys(stats) and stats["key"] in iterkeys(stats):
pre_key = "{}.".format(stats[stats["key"]])
else:
pre_key = ''
pre_key = ""
# Walk through the dict
for key, value in sorted(iteritems(stats)):
if isinstance(value, bool):
value = json_dumps(value).decode()
if isinstance(value, list):
value = ' '.join([str(v) for v in value])
value = " ".join([str(v) for v in value])
if isinstance(value, dict):
item_names, item_values = self.build_export(value)

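The shared normalize_for_influxdb() above replaces the two private _normalize() copies removed from the influxdb and influxdb2 modules below. As a hypothetical illustration of what it produces (values invented, "myhost" standing in for the exporter hostname):

# Illustration only: input as produced by build_export() for one network interface
columns = ["eth0.rx", "eth0.tx", "eth0.interface_name", "eth0.key"]
points = [123456.0, 65432.0, "eth0", "interface_name"]

# exporter.normalize_for_influxdb("network", columns, points) would then return:
# [{
#     "measurement": "network",
#     "tags": {"hostname": "myhost", "interface_name": "eth0"},
#     "fields": {"rx": 123456.0, "tx": 65432.0, "key": "interface_name"},
# }]
# The '<item>.key' column is what promotes interface_name to a tag (issue #1871);
# the hostname tag is always added by the exporter.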
View File

@ -17,8 +17,6 @@ from influxdb.client import InfluxDBClientError
from glances.exports.export import GlancesExport
from glances.logger import logger
FIELD_TO_TAG = ['name', 'cmdline', 'type']
class Export(GlancesExport):
"""This class manages the InfluxDB export module."""
@ -33,20 +31,22 @@ class Export(GlancesExport):
self.db = None
# Optional configuration keys
self.protocol = 'http'
self.protocol = "http"
self.prefix = None
self.tags = None
self.hostname = None
# Load the InfluxDB configuration file
self.export_enable = self.load_conf(
'influxdb', mandatories=['host', 'port', 'user', 'password', 'db'], options=['protocol', 'prefix', 'tags']
"influxdb",
mandatories=["host", "port", "user", "password", "db"],
options=["protocol", "prefix", "tags"],
)
if not self.export_enable:
exit('Missing INFLUXDB version 1 config')
exit("Missing influxdb config")
# The hostname is always added as a tag
self.hostname = node().split('.')[0]
self.hostname = node().split(".")[0]
# Init the InfluxDB client
self.client = self.init()
@ -57,7 +57,7 @@ class Export(GlancesExport):
return None
# Correct issue #1530
if self.protocol is not None and (self.protocol.lower() == 'https'):
if self.protocol is not None and (self.protocol.lower() == "https"):
ssl = True
else:
ssl = False
@ -72,7 +72,7 @@ class Export(GlancesExport):
password=self.password,
database=self.db,
)
get_all_db = [i['name'] for i in db.get_list_database()]
get_all_db = [i["name"] for i in db.get_list_database()]
except InfluxDBClientError as e:
logger.critical(f"Cannot connect to InfluxDB database '{self.db}' ({e})")
sys.exit(2)
@ -85,76 +85,20 @@ class Export(GlancesExport):
return db
def _normalize(self, name, columns, points):
"""Normalize data for the InfluxDB's data model.
:return: a list of measurements.
"""
ret = []
# Build initial dict by crossing columns and point
data_dict = dict(zip(columns, points))
# issue1871 - Check if a key exist. If a key exist, the value of
# the key should be used as a tag to identify the measurement.
keys_list = [k.split('.')[0] for k in columns if k.endswith('.key')]
if not keys_list:
keys_list = [None]
for measurement in keys_list:
# Manage field
if measurement is not None:
fields = {
k.replace(f'{measurement}.', ''): data_dict[k] for k in data_dict if k.startswith(f'{measurement}.')
}
else:
fields = data_dict
# Transform to InfluxDB data model
# https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_reference/
for k in fields:
# Do not export empty (None) value
if fields[k] is None:
continue
# Convert numerical to float
try:
fields[k] = float(fields[k])
except (TypeError, ValueError):
# Convert others to string
try:
fields[k] = str(fields[k])
except (TypeError, ValueError):
pass
# Manage tags
tags = self.parse_tags(self.tags)
if 'key' in fields and fields['key'] in fields:
# Create a tag from the key
# Tag should be an string (see InfluxDB data model)
tags[fields['key']] = str(fields[fields['key']])
# Remove it from the field list (can not be a field and a tag)
fields.pop(fields['key'])
# Add the hostname as a tag
tags['hostname'] = self.hostname
# Add name as a tag (example for the process list)
for k in FIELD_TO_TAG:
if k in fields:
tags[k] = str(fields[k])
# Remove it from the field list (can not be a field and a tag)
fields.pop(k)
# Add the measurement to the list
ret.append({'measurement': name, 'tags': tags, 'fields': fields})
return ret
def export(self, name, columns, points):
"""Write the points to the InfluxDB server."""
# Manage prefix
if self.prefix is not None:
name = self.prefix + '.' + name
name = self.prefix + "." + name
# Write input to the InfluxDB database
if not points:
logger.debug(f"Cannot export empty {name} stats to InfluxDB")
else:
try:
self.client.write_points(self._normalize(name, columns, points), time_precision="s")
self.client.write_points(
self.normalize_for_influxdb(name, columns, points),
time_precision="s",
)
except Exception as e:
# Log level set to warning instead of error (see: issue #1561)
logger.warning(f"Cannot export {name} stats to InfluxDB ({e})")

View File

@ -6,7 +6,7 @@
# SPDX-License-Identifier: LGPL-3.0-only
#
"""InfluxDB (from to InfluxDB 1.8+) interface class."""
"""InfluxDB (from to InfluxDB 1.8+ to <3.0) interface class."""
import sys
from platform import node
@ -16,8 +16,6 @@ from influxdb_client import InfluxDBClient, WriteOptions
from glances.exports.export import GlancesExport
from glances.logger import logger
FIELD_TO_TAG = ['name', 'cmdline', 'type']
class Export(GlancesExport):
"""This class manages the InfluxDB export module."""
@ -32,7 +30,7 @@ class Export(GlancesExport):
self.token = None
# Optional configuration keys
self.protocol = 'http'
self.protocol = "http"
self.prefix = None
self.tags = None
self.hostname = None
@ -40,12 +38,12 @@ class Export(GlancesExport):
# Load the InfluxDB configuration file
self.export_enable = self.load_conf(
'influxdb2',
mandatories=['host', 'port', 'user', 'password', 'org', 'bucket', 'token'],
options=['protocol', 'prefix', 'tags', 'interval'],
"influxdb2",
mandatories=["host", "port", "user", "password", "org", "bucket", "token"],
options=["protocol", "prefix", "tags", "interval"],
)
if not self.export_enable:
exit('Missing influxdb2 config')
exit("Missing influxdb2 config")
# Interval between two exports (in seconds)
if self.interval is None:
@ -60,7 +58,7 @@ class Export(GlancesExport):
logger.debug(f"InfluxDB export interval is set to {self.interval} seconds")
# The hostname is always added as a tag
self.hostname = node().split('.')[0]
self.hostname = node().split(".")[0]
# Init the InfluxDB client
self.client = self.init()
@ -70,10 +68,16 @@ class Export(GlancesExport):
if not self.export_enable:
return None
url = f'{self.protocol}://{self.host}:{self.port}'
url = f"{self.protocol}://{self.host}:{self.port}"
try:
# See docs: https://influxdb-client.readthedocs.io/en/stable/api.html#influxdbclient
client = InfluxDBClient(url=url, enable_gzip=False, verify_ssl=False, org=self.org, token=self.token)
client = InfluxDBClient(
url=url,
enable_gzip=False,
verify_ssl=False,
org=self.org,
token=self.token,
)
except Exception as e:
logger.critical(f"Cannot connect to InfluxDB server '{url}' ({e})")
sys.exit(2)
@ -93,76 +97,22 @@ class Export(GlancesExport):
)
)
def _normalize(self, name, columns, points):
"""Normalize data for the InfluxDB's data model.
:return: a list of measurements.
"""
ret = []
# Build initial dict by crossing columns and point
data_dict = dict(zip(columns, points))
# issue1871 - Check if a key exist. If a key exist, the value of
# the key should be used as a tag to identify the measurement.
keys_list = [k.split('.')[0] for k in columns if k.endswith('.key')]
if not keys_list:
keys_list = [None]
for measurement in keys_list:
# Manage field
if measurement is not None:
fields = {
k.replace(f'{measurement}.', ''): data_dict[k] for k in data_dict if k.startswith(f'{measurement}.')
}
else:
fields = data_dict
# Transform to InfluxDB datamodel
# https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/
for k in fields:
# Do not export empty (None) value
if fields[k] is None:
continue
# Convert numerical to float
try:
fields[k] = float(fields[k])
except (TypeError, ValueError):
# Convert others to string
try:
fields[k] = str(fields[k])
except (TypeError, ValueError):
pass
# Manage tags
tags = self.parse_tags(self.tags)
if 'key' in fields and fields['key'] in fields:
# Create a tag from the key
# Tag should be an string (see InfluxDB data model)
tags[fields['key']] = str(fields[fields['key']])
# Remove it from the field list (can not be a field and a tag)
fields.pop(fields['key'])
# Add the hostname as a tag
tags['hostname'] = self.hostname
# Add name as a tag (example for the process list)
for k in FIELD_TO_TAG:
if k in fields:
tags[k] = str(fields[k])
# Remove it from the field list (can not be a field and a tag)
fields.pop(k)
# Add the measurement to the list
ret.append({'measurement': name, 'tags': tags, 'fields': fields})
return ret
def export(self, name, columns, points):
"""Write the points to the InfluxDB server."""
# Manage prefix
if self.prefix is not None:
name = self.prefix + '.' + name
name = self.prefix + "." + name
# Write input to the InfluxDB database
if not points:
logger.debug(f"Cannot export empty {name} stats to InfluxDB")
else:
try:
self.client.write(self.bucket, self.org, self._normalize(name, columns, points), time_precision="s")
self.client.write(
self.bucket,
self.org,
self.normalize_for_influxdb(name, columns, points),
time_precision="s",
)
except Exception as e:
# Log level set to warning instead of error (see: issue #1561)
logger.warning(f"Cannot export {name} stats to InfluxDB ({e})")

View File

@ -0,0 +1,98 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""InfluxDB (for InfluxDB 3.x) interface class."""
import sys
from platform import node
from influxdb_client_3 import InfluxDBClient3
from glances.exports.export import GlancesExport
from glances.logger import logger
class Export(GlancesExport):
"""This class manages the InfluxDB export module."""
def __init__(self, config=None, args=None):
"""Init the InfluxDB export IF."""
super().__init__(config=config, args=args)
# Mandatory configuration keys (additional to host and port)
self.host = None
self.port = None
self.org = None
self.database = None
self.token = None
# Optional configuration keys
self.prefix = None
self.tags = None
self.hostname = None
# Load the InfluxDB configuration file
self.export_enable = self.load_conf(
"influxdb3",
mandatories=["host", "port", "org", "database", "token"],
options=["prefix", "tags"],
)
if not self.export_enable:
exit("Missing influxdb3 config")
# The hostname is always added as a tag
self.hostname = node().split(".")[0]
# Init the InfluxDB client
self.client = self.init()
def init(self):
"""Init the connection to the InfluxDB server."""
if not self.export_enable:
return None
try:
db = InfluxDBClient3(
host=self.host,
org=self.org,
database=self.database,
token=self.token,
)
except Exception as e:
logger.critical(f"Cannot connect to InfluxDB database '{self.database}' ({e})")
sys.exit(2)
if self.database == db._database:
logger.info(
f"Stats will be exported to InfluxDB server {self.host}:{self.port} in {self.database} database"
)
else:
logger.critical(f"InfluxDB database '{self.database}' did not exist. Please create it")
sys.exit(2)
return db
def export(self, name, columns, points):
"""Write the points to the InfluxDB server."""
# Manage prefix
if self.prefix is not None:
name = self.prefix + "." + name
# Write input to the InfluxDB database
if not points:
logger.debug(f"Cannot export empty {name} stats to InfluxDB")
else:
try:
self.client.write(
record=self.normalize_for_influxdb(name, columns, points),
time_precision="s",
)
except Exception as e:
# Log level set to warning instead of error (see: issue #1561)
logger.warning(f"Cannot export {name} stats to InfluxDB ({e})")
else:
logger.debug(f"Export {name} stats to InfluxDB")

View File

@ -11,7 +11,8 @@ fastapi>=0.82.0
graphitesender
hddtemp
influxdb>=1.0.0 # For InfluxDB < 1.8
influxdb-client # For InfluxDB >= 1.8
influxdb-client # For InfluxDB >= 1.8 and < 3.x
influxdb3-python # For InfluxDB 3.x
jinja2
kafka-python
netifaces2

View File

@ -75,6 +75,7 @@ export = [
"ibmcloudant",
"influxdb-client",
"influxdb>=1.0.0",
"influxdb3-python",
"kafka-python",
"paho-mqtt",
"pika",

View File

@ -6,8 +6,8 @@
# Exit on error
set -e
echo "Starting InfluxDB container..."
docker run -d --name influxdb-for-glances \
echo "Starting InfluxDB version 1 container..."
docker run -d --name influxdb-v1-for-glances \
-p 8086:8086 \
influxdb:1.11
@ -21,8 +21,8 @@ for i in {1..30}; do
if [ $i -eq 30 ]; then
echo "Error: Timed out waiting for InfluxDB to start"
docker stop influxdb-for-glances
docker rm influxdb-for-glances
docker stop influxdb-v1-for-glances
docker rm influxdb-v1-for-glances
exit 1
fi
@ -32,8 +32,8 @@ done
# Create the glances database
echo "Creating 'glances' database..."
docker exec influxdb-for-glances influx -execute 'DROP DATABASE glances'
docker exec influxdb-for-glances influx -execute 'CREATE DATABASE glances'
docker exec influxdb-v1-for-glances influx -execute 'DROP DATABASE glances'
docker exec influxdb-v1-for-glances influx -execute 'CREATE DATABASE glances'
# Run glances with export to InfluxDB, stopping after 10 writes
# This will run synchronously now since we're using --stop-after
@ -42,30 +42,30 @@ echo "Glances to export system stats to InfluxDB (duration: ~ 20 seconds)"
echo "Checking if Glances data was successfully exported to InfluxDB..."
# Query to check if data exists in the glances database
MEASUREMENT_COUNT=$(docker exec influxdb-for-glances influx -database 'glances' -format json -execute 'SHOW MEASUREMENTS' | jq '.results[0].series[0].values' | jq length)
MEASUREMENT_COUNT=$(docker exec influxdb-v1-for-glances influx -database 'glances' -format json -execute 'SHOW MEASUREMENTS' | jq '.results[0].series[0].values' | jq length)
if [ "$MEASUREMENT_COUNT" -eq 0 ]; then
echo "Error: No Glances measurement found in the InfluxDB database"
docker stop influxdb-for-glances
docker rm influxdb-for-glances
docker stop influxdb-v1-for-glances
docker rm influxdb-v1-for-glances
exit 1
else
echo "Success! Found $MEASUREMENT_COUNT measurements in the Glances database."
fi
# Query to check if data exists in the glances database
SERIE_COUNT=$(docker exec influxdb-for-glances influx -database 'glances' -format json -execute 'SELECT * FROM cpu' | jq '.results[0].series[0].values' | jq length)
SERIE_COUNT=$(docker exec influxdb-v1-for-glances influx -database 'glances' -format json -execute 'SELECT * FROM cpu' | jq '.results[0].series[0].values' | jq length)
if [ "$SERIE_COUNT" -eq 9 ]; then
echo "Success! Found $SERIE_COUNT series in the Glances database (CPU plugin)."
else
echo "Error: Found $SERIE_COUNT series instead of 9"
docker stop influxdb-for-glances
docker rm influxdb-for-glances
docker stop influxdb-v1-for-glances
docker rm influxdb-v1-for-glances
exit 1
fi
# Stop and remove the InfluxDB container
echo "Stopping and removing InfluxDB container..."
docker stop influxdb-for-glances
docker rm influxdb-for-glances
docker stop influxdb-v1-for-glances
docker rm influxdb-v1-for-glances
echo "Script completed successfully!"

View File

@ -0,0 +1,75 @@
#!/bin/bash
# Pre-requisites:
# - docker
# - jq
# Exit on error
set -e
echo "Starting InfluxDB version 3 (Core) container..."
docker run -d --name influxdb-v3-for-glances \
-p 8181:8181 \
influxdb:3-core --node-id host01 --object-store memory
# Wait for InfluxDB to be ready (5 seconds)
echo "Waiting for InfluxDB to start..."
sleep 5
# Create the token
echo "Creating InfluxDB token..."
TOKEN_RETURN=$(docker exec influxdb-v3-for-glances influxdb3 create token --admin)
TOKEN=$(echo -n $TOKEN_RETURN | awk '{ print $6 }')
echo "Token: $TOKEN"
# Create a new configuration for the test
echo "Creating a temporary Glances configuration file with the token in /tmp/glances.conf..."
sed "s/PUT_YOUR_INFLUXDB3_TOKEN_HERE/$TOKEN/g" ./conf/glances.conf > /tmp/glances.conf
# Create the glances database
echo "Creating 'glances' database..."
docker exec -e "INFLUXDB3_AUTH_TOKEN=$TOKEN" influxdb-v3-for-glances influxdb3 create database glances
docker exec -e "INFLUXDB3_AUTH_TOKEN=$TOKEN" influxdb-v3-for-glances influxdb3 show databases
# Get the list of tables in the glances database after creation
TABLES_INIT=$(docker exec -e "INFLUXDB3_AUTH_TOKEN=$TOKEN" influxdb-v3-for-glances influxdb3 query --database glances --format json 'SHOW TABLES')
TABLES_INIT_COUNT=$(echo "$TABLES_INIT" | jq length)
# Run glances with export to InfluxDB, stopping after 10 writes
# This will run synchronously now since we're using --stop-after
echo "Glances to export system stats to InfluxDB (duration: ~ 20 seconds)"
./venv/bin/python -m glances --config /tmp/glances.conf --export influxdb3 --stop-after 10 --quiet
echo "Checking if Glances data was successfully exported to InfluxDB..."
# Query to check if data exists in the glances database
TABLES=$(docker exec -e "INFLUXDB3_AUTH_TOKEN=$TOKEN" influxdb-v3-for-glances influxdb3 query --database glances --format json 'SHOW TABLES')
TABLES_COUNT=$(echo "$TABLES" | jq length)
if [ "$TABLES_COUNT" -eq "$TABLES_INIT_COUNT" ]; then
echo "Error: No Glances measurement found in the InfluxDB database"
docker stop influxdb-v3-for-glances
docker rm influxdb-v3-for-glances
exit 1
else
echo "Success! Found $TABLES_COUNT measurements in the Glances database."
fi
# Query to check if data exists in the glances database
SERIE=$(docker exec -e "INFLUXDB3_AUTH_TOKEN=$TOKEN" influxdb-v3-for-glances influxdb3 query --database glances --format json 'SELECT * FROM cpu')
SERIE_COUNT=$(echo "$SERIE" | jq length)
if [ "$SERIE_COUNT" -eq 9 ]; then
echo "Success! Found $SERIE_COUNT series in the Glances database (CPU plugin)."
else
echo "Error: Found $SERIE_COUNT series instead of 9"
docker stop influxdb-v3-for-glances
docker rm influxdb-v3-for-glances
exit 1
fi
# Stop and remove the InfluxDB container
echo "Stopping and removing InfluxDB container..."
docker stop influxdb-v3-for-glances
docker rm influxdb-v3-for-glances
# Remove the temporary configuration file
rm -f /tmp/glances.conf
echo "Script completed successfully!"