mirror of https://github.com/nicolargo/glances.git
Merge pull request #1961 from RazCrimson/docker-plugin-updates
Docker plugin updates
commit d95a64c570
@@ -24,11 +24,11 @@ import threading
 import time
 from copy import deepcopy
 
+from glances.logger import logger
 from glances.compat import iterkeys, itervalues, nativestr
-from glances.timer import getTimeSinceLastUpdate
-from glances.logger import logger
 from glances.plugins.glances_plugin import GlancesPlugin
 from glances.processes import sort_stats as sort_stats_processes, glances_processes
+from glances.timer import getTimeSinceLastUpdate
 
 # Docker-py library (optional and Linux-only)
 # https://github.com/docker/docker-py
@@ -66,7 +66,6 @@ items_history_list = [{'name': 'cpu_percent',
                        'description': 'Container CPU consumption in %',
                        'y_unit': '%'}]
 
-
 # List of key to remove before export
 export_exclude_list = ['cpu', 'io', 'memory', 'network']
 
@@ -95,11 +94,21 @@ class Plugin(GlancesPlugin):
         # Init the Docker API
         self.docker_client = self.connect()
 
-        # Dict of thread (to grab stats asynchronously, one thread is created by container)
+        # Dict of thread (to grab stats asynchronously, one thread is created per container)
         # key: Container Id
         # value: instance of ThreadDockerGrabber
         self.thread_list = {}
 
+        # Dict of Network stats (Storing previous network stats to compute Rx/s and Tx/s)
+        # key: Container Id
+        # value: network stats dict
+        self.network_old = {}
+
+        # Dict of Disk IO stats (Storing previous disk_io stats to compute Rx/s and Tx/s)
+        # key: Container Id
+        # value: network stats dict
+        self.io_old = {}
+
         # Force a first update because we need two update to have the first stat
         self.update()
         self.refresh_timer.set(0)
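For context, the design spelled out in the comments above (one grabber thread per container, plus dicts of previous samples used later to derive Rx/s, Tx/s, IoR/s and IoW/s) can be sketched on its own. The StatsGrabber class below is a simplified, hypothetical stand-in for the plugin's ThreadDockerGrabber, whose real implementation is not part of this diff; only the docker-py calls (docker.from_env(), Container.stats(decode=True, stream=True), containers.list()) come from the library.

import threading

import docker  # docker-py, the optional dependency mentioned at the top of the module


class StatsGrabber(threading.Thread):
    """Hypothetical stand-in for ThreadDockerGrabber: stream stats for one container."""

    def __init__(self, container):
        super(StatsGrabber, self).__init__(daemon=True)
        self._container = container
        self._stats = {}
        self._stop_event = threading.Event()

    def run(self):
        # stats(decode=True, stream=True) yields roughly one stats dict per second
        for sample in self._container.stats(decode=True, stream=True):
            self._stats = sample
            if self._stop_event.is_set():
                break

    @property
    def stats(self):
        # Latest sample grabbed by the background thread
        return self._stats

    def stop(self):
        self._stop_event.set()


if __name__ == '__main__':
    client = docker.from_env()
    # One thread per running container, keyed by container id, as in the plugin
    thread_list = {c.id: StatsGrabber(c) for c in client.containers.list()}
    for t in thread_list.values():
        t.start()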
@@ -217,8 +226,8 @@ class Plugin(GlancesPlugin):
                 t.start()
 
         # Stop threads for non-existing containers
-        nonexisting_containers = set(iterkeys(self.thread_list)) - set([c.id for c in containers])
-        for container_id in nonexisting_containers:
+        absent_containers = set(iterkeys(self.thread_list)) - set([c.id for c in containers])
+        for container_id in absent_containers:
             # Stop the thread
             logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12]))
             self.thread_list[container_id].stop()
@@ -267,7 +276,8 @@ class Plugin(GlancesPlugin):
                 container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_list[container.id].stats)
                 container_stats['cpu_percent'] = container_stats['cpu'].get('total', None)
                 # MEM
-                container_stats['memory'] = self.get_docker_memory(container.id, self.thread_list[container.id].stats)
+                container_stats['memory'] = self.get_docker_memory(container.id,
+                                                                   self.thread_list[container.id].stats)
                 container_stats['memory_usage'] = container_stats['memory'].get('usage', None)
                 if container_stats['memory'].get('cache', None) is not None:
                     container_stats['memory_usage'] -= container_stats['memory']['cache']
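A short worked example (invented numbers, same dict shape as the get_docker_memory() result used above) of the memory_usage computation: the page cache is subtracted when it is reported, the idea being that cached pages are reclaimable and should not count against the container.

memory = {'usage': 52_428_800, 'cache': 4_194_304}  # shape of the 'memory' stats above

memory_usage = memory.get('usage')
if memory.get('cache') is not None:
    memory_usage -= memory['cache']
print(memory_usage)  # 48234496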
@@ -276,7 +286,8 @@ class Plugin(GlancesPlugin):
                 container_stats['io_r'] = container_stats['io'].get('ior', None)
                 container_stats['io_w'] = container_stats['io'].get('iow', None)
                 # NET
-                container_stats['network'] = self.get_docker_network(container.id, self.thread_list[container.id].stats)
+                container_stats['network'] = self.get_docker_network(container.id,
+                                                                     self.thread_list[container.id].stats)
                 container_stats['network_rx'] = container_stats['network'].get('rx', None)
                 container_stats['network_tx'] = container_stats['network'].get('tx', None)
             else:
@@ -310,51 +321,41 @@ class Plugin(GlancesPlugin):
         all_stats is the output of the stats method of the Docker API
         Output: a dict {'total': 1.49}
         """
-        ret = {'total': 0.0}
+        cpu_stats = {'total': 0.0}
 
-        # Read the stats
-        # For each container, you will find a pseudo-file cpuacct.stat,
-        # containing the CPU usage accumulated by the processes of the container.
-        # Those times are expressed in ticks of 1/USER_HZ of a second.
-        # On x86 systems, USER_HZ is 100.
-        cpu_new = {}
-        precpu_new = {}
         try:
-            cpu_new['total'] = all_stats['cpu_stats']['cpu_usage'].get(
-                'total_usage', None)
-            precpu_new['total'] = all_stats['precpu_stats']['cpu_usage'].get(
-                'total_usage', None)
-            cpu_new['system'] = all_stats['cpu_stats'].get(
-                'system_cpu_usage', None)
-            precpu_new['system'] = all_stats['precpu_stats'].get(
-                'system_cpu_usage', None)
+            cpu = {
+                'system': all_stats['cpu_stats']['system_cpu_usage'],
+                'total': all_stats['cpu_stats']['cpu_usage']['total_usage']
+            }
+            precpu = {
+                'system': all_stats['precpu_stats']['system_cpu_usage'],
+                'total': all_stats['precpu_stats']['cpu_usage']['total_usage']
+            }
             # Issue #1857
             # If either precpu_stats.online_cpus or cpu_stats.online_cpus is nil
             # then for compatibility with older daemons the length of
             # the corresponding cpu_usage.percpu_usage array should be used.
-            if 'online_cpus' in all_stats['cpu_stats'] and \
-               all_stats['cpu_stats']['online_cpus'] is not None:
-                cpu_new['nb_core'] = all_stats['cpu_stats']['online_cpus']
-            else:
-                cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
+            cpu['nb_core'] = all_stats['cpu_stats'].get('online_cpus', None)
+            if cpu['nb_core'] is None:
+                cpu['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
         except KeyError as e:
             logger.debug(
                 "docker plugin - Cannot grab CPU usage for container {} ({})".format(container_id, e))
             logger.debug(all_stats)
         else:
             try:
-                cpu_delta = cpu_new['total'] - precpu_new['total']
-                system_cpu_delta = cpu_new['system'] - precpu_new['system']
+                cpu_delta = cpu['total'] - precpu['total']
+                system_cpu_delta = cpu['system'] - precpu['system']
                 # CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0
-                ret['total'] = (cpu_delta / system_cpu_delta) * \
-                    cpu_new['nb_core'] * 100.0
+                cpu_stats['total'] = (cpu_delta / system_cpu_delta) * cpu['nb_core'] * 100.0
            except TypeError as e:
                 logger.debug(
                     "docker plugin - Cannot compute CPU usage for container {} ({})".format(container_id, e))
                 logger.debug(all_stats)
 
         # Return the stats
-        return ret
+        return cpu_stats
 
     def get_docker_memory(self, container_id, all_stats):
         """Return the container MEMORY.
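The CPU percentage computed in the hunk above can be reproduced standalone. The sketch below assumes a payload shaped like the Docker stats API (the field names are the ones used in the hunk; the numbers are made up for the example).

# Invented sample, shaped like the Docker stats API output
sample = {
    'cpu_stats': {
        'cpu_usage': {'total_usage': 600_000_000},   # container CPU time, nanoseconds
        'system_cpu_usage': 12_000_000_000,          # host CPU time, nanoseconds
        'online_cpus': 4,
    },
    'precpu_stats': {
        'cpu_usage': {'total_usage': 400_000_000},
        'system_cpu_usage': 10_000_000_000,
    },
}

cpu_delta = sample['cpu_stats']['cpu_usage']['total_usage'] - sample['precpu_stats']['cpu_usage']['total_usage']
system_cpu_delta = sample['cpu_stats']['system_cpu_usage'] - sample['precpu_stats']['system_cpu_usage']
# Fall back to the length of percpu_usage when online_cpus is missing (issue #1857)
nb_core = sample['cpu_stats'].get('online_cpus') or len(sample['cpu_stats']['cpu_usage'].get('percpu_usage') or [])

# CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0
cpu_percent = (cpu_delta / system_cpu_delta) * nb_core * 100.0
print(round(cpu_percent, 2))  # -> 40.0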
@@ -363,30 +364,28 @@ class Plugin(GlancesPlugin):
         all_stats is the output of the stats method of the Docker API
         Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...}
         """
-        ret = {}
+        memory_stats = {}
         # Read the stats
         try:
+            # Mandatory fields
+            memory_stats['usage'] = all_stats['memory_stats']['usage']
+            memory_stats['limit'] = all_stats['memory_stats']['limit']
             # Issue #1857
             # Some stats are not always available in ['memory_stats']['stats']
             if 'rss' in all_stats['memory_stats']['stats']:
-                ret['rss'] = all_stats['memory_stats']['stats']['rss']
+                memory_stats['rss'] = all_stats['memory_stats']['stats']['rss']
             elif 'total_rss' in all_stats['memory_stats']['stats']:
-                ret['rss'] = all_stats['memory_stats']['stats']['total_rss']
+                memory_stats['rss'] = all_stats['memory_stats']['stats']['total_rss']
             else:
-                ret['rss'] = None
-            ret['cache'] = all_stats['memory_stats']['stats'].get(
-                'cache', None)
-            ret['max_usage'] = all_stats['memory_stats'].get(
-                'max_usage', None)
-            # Mandatory fields
-            ret['usage'] = all_stats['memory_stats']['usage']
-            ret['limit'] = all_stats['memory_stats']['limit']
+                memory_stats['rss'] = None
+            memory_stats['cache'] = all_stats['memory_stats']['stats'].get('cache', None)
+            memory_stats['max_usage'] = all_stats['memory_stats'].get('max_usage', None)
         except (KeyError, TypeError) as e:
             # all_stats do not have MEM information
             logger.debug("docker plugin - Cannot grab MEM usage for container {} ({})".format(container_id, e))
             logger.debug(all_stats)
         # Return the stats
-        return ret
+        return memory_stats
 
     def get_docker_network(self, container_id, all_stats):
         """Return the container network usage using the Docker API (v1.0 or higher).
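A hedged sketch of the memory parsing above, condensing the rss / total_rss fallback and the optional keys into dict.get() calls. Which keys appear under memory_stats['stats'] depends on the daemon and cgroup layout, hence the fallbacks (issue #1857); the sample payload is invented.

# Invented sample, shaped like memory_stats in the Docker stats API output
sample = {'memory_stats': {'usage': 52_428_800,
                           'limit': 1_073_741_824,
                           'max_usage': 60_000_000,
                           'stats': {'total_rss': 41_943_040, 'cache': 4_194_304}}}

memory_stats = {}
try:
    # Mandatory fields
    memory_stats['usage'] = sample['memory_stats']['usage']
    memory_stats['limit'] = sample['memory_stats']['limit']
    # Optional fields, not reported by every daemon / cgroup version
    stats = sample['memory_stats'].get('stats', {})
    memory_stats['rss'] = stats.get('rss', stats.get('total_rss'))
    memory_stats['cache'] = stats.get('cache')
    memory_stats['max_usage'] = sample['memory_stats'].get('max_usage')
except KeyError as e:
    print('memory stats not available:', e)

print(memory_stats['rss'])  # -> 41943040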
@@ -403,7 +402,7 @@ class Plugin(GlancesPlugin):
 
         # Read the rx/tx stats (in bytes)
         try:
-            netcounters = all_stats["networks"]
+            net_stats = all_stats["networks"]
         except KeyError as e:
             # all_stats do not have NETWORK information
             logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e))
@@ -411,37 +410,24 @@ class Plugin(GlancesPlugin):
             # No fallback available...
             return network_new
 
-        # Previous network interface stats are stored in the network_old variable
-        if not hasattr(self, 'netcounters_old'):
-            # First call, we init the network_old var
-            self.netcounters_old = {}
-            try:
-                self.netcounters_old[container_id] = netcounters
-            except (IOError, UnboundLocalError):
-                pass
-
-        if container_id not in self.netcounters_old:
-            try:
-                self.netcounters_old[container_id] = netcounters
-            except (IOError, UnboundLocalError):
-                pass
+        # Previous network interface stats are stored in the self.network_old variable
+        # By storing time data we enable Rx/s and Tx/s calculations in the XML/RPC API, which would otherwise
+        # be overly difficult work for users of the API
+        try:
+            network_new['cumulative_rx'] = net_stats["eth0"]["rx_bytes"]
+            network_new['cumulative_tx'] = net_stats["eth0"]["tx_bytes"]
+        except KeyError as e:
+            # all_stats do not have INTERFACE information
+            logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e))
+            logger.debug(all_stats)
         else:
-            # By storing time data we enable Rx/s and Tx/s calculations in the
-            # XML/RPC API, which would otherwise be overly difficult work
-            # for users of the API
-            try:
-                network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
-                network_new['rx'] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"]
-                network_new['tx'] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"]
-                network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"]
-                network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"]
-            except KeyError as e:
-                # all_stats do not have INTERFACE information
-                logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e))
-                logger.debug(all_stats)
+            network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
+            if container_id in self.network_old:
+                network_new['rx'] = network_new['cumulative_rx'] - self.network_old[container_id]['cumulative_rx']
+                network_new['tx'] = network_new['cumulative_tx'] - self.network_old[container_id]['cumulative_tx']
 
         # Save stats to compute next bitrate
-        self.netcounters_old[container_id] = netcounters
+        self.network_old[container_id] = network_new
 
         # Return the stats
         return network_new
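A minimal sketch of the pattern above: keep the previous cumulative counters per container and derive the rx/tx deltas plus the elapsed time, which the display code later turns into Rx/s and Tx/s. The function name, arguments and numbers are illustrative, not the plugin's exact API.

network_old = {}

def network_rates(container_id, cumulative_rx, cumulative_tx, time_since_update):
    """Return a dict mirroring the shape of get_docker_network()'s output for one sample."""
    network_new = {'cumulative_rx': cumulative_rx,
                   'cumulative_tx': cumulative_tx,
                   'time_since_update': time_since_update}
    if container_id in network_old:
        # Bytes transferred since the previous sample
        network_new['rx'] = cumulative_rx - network_old[container_id]['cumulative_rx']
        network_new['tx'] = cumulative_tx - network_old[container_id]['cumulative_tx']
    # Save stats to compute the next rate
    network_old[container_id] = network_new
    return network_new

network_rates('abc123', 1_000_000, 500_000, 2.0)          # first call: no rx/tx yet
print(network_rates('abc123', 1_600_000, 700_000, 2.0))   # rx=600000, tx=200000 since the previous call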
@@ -461,7 +447,7 @@ class Plugin(GlancesPlugin):
 
         # Read the ior/iow stats (in bytes)
         try:
-            iocounters = all_stats["blkio_stats"]
+            io_stats = all_stats["blkio_stats"]
         except KeyError as e:
             # all_stats do not have io information
             logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
@@ -469,45 +455,27 @@ class Plugin(GlancesPlugin):
             # No fallback available...
             return io_new
 
-        # Previous io interface stats are stored in the io_old variable
-        if not hasattr(self, 'iocounters_old'):
-            # First call, we init the io_old var
-            self.iocounters_old = {}
-            try:
-                self.iocounters_old[container_id] = iocounters
-            except (IOError, UnboundLocalError):
-                pass
+        # Previous io interface stats are stored in the self.io_old variable
+        # By storing time data we enable IoR/s and IoW/s calculations in the
+        # XML/RPC API, which would otherwise be overly difficult work
+        # for users of the API
+        try:
+            io_service_bytes_recursive = io_stats['io_service_bytes_recursive']
 
-        if container_id not in self.iocounters_old:
-            try:
-                self.iocounters_old[container_id] = iocounters
-            except (IOError, UnboundLocalError):
-                pass
+            # Read IOR and IOW value in the structure list of dict
+            io_new['cumulative_ior'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value']
+            io_new['cumulative_iow'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value']
+        except (TypeError, IndexError, KeyError, AttributeError) as e:
+            # all_stats do not have io information
+            logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
         else:
-            # By storing time data we enable IoR/s and IoW/s calculations in the
-            # XML/RPC API, which would otherwise be overly difficult work
-            # for users of the API
-            try:
-                new_io_service_bytes_recursive = iocounters['io_service_bytes_recursive']
-                old_io_service_bytes_recursive = self.iocounters_old[container_id]['io_service_bytes_recursive']
+            io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id))
+            if container_id in self.io_old:
+                io_new['ior'] = io_new['cumulative_ior'] - self.io_old[container_id]['cumulative_ior']
+                io_new['iow'] = io_new['cumulative_iow'] - self.io_old[container_id]['cumulative_iow']
 
-                # Read IOR and IOW value in the structure list of dict
-                ior = [i for i in new_io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value']
-                iow = [i for i in new_io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value']
-                ior_old = [i for i in old_io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value']
-                iow_old = [i for i in old_io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value']
-            except (TypeError, IndexError, KeyError, AttributeError) as e:
-                # all_stats do not have io information
-                logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
-            else:
-                io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id))
-                io_new['ior'] = ior - ior_old
-                io_new['iow'] = iow - iow_old
-                io_new['cumulative_ior'] = ior
-                io_new['cumulative_iow'] = iow
-
-        # Save stats to compute next bitrate
-        self.iocounters_old[container_id] = iocounters
+        # Save stats to compute next bitrate
+        self.io_old[container_id] = io_new
 
         # Return the stats
         return io_new
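A sketch of the blkio parsing used above, with an invented io_service_bytes_recursive payload: each entry describes one (device, op) pair, and the cumulative read and write totals are picked out by the op field, exactly as the list comprehensions in the hunk do.

# Invented sample, shaped like blkio_stats['io_service_bytes_recursive']
io_service_bytes_recursive = [
    {'major': 8, 'minor': 0, 'op': 'Read', 'value': 1_048_576},
    {'major': 8, 'minor': 0, 'op': 'Write', 'value': 524_288},
]

io_new = {}
try:
    io_new['cumulative_ior'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value']
    io_new['cumulative_iow'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value']
except (TypeError, IndexError, KeyError) as e:
    # Missing or empty payload: no block IO stats for this container
    print('blkio stats not available:', e)

print(io_new)  # {'cumulative_ior': 1048576, 'cumulative_iow': 524288}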
@@ -569,8 +537,8 @@ class Plugin(GlancesPlugin):
 
         # Only process if stats exist (and non null) and display plugin enable...
         if not self.stats \
-           or 'containers' not in self.stats or len(self.stats['containers']) == 0 \
-           or self.is_disabled():
+                or 'containers' not in self.stats or len(self.stats['containers']) == 0 \
+                or self.is_disabled():
             return ret
 
         # Build the string message
@@ -591,7 +559,7 @@ class Plugin(GlancesPlugin):
                                                        default=20)
                              if self.config is not None else 20,
                              len(max(self.stats['containers'],
-                                 key=lambda x: len(x['name']))['name']))
+                                     key=lambda x: len(x['name']))['name']))
         msg = ' {:{width}}'.format('Name', width=name_max_width)
         ret.append(self.curse_add_line(msg))
         msg = '{:>10}'.format('Status')
@@ -663,7 +631,8 @@ class Plugin(GlancesPlugin):
             unit = 'b'
             for r in ['rx', 'tx']:
                 try:
-                    value = self.auto_unit(int(container['network'][r] // container['network']['time_since_update'] * to_bit)) + unit
+                    value = self.auto_unit(
+                        int(container['network'][r] // container['network']['time_since_update'] * to_bit)) + unit
                     msg = '{:>7}'.format(value)
                 except KeyError:
                     msg = '{:>7}'.format('_')
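A worked example (invented numbers) of the value computed in the last hunk: the byte delta stored by get_docker_network() is divided by the elapsed time and multiplied by 8 (to_bit) so the column can show a bits-per-second figure.

network = {'rx': 1_250_000, 'time_since_update': 2.0}   # bytes received since the last refresh
to_bit = 8
unit = 'b'

rate = int(network['rx'] // network['time_since_update'] * to_bit)
print(str(rate) + unit)  # 5000000b, shown in human-readable form once auto_unit() scales it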