libatasmart-devel \
liboping-devel \
lm_sensors-devel \
- python3-devel \
- python3-pydbus \
- python3-rrdtool \
rrdtool-devel \
systemd-devel
"""
\
libatasmart \
lm_sensors \
- python3 \
- python-pydbus \
rrdtool \
systemd
"""
\
libatasmart-dev \
liboping-dev \
- libpython3-dev \
libsensors-dev \
librrd-dev \
- libsystemd-dev \
- python3-pydbus \
- python3-rrdtool
+ libsystemd-dev
"""
}
# ------------------------------------------------------------------------------
-dist_bin_SCRIPTS = \
- src/scripts/collecty
-
-dist_sbin_SCRIPTS = \
- src/scripts/collectyd
-
-collecty_PYTHON = \
- src/collecty/__init__.py \
- src/collecty/__version__.py \
- src/collecty/bus.py \
- src/collecty/client.py \
- src/collecty/colours.py \
- src/collecty/constants.py \
- src/collecty/daemon.py \
- src/collecty/errors.py \
- src/collecty/i18n.py \
- src/collecty/logger.py \
- src/collecty/util.py
-
-collectydir = $(pythondir)/collecty
-
-collectyplugins_PYTHON = \
- src/collecty/plugins/base.py \
- src/collecty/plugins/contextswitches.py \
- src/collecty/plugins/conntrack.py \
- src/collecty/plugins/cpufreq.py \
- src/collecty/plugins/df.py \
- src/collecty/plugins/disk.py \
- src/collecty/plugins/__init__.py \
- src/collecty/plugins/interface.py \
- src/collecty/plugins/interrupts.py \
- src/collecty/plugins/ipfrag.py \
- src/collecty/plugins/latency.py \
- src/collecty/plugins/loadavg.py \
- src/collecty/plugins/memory.py \
- src/collecty/plugins/psi.py \
- src/collecty/plugins/processor.py \
- src/collecty/plugins/sensors.py
-
-collectypluginsdir = $(collectydir)/plugins
-
-pkgpyexec_LTLIBRARIES = \
- _collecty.la
-
-_collecty_la_SOURCES = \
- src/_collecty/_collectymodule.c \
- src/_collecty/_collectymodule.h \
- src/_collecty/blockdev.c \
- src/_collecty/ping.c \
- src/_collecty/sensors.c \
- src/_collecty/utils.c
-
-_collecty_la_CFLAGS = \
- $(AM_CFLAGS) \
- $(LIBATASMART_CFLAGS) \
- $(OPING_CFLAGS) \
- $(PYTHON_CFLAGS)
-
-_collecty_la_LDFLAGS = \
- $(AM_LDFLAGS) \
- -shared \
- -module \
- -avoid-version
-
-_collecty_la_LIBADD = \
- $(LIBATASMART_LIBS) \
- $(OPING_LIBS) \
- $(PYTHON_LIBS) \
- $(SENSORS_LIBS)
-
dist_dbuspolicy_DATA = \
src/dbus/org.ipfire.collecty1.conf
PKG_CHECK_MODULES([RRD], [librrd])
PKG_CHECK_MODULES([SYSTEMD], [libsystemd])
-# Python
-AM_PATH_PYTHON([3.9])
-PKG_CHECK_MODULES([PYTHON], [python-${PYTHON_VERSION}])
-
-AX_PYTHON_MODULE([pydbus], [fatal])
-AX_PYTHON_MODULE([rrdtool], [fatal])
-
# libatasmart
PKG_CHECK_MODULES([LIBATASMART], [libatasmart >= 0.19])
AC_CONFIG_FILES([
Makefile
po/Makefile.in
- src/collecty/__version__.py
])
AC_OUTPUT
-src/collecty/bus.py
-src/collecty/client.py
-src/collecty/colours.py
-src/collecty/constants.py
-src/collecty/daemon.py
-src/collecty/errors.py
-src/collecty/i18n.py
-src/collecty/__init__.py
-src/collecty/logger.py
-src/collecty/plugins/base.py
-src/collecty/plugins/conntrack.py
-src/collecty/plugins/contextswitches.py
-src/collecty/plugins/cpufreq.py
-src/collecty/plugins/df.py
-src/collecty/plugins/disk.py
-src/collecty/plugins/__init__.py
-src/collecty/plugins/interface.py
-src/collecty/plugins/interrupts.py
-src/collecty/plugins/ipfrag.py
-src/collecty/plugins/latency.py
-src/collecty/plugins/loadavg.py
-src/collecty/plugins/memory.py
-src/collecty/plugins/processor.py
-src/collecty/plugins/psi.py
-src/collecty/plugins/sensors.py
-src/collecty/util.py
-src/collecty/__version__.py
-src/collecty/__version__.py.in
src/systemd/collecty.service.in
+++ /dev/null
-/*
- * collecty
- * Copyright (C) 2015 IPFire Team (www.ipfire.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <Python.h>
-
-#include "_collectymodule.h"
-
-static PyMethodDef collecty_module_methods[] = {
- {"get_detected_sensors", (PyCFunction)_collecty_get_detected_sensors, METH_VARARGS, NULL},
- {"get_mountpoints", (PyCFunction)_collecty_get_mountpoints, METH_NOARGS, NULL},
- {"sensors_cleanup", (PyCFunction)_collecty_sensors_cleanup, METH_NOARGS, NULL},
- {"sensors_init", (PyCFunction)_collecty_sensors_init, METH_NOARGS, NULL},
- {NULL},
-};
-
-static struct PyModuleDef collecty_module = {
- PyModuleDef_HEAD_INIT,
- "_collecty", /* m_name */
- "_collecty module", /* m_doc */
- -1, /* m_size */
- collecty_module_methods, /* m_methods */
- NULL, /* m_reload */
- NULL, /* m_traverse */
- NULL, /* m_clear */
- NULL, /* m_free */
-};
-
-PyMODINIT_FUNC PyInit__collecty(void) {
- if (PyType_Ready(&BlockDeviceType) < 0)
- return NULL;
-
- if (PyType_Ready(&PingType) < 0)
- return NULL;
-
- if (PyType_Ready(&SensorType) < 0)
- return NULL;
-
- PyObject* m = PyModule_Create(&collecty_module);
-
- Py_INCREF(&BlockDeviceType);
- PyModule_AddObject(m, "BlockDevice", (PyObject*)&BlockDeviceType);
-
- Py_INCREF(&PingType);
- PyModule_AddObject(m, "Ping", (PyObject*)&PingType);
-
- PyExc_PingError = PyErr_NewException("_collecty.PingError", NULL, NULL);
- Py_INCREF(PyExc_PingError);
- PyModule_AddObject(m, "PingError", PyExc_PingError);
-
- PyExc_PingAddHostError = PyErr_NewException("_collecty.PingAddHostError", NULL, NULL);
- Py_INCREF(PyExc_PingAddHostError);
- PyModule_AddObject(m, "PingAddHostError", PyExc_PingAddHostError);
-
- PyExc_PingNoReplyError = PyErr_NewException("_collecty.PingNoReplyError", NULL, NULL);
- Py_INCREF(PyExc_PingNoReplyError);
- PyModule_AddObject(m, "PingNoReplyError", PyExc_PingNoReplyError);
-
- Py_INCREF(&SensorType);
- PyModule_AddObject(m, "Sensor", (PyObject*)&SensorType);
-
- return m;
-}
+++ /dev/null
-/*
- * collecty
- * Copyright (C) 2015 IPFire Team (www.ipfire.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <Python.h>
-
-#include <atasmart.h>
-#include <linux/hdreg.h>
-#include <mntent.h>
-#include <oping.h>
-#include <sensors/error.h>
-#include <sensors/sensors.h>
-
-PyMODINIT_FUNC PyInit__collecty(void);
-
-#define MODEL_SIZE 40
-#define SERIAL_SIZE 20
-
-#define PING_HISTORY_SIZE 1024
-#define PING_DEFAULT_COUNT 10
-#define PING_DEFAULT_TIMEOUT 8
-
-/* block devices */
-typedef struct {
- PyObject_HEAD
- char* path;
- struct hd_driveid identity;
- SkDisk* disk;
-} BlockDevice;
-
-extern PyTypeObject BlockDeviceType;
-
-/* ping */
-extern PyObject* PyExc_PingError;
-extern PyObject* PyExc_PingAddHostError;
-extern PyObject* PyExc_PingNoReplyError;
-
-typedef struct {
- PyObject_HEAD
- pingobj_t* ping;
- const char* host;
- struct {
- double history[PING_HISTORY_SIZE];
- size_t history_index;
- size_t history_size;
- size_t packets_sent;
- size_t packets_rcvd;
- double average;
- double stddev;
- double loss;
- } stats;
-} PingObject;
-
-extern PyTypeObject PingType;
-
-/* sensors */
-typedef struct {
- PyObject_HEAD
- const sensors_chip_name* chip;
- const sensors_feature* feature;
-} SensorObject;
-
-extern PyTypeObject SensorType;
-
-PyObject* _collecty_sensors_init();
-PyObject* _collecty_sensors_cleanup();
-PyObject* _collecty_get_detected_sensors(PyObject* o, PyObject* args);
-
-/* utils */
-int _collecty_mountpoint_is_virtual(const struct mntent* mp);
-PyObject* _collecty_get_mountpoints();
+++ /dev/null
-/*
- * collecty
- * Copyright (C) 2015 IPFire Team (www.ipfire.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <Python.h>
-
-#include <atasmart.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <mntent.h>
-#include <stdbool.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "_collectymodule.h"
-
-static void BlockDevice_dealloc(BlockDevice* self) {
- if (self->disk)
- sk_disk_free(self->disk);
-
- if (self->path)
- free(self->path);
-
- Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static int BlockDevice_get_identity(BlockDevice* device) {
- int fd;
-
- if ((fd = open(device->path, O_RDONLY | O_NONBLOCK)) < 0) {
- return 1;
- }
-
- int r = ioctl(fd, HDIO_GET_IDENTITY, &device->identity);
- close(fd);
-
- if (r)
- return 1;
-
- return 0;
-}
-
-static int BlockDevice_smart_is_available(BlockDevice* device) {
- SkBool available = FALSE;
-
- int r = sk_disk_smart_is_available(device->disk, &available);
- if (r)
- return -1;
-
- if (available)
- return 0;
-
- return 1;
-}
-
-static int BlockDevice_check_sleep_mode(BlockDevice* device) {
- SkBool awake = FALSE;
-
- int r = sk_disk_check_sleep_mode(device->disk, &awake);
- if (r)
- return -1;
-
- if (awake)
- return 0;
-
- return 1;
-}
-
-static PyObject * BlockDevice_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
- BlockDevice* self = (BlockDevice*)type->tp_alloc(type, 0);
-
- if (self) {
- self->path = NULL;
-
- // libatasmart
- self->disk = NULL;
- }
-
- return (PyObject *)self;
-}
-
-static int BlockDevice_init(BlockDevice* self, PyObject* args, PyObject* kwds) {
- const char* path = NULL;
-
- if (!PyArg_ParseTuple(args, "s", &path))
- return -1;
-
- self->path = strdup(path);
-
- int r = BlockDevice_get_identity(self);
- if (r) {
- PyErr_Format(PyExc_OSError, "Could not open block device: %s", path);
- return -1;
- }
-
- r = sk_disk_open(path, &self->disk);
- if (r == 0) {
- if (BlockDevice_smart_is_available(self) == 0) {
- if (BlockDevice_check_sleep_mode(self) == 0) {
- r = sk_disk_smart_read_data(self->disk);
- if (r) {
- PyErr_Format(PyExc_OSError, "Could not open block device %s: %s", path,
- strerror(errno));
- return -1;
- }
- }
- }
- } else {
- PyErr_Format(PyExc_OSError, "Could not open block device %s: %s", path,
- strerror(errno));
- return -1;
- }
-
- //sk_disk_identify_is_available
-
- return 0;
-}
-
-static PyObject* BlockDevice_get_path(PyObject* self) {
- BlockDevice* device = (BlockDevice*)self;
-
- return PyUnicode_FromString(device->path);
-}
-
-static void clean_string(char *s) {
- for (char* e = s; *e; e++) {
- if (*e < ' ' || *e >= 127)
- *e = ' ';
- }
-}
-
-static void drop_spaces(char *s) {
- char *d = s;
- bool prev_space = false;
-
- s += strspn(s, " ");
-
- for (; *s; s++) {
- if (prev_space) {
- if (*s != ' ') {
- prev_space = false;
- *(d++) = ' ';
- *(d++) = *s;
- }
- } else {
- if (*s == ' ')
- prev_space = true;
- else
- *(d++) = *s;
- }
- }
-
- *d = 0;
-}
-
-static void copy_string(char* d, const char* s, size_t n) {
- // Copy the source buffer to the destination buffer up to n
- memcpy(d, s, n);
-
- // Terminate the destination buffer with NULL
- d[n] = '\0';
-
- // Clean up the string from non-printable characters
- clean_string(d);
- drop_spaces(d);
-}
-
-static PyObject* BlockDevice_get_model(PyObject* self) {
- BlockDevice* device = (BlockDevice*)self;
-
- char model[MODEL_SIZE + 1];
- copy_string(model, device->identity.model, sizeof(model));
-
- return PyUnicode_FromString(model);
-}
-
-static PyObject* BlockDevice_get_serial(PyObject* self) {
- BlockDevice* device = (BlockDevice*)self;
-
- char serial[SERIAL_SIZE + 1];
- copy_string(serial, device->identity.serial_no, sizeof(serial));
-
- return PyUnicode_FromString(serial);
-}
-
-static PyObject* BlockDevice_is_smart_supported(PyObject* self) {
- BlockDevice* device = (BlockDevice*)self;
-
- if (BlockDevice_smart_is_available(device) == 0)
- Py_RETURN_TRUE;
-
- Py_RETURN_FALSE;
-}
-
-static PyObject* BlockDevice_is_awake(PyObject* self) {
- BlockDevice* device = (BlockDevice*)self;
-
- if (BlockDevice_check_sleep_mode(device) == 0)
- Py_RETURN_TRUE;
-
- Py_RETURN_FALSE;
-}
-
-static PyObject* BlockDevice_get_bad_sectors(PyObject* self) {
- BlockDevice* device = (BlockDevice*)self;
-
- if (BlockDevice_smart_is_available(device)) {
- PyErr_Format(PyExc_OSError, "Device does not support SMART");
- return NULL;
- }
-
- uint64_t bad_sectors;
- int r = sk_disk_smart_get_bad(device->disk, &bad_sectors);
- if (r)
- return NULL;
-
- return PyLong_FromUnsignedLongLong((unsigned long long)bad_sectors);
-}
-
-static PyObject* BlockDevice_get_temperature(PyObject* self) {
- BlockDevice* device = (BlockDevice*)self;
-
- if (BlockDevice_smart_is_available(device)) {
- PyErr_Format(PyExc_OSError, "Device does not support SMART");
- return NULL;
- }
-
- uint64_t mkelvin;
- int r = sk_disk_smart_get_temperature(device->disk, &mkelvin);
- if (r) {
- // Temperature not available but SMART is supported
- if (errno == ENOENT) {
- PyErr_Format(PyExc_OSError, "Device does not have a temperature");
- }
-
- return NULL;
- }
-
- // Convert the temperature to Kelvin
- return PyFloat_FromDouble((double)mkelvin / 1000.0);
-}
-
-static PyGetSetDef BlockDevice_getsetters[] = {
- {"path", (getter)BlockDevice_get_path, NULL, NULL, NULL},
- {"model", (getter)BlockDevice_get_model, NULL, NULL, NULL},
- {"serial", (getter)BlockDevice_get_serial, NULL, NULL, NULL},
- { NULL },
-};
-
-static PyMethodDef BlockDevice_methods[] = {
- {"get_bad_sectors", (PyCFunction)BlockDevice_get_bad_sectors, METH_NOARGS, NULL},
- {"get_temperature", (PyCFunction)BlockDevice_get_temperature, METH_NOARGS, NULL},
- {"is_smart_supported", (PyCFunction)BlockDevice_is_smart_supported, METH_NOARGS, NULL},
- {"is_awake", (PyCFunction)BlockDevice_is_awake, METH_NOARGS, NULL},
- { NULL },
-};
-
-PyTypeObject BlockDeviceType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "_collecty.BlockDevice", /*tp_name*/
- sizeof(BlockDevice), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- (destructor)BlockDevice_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash */
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/
- "BlockDevice objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- BlockDevice_methods, /* tp_methods */
- 0, /* tp_members */
- BlockDevice_getsetters, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)BlockDevice_init, /* tp_init */
- 0, /* tp_alloc */
- BlockDevice_new, /* tp_new */
-};
+++ /dev/null
-/*
- * collecty
- * Copyright (C) 2015 IPFire Team (www.ipfire.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <Python.h>
-
-#include <errno.h>
-#include <oping.h>
-#include <sys/time.h>
-#include <time.h>
-
-#include "_collectymodule.h"
-
-PyObject* PyExc_PingError = NULL;
-PyObject* PyExc_PingAddHostError = NULL;
-PyObject* PyExc_PingNoReplyError = NULL;
-
-static void Ping_dealloc(PingObject* self) {
- if (self->ping)
- ping_destroy(self->ping);
-
- Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static void Ping_init_stats(PingObject* self) {
- self->stats.history_index = 0;
- self->stats.history_size = 0;
- self->stats.packets_sent = 0;
- self->stats.packets_rcvd = 0;
-
- self->stats.average = 0.0;
- self->stats.stddev = 0.0;
- self->stats.loss = 0.0;
-}
-
-static PyObject* Ping_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
- PingObject* self = (PingObject*)type->tp_alloc(type, 0);
-
- if (self) {
- self->ping = NULL;
- self->host = NULL;
-
- Ping_init_stats(self);
- }
-
- return (PyObject*)self;
-}
-
-static int Ping_init(PingObject* self, PyObject* args, PyObject* kwds) {
- char* kwlist[] = {"host", "family", "timeout", "ttl", NULL};
- int family = PING_DEF_AF;
- double timeout = PING_DEFAULT_TIMEOUT;
- int ttl = PING_DEF_TTL;
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|idi", kwlist, &self->host,
- &family, &timeout, &ttl))
- return -1;
-
- if (family != AF_UNSPEC && family != AF_INET6 && family != AF_INET) {
- PyErr_Format(PyExc_ValueError, "Family must be AF_UNSPEC, AF_INET6, or AF_INET");
- return -1;
- }
-
- if (timeout < 0) {
- PyErr_Format(PyExc_ValueError, "Timeout must be greater than zero");
- return -1;
- }
-
- if (ttl < 1 || ttl > 255) {
- PyErr_Format(PyExc_ValueError, "TTL must be between 1 and 255");
- return -1;
- }
-
- self->ping = ping_construct();
- if (!self->ping) {
- return -1;
- }
-
- // Set options
- int r;
-
- r = ping_setopt(self->ping, PING_OPT_AF, &family);
- if (r) {
- PyErr_Format(PyExc_RuntimeError, "Could not set address family: %s",
- ping_get_error(self->ping));
- return -1;
- }
-
- if (timeout > 0) {
- r = ping_setopt(self->ping, PING_OPT_TIMEOUT, &timeout);
-
- if (r) {
- PyErr_Format(PyExc_RuntimeError, "Could not set timeout: %s",
- ping_get_error(self->ping));
- return -1;
- }
- }
-
- r = ping_setopt(self->ping, PING_OPT_TTL, &ttl);
- if (r) {
- PyErr_Format(PyExc_RuntimeError, "Could not set TTL: %s",
- ping_get_error(self->ping));
- return -1;
- }
-
- return 0;
-}
-
-static double Ping_compute_average(PingObject* self) {
- assert(self->stats.packets_rcvd > 0);
-
- double total_latency = 0.0;
-
- for (int i = 0; i < self->stats.history_size; i++) {
- if (self->stats.history[i] > 0)
- total_latency += self->stats.history[i];
- }
-
- return total_latency / self->stats.packets_rcvd;
-}
-
-static double Ping_compute_stddev(PingObject* self, double mean) {
- assert(self->stats.packets_rcvd > 0);
-
- double deviation = 0.0;
-
- for (int i = 0; i < self->stats.history_size; i++) {
- if (self->stats.history[i] > 0) {
- deviation += pow(self->stats.history[i] - mean, 2);
- }
- }
-
- // Normalise
- deviation /= self->stats.packets_rcvd;
-
- return sqrt(deviation);
-}
-
-static void Ping_compute_stats(PingObject* self) {
- // Compute the average latency
- self->stats.average = Ping_compute_average(self);
-
- // Compute the standard deviation
- self->stats.stddev = Ping_compute_stddev(self, self->stats.average);
-
- // Compute lost packets
- self->stats.loss = 1.0;
- self->stats.loss -= (double)self->stats.packets_rcvd \
- / (double)self->stats.packets_sent;
-}
-
-static double time_elapsed(struct timeval* t0) {
- struct timeval now;
- gettimeofday(&now, NULL);
-
- double r = now.tv_sec - t0->tv_sec;
- r += ((double)now.tv_usec / 1000000) - ((double)t0->tv_usec / 1000000);
-
- return r;
-}
-
-static PyObject* Ping_ping(PingObject* self, PyObject* args, PyObject* kwds) {
- char* kwlist[] = {"count", "deadline", NULL};
- size_t count = PING_DEFAULT_COUNT;
- double deadline = 0;
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Id", kwlist, &count, &deadline))
- return NULL;
-
- int r = ping_host_add(self->ping, self->host);
- if (r) {
- PyErr_Format(PyExc_PingAddHostError, "Could not add host %s: %s",
- self->host, ping_get_error(self->ping));
- return NULL;
- }
-
- // Reset all collected statistics in case ping() is called more than once.
- Ping_init_stats(self);
-
- // Save start time
- struct timeval time_start;
- r = gettimeofday(&time_start, NULL);
- if (r) {
- PyErr_Format(PyExc_RuntimeError, "Could not determine start time");
- return NULL;
- }
-
- // Do the pinging
- while (count--) {
- self->stats.packets_sent++;
-
- Py_BEGIN_ALLOW_THREADS
- r = ping_send(self->ping);
- Py_END_ALLOW_THREADS
-
-		// Count received packets
- if (r >= 0) {
- self->stats.packets_rcvd += r;
-
- // Raise any errors
- } else {
- PyErr_Format(PyExc_RuntimeError, "Error executing ping_send(): %s",
- ping_get_error(self->ping));
- return NULL;
- }
-
- // Extract all data
- pingobj_iter_t* iter = ping_iterator_get(self->ping);
-
- double* latency = &self->stats.history[self->stats.history_index];
- size_t buffer_size = sizeof(latency);
- ping_iterator_get_info(iter, PING_INFO_LATENCY, latency, &buffer_size);
-
- // Increase the history pointer
- self->stats.history_index++;
- self->stats.history_index %= sizeof(self->stats.history);
-
- // Increase the history size
- if (self->stats.history_size < sizeof(self->stats.history))
- self->stats.history_size++;
-
- // Check if the deadline is due
- if (deadline > 0) {
- double elapsed_time = time_elapsed(&time_start);
-
- // If we have run longer than the deadline is, we end the main loop
- if (elapsed_time >= deadline)
- break;
- }
- }
-
- if (self->stats.packets_rcvd == 0) {
- PyErr_Format(PyExc_PingNoReplyError, "No replies received from %s", self->host);
- return NULL;
- }
-
- Ping_compute_stats(self);
-
- Py_RETURN_NONE;
-}
-
-static PyObject* Ping_get_packets_sent(PingObject* self) {
- return PyLong_FromUnsignedLong(self->stats.packets_sent);
-}
-
-static PyObject* Ping_get_packets_rcvd(PingObject* self) {
- return PyLong_FromUnsignedLong(self->stats.packets_rcvd);
-}
-
-static PyObject* Ping_get_average(PingObject* self) {
- return PyFloat_FromDouble(self->stats.average);
-}
-
-static PyObject* Ping_get_stddev(PingObject* self) {
- return PyFloat_FromDouble(self->stats.stddev);
-}
-
-static PyObject* Ping_get_loss(PingObject* self) {
- return PyFloat_FromDouble(self->stats.loss);
-}
-
-static PyGetSetDef Ping_getsetters[] = {
- {"average", (getter)Ping_get_average, NULL, NULL, NULL},
- {"loss", (getter)Ping_get_loss, NULL, NULL, NULL},
- {"stddev", (getter)Ping_get_stddev, NULL, NULL, NULL},
- {"packets_sent", (getter)Ping_get_packets_sent, NULL, NULL, NULL},
- {"packets_rcvd", (getter)Ping_get_packets_rcvd, NULL, NULL, NULL},
- { NULL },
-};
-
-static PyMethodDef Ping_methods[] = {
- {"ping", (PyCFunction)Ping_ping, METH_VARARGS|METH_KEYWORDS, NULL},
- { NULL },
-};
-
-PyTypeObject PingType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "_collecty.Ping", /*tp_name*/
- sizeof(PingObject), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- (destructor)Ping_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash */
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/
- "Ping object", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- Ping_methods, /* tp_methods */
- 0, /* tp_members */
- Ping_getsetters, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)Ping_init, /* tp_init */
- 0, /* tp_alloc */
- Ping_new, /* tp_new */
-};
+++ /dev/null
-/*
- * collecty
- * Copyright (C) 2015 IPFire Team (www.ipfire.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <Python.h>
-
-#include <errno.h>
-#include <mntent.h>
-#include <sensors/error.h>
-#include <sensors/sensors.h>
-
-#include "_collectymodule.h"
-
-static void Sensor_dealloc(SensorObject* self) {
- Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* Sensor_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
- SensorObject* self = (SensorObject*)type->tp_alloc(type, 0);
-
- return (PyObject *)self;
-}
-
-static int Sensor_init(SensorObject* self, PyObject* args, PyObject* kwds) {
- return 0;
-}
-
-static PyObject* Sensor_get_label(SensorObject* self) {
- char* label = sensors_get_label(self->chip, self->feature);
-
- if (label) {
- PyObject* string = PyUnicode_FromString(label);
- free(label);
-
- return string;
- }
-
- Py_RETURN_NONE;
-}
-
-static PyObject* Sensor_get_name(SensorObject* self) {
- char chip_name[512];
-
- int r = sensors_snprintf_chip_name(chip_name, sizeof(chip_name), self->chip);
- if (r < 0) {
- PyErr_Format(PyExc_RuntimeError, "Could not print chip name");
- return NULL;
- }
-
- return PyUnicode_FromString(chip_name);
-}
-
-static PyObject* Sensor_get_type(SensorObject* self) {
- const char* type = NULL;
-
- switch (self->feature->type) {
- case SENSORS_FEATURE_IN:
- type = "voltage";
- break;
-
- case SENSORS_FEATURE_FAN:
- type = "fan";
- break;
-
- case SENSORS_FEATURE_TEMP:
- type = "temperature";
- break;
-
- case SENSORS_FEATURE_POWER:
- type = "power";
- break;
-
- default:
- break;
- }
-
- if (type)
- return PyUnicode_FromString(type);
-
- Py_RETURN_NONE;
-}
-
-static PyObject* Sensor_get_bus(SensorObject* self) {
- const char* type = NULL;
-
- switch (self->chip->bus.type) {
- case SENSORS_BUS_TYPE_I2C:
- type = "i2c";
- break;
-
- case SENSORS_BUS_TYPE_ISA:
- type = "isa";
- break;
-
- case SENSORS_BUS_TYPE_PCI:
- type = "pci";
- break;
-
- case SENSORS_BUS_TYPE_SPI:
- type = "spi";
- break;
-
- case SENSORS_BUS_TYPE_VIRTUAL:
- type = "virtual";
- break;
-
- case SENSORS_BUS_TYPE_ACPI:
- type = "acpi";
- break;
-
- case SENSORS_BUS_TYPE_HID:
- type = "hid";
- break;
-
- default:
- break;
- }
-
- if (type)
- return PyUnicode_FromString(type);
-
- Py_RETURN_NONE;
-}
-
-static const sensors_subfeature* Sensor_get_subfeature(SensorObject* sensor, sensors_subfeature_type type) {
- const sensors_subfeature* subfeature;
- int subfeature_num = 0;
-
- while ((subfeature = sensors_get_all_subfeatures(sensor->chip, sensor->feature, &subfeature_num))) {
- if (subfeature->type == type)
- break;
- }
-
- return subfeature;
-}
-
-static PyObject* Sensor_return_value(SensorObject* sensor, sensors_subfeature_type subfeature_type) {
- double value;
-
- const sensors_subfeature* subfeature = Sensor_get_subfeature(sensor, subfeature_type);
- if (!subfeature) {
- PyErr_Format(PyExc_AttributeError, "Could not find sensor of requested type");
- return NULL;
- }
-
- // Fetch value from the sensor
- int r = sensors_get_value(sensor->chip, subfeature->number, &value);
- if (r < 0) {
- PyErr_Format(PyExc_ValueError, "Error retrieving value from sensor: %s",
- sensors_strerror(errno));
- return NULL;
- }
-
-	// Convert all temperature values from Celsius to Kelvin
- if (sensor->feature->type == SENSORS_FEATURE_TEMP)
- value += 273.15;
-
- return PyFloat_FromDouble(value);
-}
-
-static PyObject* Sensor_no_value() {
- PyErr_Format(PyExc_ValueError, "Value not supported for this sensor type");
- return NULL;
-}
-
-static PyObject* Sensor_get_value(SensorObject* self) {
- sensors_subfeature_type subfeature_type;
-
- switch (self->feature->type) {
- case SENSORS_FEATURE_IN:
- subfeature_type = SENSORS_SUBFEATURE_IN_INPUT;
- break;
-
- case SENSORS_FEATURE_FAN:
- subfeature_type = SENSORS_SUBFEATURE_FAN_INPUT;
- break;
-
- case SENSORS_FEATURE_TEMP:
- subfeature_type = SENSORS_SUBFEATURE_TEMP_INPUT;
- break;
-
- case SENSORS_FEATURE_POWER:
- subfeature_type = SENSORS_SUBFEATURE_POWER_INPUT;
- break;
-
- default:
- return Sensor_no_value();
- }
-
- return Sensor_return_value(self, subfeature_type);
-}
-
-static PyObject* Sensor_get_critical(SensorObject* self) {
- sensors_subfeature_type subfeature_type;
-
- switch (self->feature->type) {
- case SENSORS_FEATURE_IN:
- subfeature_type = SENSORS_SUBFEATURE_IN_CRIT;
- break;
-
- case SENSORS_FEATURE_TEMP:
- subfeature_type = SENSORS_SUBFEATURE_TEMP_CRIT;
- break;
-
- case SENSORS_FEATURE_POWER:
- subfeature_type = SENSORS_SUBFEATURE_POWER_CRIT;
- break;
-
- default:
- return Sensor_no_value();
- }
-
- return Sensor_return_value(self, subfeature_type);
-}
-
-static PyObject* Sensor_get_maximum(SensorObject* self) {
- sensors_subfeature_type subfeature_type;
-
- switch (self->feature->type) {
- case SENSORS_FEATURE_IN:
- subfeature_type = SENSORS_SUBFEATURE_IN_MAX;
- break;
-
- case SENSORS_FEATURE_FAN:
- subfeature_type = SENSORS_SUBFEATURE_FAN_MAX;
- break;
-
- case SENSORS_FEATURE_TEMP:
- subfeature_type = SENSORS_SUBFEATURE_TEMP_MAX;
- break;
-
- case SENSORS_FEATURE_POWER:
- subfeature_type = SENSORS_SUBFEATURE_POWER_MAX;
- break;
-
- default:
- return Sensor_no_value();
- }
-
- return Sensor_return_value(self, subfeature_type);
-}
-
-static PyObject* Sensor_get_minimum(SensorObject* self) {
- sensors_subfeature_type subfeature_type;
-
- switch (self->feature->type) {
- case SENSORS_FEATURE_IN:
- subfeature_type = SENSORS_SUBFEATURE_IN_MIN;
- break;
-
- case SENSORS_FEATURE_FAN:
- subfeature_type = SENSORS_SUBFEATURE_FAN_MIN;
- break;
-
- case SENSORS_FEATURE_TEMP:
- subfeature_type = SENSORS_SUBFEATURE_TEMP_MIN;
- break;
-
- default:
- return Sensor_no_value();
- }
-
- return Sensor_return_value(self, subfeature_type);
-}
-
-static PyObject* Sensor_get_high(SensorObject* self) {
- sensors_subfeature_type subfeature_type;
-
- switch (self->feature->type) {
- case SENSORS_FEATURE_TEMP:
- subfeature_type = SENSORS_SUBFEATURE_TEMP_MAX;
- break;
-
- default:
- return Sensor_no_value();
- }
-
- return Sensor_return_value(self, subfeature_type);
-}
-
-static SensorObject* make_sensor_object(const sensors_chip_name* chip, const sensors_feature* feature) {
- SensorObject* sensor = PyObject_New(SensorObject, &SensorType);
- if (!sensor)
- return NULL;
-
- if (!PyObject_Init((PyObject*)sensor, &SensorType)) {
- Py_DECREF(sensor);
- return NULL;
- }
-
- sensor->chip = chip;
- sensor->feature = feature;
-
- return sensor;
-}
-
-PyObject* _collecty_sensors_init() {
- // Clean up everything first in case sensors_init was called earlier
- sensors_cleanup();
-
- int r = sensors_init(NULL);
- if (r) {
- PyErr_Format(PyExc_OSError, "Could not initialise sensors: %s",
- sensors_strerror(errno));
- return NULL;
- }
-
- Py_RETURN_NONE;
-}
-
-PyObject* _collecty_sensors_cleanup() {
- sensors_cleanup();
- Py_RETURN_NONE;
-}
-
-PyObject* _collecty_get_detected_sensors(PyObject* o, PyObject* args) {
- const char* name = NULL;
- sensors_chip_name chip_name;
-
- if (!PyArg_ParseTuple(args, "|z", &name))
- return NULL;
-
- if (name) {
- int r = sensors_parse_chip_name(name, &chip_name);
- if (r < 0) {
- PyErr_Format(PyExc_ValueError, "Could not parse chip name: %s", name);
- return NULL;
- }
- }
-
- PyObject* list = PyList_New(0);
-
- const sensors_chip_name* chip;
- int chip_num = 0;
-
- while ((chip = sensors_get_detected_chips((name) ? &chip_name : NULL, &chip_num))) {
- const sensors_feature* feature;
- int feature_num = 0;
-
- while ((feature = sensors_get_features(chip, &feature_num))) {
- // Skip sensors we do not want to support
- switch (feature->type) {
- case SENSORS_FEATURE_IN:
- case SENSORS_FEATURE_FAN:
- case SENSORS_FEATURE_TEMP:
- case SENSORS_FEATURE_POWER:
- break;
-
- default:
- continue;
- }
-
- SensorObject* sensor = make_sensor_object(chip, feature);
- PyList_Append(list, (PyObject*)sensor);
- }
- }
-
- return list;
-}
-
-static PyGetSetDef Sensor_getsetters[] = {
- {"bus", (getter)Sensor_get_bus, NULL, NULL, NULL},
- {"critical", (getter)Sensor_get_critical, NULL, NULL, NULL},
- {"high", (getter)Sensor_get_high, NULL, NULL, NULL},
- {"label", (getter)Sensor_get_label, NULL, NULL, NULL},
- {"maximum", (getter)Sensor_get_maximum, NULL, NULL, NULL},
- {"minumum", (getter)Sensor_get_minimum, NULL, NULL, NULL},
- {"name", (getter)Sensor_get_name, NULL, NULL, NULL},
- {"type", (getter)Sensor_get_type, NULL, NULL, NULL},
- {"value", (getter)Sensor_get_value, NULL, NULL, NULL},
- { NULL },
-};
-
-PyTypeObject SensorType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "_collecty.Sensor", /*tp_name*/
- sizeof(SensorObject), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- (destructor)Sensor_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash */
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/
- "Sensor objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- Sensor_getsetters, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)Sensor_init, /* tp_init */
- 0, /* tp_alloc */
- Sensor_new, /* tp_new */
-};
+++ /dev/null
-/*
- * collecty
- * Copyright (C) 2015 IPFire Team (www.ipfire.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <Python.h>
-#include <mntent.h>
-
-#include "_collectymodule.h"
-
-int _collecty_mountpoint_is_virtual(const struct mntent* mp) {
- // Ignore all ramdisks
- if (mp->mnt_fsname[0] != '/')
- return 1;
-
- // Ignore network mounts
- if (hasmntopt(mp, "_netdev") != NULL)
- return 1;
-
- return 0;
-}
-
-PyObject* _collecty_get_mountpoints() {
- FILE* fp = setmntent(_PATH_MOUNTED, "r");
- if (!fp)
- return NULL;
-
- PyObject* list = PyList_New(0);
- int r = 0;
-
- struct mntent* mountpoint = getmntent(fp);
- while (mountpoint) {
- if (!_collecty_mountpoint_is_virtual(mountpoint)) {
- // Create a tuple with the information of the mountpoint
- PyObject* mp = PyTuple_New(4);
- PyTuple_SET_ITEM(mp, 0, PyUnicode_FromString(mountpoint->mnt_fsname));
- PyTuple_SET_ITEM(mp, 1, PyUnicode_FromString(mountpoint->mnt_dir));
- PyTuple_SET_ITEM(mp, 2, PyUnicode_FromString(mountpoint->mnt_type));
- PyTuple_SET_ITEM(mp, 3, PyUnicode_FromString(mountpoint->mnt_opts));
-
- // Append the tuple to the list
- r = PyList_Append(list, mp);
- if (r)
- break;
- }
-
- // Move on to the next mountpoint
- mountpoint = getmntent(fp);
- }
-
- endmntent(fp);
-
- if (r) {
- Py_DECREF(list);
- return NULL;
- }
-
- return list;
-}
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-# Initialize logging
-from . import logger
-
-from .client import Collecty
-
-from . import util
+++ /dev/null
-# this file is autogenerated by the buildsystem
-COLLECTY_VERSION = "@PACKAGE_VERSION@"
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import dbus
-import dbus.mainloop.glib
-import dbus.service
-import gi.repository.GLib
-import gi.repository.GObject
-import logging
-import threading
-
-from .i18n import _
-
-log = logging.getLogger("collecty.bus")
-
-DOMAIN = "org.ipfire.collecty1"
-
-class Bus(threading.Thread):
- def __init__(self, collecty):
- threading.Thread.__init__(self)
- self.daemon = True
-
- self.collecty = collecty
-
- # Initialise the main loop
- gi.repository.GObject.threads_init()
- dbus.mainloop.glib.threads_init()
- dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
-
- self.loop = gi.repository.GLib.MainLoop()
-
- # Register the GraphGenerator interface
- self.generator = GraphGenerator(self.collecty)
-
- def run(self):
- log.debug(_("Bus thread has started"))
-
- # Run the main loop
- try:
- self.loop.run()
- except KeyboardInterrupt:
- self.collecty.shutdown()
-
- log.debug(_("Bus thread has ended"))
-
- def shutdown(self):
- log.debug(_("Stopping bus thread"))
-
- # End the main loop
- self.loop.quit()
-
- # Return when this thread has finished
- return self.join()
-
-
-class GraphGenerator(dbus.service.Object):
- def __init__(self, collecty):
- bus_name = dbus.service.BusName(DOMAIN, bus=dbus.SystemBus())
- dbus.service.Object.__init__(self, bus_name, "/%s" % self.__class__.__name__)
-
- self.collecty = collecty
-
- @dbus.service.method(DOMAIN, in_signature="s")
- def Backup(self, filename):
- self.collecty.backup(filename)
-
- @dbus.service.method(DOMAIN, in_signature="sa{sv}", out_signature="a{sv}")
- def GenerateGraph(self, template_name, kwargs):
- """
- Returns a graph generated from the given template and object.
- """
- graph = self.collecty.generate_graph(template_name, **kwargs)
-
- # Convert the graph back to normal Python format
- if graph:
- graph["image"] = dbus.ByteArray(graph["image"] or [])
-
- return graph
-
- @dbus.service.method(DOMAIN, in_signature="", out_signature="a{sv}")
- def GraphInfo(self, template_name, kwargs):
- """
- Returns a dictionary with information about the graph.
- """
- return self.collecty.graph_info(template_name, **kwargs)
-
- @dbus.service.method(DOMAIN, in_signature="sa{sv}", out_signature="a{sv}")
- def LastUpdate(self, template_name, kwargs):
- """
-			Returns information about the last update of the given template and object.
- """
- last_update = self.collecty.last_update(template_name, **kwargs)
-
- # Serialise datetime as string
- if last_update:
- last_update["timestamp"] = last_update["timestamp"].isoformat()
-
- return last_update
-
- @dbus.service.method(DOMAIN, in_signature="", out_signature="as")
- def ListTemplates(self):
- """
- Returns a list of all available templates
- """
- return [t.name for t in self.collecty.templates]
-
- @dbus.service.method(DOMAIN, in_signature="", out_signature="s")
- def Version(self):
- return COLLECTY_VERSION
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import argparse
-import datetime
-import dbus
-import os
-import platform
-import sys
-
-from . import bus
-from .i18n import _
-
-class Collecty(object):
- def __init__(self):
- self.bus = dbus.SystemBus()
-
- self.proxy = self.bus.get_object(bus.DOMAIN, "/GraphGenerator")
-
- def backup(self, filename):
- """
-			Writes a backup of everything to the given file
- """
- self.proxy.Backup(filename)
-
- def last_update(self, template_name, **kwargs):
- last_update = self.proxy.LastUpdate(template_name, kwargs)
-
- if last_update:
- last_update["timestamp"] = datetime.datetime.strptime(last_update["timestamp"], "%Y-%m-%dT%H:%M:%S")
-
- return last_update
-
- def list_templates(self):
- templates = self.proxy.ListTemplates()
-
- return ["%s" % t for t in templates]
-
- def graph_info(self, template_name, **kwargs):
- graph_info = self.proxy.GraphInfo(template_name, kwargs,
- signature="sa{sv}")
-
- return dict(graph_info)
-
- def generate_graph(self, template_name, **kwargs):
- graph = self.proxy.GenerateGraph(template_name, kwargs,
- signature="sa{sv}")
-
- # Convert the byte array into a byte string again
- if graph:
- graph["image"] = bytes(graph["image"])
-
- return graph
-
- def version(self):
- """
- Returns the version of the daemon
- """
- return self.proxy.Version()
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-def _add(colour, amount):
- """
- Adds some value to colours
- """
- # Parse hex array
- bytes = bytearray.fromhex(colour.lstrip("#"))
-
- if not len(bytes) == 3:
- raise ValueError("Invalid colour: %s" % colour)
-
- ret = bytearray()
-
- for byte in bytes:
- byte = round(byte * amount)
-
- # Ensure the result is within range
- byte = min(byte, 255)
- byte = max(byte, 0)
-
- # Update the array
- ret.append(byte)
-
- return "#%s" % ret.hex()
-
-def lighten(colour, scale=0.25):
- """
- Takes a hexadecimal colour code
- and brightens the colour.
- """
- return _add(colour, scale)
-
-def darken(colour, scale=0.25):
- """
- Takes a hexadecimal colour code
- and darkens the colour.
- """
- return _add(colour, -scale)
-
-def transparency(colour, scale=0.1):
- """
- Adds transparency to the given colour code
- """
- return "%s%02X" % (colour, round(0xff * scale))
-
-BLACK = "#000000"
-WHITE = "#FFFFFF"
-GREY = "#9E9E9E"
-LIGHT_GREY = "#F5F5F5"
-
-RED = "#F44336"
-LIGHT_RED = "#CC0033"
-YELLOW = "#FFEB3B"
-LIGHT_YELLOW = "#FFFF66"
-GREEN = "#4CAF50"
-LIGHT_GREEN = "#8BC34A"
-BLUE = "#2196F3"
-LIGHT_BLUE = "#03A9F4"
-
-AMBER = "#FFC107"
-BROWN = "#795548"
-CYAN = "#00BCD4"
-INDIGO = "#3F51B5"
-LIME = "#CDDC39"
-ORANGE = "#FF9800"
-DEEP_ORANGE = "#FF5722"
-PINK = "#E91E63"
-PURPLE = "#9C27B0"
-DEEP_PURPLE = "#673AB7"
-TEAL = "#009688"
-
-COLOUR_OK = LIGHT_GREEN
-COLOUR_CRITICAL = LIGHT_RED
-COLOUR_ERROR = COLOUR_CRITICAL
-COLOUR_WARN = LIGHT_YELLOW
-COLOUR_TEXT = lighten(BLACK, 0.87) # 87% grey
-
-PRIMARY = INDIGO
-ACCENT = PINK
-
-# Lighten the areas by this factor
-AREA_OPACITY = 0.75
-STDDEV_OPACITY = 0.33
-
-# Receive and transmit
-COLOUR_RX = RED
-COLOUR_TX = GREEN
-
-# I/O
-COLOUR_READ = GREEN
-COLOUR_WRITE = RED
-
-# IPv6 + IPv4
-COLOUR_IPV6 = INDIGO
-COLOUR_IPV4 = PINK
-COLOUR_IPVX = GREY # other
-
-COLOUR_TCP = INDIGO
-COLOUR_UDP = YELLOW
-COLOUR_ICMP = PURPLE
-COLOUR_IGMP = TEAL
-COLOUR_UDPLITE = DEEP_ORANGE
-COLOUR_SCTP = LIGHT_GREEN
-COLOUR_DCCP = LIGHT_BLUE
-COLOUR_OTHER = COLOUR_IPVX
-
-# Processor
-CPU_USER = LIGHT_GREEN
-CPU_NICE = BLUE
-CPU_SYS = RED
-CPU_WAIT = DEEP_PURPLE
-CPU_IRQ = ORANGE
-CPU_SIRQ = YELLOW
-CPU_STEAL = LIGHT_BLUE
-CPU_GUEST = PINK
-CPU_GUEST_NICE = lighten(PINK, 0.8)
-CPU_IDLE = LIGHT_GREY
-
-# Memory
-MEMORY_USED = GREEN
-MEMORY_BUFFERED = BLUE
-MEMORY_CACHED = YELLOW
-MEMORY_SWAP = RED
-MEMORY_FREE = LIGHT_GREY
-
-COLOURS_PROTOCOL_STATES = {
- # General states
- "NONE" : GREY,
- "TIME_WAIT" : AMBER,
-
- # TCP
- "CLOSE" : BLACK,
- "CLOSE_WAIT" : lighten(BLACK, 0.25),
- "ESTABLISHED" : LIGHT_GREEN,
- "FIN_WAIT" : ORANGE,
- "LAST_ACK" : PURPLE,
- "SYN_RECV" : CYAN,
- "SYN_SENT" : TEAL,
- "SYN_SENT2" : AMBER,
-
- # DCCP
- "CLOSEREQ" : lighten(BLACK, 0.5),
- "CLOSING" : lighten(BLACK, 0.25),
- "IGNORE" : WHITE,
- "INVALID" : RED,
- "OPEN" : LIGHT_GREEN,
- "PARTOPEN" : YELLOW,
- "REQUEST" : CYAN,
- "RESPOND" : TEAL,
-
- # SCTP
- "CLOSED" : BLACK,
- "COOKIE_ECHOED" : AMBER,
- "COOKIE_WAIT" : CYAN,
- "SHUTDOWN_ACK_SENT" : TEAL,
- "SHUTDOWN_RECD" : PURPLE,
- "SHUTDOWN_SENT" : LIGHT_BLUE,
-}
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-from .__version__ import *
-
-DATABASE_DIR = "/var/lib/collecty"
-
-DEFAULT_IMAGE_FORMAT = "SVG"
-SUPPORTED_IMAGE_FORMATS = ("SVG", "PNG", "PDF")
-
-# Default column widths
-LABEL = "%-30s"
-EMPTY_LABEL = "%32s" % ""
-
-COLUMN = "%16s"
-PERCENTAGE = "%13.2lf%%"
-INTEGER = "%16.0lf"
-LARGE_INTEGER = "%14.0lf %s"
-FLOAT = "%14.2lf"
-LARGE_FLOAT = "%12.2lf %s"
-BPS = "%9.2lf %sbps"
-PPS = "%9.2lf %spps"
-MS = "%11.2lf ms"
-
-EMPTY_LINE = "COMMENT: \\n"
-HEADLINE = "COMMENT:---- %s ----\\c"
-
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import logging
-import os
-import rrdtool
-import sched
-import signal
-import tarfile
-import tempfile
-import threading
-import time
-
-from . import bus
-from . import plugins
-
-from .constants import *
-from .i18n import _
-
-log = logging.getLogger("collecty")
-
-class Daemon(object):
- # The default interval, when all data is written to disk.
- COMMIT_INTERVAL = 300
-
- def __init__(self, debug=False):
- self.debug = debug
-
- # Reset timezone to UTC
- # rrdtool is reading that from the environment
- os.environ["TZ"] = "UTC"
-
- # Enable debug logging when running in debug mode
- if self.debug:
- log.setLevel(logging.DEBUG)
-
- self.plugins = []
-
- # Create the scheduler
- self.scheduler = sched.scheduler()
- self._schedule_commit()
-
- # The write queue holds all collected pieces of data which
- # will be written to disk later.
- self.write_queue = WriteQueue(self)
-
- # Create a thread that connects to dbus and processes requests we
- # get from there.
- self.bus = bus.Bus(self)
-
- log.debug(_("Collecty successfully initialized"))
-
- def add_plugin(self, plugin_class):
- # Try initialising a new plugin. If that fails, we will log the
- # error and try to go on.
- try:
- plugin = plugin_class(self)
- except:
- log.critical(_("Plugin %s could not be initialised") % plugin_class, exc_info=True)
- return
-
- self.plugins.append(plugin)
-
- # Collect immediately
- self._schedule_plugin(plugin, interval=0)
-
- @property
- def templates(self):
- for plugin in self.plugins:
- for template in plugin.templates:
- yield template
-
- def _schedule_plugin(self, plugin, interval=None):
- """
- Schedules a collection event for the given plugin
- """
-		log.debug("Scheduling plugin %s for execution in %ss" % (plugin, plugin.interval))
-
- self.scheduler.enter(
- plugin.interval if interval is None else interval, plugin.priority, self._collect, (plugin,),
- )
-
- def _schedule_commit(self):
- log.debug("Scheduling commit in %ss" % self.COMMIT_INTERVAL)
-
- self.scheduler.enter(
- self.COMMIT_INTERVAL, -1, self._commit,
- )
-
- def _collect(self, plugin, **kwargs):
- """
- Called for each plugin when it is time to collect some data
- """
- log.debug("Collection started for %s" % plugin)
-
- # Add the next collection event to the scheduler
- self._schedule_plugin(plugin)
-
- # Run collection
- plugin.collect()
-
- def _commit(self):
- """
- Called when all data should be committed to disk
- """
- # Schedule the next commit
- self._schedule_commit()
-
- # Write everything in the queue
- self.write_queue.commit()
-
- def run(self):
- # Register signal handlers.
- self.register_signal_handler()
-
- # Start the bus
- self.bus.start()
-
- # Add all plugins
- for plugin in plugins.get():
- self.add_plugin(plugin)
-
- # Run the scheduler
- try:
- self.scheduler.run()
- except KeyboardInterrupt:
- pass
-
- # Clear all plugins
- self.plugins.clear()
-
- # Stop the bus thread
- self.bus.shutdown()
-
- # Write all collected data to disk before ending the main thread
- self.write_queue.commit()
-
- log.debug(_("Main thread exited"))
-
- def shutdown(self):
- log.info(_("Received shutdown signal"))
-
- def register_signal_handler(self):
- for s in (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1):
- log.debug(_("Registering signal %d") % s)
-
- signal.signal(s, self.signal_handler)
-
- def signal_handler(self, sig, *args, **kwargs):
- log.info(_("Caught signal %d") % sig)
-
- if sig in (signal.SIGTERM, signal.SIGINT):
- # Shutdown this application.
- self.shutdown()
-
- elif sig == signal.SIGUSR1:
- # Commit all data.
- self.write_queue.commit()
-
- def get_plugin_from_template(self, template_name):
- for plugin in self.plugins:
- if not template_name in [t.name for t in plugin.templates]:
- continue
-
- return plugin
-
- def generate_graph(self, template_name, *args, **kwargs):
- plugin = self.get_plugin_from_template(template_name)
- if not plugin:
- raise RuntimeError("Could not find template %s" % template_name)
-
- return plugin.generate_graph(template_name, *args, **kwargs)
-
- def graph_info(self, template_name, *args, **kwargs):
- plugin = self.get_plugin_from_template(template_name)
- if not plugin:
- raise RuntimeError("Could not find template %s" % template_name)
-
- return plugin.graph_info(template_name, *args, **kwargs)
-
- def last_update(self, template_name, *args, **kwargs):
- plugin = self.get_plugin_from_template(template_name)
- if not plugin:
- raise RuntimeError("Could not find template %s" % template_name)
-
- return plugin.last_update(*args, **kwargs)
-
- def backup(self, filename):
- # Write all data to disk first
- self.write_queue.commit()
-
- log.info(_("Backing up to %s..." % filename))
-
- # Opening a compressed tar file with will have all files added to it
- with tarfile.open(filename, mode="w:gz") as archive:
- for path, directories, files in os.walk(DATABASE_DIR):
- for file in files:
- # Skip any non-RRD files
- if not file.endswith(".rrd"):
- continue
-
- # Compose the full file path
- file = os.path.join(path, file)
-
- log.debug(_("Adding %s to backup...") % file)
-
- with tempfile.NamedTemporaryFile() as t:
- rrdtool.dump(file, t.name)
-
- # Add the file to the archive
- archive.add(
- t.name, arcname=file[len(DATABASE_DIR):],
- )
-
- log.info(_("Backup finished"))
-
-
-class WriteQueue(object):
- def __init__(self, collecty):
- self.collecty = collecty
-
- self.log = logging.getLogger("collecty.queue")
-
- # Store data here
- self._data = []
-
- # Lock to make this class thread-safe
- self._lock = threading.Lock()
-
- self.log.debug(_("Initialised write queue"))
-
- def submit(self, object, data):
- """
- Submit a new data point for object
- """
- data = QueueObject(object.file, data)
-
- with self._lock:
- self._data.append(data)
-
- return data
-
- def commit(self):
- """
-			Flushes all queued data to disk.
- """
- self.log.debug(_("Committing data to disk..."))
-
- time_start = time.time()
-
- # There is nothing to do if the queue is empty
- with self._lock:
- if not self._data:
- self.log.debug(_("No data to commit"))
- return
-
- # Get all objects from the queue and group them by the RRD file
- # to commit them all at once
- results = {}
-
- # Group all datapoints by file
- for data in self._data:
- try:
- results[data.file].append(data)
- except KeyError:
- results[data.file] = [data]
-
- # Clear the queue
- self._data.clear()
-
- # Write the collected data to disk
- for filename in sorted(results):
- self._commit_file(filename, results[filename])
-
- duration = time.time() - time_start
- self.log.debug(_("Emptied write queue in %.2fs") % duration)
-
- def _commit_file(self, filename, results):
- self.log.debug(_("Committing %(counter)s entries to %(filename)s") \
- % { "counter" : len(results), "filename" : filename })
-
- # Sort data before submitting it to rrdtool
- results.sort()
-
- for data in results:
- self.log.debug(" %s" % data)
-
- try:
- rrdtool.update(filename, *["%s" % r for r in results])
-
- # Catch operational errors like unreadable/unwritable RRD databases
- # or those where the format has changed. The collected data will be lost.
- except rrdtool.OperationalError as e:
- self.log.critical(_("Could not update RRD database %s: %s") \
- % (filename, e))
-
- def commit_file(self, filename):
- """
- Commits all data that is in the write queue for the given
- RRD database.
- """
- results, others = [], []
-
- # We will have to walk through the entire queue since we cannot
-		# remove any items selectively. Everything that belongs to our
- # transaction is kept. Everything else will be put back into the
- # queue.
- with self._lock:
- for data in self._data:
- if data.file == filename:
- results.append(data)
- else:
- others.append(data)
-
- # Put back all items that did not match
- self._data = others
-
- # Write everything else to disk
- if results:
- self._commit_file(filename, results)
-
-
-class QueueObject(object):
- def __init__(self, file, data):
- self.file = file
- self.data = self._format_data(data)
-
- # Save current timestamp
- self.time = time.time()
-
- def __str__(self):
- return "%.0f:%s" % (self.time, self.data)
-
- def __lt__(self, other):
- if isinstance(other, self.__class__):
- return self.time < other.time
-
- return NotImplemented
-
- @staticmethod
- def _format_data(data):
- # Replace all Nones by UNKNOWN
- s = []
-
- for e in data:
- if e is None:
- e = "U"
-
- s.append("%s" % e)
-
- return ":".join(s)
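
For reference, each queued data point is serialised as the epoch timestamp followed by colon-separated values, with missing values rendered as "U" (unknown), which is the form rrdtool.update() expects. A minimal standalone sketch of that behaviour (not part of the original sources):

import time

def format_update(values):
    # Mirror QueueObject: replace None with "U" and prefix the current timestamp
    data = ":".join("U" if v is None else "%s" % v for v in values)
    return "%.0f:%s" % (time.time(), data)

# format_update([42, None, 3.14]) -> e.g. "1700000000:42:U:3.14"
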
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-class CollectyError(Exception):
- pass
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import gettext
-
-TEXTDOMAIN = "collecty"
-
-N_ = lambda x: x
-
-def _(singular, plural=None, n=None):
- """
-	A function that returns the translation of a string if available.
-
- The language is taken from the system environment.
- """
-	if plural is not None:
- assert n is not None
- return gettext.dngettext(TEXTDOMAIN, singular, plural, n)
-
- return gettext.dgettext(TEXTDOMAIN, singular)
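
For reference, the wrapper above dispatches to dngettext() when a plural form and a count are given, and to dgettext() otherwise; a short usage sketch (illustrative strings only):

# Singular lookup
print(_("Backup finished"))

# Plural-aware lookup; n picks the correct form
n = 3
print(_("Added %d file to the archive", "Added %d files to the archive", n) % n)
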
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import logging
-
-# Initialize logging.
-log = logging.getLogger("collecty")
-log.propagate = False
-
-# The default log level is INFO
-log.setLevel(logging.INFO)
-
-# We try to use the native journald log handler. If that is unavailable,
-# we log everything to the console.
-try:
- import journal
- handler = journal.JournalHandler()
-
-except ImportError:
- handler = logging.StreamHandler()
-
-handler.setLevel(logging.DEBUG)
-log.addHandler(handler)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-from .base import get
-
-from . import base
-from . import contextswitches
-from . import conntrack
-from . import cpufreq
-from . import df
-from . import disk
-from . import interface
-from . import interrupts
-from . import ipfrag
-from . import latency
-from . import loadavg
-from . import processor
-from . import psi
-from . import memory
-from . import sensors
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import logging
-import os
-import re
-import rrdtool
-import time
-import unicodedata
-
-from .. import util
-from ..constants import *
-from ..i18n import _
-
-DEF_MATCH = r"C?DEF:([A-Za-z0-9_]+)="
-
-class Environment(object):
- """
-	Sets up the environment so that rrdtool creates
-	localised graphs in the correct timezone.
- """
- def __init__(self, timezone="UTC", locale="en_US.utf-8"):
- # Build the new environment
- self.new_environment = {
- "LANGUAGE" : locale,
- "LC_ALL" : locale,
- "TZ" : timezone,
- }
-
- def __enter__(self):
- # Save the current environment
- self.old_environment = {}
-
- for k in self.new_environment:
- # Store the old value
- self.old_environment[k] = os.environ.get(k, None)
-
- # Apply the new one
- if self.new_environment[k]:
- os.environ[k] = self.new_environment[k]
-
- def __exit__(self, type, value, traceback):
- # Roll back to the previous environment
- for k, v in self.old_environment.items():
- if v is None:
- try:
- del os.environ[k]
- except KeyError:
- pass
- else:
- os.environ[k] = v
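
For reference, the context manager above is meant to wrap individual rrdtool calls; the override is process-wide via os.environ and is rolled back on exit. A short sketch (hypothetical values):

with Environment(timezone="Europe/Berlin", locale="de_DE.utf-8"):
    # Inside the block rrdtool renders localised labels in the given timezone
    print(os.environ["TZ"], os.environ["LC_ALL"])

# Previous values of TZ, LC_ALL and LANGUAGE are restored here
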
-
-
-class PluginRegistration(type):
- plugins = {}
-
- def __init__(plugin, name, bases, dict):
- type.__init__(plugin, name, bases, dict)
-
-		# The base class from which all plugins inherit is not
-		# registered as a plugin.
- if name == "Plugin":
- return
-
- if not all((plugin.name, plugin.description)):
- raise RuntimeError(_("Plugin is not properly configured: %s") % plugin)
-
- PluginRegistration.plugins[plugin.name] = plugin
-
-
-def get():
- """
- Returns a list with all automatically registered plugins.
- """
- return PluginRegistration.plugins.values()
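
For reference, any concrete subclass of the Plugin class defined below is registered automatically by the metaclass under its name attribute, and get() exposes the registered classes. A minimal sketch with a hypothetical plugin:

class ExamplePlugin(Plugin):
    # Registration happens as a side effect of class creation
    name = "example"
    description = "Example Plugin"

assert PluginRegistration.plugins["example"] is ExamplePlugin
assert ExamplePlugin in get()
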
-
-class Plugin(object, metaclass=PluginRegistration):
- # The name of this plugin.
- name = None
-
- # A description for this plugin.
- description = None
-
- # Templates which can be used to generate a graph out of
- # the data from this data source.
- templates = []
-
- # The default interval for all plugins
- interval = 60
-
- # Priority
- priority = 0
-
- def __init__(self, collecty, **kwargs):
- self.collecty = collecty
-
- # Check if this plugin was configured correctly.
- assert self.name, "Name of the plugin is not set: %s" % self.name
- assert self.description, "Description of the plugin is not set: %s" % self.description
-
- # Initialize the logger.
- self.log = logging.getLogger("collecty.plugins.%s" % self.name)
-
- # Run some custom initialization.
- self.init(**kwargs)
-
- self.log.debug(_("Successfully initialized %s") % self.__class__.__name__)
-
- @property
- def path(self):
- """
-		Returns the name of the subdirectory in which all RRD files
-		for this plugin are stored.
- """
- return self.name
-
- ### Basic methods
-
- def init(self, **kwargs):
- """
- Do some custom initialization stuff here.
- """
- pass
-
- def collect(self):
- """
-		Gathers the statistical data that this plugin collects.
- """
- time_start = time.time()
-
- # Run through all objects of this plugin and call the collect method.
- for object in self.objects:
- # Run collection
- try:
- result = object.collect()
-
- # Catch any unhandled exceptions
- except Exception as e:
- self.log.warning(_("Unhandled exception in %s.collect()") % object, exc_info=True)
- continue
-
- if not result:
- self.log.warning(_("Received empty result: %s") % object)
- continue
-
- # Add the object to the write queue so that the data is written
- # to the databases later.
- result = self.collecty.write_queue.submit(object, result)
-
- self.log.debug(_("Collected %s: %s") % (object, result))
-
-		# Compute the time this function took to complete.
- delay = time.time() - time_start
-
-		# Log a warning when the collect method takes too long to return data
- if delay >= 60:
- self.log.warning(_("A worker thread was stalled for %.4fs") % delay)
- else:
- self.log.debug(_("Collection finished in %.2fms") % (delay * 1000))
-
- def get_object(self, id):
- for object in self.objects:
-			if object.id != id:
- continue
-
- return object
-
- def get_template(self, template_name, object_id, locale=None, timezone=None):
- for template in self.templates:
-			if template.name != template_name:
- continue
-
- return template(self, object_id, locale=locale, timezone=timezone)
-
- def generate_graph(self, template_name, object_id="default",
- timezone=None, locale=None, **kwargs):
- template = self.get_template(template_name, object_id=object_id,
- timezone=timezone, locale=locale)
- if not template:
- raise RuntimeError("Could not find template %s" % template_name)
-
- time_start = time.time()
-
- with Environment(timezone=timezone, locale=locale):
- graph = template.generate_graph(**kwargs)
-
- duration = time.time() - time_start
- self.log.debug(_("Generated graph %s in %.1fms") \
- % (template, duration * 1000))
-
- return graph
-
- def graph_info(self, template_name, object_id="default",
- timezone=None, locale=None, **kwargs):
- template = self.get_template(template_name, object_id=object_id,
- timezone=timezone, locale=locale)
- if not template:
- raise RuntimeError("Could not find template %s" % template_name)
-
- return template.graph_info()
-
- def last_update(self, object_id="default"):
- object = self.get_object(object_id)
- if not object:
- raise RuntimeError("Could not find object %s" % object_id)
-
- return object.last_update()
-
-
-class Object(object):
- # The schema of the RRD database.
- rrd_schema = None
-
- # RRA properties.
- rra_types = ("AVERAGE", "MIN", "MAX")
- rra_timespans = (
- ("1m", "10d"),
- ("1h", "18M"),
- ("1d", "5y"),
- )
-
- def __init__(self, plugin, *args, **kwargs):
- self.plugin = plugin
-
- # Initialise this object
- self.init(*args, **kwargs)
-
- # Create the database file.
- self.create()
-
- def __repr__(self):
- return "<%s %s>" % (self.__class__.__name__, self.id)
-
- def __lt__(self, other):
- return self.id < other.id
-
- @property
- def collecty(self):
- return self.plugin.collecty
-
- @property
- def log(self):
- return self.plugin.log
-
- @property
- def id(self):
- """
- Returns a UNIQUE identifier for this object. As this is incorporated
-		into the path of the RRD file, it must only contain ASCII characters.
- """
- raise NotImplementedError
-
- @property
- def file(self):
- """
-		The absolute path to the RRD file of this object.
- """
- filename = self._normalise_filename("%s.rrd" % self.id)
-
- return os.path.join(DATABASE_DIR, self.plugin.path, filename)
-
- @staticmethod
- def _normalise_filename(filename):
- # Convert the filename into ASCII characters only
- filename = unicodedata.normalize("NFKC", filename)
-
- # Replace any spaces by dashes
- filename = filename.replace(" ", "-")
-
- return filename
-
- ### Basic methods
-
- def init(self, *args, **kwargs):
- """
- Do some custom initialization stuff here.
- """
- pass
-
- def create(self):
- """
- Creates an empty RRD file with the desired data structures.
- """
-		# Skip if the file already exists.
- if os.path.exists(self.file):
- return
-
- dirname = os.path.dirname(self.file)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- # Create argument list.
- args = self.get_rrd_schema()
-
- rrdtool.create(self.file, *args)
-
- self.log.debug(_("Created RRD file %s.") % self.file)
- for arg in args:
- self.log.debug(" %s" % arg)
-
- def info(self):
- return rrdtool.info(self.file)
-
- def last_update(self):
- """
- Returns a dictionary with the timestamp and
- data set of the last database update.
- """
- return {
- "dataset" : self.last_dataset,
- "timestamp" : self.last_updated,
- }
-
- def _last_update(self):
- return rrdtool.lastupdate(self.file)
-
- @property
- def last_updated(self):
- """
- Returns the timestamp when this database was last updated
- """
- lu = self._last_update()
-
- if lu:
- return lu.get("date")
-
- @property
- def last_dataset(self):
- """
- Returns the latest dataset in the database
- """
- lu = self._last_update()
-
- if lu:
- return lu.get("ds")
-
- @property
- def stepsize(self):
- return self.plugin.interval
-
- @property
- def heartbeat(self):
- return self.stepsize * 2
-
- def get_rrd_schema(self):
- schema = [
- "--step", "%s" % self.stepsize,
- ]
- for line in self.rrd_schema:
- if line.startswith("DS:"):
- try:
- (prefix, name, type, lower_limit, upper_limit) = line.split(":")
-
- line = ":".join((
- prefix,
- name,
- type,
- "%s" % self.heartbeat,
- lower_limit,
- upper_limit
- ))
- except ValueError:
- pass
-
- schema.append(line)
-
- xff = 0.1
-
- for steps, rows in self.rra_timespans:
- for type in self.rra_types:
- schema.append("RRA:%s:%s:%s:%s" % (type, xff, steps, rows))
-
- return schema
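
To illustrate the expansion above: every DS line gets the heartbeat (twice the step size) spliced in after the data source type, and one RRA is appended per type/timespan combination. With the default 60 second interval, a schema of ["DS:count:GAUGE:0:U"] would expand roughly to:

--step 60
DS:count:GAUGE:120:0:U
RRA:AVERAGE:0.1:1m:10d  RRA:MIN:0.1:1m:10d  RRA:MAX:0.1:1m:10d
RRA:AVERAGE:0.1:1h:18M  RRA:MIN:0.1:1h:18M  RRA:MAX:0.1:1h:18M
RRA:AVERAGE:0.1:1d:5y   RRA:MIN:0.1:1d:5y   RRA:MAX:0.1:1d:5y
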
-
- @property
- def rrd_schema_names(self):
- ret = []
-
- for line in self.rrd_schema:
- (prefix, name, type, lower_limit, upper_limit) = line.split(":")
- ret.append(name)
-
- return ret
-
- def make_rrd_defs(self, prefix=None):
- defs = []
-
- for name in self.rrd_schema_names:
- if prefix:
- p = "%s_%s" % (prefix, name)
- else:
- p = name
-
- defs += [
- "DEF:%s=%s:%s:AVERAGE" % (p, self.file, name),
- ]
-
- return defs
-
- def get_stddev(self, interval=None):
- args = self.make_rrd_defs()
-
- # Add the correct interval
- args += ["--start", util.make_interval(interval)]
-
- for name in self.rrd_schema_names:
- args += [
- "VDEF:%s_stddev=%s,STDEV" % (name, name),
- "PRINT:%s_stddev:%%lf" % name,
- ]
-
- x, y, vals = rrdtool.graph("/dev/null", *args)
- return dict(zip(self.rrd_schema_names, vals))
-
- def commit(self):
- """
- Will commit the collected data to the database.
- """
- # Make sure that the RRD database has been created
- self.create()
-
- # Write everything to disk that is in the write queue
- self.collecty.write_queue.commit_file(self.file)
-
- # Convenience functions for plugin authors
-
- def read_file(self, *args, strip=True):
- """
- Reads the content of the given file
- """
- filename = os.path.join(*args)
-
- try:
- with open(filename) as f:
- value = f.read()
- except FileNotFoundError as e:
- return None
-
- # Strip any excess whitespace
- if strip:
- value = value.strip()
-
- return value
-
- def read_file_integer(self, filename):
- """
- Reads the content from a file and returns it as an integer
- """
- value = self.read_file(filename)
-
- try:
- return int(value)
- except (TypeError, ValueError):
- return None
-
- def read_proc_stat(self):
- """
- Reads /proc/stat and returns it as a dictionary
- """
- ret = {}
-
- with open("/proc/stat") as f:
- for line in f:
- # Split the key from the rest of the line
- key, line = line.split(" ", 1)
-
- # Remove any line breaks
- ret[key] = line.rstrip()
-
- return ret
-
- def read_proc_meminfo(self):
- ret = {}
-
- with open("/proc/meminfo") as f:
- for line in f:
- # Split the key from the rest of the line
- key, line = line.split(":", 1)
-
- # Remove any whitespace
- line = line.strip()
-
- # Remove any trailing kB
- if line.endswith(" kB"):
- line = line[:-3]
-
- # Try to convert to integer
- try:
- line = int(line)
- except (TypeError, ValueError):
- continue
-
- ret[key] = line
-
- return ret
-
-
-class GraphTemplate(object):
- # A unique name to identify this graph template.
- name = None
-
- # Headline of the graph image
- graph_title = None
-
- # Vertical label of the graph
- graph_vertical_label = None
-
- # Limits
- lower_limit = None
- upper_limit = None
-
- # Instructions how to create the graph.
- rrd_graph = None
-
- # Extra arguments passed to rrdgraph.
- rrd_graph_args = []
-
- def __init__(self, plugin, object_id, locale=None, timezone=None):
- self.plugin = plugin
-
- # Save localisation parameters
- self.locale = locale
- self.timezone = timezone
-
- # Get all required RRD objects
- self.object_id = object_id
-
- # Get the main object
- self.objects = self.get_objects(self.object_id)
- self.objects.sort()
-
- def __repr__(self):
- return "<%s>" % self.__class__.__name__
-
- @property
- def collecty(self):
- return self.plugin.collecty
-
- @property
- def log(self):
- return self.plugin.log
-
- @property
- def object(self):
- """
- Shortcut to the main object
- """
- if len(self.objects) == 1:
- return self.objects[0]
-
- def _make_command_line(self, interval, format=DEFAULT_IMAGE_FORMAT,
- width=None, height=None, with_title=True, thumbnail=False):
- args = [
- # Change the background colour
- "--color", "BACK#FFFFFFFF",
-
- # Disable the border around the image
- "--border", "0",
-
-			# Let width and height define the size of the entire image
- "--full-size-mode",
-
- # Gives the curves a more organic look
- "--slope-mode",
-
- # Show nicer labels
- "--dynamic-labels",
-
- # Brand all generated graphs
- "--watermark", _("Created by collecty"),
- ]
-
- # Set the default dimensions
- default_width, default_height = 960, 480
-
- # A thumbnail doesn't have a legend and other labels
- if thumbnail:
- args.append("--only-graph")
-
- default_width, default_height = 80, 20
-
- args += [
- "--imgformat", format,
- "--height", "%s" % (height or default_height),
- "--width", "%s" % (width or default_width),
- ]
-
- args += self.rrd_graph_args
-
- # Graph title
- if with_title and self.graph_title:
- args += ["--title", self.graph_title]
-
- # Vertical label
- if self.graph_vertical_label:
- args += ["--vertical-label", self.graph_vertical_label]
-
- if self.lower_limit is not None or self.upper_limit is not None:
- # Force to honour the set limits
- args.append("--rigid")
-
- if self.lower_limit is not None:
- args += ["--lower-limit", self.lower_limit]
-
- if self.upper_limit is not None:
- args += ["--upper-limit", self.upper_limit]
-
- # Add interval
- args += ["--start", util.make_interval(interval)]
-
- return args
-
- def _add_defs(self):
- use_prefix = len(self.objects) >= 2
-
- args = []
- for object in self.objects:
- if use_prefix:
- args += object.make_rrd_defs(object.id)
- else:
- args += object.make_rrd_defs()
-
- return args
-
- def _add_vdefs(self, args):
- ret = []
-
- for arg in args:
- ret.append(arg)
-
- # Search for all DEFs and CDEFs
- m = re.match(DEF_MATCH, "%s" % arg)
- if m:
- name = m.group(1)
-
- # Add the VDEFs for minimum, maximum, etc. values
- ret += [
- "VDEF:%s_cur=%s,LAST" % (name, name),
- "VDEF:%s_avg=%s,AVERAGE" % (name, name),
- "VDEF:%s_max=%s,MAXIMUM" % (name, name),
- "VDEF:%s_min=%s,MINIMUM" % (name, name),
- ]
-
- return ret
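
As a concrete illustration, every DEF or CDEF name matched by DEF_MATCH gains four VDEFs directly after it, so a single definition (illustrative file path) expands as follows:

DEF:count=path/to/default.rrd:count:AVERAGE
VDEF:count_cur=count,LAST
VDEF:count_avg=count,AVERAGE
VDEF:count_max=count,MAXIMUM
VDEF:count_min=count,MINIMUM
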
-
- def get_objects(self, *args, **kwargs):
- object = self.plugin.get_object(*args, **kwargs)
-
- if object:
- return [object,]
-
- return []
-
- def generate_graph(self, interval=None, **kwargs):
- assert self.objects, "Cannot render graph without any objects"
-
- # Make sure that all collected data is in the database
- # to get a recent graph image
- for object in self.objects:
- object.commit()
-
- args = self._make_command_line(interval, **kwargs)
-
- self.log.info(_("Generating graph %s") % self)
-
- rrd_graph = self.rrd_graph
-
- # Add DEFs for all objects
- if not any((e.startswith("DEF:") for e in rrd_graph)):
- args += self._add_defs()
-
- args += rrd_graph
- args = self._add_vdefs(args)
-
- # Convert arguments to string
- args = [str(e) for e in args]
-
- for arg in args:
- self.log.debug(" %s" % arg)
-
- graph = rrdtool.graphv("-", *args)
-
- return {
- "image" : graph.get("image"),
- "image_height" : graph.get("image_height"),
- "image_width" : graph.get("image_width"),
- }
-
- def graph_info(self):
- """
- Returns a dictionary with useful information
- about this graph.
- """
- return {
- "title" : self.graph_title,
- "object_id" : self.object_id or "",
- "template" : self.name,
- }
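
Putting the pieces together, a caller such as the daemon's D-Bus layer would render a graph roughly like this; the template name and output filename are illustrative, and further keyword arguments such as the interval depend on util.make_interval():

plugin = collecty.get_plugin_from_template("conntrack")

graph = plugin.generate_graph("conntrack", object_id="default",
	locale="en_US.utf-8", timezone="UTC")

with open("conntrack.png", "wb") as f:
	f.write(graph["image"])
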
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class ConntrackGraphTemplate(base.GraphTemplate):
- name = "conntrack"
-
- lower_limit = 0
-
- @property
- def rrd_graph(self):
- return [
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- "AREA:count%s:%s" % (
- transparency(PRIMARY, AREA_OPACITY),
- LABEL % _("Entries"),
- ),
- "GPRINT:count_cur:%s" % INTEGER,
- "GPRINT:count_avg:%s" % INTEGER,
- "GPRINT:count_min:%s" % INTEGER,
- "GPRINT:count_max:%s" % INTEGER,
- "LINE1:count%s" % PRIMARY,
-
- # Draw maximum line
- "LINE:max%s:%s:dashes:skipscale" % (
- COLOUR_CRITICAL, LABEL % _("Maximum"),
- ),
- ]
-
- @property
- def graph_title(self):
- return _("Connection Tracking Table")
-
- @property
- def graph_vertical_label(self):
- return _("Entries")
-
-
-class ConntrackObject(base.Object):
- rrd_schema = [
- "DS:count:GAUGE:0:U",
- "DS:max:GAUGE:0:U",
- ]
-
- @property
- def id(self):
- return "default"
-
- def collect(self):
- """
- Read count and max values from /proc
- """
- return (
- self.read_file_integer("/proc/sys/net/netfilter/nf_conntrack_count"),
- self.read_file_integer("/proc/sys/net/netfilter/nf_conntrack_max"),
- )
-
-
-class ConntrackPlugin(base.Plugin):
- name = "conntrack"
- description = "Conntrack Plugin"
-
- templates = [
- ConntrackGraphTemplate,
- ]
-
- @property
- def objects(self):
- yield ConntrackObject(self)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import re
-
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class GraphTemplateContextSwitches(base.GraphTemplate):
- name = "context-switches"
-
- @property
- def rrd_graph(self):
- return [
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- "AREA:ctxt%s:%s" % (
- transparency(PRIMARY, AREA_OPACITY),
- LABEL % _("Context Switches"),
- ),
- "GPRINT:ctxt_cur:%s" % INTEGER,
- "GPRINT:ctxt_avg:%s" % INTEGER,
- "GPRINT:ctxt_min:%s" % INTEGER,
- "GPRINT:ctxt_max:%s" % INTEGER,
-
- "LINE1:ctxt%s" % PRIMARY,
- ]
-
- lower_limit = 0
-
- @property
- def graph_title(self):
- return _("Context Switches")
-
- @property
- def graph_vertical_label(self):
- return _("Context Switches/s")
-
-
-class ContextSwitchesObject(base.Object):
- rrd_schema = [
- "DS:ctxt:DERIVE:0:U",
- ]
-
- @property
- def id(self):
- return "default"
-
- def collect(self):
- expr = r"^ctxt (\d+)$"
-
- with open("/proc/stat") as f:
- for line in f.readlines():
- m = re.match(expr, line)
- if m:
- return m.group(1)
-
-
-class ContextSwitchesPlugin(base.Plugin):
- name = "context-switches"
- description = "Context Switches Plugin"
-
- templates = [GraphTemplateContextSwitches]
-
- @property
- def objects(self):
- yield ContextSwitchesObject(self)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import os
-import re
-
-from . import base
-from ..i18n import _
-
-class GraphTemplateCPUFreq(base.GraphTemplate):
- name = "cpufreq"
-
- lower_limit = 0
-
- def get_objects(self, *args, **kwargs):
- return list(self.plugin.objects)
-
- @property
- def graph_title(self):
- return _("Processor Frequencies")
-
- @property
- def graph_vertical_label(self):
- return "%s [%s]" % (_("Frequency"), _("Hz"))
-
- processor_colours = [
- "#ff000066",
- "#00ff0066",
- "#0000ff66",
- "#ffff0066",
- ]
-
- @property
- def rrd_graph(self):
- rrd_graph = []
-
- for processor, colour in zip(self.objects, self.processor_colours):
- rrd_graph += processor.make_rrd_defs(processor.id) + [
- "LINE2:%s_current%s:%-10s" % (processor.id, colour, processor.name),
- "GPRINT:%s_current_avg:%%6.2lf %%sHz" % processor.id,
- ]
-
- return rrd_graph
-
- rrd_graph_args = [
- "--base", "1000", # Hz
- ]
-
-
-class CPUFreqObject(base.Object):
- rrd_schema = [
- "DS:current:GAUGE:0:U",
- "DS:minimum:GAUGE:0:U",
- "DS:maximum:GAUGE:0:U",
- ]
-
- def __repr__(self):
- return "<%s %s>" % (self.__class__.__name__, self.cpuid)
-
- def init(self, cpuid):
- self.cpuid = cpuid
-
- self.sys_path = os.path.join("/sys/devices/system/cpu", self.cpuid)
-
- @property
- def name(self):
- return "Core %s" % self.core_id
-
- @property
- def id(self):
- return self.cpuid
-
- @property
- def core_id(self):
- return self.read_file(self.sys_path, "topology/core_id")
-
- def is_cpufreq_supported(self):
- path = os.path.join(self.sys_path, "cpufreq")
-
- return os.path.exists(path)
-
- def collect(self):
- return (
- self.read_frequency("cpufreq/cpuinfo_cur_freq"),
- self.read_frequency("cpufreq/cpuinfo_min_freq"),
- self.read_frequency("cpufreq/cpuinfo_max_freq"),
- )
-
- def read_frequency(self, filename):
- val = self.read_file(self.sys_path, filename)
-
- # Convert from kHz to Hz
- return int(val) * 1000
-
-
-class CPUFreqPlugin(base.Plugin):
- name = "cpufreq"
- description = "cpufreq Plugin"
-
- templates = [GraphTemplateCPUFreq]
-
- @property
- def objects(self):
- core_ids = []
-
- for cpuid in os.listdir("/sys/devices/system/cpu"):
- if not re.match(r"cpu[0-9]+", cpuid):
- continue
-
- o = CPUFreqObject(self, cpuid)
-
- # If we have already seen a virtual core of the processor,
- # we will skip any others.
- if o.core_id in core_ids:
- continue
-
- # Check if this processor is supported by cpufreq
- if not o.is_cpufreq_supported():
- continue
-
- # Save the ID of the added core
- core_ids.append(o.core_id)
-
- yield o
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import os
-
-from .. import _collecty
-from . import base
-
-from ..constants import *
-from ..colours import *
-from ..i18n import _
-
-class GraphTemplateDiskUsage(base.GraphTemplate):
- name = "disk-usage"
- lower_limit = 0
-
- @property
- def rrd_graph(self):
- return [
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- # Area for the used space
- "AREA:used%s:%s" % (
- transparency(LIGHT_RED, AREA_OPACITY),
- LABEL % _("Used"),
- ),
- "GPRINT:used_cur:%s" % LARGE_FLOAT,
- "GPRINT:used_avg:%s" % LARGE_FLOAT,
- "GPRINT:used_min:%s" % LARGE_FLOAT,
- "GPRINT:used_max:%s\\j" % LARGE_FLOAT,
-
- # Stacked area of unused space
- "AREA:free%s:%s:STACK" % (
- transparency(LIGHT_GREEN, AREA_OPACITY),
- LABEL % _("Free"),
- ),
- "GPRINT:free_cur:%s" % LARGE_FLOAT,
- "GPRINT:free_avg:%s" % LARGE_FLOAT,
- "GPRINT:free_min:%s" % LARGE_FLOAT,
- "GPRINT:free_max:%s\\j" % LARGE_FLOAT,
-
- # Add contour lines for the areas
- "LINE:used%s" % LIGHT_RED,
- "LINE:free%s::STACK" % LIGHT_GREEN,
- ]
-
- @property
- def graph_title(self):
- return _("Disk Usage of %s") % self.object.mountpoint
-
- @property
- def graph_vertical_label(self):
- return _("Bytes")
-
-
-class GraphTemplateInodeUsage(base.GraphTemplate):
- name = "inode-usage"
- lower_limit = 0
-
- @property
- def rrd_graph(self):
- rrd_graph = [
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- # Area for the used inodes
- "AREA:inodes_used%s:%s" % (
- transparency(LIGHT_RED, AREA_OPACITY),
- LABEL % _("Used"),
- ),
- "GPRINT:inodes_used_cur:%s" % LARGE_FLOAT,
- "GPRINT:inodes_used_avg:%s" % LARGE_FLOAT,
- "GPRINT:inodes_used_min:%s" % LARGE_FLOAT,
- "GPRINT:inodes_used_max:%s\\j" % LARGE_FLOAT,
-
- # Stacked area of unused inodes
- "AREA:inodes_free%s:%s:STACK" % (
- transparency(LIGHT_GREEN, AREA_OPACITY),
- LABEL % _("Free"),
- ),
- "GPRINT:inodes_free_cur:%s" % LARGE_FLOAT,
- "GPRINT:inodes_free_avg:%s" % LARGE_FLOAT,
- "GPRINT:inodes_free_min:%s" % LARGE_FLOAT,
- "GPRINT:inodes_free_max:%s\\j" % LARGE_FLOAT,
-
- # Add contour lines for the areas
- "LINE:inodes_used%s" % LIGHT_RED,
- "LINE:inodes_free%s::STACK" % LIGHT_GREEN,
- ]
-
- return rrd_graph
-
- rrd_graph_args = [
- "--base", "1000", # inodes
- ]
-
- @property
- def graph_title(self):
- return _("Inode Usage of %s") % self.object.mountpoint
-
- @property
- def graph_vertical_label(self):
- return _("Inodes")
-
-
-class DiskUsageObject(base.Object):
- rrd_schema = [
- "DS:used:GAUGE:0:U",
- "DS:free:GAUGE:0:U",
- "DS:inodes_used:GAUGE:0:U",
- "DS:inodes_free:GAUGE:0:U",
- ]
-
- def __repr__(self):
- return "<%s %s>" % (self.__class__.__name__, self.mountpoint)
-
- def init(self, mountpoint):
- self.mountpoint = mountpoint
-
- @property
- def id(self):
- mountpoint = self.mountpoint
-
- if mountpoint.startswith("/"):
- mountpoint = mountpoint[1:]
-
- if not mountpoint:
- return "root"
-
- return mountpoint.replace("/", "-")
-
- def collect(self):
- stats = os.statvfs(self.mountpoint)
-
- return (
- # used
- (stats.f_blocks * stats.f_frsize) - \
- (stats.f_bfree * stats.f_bsize),
- # free
- stats.f_bfree * stats.f_bsize,
- # inodes used
- stats.f_files - stats.f_ffree,
- # inodes free
- stats.f_ffree,
- )
-
-
-class DiskUsagePlugin(base.Plugin):
- name = "df"
- description = "Disk Usage Plugin"
-
- templates = [
- GraphTemplateDiskUsage,
- GraphTemplateInodeUsage,
- ]
-
- @property
- def objects(self):
- for dev, mnt, fs, opts in _collecty.get_mountpoints():
- yield DiskUsageObject(self, mnt)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import os
-import re
-
-from .. import _collecty
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class GraphTemplateDiskBadSectors(base.GraphTemplate):
- name = "disk-bad-sectors"
-
- @property
- def rrd_graph(self):
- return [
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- "AREA:bad_sectors%s:%s" % (
- transparency(COLOUR_CRITICAL, AREA_OPACITY),
- LABEL % _("Bad Sectors"),
- ),
- "GPRINT:bad_sectors_cur:%s" % INTEGER,
- "GPRINT:bad_sectors_max:%s\\j" % INTEGER,
-
- # Contour line
- "LINE:bad_sectors%s" % COLOUR_CRITICAL,
- ]
-
- @property
- def graph_title(self):
- return _("Bad Sectors of %s") % self.object.device_string
-
- @property
- def graph_vertical_label(self):
- return _("Pending/Relocated Sectors")
-
-
-class GraphTemplateDiskBytes(base.GraphTemplate):
- name = "disk-bytes"
-
- @property
- def rrd_graph(self):
- rrd_graph = [
- "CDEF:read_bytes=read_sectors,512,*",
- "CDEF:write_bytes=write_sectors,512,*",
-
- "LINE1:read_bytes%s:%-15s" % (COLOUR_READ, _("Read")),
-			r"GPRINT:read_bytes_cur:%12s\:" % _("Current") + " %9.2lf",
-			r"GPRINT:read_bytes_max:%12s\:" % _("Maximum") + " %9.2lf",
-			r"GPRINT:read_bytes_min:%12s\:" % _("Minimum") + " %9.2lf",
-			r"GPRINT:read_bytes_avg:%12s\:" % _("Average") + " %9.2lf",
-
- "LINE1:write_bytes%s:%-15s" % (COLOUR_WRITE, _("Written")),
-			r"GPRINT:write_bytes_cur:%12s\:" % _("Current") + " %9.2lf",
-			r"GPRINT:write_bytes_max:%12s\:" % _("Maximum") + " %9.2lf",
-			r"GPRINT:write_bytes_min:%12s\:" % _("Minimum") + " %9.2lf",
-			r"GPRINT:write_bytes_avg:%12s\:" % _("Average") + " %9.2lf",
- ]
-
- return rrd_graph
-
- lower_limit = 0
-
- @property
- def graph_title(self):
- return _("Disk Utilisation of %s") % self.object.device_string
-
- @property
- def graph_vertical_label(self):
- return _("Byte per Second")
-
-
-class GraphTemplateDiskIoOps(base.GraphTemplate):
- name = "disk-io-ops"
-
- @property
- def rrd_graph(self):
- rrd_graph = [
- "LINE1:read_ios%s:%-15s" % (COLOUR_READ, _("Read")),
-			r"GPRINT:read_ios_cur:%12s\:" % _("Current") + " %6.2lf",
-			r"GPRINT:read_ios_max:%12s\:" % _("Maximum") + " %6.2lf",
-			r"GPRINT:read_ios_min:%12s\:" % _("Minimum") + " %6.2lf",
-			r"GPRINT:read_ios_avg:%12s\:" % _("Average") + " %6.2lf",
-
- "LINE1:write_ios%s:%-15s" % (COLOUR_WRITE, _("Written")),
-			r"GPRINT:write_ios_cur:%12s\:" % _("Current") + " %6.2lf",
-			r"GPRINT:write_ios_max:%12s\:" % _("Maximum") + " %6.2lf",
-			r"GPRINT:write_ios_min:%12s\:" % _("Minimum") + " %6.2lf",
-			r"GPRINT:write_ios_avg:%12s\:" % _("Average") + " %6.2lf",
- ]
-
- return rrd_graph
-
- lower_limit = 0
-
- @property
- def graph_title(self):
- return _("Disk IO Operations of %s") % self.object.device_string
-
- @property
- def graph_vertical_label(self):
- return _("Operations per Second")
-
-
-class GraphTemplateDiskTemperature(base.GraphTemplate):
- name = "disk-temperature"
-
- @property
- def rrd_graph(self):
- rrd_graph = [
- "CDEF:celsius=temperature,273.15,-",
- "VDEF:temp_cur=celsius,LAST",
- "VDEF:temp_min=celsius,MINIMUM",
- "VDEF:temp_max=celsius,MAXIMUM",
- "VDEF:temp_avg=celsius,AVERAGE",
-
- "LINE2:celsius%s:%s" % (PRIMARY, _("Temperature")),
-			r"GPRINT:temp_cur:%12s\:" % _("Current") + " %3.2lf",
-			r"GPRINT:temp_max:%12s\:" % _("Maximum") + " %3.2lf",
-			r"GPRINT:temp_min:%12s\:" % _("Minimum") + " %3.2lf",
-			r"GPRINT:temp_avg:%12s\:" % _("Average") + " %3.2lf",
- ]
-
- return rrd_graph
-
- @property
- def graph_title(self):
- return _("Disk Temperature of %s") % self.object.device_string
-
- @property
- def graph_vertical_label(self):
- return _("° Celsius")
-
- @property
- def rrd_graph_args(self):
- return [
- # Make the y-axis have a decimal
- "--left-axis-format", "%3.1lf",
- ]
-
-
-class DiskObject(base.Object):
- rrd_schema = [
- "DS:awake:GAUGE:0:1",
- "DS:read_ios:DERIVE:0:U",
- "DS:read_sectors:DERIVE:0:U",
- "DS:write_ios:DERIVE:0:U",
- "DS:write_sectors:DERIVE:0:U",
- "DS:bad_sectors:GAUGE:0:U",
- "DS:temperature:GAUGE:U:U",
- ]
-
- def __repr__(self):
- return "<%s %s (%s)>" % (self.__class__.__name__, self.sys_path, self.id)
-
- def init(self, device):
- self.dev_path = os.path.join("/dev", device)
- self.sys_path = os.path.join("/sys/block", device)
-
- self.device = _collecty.BlockDevice(self.dev_path)
-
- @property
- def id(self):
- return "-".join((self.device.model, self.device.serial))
-
- @property
- def device_string(self):
- return "%s (%s)" % (self.device.model, self.dev_path)
-
- def collect(self):
- stats = self.parse_stats()
-
- return (
- self.is_awake(),
- stats.get("read_ios"),
- stats.get("read_sectors"),
- stats.get("write_ios"),
- stats.get("write_sectors"),
- self.get_bad_sectors(),
- self.get_temperature(),
- )
-
- def parse_stats(self):
- """
- https://www.kernel.org/doc/Documentation/block/stat.txt
-
- Name units description
- ---- ----- -----------
- read I/Os requests number of read I/Os processed
- read merges requests number of read I/Os merged with in-queue I/O
- read sectors sectors number of sectors read
- read ticks milliseconds total wait time for read requests
- write I/Os requests number of write I/Os processed
- write merges requests number of write I/Os merged with in-queue I/O
- write sectors sectors number of sectors written
- write ticks milliseconds total wait time for write requests
- in_flight requests number of I/Os currently in flight
- io_ticks milliseconds total time this block device has been active
- time_in_queue milliseconds total wait time for all requests
- """
- stats_file = os.path.join(self.sys_path, "stat")
-
- with open(stats_file) as f:
- stats = f.read().split()
-
- return {
- "read_ios" : stats[0],
- "read_merges" : stats[1],
- "read_sectors" : stats[2],
- "read_ticks" : stats[3],
- "write_ios" : stats[4],
- "write_merges" : stats[5],
- "write_sectors" : stats[6],
- "write_ticks" : stats[7],
- "in_flight" : stats[8],
- "io_ticks" : stats[9],
- "time_in_queue" : stats[10],
- }
-
- def is_smart_supported(self):
- """
- We can only query SMART data if SMART is supported by the disk
-		and the disk is awake.
- """
- return self.device.is_smart_supported() and self.device.is_awake()
-
- def is_awake(self):
- # If SMART is supported we can get the data from the disk
- if self.device.is_smart_supported():
- if self.device.is_awake():
- return 1
- else:
- return 0
-
- # Otherwise we just assume that the disk is awake
- return 1
-
- def get_temperature(self):
- if not self.is_smart_supported():
- return "NaN"
-
- try:
- return self.device.get_temperature()
- except OSError:
- return "NaN"
-
- def get_bad_sectors(self):
- if not self.is_smart_supported():
- return "NaN"
-
- return self.device.get_bad_sectors()
-
-
-class DiskPlugin(base.Plugin):
- name = "disk"
- description = "Disk Plugin"
-
- templates = [
- GraphTemplateDiskBadSectors,
- GraphTemplateDiskBytes,
- GraphTemplateDiskIoOps,
- GraphTemplateDiskTemperature,
- ]
-
- block_device_patterns = [
- r"(x?v|s)d[a-z]+",
- r"mmcblk[0-9]+",
- ]
-
- @property
- def objects(self):
- for dev in self.find_block_devices():
- try:
- yield DiskObject(self, dev)
- except OSError:
- pass
-
- def find_block_devices(self):
- for device in os.listdir("/sys/block"):
- # Skip invalid device names
- if not self._valid_block_device_name(device):
- continue
-
- yield device
-
- def _valid_block_device_name(self, name):
- # Check if the given name matches any of the valid patterns.
- for pattern in self.block_device_patterns:
- if re.match(pattern, name):
- return True
-
- return False
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import os
-
-from .. import util
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class GraphTemplateInterfaceBase(base.GraphTemplate):
- @property
- def interface(self):
- return self.object.interface
-
-
-class GraphTemplateInterfaceBits(GraphTemplateInterfaceBase):
- name = "interface-bits"
-
- @property
- def rrd_graph(self):
- return [
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- # Convert everything into bits.
- "CDEF:bits_rx=bytes_rx,8,*",
- "CDEF:bits_tx=bytes_tx,8,*",
-
- # Compute 95% lines.
- "VDEF:bits_rx_95p=bits_rx,95,PERCENT",
- "VDEF:bits_tx_95p=bits_tx,95,PERCENT",
-
- # Draw the received area.
- "AREA:bits_rx%s:%s" % (
- transparency(COLOUR_RX, AREA_OPACITY),
- LABEL % _("Received"),
- ),
- "GPRINT:bits_rx_cur:%s" % BPS,
- "GPRINT:bits_rx_avg:%s" % BPS,
- "GPRINT:bits_rx_min:%s" % BPS,
- "GPRINT:bits_rx_max:%s\\j" % BPS,
-
- # Draw the transmitted area.
- "AREA:bits_tx%s:%-15s" % (
- transparency(COLOUR_TX, AREA_OPACITY),
- LABEL % _("Transmitted"),
- ),
- "GPRINT:bits_tx_cur:%s" % BPS,
- "GPRINT:bits_tx_avg:%s" % BPS,
- "GPRINT:bits_tx_min:%s" % BPS,
- "GPRINT:bits_tx_max:%s\\j" % BPS,
-
- # Draw outlines.
- "LINE1:bits_rx%s" % COLOUR_RX,
- "LINE1:bits_tx%s" % COLOUR_TX,
-
- EMPTY_LINE,
-
- # Draw the 95% lines.
- "COMMENT:%s" % _("95th Percentile"),
- "LINE:bits_rx_95p%s:%s:dashes" % (COLOUR_RX, LABEL % _("Received")),
- "GPRINT:bits_rx_95p:%s\\r" % BPS,
- "LINE:bits_tx_95p%s:%s:dashes" % (COLOUR_TX, LABEL % _("Transmitted")),
- "GPRINT:bits_tx_95p:%s\\r" % BPS,
- ]
-
- @property
- def graph_title(self):
- return _("Bandwidth Usage on %s") % self.interface
-
- @property
- def graph_vertical_label(self):
- return _("Bit/s")
-
-
-class GraphTemplateInterfacePackets(GraphTemplateInterfaceBase):
- name = "interface-packets"
-
- @property
- def rrd_graph(self):
- return [
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- # Draw the received area.
- "AREA:packets_rx%s:%s" % (
- transparency(COLOUR_RX, AREA_OPACITY),
- LABEL % _("Received"),
- ),
- "GPRINT:packets_rx_cur:%s" % PPS,
- "GPRINT:packets_rx_avg:%s" % PPS,
- "GPRINT:packets_rx_min:%s" % PPS,
- "GPRINT:packets_rx_max:%s\\j" % PPS,
-
- # Draw the transmitted area.
- "AREA:packets_tx%s:%s" % (
- transparency(COLOUR_TX, AREA_OPACITY),
- LABEL % _("Transmitted"),
- ),
- "GPRINT:packets_tx_cur:%s" % PPS,
- "GPRINT:packets_tx_avg:%s" % PPS,
- "GPRINT:packets_tx_min:%s" % PPS,
- "GPRINT:packets_tx_max:%s\\j" % PPS,
-
- # Draw outlines of the areas on top.
- "LINE1:packets_rx%s" % COLOUR_RX,
- "LINE1:packets_tx%s" % COLOUR_TX,
- ]
-
- @property
- def graph_title(self):
- return _("Transferred Packets on %s") % self.interface
-
- @property
- def graph_vertical_label(self):
- return _("Packets/s")
-
-
-class GraphTemplateInterfaceErrors(GraphTemplateInterfaceBase):
- name = "interface-errors"
-
- @property
- def rrd_graph(self):
- return [
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- # Invert the transmitted packets to create upside down graph.
-			# Invert the transmitted packets to create an upside-down graph.
- "CDEF:dropped_tx_inv=dropped_tx,-1,*",
-
- # Draw the receive errors.
- "AREA:errors_rx%s:%-15s" % (
- transparency(COLOUR_RX, AREA_OPACITY),
- LABEL % _("Receive Errors"),
- ),
- "GPRINT:errors_rx_cur:%s" % PPS,
- "GPRINT:errors_rx_avg:%s" % PPS,
- "GPRINT:errors_rx_min:%s" % PPS,
- "GPRINT:errors_rx_max:%s\\j" % PPS,
- "LINE1:errors_rx%s" % COLOUR_RX,
-
- # Draw the transmit errors.
- "AREA:errors_tx_inv%s:%-15s" % (
- transparency(COLOUR_TX, AREA_OPACITY),
- LABEL % _("Transmit Errors"),
- ),
- "GPRINT:errors_tx_cur:%s" % PPS,
- "GPRINT:errors_tx_avg:%s" % PPS,
- "GPRINT:errors_tx_min:%s" % PPS,
- "GPRINT:errors_tx_max:%s\\j" % PPS,
- "LINE1:errors_tx_inv%s" % COLOUR_TX,
-
- # Draw the receive drops.
- "LINE2:dropped_rx%s:%-15s" % (
- transparency(AMBER, AREA_OPACITY),
- LABEL % _("Receive Drops"),
- ),
- "GPRINT:dropped_rx_cur:%s" % PPS,
- "GPRINT:dropped_rx_avg:%s" % PPS,
- "GPRINT:dropped_rx_min:%s" % PPS,
- "GPRINT:dropped_rx_max:%s\\j" % PPS,
- "LINE1:dropped_rx#228B22",
-
- # Draw the transmit drops.
- "LINE2:dropped_tx%s:%-15s" % (
- transparency(TEAL, AREA_OPACITY),
- LABEL % _("Transmit Drops"),
- ),
- "GPRINT:dropped_tx_cur:%s" % PPS,
- "GPRINT:dropped_tx_avg:%s" % PPS,
- "GPRINT:dropped_tx_min:%s" % PPS,
- "GPRINT:dropped_tx_max:%s\\j" % PPS,
- "LINE1:dropped_tx%s" % TEAL,
-
- EMPTY_LINE,
-
- # Draw the collisions as a line.
- "LINE2:collisions%s:%s" % (
- COLOUR_CRITICAL,
- LABEL % _("Collisions"),
- ),
- "GPRINT:collisions_cur:%s" % PPS,
- "GPRINT:collisions_avg:%s" % PPS,
- "GPRINT:collisions_min:%s" % PPS,
- "GPRINT:collisions_max:%s\\j" % PPS,
- ]
-
- @property
- def graph_title(self):
- return _("Errors/Dropped Packets on %s") % self.interface
-
- @property
- def graph_vertical_label(self):
- return _("Packets/s")
-
-
-class InterfaceObject(base.Object):
- rrd_schema = [
- "DS:bytes_rx:DERIVE:0:U",
- "DS:bytes_tx:DERIVE:0:U",
- "DS:collisions:DERIVE:0:U",
- "DS:dropped_rx:DERIVE:0:U",
- "DS:dropped_tx:DERIVE:0:U",
- "DS:errors_rx:DERIVE:0:U",
- "DS:errors_tx:DERIVE:0:U",
- "DS:multicast:DERIVE:0:U",
- "DS:packets_rx:DERIVE:0:U",
- "DS:packets_tx:DERIVE:0:U",
- ]
-
- def __repr__(self):
- return "<%s %s>" % (self.__class__.__name__, self.interface)
-
- def init(self, interface):
- self.interface = interface
-
- @property
- def id(self):
- return self.interface
-
- def collect(self):
- interface_path = os.path.join("/sys/class/net", self.interface)
-
- # Check if the interface exists.
- if not os.path.exists(interface_path):
-			self.log.debug(_("Interface %s does not exist. Cannot collect.") \
- % self.interface)
- return
-
- files = (
- "rx_bytes", "tx_bytes",
- "collisions",
- "rx_dropped", "tx_dropped",
- "rx_errors", "tx_errors",
- "multicast",
- "rx_packets", "tx_packets",
- )
- ret = []
-
- for file in files:
- path = os.path.join(interface_path, "statistics", file)
-
- ret.append(
- self.read_file_integer(path),
- )
-
- return ret
-
-
-class InterfacePlugin(base.Plugin):
- name = "interface"
- description = "Interface Statistics Plugin"
-
- templates = [
- GraphTemplateInterfaceBits,
- GraphTemplateInterfacePackets,
- GraphTemplateInterfaceErrors,
- ]
-
- interval = 30
-
- @property
- def objects(self):
- for interface in util.get_network_interfaces():
- yield InterfaceObject(self, interface=interface)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import os
-import re
-
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class GraphTemplateInterrupts(base.GraphTemplate):
- name = "interrupts"
-
- @property
- def rrd_graph(self):
- return [
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- "AREA:intr%s:%-15s" % (
- transparency(PRIMARY, AREA_OPACITY), LABEL % _("Interrupts"),
- ),
- "GPRINT:intr_cur:%s" % LARGE_INTEGER,
- "GPRINT:intr_avg:%s" % LARGE_INTEGER,
- "GPRINT:intr_min:%s" % LARGE_INTEGER,
- "GPRINT:intr_max:%s\\j" % LARGE_INTEGER,
-
- "LINE1:intr%s" % PRIMARY,
- ]
-
- lower_limit = 0
-
- @property
- def graph_title(self):
- if self.object.irq is None:
- return _("Interrupts")
-
- return _("Interrupt %s") % self.object.irq
-
- @property
- def graph_vertical_label(self):
- return _("Interrupts/s")
-
-
-class InterruptObject(base.Object):
- rrd_schema = [
- "DS:intr:DERIVE:0:U",
- ]
-
- def init(self, irq=None):
- self.irq = irq
-
- @property
- def id(self):
- if self.irq is None:
- return "default"
-
- return "%s" % self.irq
-
- def collect(self):
- stat = self.read_proc_stat()
-
- # Get a list of all interrupt events
- interrupts = stat.get("intr").split()
-
- # The first value is the sum of all interrupts
- total = interrupts.pop(0)
-
- if self.irq is None:
- return total
-
- # Otherwise return the value for a specific IRQ
- return interrupts[self.irq]
-
-
-class InterruptsPlugin(base.Plugin):
- name = "interrupts"
- description = "Interrupts Plugin"
-
- templates = [GraphTemplateInterrupts]
-
- @property
- def objects(self):
- yield InterruptObject(self)
-
- for irq in os.listdir("/sys/kernel/irq"):
- try:
- irq = int(irq)
- except (ValueError, TypeError):
- continue
-
- yield InterruptObject(self, irq)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import os
-
-from .. import util
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class GraphTemplateIPv6Fragmentation(base.GraphTemplate):
- name = "ipv6-fragmentation"
-
- @property
- def rrd_graph(self):
- return [
- "CDEF:ip6_reasm_real_fails=ip6_reasm_fails,ip6_reasm_timeout,-",
-
- # Reassembly
- "AREA:ip6_reasm_real_fails%s:%s" % \
- (transparency(COLOUR_ERROR, AREA_OPACITY),
- LABEL % _("Failed Reassemblies"),
- ),
- "GPRINT:ip6_reasm_fails_cur:%s" % INTEGER,
- "GPRINT:ip6_reasm_fails_avg:%s" % INTEGER,
- "GPRINT:ip6_reasm_fails_min:%s" % INTEGER,
- "GPRINT:ip6_reasm_fails_max:%s" % INTEGER,
-
- "AREA:ip6_reasm_timeout%s:%s:STACK" % \
- (transparency(COLOUR_WARN, AREA_OPACITY),
- LABEL % _("Reassembly Timeouts"),
- ),
- "GPRINT:ip6_reasm_timeout_cur:%s" % INTEGER,
- "GPRINT:ip6_reasm_timeout_avg:%s" % INTEGER,
-			"GPRINT:ip6_reasm_timeout_min:%s" % INTEGER,
-			"GPRINT:ip6_reasm_timeout_max:%s" % INTEGER,
-
- "LINE2:ip6_reasm_oks%s:%-24s" % (
- BLACK,
- LABEL % _("Successful Reassemblies"),
- ),
- "GPRINT:ip6_reasm_oks_cur:%s" % INTEGER,
- "GPRINT:ip6_reasm_oks_avg:%s" % INTEGER,
-			"GPRINT:ip6_reasm_oks_min:%s" % INTEGER,
-			"GPRINT:ip6_reasm_oks_max:%s" % INTEGER,
-
- EMPTY_LINE,
-
- # Fragmentation
- "LINE2:ip6_frags_fails%s:%s" % (
- COLOUR_ERROR,
- LABEL % _("Failed Fragmentations"),
- ),
- "GPRINT:ip6_frags_fails_cur:%s" % INTEGER,
- "GPRINT:ip6_frags_fails_avg:%s" % INTEGER,
-			"GPRINT:ip6_frags_fails_min:%s" % INTEGER,
-			"GPRINT:ip6_frags_fails_max:%s" % INTEGER,
-
- "LINE2:ip6_frags_oks%s:%-24s" % (
- COLOUR_OK,
- LABEL % _("Fragmented Packets"),
- ),
- "GPRINT:ip6_frags_oks_cur:%s" % INTEGER,
- "GPRINT:ip6_frags_oks_avg:%s" % INTEGER,
-			"GPRINT:ip6_frags_oks_min:%s" % INTEGER,
-			"GPRINT:ip6_frags_oks_max:%s" % INTEGER,
-
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
- ]
-
- @property
- def graph_title(self):
- if self.object.interface:
- return _("IPv6 Fragmentation on %s") % self.object.interface
-
- return _("IPv6 Fragmentation")
-
- @property
- def graph_vertical_label(self):
- return _("Packets/s")
-
- @property
- def rrd_graph_args(self):
- return [
- "--base", "1000",
- "--legend-direction=bottomup",
- ]
-
-
-class GraphTemplateIPv4Fragmentation(base.GraphTemplate):
- name = "ipv4-fragmentation"
-
- @property
- def rrd_graph(self):
- return [
- "CDEF:ip4_reasm_real_fails=ip4_reasm_fails,ip4_reasm_timeout,-",
-
- # Reassembly
- "AREA:ip4_reasm_real_fails%s:%s" % \
- (transparency(COLOUR_ERROR, AREA_OPACITY),
- LABEL % _("Failed Reassemblies"),
- ),
- "GPRINT:ip4_reasm_fails_cur:%s" % INTEGER,
- "GPRINT:ip4_reasm_fails_avg:%s" % INTEGER,
- "GPRINT:ip4_reasm_fails_min:%s" % INTEGER,
- "GPRINT:ip4_reasm_fails_max:%s" % INTEGER,
-
- "AREA:ip4_reasm_timeout%s:%s:STACK" % \
- (transparency(COLOUR_WARN, AREA_OPACITY),
- LABEL % _("Reassembly Timeouts"),
- ),
- "GPRINT:ip4_reasm_timeout_cur:%s" % INTEGER,
- "GPRINT:ip4_reasm_timeout_avg:%s" % INTEGER,
-			"GPRINT:ip4_reasm_timeout_min:%s" % INTEGER,
-			"GPRINT:ip4_reasm_timeout_max:%s" % INTEGER,
-
- "LINE2:ip4_reasm_oks%s:%-24s" % (
- BLACK,
- LABEL % _("Successful Reassemblies"),
- ),
- "GPRINT:ip4_reasm_oks_cur:%s" % INTEGER,
- "GPRINT:ip4_reasm_oks_avg:%s" % INTEGER,
-			"GPRINT:ip4_reasm_oks_min:%s" % INTEGER,
-			"GPRINT:ip4_reasm_oks_max:%s" % INTEGER,
-
- EMPTY_LINE,
-
- # Fragmentation
- "LINE2:ip4_frags_fails%s:%s" % (
- COLOUR_ERROR,
- LABEL % _("Failed Fragmentations"),
- ),
- "GPRINT:ip4_frags_fails_cur:%s" % INTEGER,
- "GPRINT:ip4_frags_fails_avg:%s" % INTEGER,
-			"GPRINT:ip4_frags_fails_min:%s" % INTEGER,
-			"GPRINT:ip4_frags_fails_max:%s" % INTEGER,
-
- "LINE2:ip4_frags_oks%s:%-24s" % (
- COLOUR_OK,
- LABEL % _("Fragmented Packets"),
- ),
- "GPRINT:ip4_frags_oks_cur:%s" % INTEGER,
- "GPRINT:ip4_frags_oks_avg:%s" % INTEGER,
-			"GPRINT:ip4_frags_oks_min:%s" % INTEGER,
-			"GPRINT:ip4_frags_oks_max:%s" % INTEGER,
-
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
- ]
-
- @property
- def graph_title(self):
- if self.object.interface:
- return _("IPv4 Fragmentation on %s") % self.object.interface
-
- return _("IPv4 Fragmentation")
-
- @property
- def graph_vertical_label(self):
- return _("Packets/s")
-
- @property
- def rrd_graph_args(self):
- return [
- "--base", "1000",
- "--legend-direction=bottomup",
- ]
-
-
-class IPFragmentationObject(base.Object):
- rrd_schema = [
- "DS:ip6_frags_oks:DERIVE:0:U",
- "DS:ip6_frags_fails:DERIVE:0:U",
- "DS:ip6_frags_creates:DERIVE:0:U",
- "DS:ip6_reasm_timeout:DERIVE:0:U",
- "DS:ip6_reasm_reqds:DERIVE:0:U",
- "DS:ip6_reasm_oks:DERIVE:0:U",
- "DS:ip6_reasm_fails:DERIVE:0:U",
- "DS:ip4_frags_oks:DERIVE:0:U",
- "DS:ip4_frags_fails:DERIVE:0:U",
- "DS:ip4_frags_creates:DERIVE:0:U",
- "DS:ip4_reasm_timeout:DERIVE:0:U",
- "DS:ip4_reasm_reqds:DERIVE:0:U",
- "DS:ip4_reasm_oks:DERIVE:0:U",
- "DS:ip4_reasm_fails:DERIVE:0:U",
- ]
-
- def __repr__(self):
- if self.interface:
- return "<%s %s>" % (self.__class__.__name__, self.interface)
-
- return "<%s>" % self.__class__.__name__
-
- def init(self, interface=None):
- self.interface = interface
-
- @property
- def id(self):
- return self.interface or "default"
-
- def collect(self):
- p = util.ProcNetSnmpParser(self.interface)
-
- # Description in RFC2465
- results = [
- p.get("Ip6", "FragOKs"),
- p.get("Ip6", "FragFails"),
- p.get("Ip6", "FragCreates"),
- p.get("Ip6", "ReasmTimeout"),
- p.get("Ip6", "ReasmReqds"),
- p.get("Ip6", "ReasmOKs"),
- p.get("Ip6", "ReasmFails"),
- p.get("Ip", "FragOKs"),
- p.get("Ip", "FragFails"),
- p.get("Ip", "FragCreates"),
- p.get("Ip", "ReasmTimeout"),
- p.get("Ip", "ReasmReqds"),
- p.get("Ip", "ReasmOKs"),
- p.get("Ip", "ReasmFails"),
- ]
-
- return results
-
-
-class IPFragmentationPlugin(base.Plugin):
- name = "ip-fragmentation"
- description = "IP Fragmentation Plugin"
-
- templates = [
- GraphTemplateIPv6Fragmentation,
- GraphTemplateIPv4Fragmentation,
- ]
-
- @property
- def objects(self):
- # Overall statistics
- yield IPFragmentationObject(self)
-
- # Stats per interface
- for interface in util.get_network_interfaces():
- yield IPFragmentationObject(self, interface)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import socket
-
-from .. import _collecty
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-PING_HOSTS = [
- # gateway is a special name that is automatically
- # resolved by myhostname to the default gateway.
- "gateway",
-
- # The IPFire main server
- "ping.ipfire.org",
-]
-
-class GraphTemplateLatency(base.GraphTemplate):
- name = "latency"
-
- lower_limit = 0
-
- @property
- def rrd_graph(self):
- return [
- # Compute the biggest loss and convert into percentage
- "CDEF:ploss=loss6,loss4,MAX,100,*",
-
- # Compute standard deviation
- "CDEF:stddevarea6=stddev6,2,*",
- "CDEF:spacer6=latency6,stddev6,-",
- "CDEF:stddevarea4=stddev4,2,*",
- "CDEF:spacer4=latency4,stddev4,-",
-
- "CDEF:l005=ploss,0,5,LIMIT,UN,UNKN,INF,IF",
- "CDEF:l010=ploss,5,10,LIMIT,UN,UNKN,INF,IF",
- "CDEF:l025=ploss,10,25,LIMIT,UN,UNKN,INF,IF",
- "CDEF:l050=ploss,25,50,LIMIT,UN,UNKN,INF,IF",
- "CDEF:l099=ploss,50,99,LIMIT,UN,UNKN,INF,IF",
-
- # Draw average lines
- "LINE:latency6_avg%s::dashes" % (
- lighten(COLOUR_IPV6),
- ),
- "LINE:latency4_avg%s::dashes" % (
- lighten(COLOUR_IPV4),
- ),
-
- # Colour background on packet loss
- "COMMENT:%s" % _("Packet Loss"),
- "AREA:l005%s:%s" % (
- transparency(BLACK, .2), _("0-5%"),
- ),
- "AREA:l010%s:%s" % (
- transparency(BLACK, .4), _("5-10%"),
- ),
- "AREA:l025%s:%s" % (
- transparency(BLACK, .6), _("10-25%"),
- ),
- "AREA:l050%s:%s" % (
- transparency(BLACK, .8), _("25-50%"),
- ),
- "AREA:l099%s:%s\\r" % (BLACK, _("50-99%")),
-
- EMPTY_LINE,
-
- # Plot standard deviation
- "AREA:spacer4",
- "AREA:stddevarea4%s:STACK" % transparency(COLOUR_IPV4, STDDEV_OPACITY),
- "LINE2:latency4%s:%s" % (
- COLOUR_IPV4,
- LABEL % _("Latency (IPv4)"),
- ),
- "GPRINT:latency4_cur:%s" % MS,
- "GPRINT:latency4_avg:%s" % MS,
- "GPRINT:latency4_min:%s" % MS,
- "GPRINT:latency4_max:%s\\j" % MS,
-
- "AREA:spacer6",
- "AREA:stddevarea6%s:STACK" % transparency(COLOUR_IPV6, STDDEV_OPACITY),
- "LINE2:latency6%s:%s" % (
- COLOUR_IPV6,
- LABEL % _("Latency (IPv6)"),
- ),
- "GPRINT:latency6_cur:%s" % MS,
- "GPRINT:latency6_avg:%s" % MS,
- "GPRINT:latency6_min:%s" % MS,
- "GPRINT:latency6_max:%s\\j" % MS,
-
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
- ]
-
- @property
- def graph_title(self):
- if self.object.hostname == "gateway":
- hostname = _("Default Gateway")
- else:
- hostname = self.object.hostname
-
- return _("Latency to %s") % hostname
-
- @property
- def graph_vertical_label(self):
- return _("Milliseconds")
-
- @property
- def rrd_graph_args(self):
- return [
- "--legend-direction=bottomup",
- ]
-
-
-class LatencyObject(base.Object):
- rrd_schema = [
- "DS:latency6:GAUGE:0:U",
- "DS:stddev6:GAUGE:0:U",
- "DS:loss6:GAUGE:0:100",
- "DS:latency4:GAUGE:0:U",
- "DS:stddev4:GAUGE:0:U",
- "DS:loss4:GAUGE:0:100",
- ]
-
- def init(self, hostname):
- self.hostname = hostname
-
- @property
- def id(self):
- return self.hostname
-
- def collect(self):
- result = []
-
- for family in (socket.AF_INET6, socket.AF_INET):
- try:
- p = _collecty.Ping(self.hostname, family=family)
- p.ping(count=10, deadline=10)
-
- result += (p.average, p.stddev, p.loss)
-
- except _collecty.PingAddHostError as e:
- self.log.debug(_("Could not add host %(host)s for family %(family)s") \
- % { "host" : self.hostname, "family" : family })
-
- # No data available
- result += (None, None, None)
- continue
-
- except _collecty.PingNoReplyError:
- # Unknown but 100% loss
- result += (None, None, 1)
- continue
-
- except _collecty.PingError as e:
- self.log.warning(_("Could not run latency check for %(host)s: %(msg)s") \
- % { "host" : self.hostname, "msg" : e })
-
- # A hundred percent loss
- result += (None, None, 1)
-
- return result
-
-
-class LatencyPlugin(base.Plugin):
- name = "latency"
- description = "Latency (ICMP ping) Plugin"
-
- templates = [GraphTemplateLatency]
-
- # Because this plugin has the potential to block, we give it a slightly lower priority
- priority = 10
-
- @property
- def objects(self):
- for hostname in PING_HOSTS:
- yield LatencyObject(self, hostname)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import os
-
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class GraphTemplateLoadAvg(base.GraphTemplate):
- name = "loadavg"
-
- @property
- def rrd_graph(self):
- rrd_graph = [
- "LINE2:load15%s:%s" % (
- YELLOW, LABEL % _("15 Minutes"),
- ),
- "GPRINT:load15_cur:%s" % FLOAT,
- "GPRINT:load15_avg:%s" % FLOAT,
- "GPRINT:load15_min:%s" % FLOAT,
- "GPRINT:load15_max:%s\\j" % FLOAT,
-
- "LINE2:load5%s:%s" % (
- ORANGE, LABEL % _("5 Minutes"),
- ),
- "GPRINT:load5_cur:%s" % FLOAT,
- "GPRINT:load5_avg:%s" % FLOAT,
- "GPRINT:load5_min:%s" % FLOAT,
- "GPRINT:load5_max:%s\\j" % FLOAT,
-
- "LINE2:load1%s:%s" % (
- RED, LABEL % _("1 Minute"),
- ),
- "GPRINT:load1_cur:%s" % FLOAT,
- "GPRINT:load1_avg:%s" % FLOAT,
- "GPRINT:load1_min:%s" % FLOAT,
- "GPRINT:load1_max:%s\\j" % FLOAT,
-
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
- ]
-
- return rrd_graph
-
- lower_limit = 0
-
- @property
- def graph_title(self):
- return _("Load Average")
-
- @property
- def graph_vertical_label(self):
- return _("Load")
-
- @property
- def rrd_graph_args(self):
- return [
- "--legend-direction=bottomup",
- ]
-
-
-class LoadAvgObject(base.Object):
- rrd_schema = [
- "DS:load1:GAUGE:0:U",
- "DS:load5:GAUGE:0:U",
- "DS:load15:GAUGE:0:U",
- ]
-
- @property
- def id(self):
- return "default"
-
- def collect(self):
- return os.getloadavg()
-
-
-class LoadAvgPlugin(base.Plugin):
- name = "loadavg"
- description = "Load Average Plugin"
-
- templates = [GraphTemplateLoadAvg]
-
- @property
- def objects(self):
- return [LoadAvgObject(self)]
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class GraphTemplateMemory(base.GraphTemplate):
- name = "memory"
-
- lower_limit = 0
-
- @property
- def rrd_graph(self):
- return [
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- # Convert everything into bytes
- "CDEF:mem_total_bytes=mem_total,1024,*",
- "CDEF:mem_cached_bytes=mem_cached,1024,*",
- "CDEF:mem_buffered_bytes=mem_buffered,1024,*",
- "CDEF:mem_free_bytes=mem_free,1024,*",
- "CDEF:swap_total_bytes=swap_total,1024,*",
- "CDEF:swap_free_bytes=swap_free,1024,*",
-
- # Compute used memory & swap
- "CDEF:mem_used_bytes=mem_total_bytes,mem_free_bytes,-,mem_cached_bytes,-,mem_buffered_bytes,-",
- "CDEF:swap_used_bytes=swap_total_bytes,swap_free_bytes,-",
-
- "AREA:mem_used_bytes%s:%s" % (
- transparency(MEMORY_USED, AREA_OPACITY),
- LABEL % _("Used Memory"),
- ),
- "GPRINT:mem_used_bytes_cur:%s" % LARGE_FLOAT,
- "GPRINT:mem_used_bytes_avg:%s" % LARGE_FLOAT,
- "GPRINT:mem_used_bytes_min:%s" % LARGE_FLOAT,
- "GPRINT:mem_used_bytes_max:%s\\j" % LARGE_FLOAT,
-
- "AREA:mem_buffered_bytes%s:%s:STACK" % (
- transparency(MEMORY_BUFFERED, AREA_OPACITY),
- LABEL % _("Buffered Data"),
- ),
- "GPRINT:mem_buffered_bytes_cur:%s" % LARGE_FLOAT,
- "GPRINT:mem_buffered_bytes_avg:%s" % LARGE_FLOAT,
- "GPRINT:mem_buffered_bytes_min:%s" % LARGE_FLOAT,
- "GPRINT:mem_buffered_bytes_max:%s\\j" % LARGE_FLOAT,
-
- "AREA:mem_cached_bytes%s:%s:STACK" % (
- transparency(MEMORY_CACHED, AREA_OPACITY),
- LABEL % _("Cached Data")),
- "GPRINT:mem_cached_bytes_cur:%s" % LARGE_FLOAT,
- "GPRINT:mem_cached_bytes_avg:%s" % LARGE_FLOAT,
- "GPRINT:mem_cached_bytes_min:%s" % LARGE_FLOAT,
- "GPRINT:mem_cached_bytes_max:%s\\j" % LARGE_FLOAT,
-
- "AREA:mem_free_bytes%s:%s:STACK" % (
- transparency(MEMORY_FREE, AREA_OPACITY),
- LABEL % _("Free Memory"),
- ),
- "GPRINT:mem_free_bytes_cur:%s" % LARGE_FLOAT,
- "GPRINT:mem_free_bytes_avg:%s" % LARGE_FLOAT,
- "GPRINT:mem_free_bytes_min:%s" % LARGE_FLOAT,
- "GPRINT:mem_free_bytes_max:%s\\j" % LARGE_FLOAT,
-
- EMPTY_LINE,
-
- "LINE:swap_used_bytes%s:%-15s" % (MEMORY_SWAP, LABEL % _("Used Swap Space")),
- "GPRINT:swap_used_bytes_cur:%s" % LARGE_FLOAT,
- "GPRINT:swap_used_bytes_avg:%s" % LARGE_FLOAT,
- "GPRINT:swap_used_bytes_min:%s" % LARGE_FLOAT,
- "GPRINT:swap_used_bytes_max:%s\\j" % LARGE_FLOAT,
-
- # Draw the outlines of the areas
- "LINE1:mem_used_bytes%s" % MEMORY_USED,
- "LINE1:mem_buffered_bytes%s::STACK" % MEMORY_BUFFERED,
- "LINE1:mem_cached_bytes%s::STACK" % MEMORY_CACHED,
- ]
-
- @property
- def graph_title(self):
- return _("Memory Usage")
-
- @property
- def graph_vertical_label(self):
- return _("Bytes")
-
-
-class MemoryObject(base.Object):
- rrd_schema = [
- "DS:mem_total:GAUGE:0:U",
- "DS:mem_cached:GAUGE:0:U",
- "DS:mem_buffered:GAUGE:0:U",
- "DS:mem_free:GAUGE:0:U",
- "DS:swap_total:GAUGE:0:U",
- "DS:swap_free:GAUGE:0:U",
- ]
-
- @property
- def id(self):
- return "default"
-
- def collect(self):
- meminfo = self.read_proc_meminfo()
-
- return (
- meminfo.get("MemTotal"),
- meminfo.get("Cached"),
- meminfo.get("Buffers"),
- meminfo.get("MemFree"),
- meminfo.get("SwapTotal"),
- meminfo.get("SwapFree"),
- )
-
-
-class MemoryPlugin(base.Plugin):
- name = "memory"
- description = "Memory Usage Plugin"
-
- templates = [GraphTemplateMemory]
-
- @property
- def objects(self):
- yield MemoryObject(self)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import multiprocessing
-
-from . import base
-
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class GraphTemplateProcessor(base.GraphTemplate):
- name = "processor"
-
- @property
- def rrd_graph(self):
- return [
- # Add all used CPU cycles
- "CDEF:usage=user,nice,+,sys,+,wait,+,irq,+,sirq,+,steal,+,guest,+,guest_nice,+",
-
- # Add idle to get the total number of cycles
- "CDEF:total=usage,idle,+",
-
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
-
- "CDEF:usage_p=100,usage,*,total,/",
- "COMMENT: %s" % (LABEL % _("Total")),
- "GPRINT:usage_p_cur:%s" % PERCENTAGE,
- "GPRINT:usage_p_avg:%s" % PERCENTAGE,
- "GPRINT:usage_p_min:%s" % PERCENTAGE,
- "GPRINT:usage_p_max:%s\\j" % PERCENTAGE,
-
- EMPTY_LINE,
-
- "CDEF:user_p=100,user,*,total,/",
- "AREA:user_p%s:%s" % (
- transparency(CPU_USER, AREA_OPACITY),
- LABEL % _("User"),
- ),
- "GPRINT:user_p_cur:%s" % PERCENTAGE,
- "GPRINT:user_p_avg:%s" % PERCENTAGE,
- "GPRINT:user_p_min:%s" % PERCENTAGE,
- "GPRINT:user_p_max:%s\\j" % PERCENTAGE,
-
- "CDEF:nice_p=100,nice,*,total,/",
- "AREA:nice_p%s:%s:STACK" % (
- transparency(CPU_NICE, AREA_OPACITY),
- LABEL % _("Nice"),
- ),
- "GPRINT:nice_p_cur:%s" % PERCENTAGE,
- "GPRINT:nice_p_avg:%s" % PERCENTAGE,
- "GPRINT:nice_p_min:%s" % PERCENTAGE,
- "GPRINT:nice_p_max:%s\\j" % PERCENTAGE,
-
- "CDEF:sys_p=100,sys,*,total,/",
- "AREA:sys_p%s:%s:STACK" % (
- transparency(CPU_SYS, AREA_OPACITY),
- LABEL % _("System"),
- ),
- "GPRINT:sys_p_cur:%s" % PERCENTAGE,
- "GPRINT:sys_p_avg:%s" % PERCENTAGE,
- "GPRINT:sys_p_min:%s" % PERCENTAGE,
- "GPRINT:sys_p_max:%s\\j" % PERCENTAGE,
-
- "CDEF:wait_p=100,wait,*,total,/",
- "AREA:wait_p%s:%s:STACK" % (
- transparency(CPU_WAIT, AREA_OPACITY),
- LABEL % _("Wait"),
- ),
- "GPRINT:wait_p_cur:%s" % PERCENTAGE,
- "GPRINT:wait_p_avg:%s" % PERCENTAGE,
- "GPRINT:wait_p_min:%s" % PERCENTAGE,
- "GPRINT:wait_p_max:%s\\j" % PERCENTAGE,
-
- "CDEF:irq_p=100,irq,*,total,/",
- "AREA:irq_p%s:%s:STACK" % (
- transparency(CPU_IRQ, AREA_OPACITY),
- LABEL % _("Interrupt"),
- ),
- "GPRINT:irq_p_cur:%s" % PERCENTAGE,
- "GPRINT:irq_p_avg:%s" % PERCENTAGE,
- "GPRINT:irq_p_min:%s" % PERCENTAGE,
- "GPRINT:irq_p_max:%s\\j" % PERCENTAGE,
-
- "CDEF:sirq_p=100,sirq,*,total,/",
- "AREA:sirq_p%s:%s:STACK" % (
- transparency(CPU_SIRQ, AREA_OPACITY),
- LABEL % _("Soft Interrupt"),
- ),
- "GPRINT:sirq_p_cur:%s" % PERCENTAGE,
- "GPRINT:sirq_p_avg:%s" % PERCENTAGE,
- "GPRINT:sirq_p_min:%s" % PERCENTAGE,
- "GPRINT:sirq_p_max:%s\\j" % PERCENTAGE,
-
- "CDEF:steal_p=100,steal,*,total,/",
- "AREA:steal_p%s:%s:STACK" % (
- transparency(CPU_STEAL, AREA_OPACITY),
- LABEL % _("Steal"),
- ),
- "GPRINT:steal_p_cur:%s" % PERCENTAGE,
- "GPRINT:steal_p_avg:%s" % PERCENTAGE,
- "GPRINT:steal_p_min:%s" % PERCENTAGE,
- "GPRINT:steal_p_max:%s\\j" % PERCENTAGE,
-
- "CDEF:guest_p=100,guest,*,total,/",
- "AREA:guest_p%s:%s:STACK" % (
- transparency(CPU_GUEST, AREA_OPACITY),
- LABEL % _("Guest"),
- ),
- "GPRINT:guest_p_cur:%s" % PERCENTAGE,
- "GPRINT:guest_p_avg:%s" % PERCENTAGE,
- "GPRINT:guest_p_min:%s" % PERCENTAGE,
- "GPRINT:guest_p_max:%s\\j" % PERCENTAGE,
-
- "CDEF:guest_nice_p=100,guest_nice,*,total,/",
- "AREA:guest_nice_p%s:%s:STACK" % (
- transparency(CPU_GUEST_NICE, AREA_OPACITY),
- LABEL % _("Guest Nice"),
- ),
- "GPRINT:guest_nice_p_cur:%s" % PERCENTAGE,
- "GPRINT:guest_nice_p_avg:%s" % PERCENTAGE,
- "GPRINT:guest_nice_p_min:%s" % PERCENTAGE,
- "GPRINT:guest_nice_p_max:%s\\j" % PERCENTAGE,
-
- "CDEF:idle_p=100,idle,*,total,/",
- "AREA:idle_p%s::STACK" % CPU_IDLE,
-
- # Draw contour lines
- "LINE:user_p%s" % CPU_USER,
- "LINE:nice_p%s::STACK" % CPU_NICE,
- "LINE:sys_p%s::STACK" % CPU_SYS,
- "LINE:wait_p%s::STACK" % CPU_WAIT,
- "LINE:irq_p%s::STACK" % CPU_IRQ,
- "LINE:sirq_p%s::STACK" % CPU_SIRQ,
- "LINE:steal_p%s::STACK" % CPU_STEAL,
- "LINE:guest_p%s::STACK" % CPU_GUEST,
- "LINE:guest_nice_p%s::STACK" % CPU_GUEST_NICE,
- ]
-
- upper_limit = 100
- lower_limit = 0
-
- @property
- def graph_title(self):
- return _("Processor Usage")
-
- @property
- def graph_vertical_label(self):
- return _("Percent")
-
-
-class ProcessorObject(base.Object):
- rrd_schema = [
- "DS:user:DERIVE:0:U",
- "DS:nice:DERIVE:0:U",
- "DS:sys:DERIVE:0:U",
- "DS:idle:DERIVE:0:U",
- "DS:wait:DERIVE:0:U",
- "DS:irq:DERIVE:0:U",
- "DS:sirq:DERIVE:0:U",
- "DS:steal:DERIVE:0:U",
- "DS:guest:DERIVE:0:U",
- "DS:guest_nice:DERIVE:0:U",
- ]
-
- def init(self, cpu_id=None):
- self.cpu_id = cpu_id
-
- @property
- def id(self):
- if self.cpu_id is not None:
- return "%s" % self.cpu_id
-
- return "default"
-
- def collect(self):
- """
- Reads the CPU usage.
- """
- stat = self.read_proc_stat()
-
- if self.cpu_id is None:
- values = stat.get("cpu")
- else:
- values = stat.get("cpu%s" % self.cpu_id)
-
- # Convert values into a list
- values = values.split()
-
-		if len(values) != len(self.rrd_schema):
- raise ValueError("Received unexpected output from /proc/stat: %s" % values)
-
- return values
-
-
-class ProcessorPlugin(base.Plugin):
- name = "processor"
- description = "Processor Usage Plugin"
-
- templates = [GraphTemplateProcessor]
-
- @property
- def objects(self):
- yield ProcessorObject(self)
-
- num = multiprocessing.cpu_count()
- for i in range(num):
- yield ProcessorObject(self, cpu_id=i)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2021 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import os
-
-from . import base
-from ..colours import *
-from ..constants import *
-from ..i18n import _
-
-class PSIGraphTemplate(base.GraphTemplate):
- name = "psi"
-
- @property
- def rrd_graph(self):
- rrd_graph = [
- "LINE2:some_avg300%s:%s" % (
- YELLOW, LABEL % _("5 Minutes"),
- ),
- "GPRINT:some_avg300_cur:%s" % FLOAT,
- "GPRINT:some_avg300_avg:%s" % FLOAT,
- "GPRINT:some_avg300_min:%s" % FLOAT,
- "GPRINT:some_avg300_max:%s\\j" % FLOAT,
-
- "LINE2:some_avg60%s:%s" % (
- ORANGE, LABEL % _("1 Minute"),
- ),
- "GPRINT:some_avg60_cur:%s" % FLOAT,
- "GPRINT:some_avg60_avg:%s" % FLOAT,
- "GPRINT:some_avg60_min:%s" % FLOAT,
- "GPRINT:some_avg60_max:%s\\j" % FLOAT,
-
- "LINE2:some_avg10%s:%s" % (
- RED, LABEL % _("10 Seconds"),
- ),
- "GPRINT:some_avg10_cur:%s" % FLOAT,
- "GPRINT:some_avg10_avg:%s" % FLOAT,
- "GPRINT:some_avg10_min:%s" % FLOAT,
- "GPRINT:some_avg10_max:%s\\j" % FLOAT,
-
- # Headline
- "COMMENT:%s" % EMPTY_LABEL,
- "COMMENT:%s" % (COLUMN % _("Current")),
- "COMMENT:%s" % (COLUMN % _("Average")),
- "COMMENT:%s" % (COLUMN % _("Minimum")),
- "COMMENT:%s\\j" % (COLUMN % _("Maximum")),
- ]
-
- return rrd_graph
-
- upper_limit = 100
- lower_limit = 0
-
- @property
- def graph_title(self):
- titles = {
- "cpu" : _("Processor Pressure Stall Information"),
- "io" : _("Input/Output Pressure Stall Information"),
- "memory" : _("Memory Pressure Stall Information"),
- }
-
- try:
- return titles[self.object.id]
- except KeyError:
- return _("%s Pressure Stall Information") % self.object.id
-
- @property
- def graph_vertical_label(self):
- return _("Percentage")
-
- @property
- def rrd_graph_args(self):
- return [
- "--legend-direction=bottomup",
- ]
-
-
-class PSIObject(base.Object):
- rrd_schema = [
- # some
- "DS:some_avg10:GAUGE:0:100",
- "DS:some_avg60:GAUGE:0:100",
- "DS:some_avg300:GAUGE:0:100",
- "DS:some_total:DERIVE:0:U",
-
- # full
- "DS:full_avg10:GAUGE:0:100",
- "DS:full_avg60:GAUGE:0:100",
- "DS:full_avg300:GAUGE:0:100",
- "DS:full_total:DERIVE:0:U",
- ]
-
- def __repr__(self):
- return "<%s %s>" % (self.__class__.__name__, self.item)
-
- def init(self, item):
- self.item = item
-
- self.path = os.path.join("/proc/pressure", self.item)
-
- @property
- def id(self):
- return self.item
-
- def collect(self):
- lines = self.read_file("/proc/pressure", self.item)
-
- # Do nothing if nothing could be read
- if not lines:
- return
-
- # Parse all input lines
- values = {}
- for line in lines.splitlines():
- values.update(self._parse_psi(line))
-
- # Return all values in order
- for share in ("some", "full"):
- for value in ("avg10", "avg60", "avg300", "total"):
- yield values.get("%s-%s" % (share, value), None)
-
- def _parse_psi(self, line):
- words = line.split(" ")
-
- share = None
- values = {}
-
- for i, word in enumerate(words):
- # Store the share of time
- if i == 0:
- share = word
- continue
-
- # Split word
- key, delim, value = word.partition("=")
-
-			# Store it in the values dictionary
- values["%s-%s" % (share, key)] = value
-
- # Return everything
- return values
-
-
-class PSIPlugin(base.Plugin):
- name = "psi"
- description = "Pressure Stall Information Plugin"
-
- templates = [
- PSIGraphTemplate,
- ]
-
- @property
- def objects(self):
- for item in os.listdir("/proc/pressure"):
- yield PSIObject(self, item)
+++ /dev/null
-#!/usr/bin/python3
-# encoding: utf-8
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import os
-import re
-
-from .. import _collecty
-from . import base
-from ..i18n import _
-
-class GraphTemplateSensorsTemperature(base.GraphTemplate):
- name = "sensors-temperature"
-
- @property
- def rrd_graph(self):
- return [
- # Convert everything to Celsius
- "CDEF:value_c=value,273.15,-",
- "CDEF:critical_c=critical,273.15,-",
- "CDEF:high_c=high,273.15,-",
- "CDEF:low_c=low,273.15,-",
-
- # Change colour when the value gets above high
- "CDEF:value_c_high=value_c,high_c,GT,value_c,UNKN,IF",
- "CDEF:value_c_normal=value_c,high_c,GT,UNKN,value_c,IF",
-
- # Get data points for the threshold lines
- "VDEF:critical_c_line=critical_c,MINIMUM",
- "VDEF:low_c_line=low_c,MAXIMUM",
-
- # Draw the temperature value
- "LINE3:value_c_high#ff0000",
- "LINE2:value_c_normal#00ff00:%-15s" % _("Temperature"),
-
- # Draw the legend
-			"GPRINT:value_c_cur:%10.2lf °C\\l",
- "GPRINT:value_c_avg: %-15s %%6.2lf °C" % _("Average"),
- "GPRINT:value_c_max: %-15s %%6.2lf °C" % _("Maximum"),
- "GPRINT:value_c_min: %-15s %%6.2lf °C" % _("Minimum"),
-
- # Empty line
- "COMMENT: \\n",
-
- # Draw boundary lines
-			"COMMENT:%s\\:" % _("Temperature Thresholds"),
- "HRULE:critical_c_line#000000:%-15s" % _("Critical"),
- "GPRINT:critical_c_line:%6.2lf °C\\r",
- "HRULE:low_c_line#0000ff:%-15s" % _("Low"),
- "GPRINT:low_c_line:%6.2lf °C\\r",
- ]
-
- @property
- def graph_title(self):
- return _("Temperature (%s)") % self.object.sensor.name
-
- @property
- def graph_vertical_label(self):
- return _("° Celsius")
-
-
-class GraphTemplateSensorsProcessorTemperature(base.GraphTemplate):
- name = "processor-temperature"
-
- core_colours = [
- "#ff000033",
- "#0000ff33",
- "#00ff0033",
- "#0000ff33",
- ]
-
- def get_temperature_sensors(self):
- # Use the coretemp module if available
- sensors = self.plugin.get_detected_sensor_objects("coretemp-*")
-
- # Fall back to the ACPI sensor
- if not sensors:
- sensors = self.plugin.get_detected_sensor_objects("acpitz-virtual-*")
-
- return sensors
-
- def get_objects(self, *args, **kwargs):
- sensors = self.get_temperature_sensors()
-
- return list(sensors)
-
- @property
- def rrd_graph(self):
- rrd_graph = []
-
- counter = 0
- ids = []
-
- for core in self.objects:
- id = "core%s" % counter
- ids.append(id)
- counter += 1
-
- rrd_graph += core.make_rrd_defs(id) + [
- # Convert everything to celsius
- "CDEF:%s_value_c=%s_value,273.15,-" % (id, id),
- "CDEF:%s_critical_c=%s_critical,273.15,-" % (id, id),
- "CDEF:%s_high_c=%s_high,273.15,-" % (id, id),
- ]
-
- # Compute the temperature of the processor
- # by taking the average of all cores
- all_core_values = ("%s_value_c" % id for id in ids)
- rrd_graph += [
- "CDEF:all_value_c=%s,%s,AVG" % (",".join(all_core_values), len(ids)),
- ]
-
- # Get the high threshold of the first core
- # (assuming that all cores have the same threshold)
- for id in ids:
- rrd_graph.append("CDEF:all_high_c=%s_high_c" % id)
- break
-
- rrd_graph += [
- # Change colour when the value gets above high
- "CDEF:all_value_c_high=all_value_c,all_high_c,GT,all_value_c,UNKN,IF",
- "CDEF:all_value_c_normal=all_value_c,all_high_c,GT,UNKN,all_value_c,IF",
-
- "LINE2:all_value_c_high#FF0000",
- "LINE2:all_value_c_normal#000000:%-15s" % _("Temperature"),
-
- "GPRINT:all_value_c_avg: %-15s %%6.2lf °C" % _("Average"),
- "GPRINT:all_value_c_max: %-15s %%6.2lf °C" % _("Maximum"),
- "GPRINT:all_value_c_min: %-15s %%6.2lf °C" % _("Minimum"),
- ]
-
- for id, core, colour in zip(ids, self.objects, self.core_colours):
- rrd_graph += [
- # TODO these lines were supposed to be dashed, but that
- # didn't really work here
- "LINE1:%s_value_c%s:%-10s" % (id, colour, core.sensor.label),
- ]
-
- # Draw the critical line
- for id in ids:
- rrd_graph += [
- "HRULE:%s_critical_c_min#000000:%-15s" % (id, _("Critical")),
- "GPRINT:%s_critical_c_min:%%6.2lf °C\\r" % id,
- ]
- break
-
- return rrd_graph
-
- @property
- def graph_title(self):
- return _("Processor")
-
- @property
- def graph_vertical_label(self):
- return _("Temperature")
-
-
-class SensorBaseObject(base.Object):
- def init(self, sensor):
- self.sensor = sensor
-
- def __repr__(self):
- return "<%s %s (%s)>" % (self.__class__.__name__, self.sensor.name, self.sensor.label)
-
- @property
- def id(self):
- return "-".join((self.sensor.name, self.sensor.label))
-
- @property
- def type(self):
- return self.sensor.type
-
-
-class SensorTemperatureObject(SensorBaseObject):
- rrd_schema = [
- "DS:value:GAUGE:0:U",
- "DS:critical:GAUGE:0:U",
- "DS:low:GAUGE:0:U",
- "DS:high:GAUGE:0:U",
- ]
-
- def collect(self):
- assert self.type == "temperature"
-
- return (
- self.sensor.value,
- self.critical,
- self.low,
- self.high,
- )
-
- @property
- def critical(self):
- try:
- return self.sensor.critical
- except AttributeError:
- return "NaN"
-
- @property
- def low(self):
- try:
- return self.sensor.minimum
- except AttributeError:
- return "NaN"
-
- @property
- def high(self):
- try:
- return self.sensor.high
- except AttributeError:
- return "NaN"
-
-
-class SensorVoltageObject(SensorBaseObject):
- rrd_schema = [
- "DS:value:GAUGE:0:U",
- "DS:minimum:GAUGE:0:U",
- "DS:maximum:GAUGE:0:U",
- ]
-
- def collect(self):
- assert self.type == "voltage"
-
- return (
- self.sensor.value,
- self.minimum,
- self.maximum,
- )
-
- @property
- def minimum(self):
- try:
- return self.sensor.minimum
- except AttributeError:
- return "NaN"
-
- @property
- def maximum(self):
- try:
- return self.sensor.maximum
- except AttributeError:
- return "NaN"
-
-
-class SensorFanObject(SensorBaseObject):
- rrd_schema = [
- "DS:value:GAUGE:0:U",
- "DS:minimum:GAUGE:0:U",
- "DS:maximum:GAUGE:0:U",
- ]
-
- def collect(self):
- assert self.type == "fan"
-
- return (
- self.sensor.value,
- self.minimum,
- self.maximum,
- )
-
- @property
- def minimum(self):
- try:
- return self.sensor.minimum
- except AttributeError:
- return "NaN"
-
- @property
- def maximum(self):
- try:
- return self.sensor.maximum
- except AttributeError:
- return "NaN"
-
-
-class SensorsPlugin(base.Plugin):
- name = "sensors"
- description = "Sensors Plugin"
-
- templates = [
- GraphTemplateSensorsProcessorTemperature,
- GraphTemplateSensorsTemperature,
- ]
-
- def init(self):
- # Initialise the sensors library.
- _collecty.sensors_init()
-
- def __del__(self):
- _collecty.sensors_cleanup()
-
- @property
- def objects(self):
- return self.get_detected_sensor_objects()
-
- def get_detected_sensor_objects(self, what=None):
- for sensor in _collecty.get_detected_sensors(what):
- if sensor.type == "temperature":
- yield SensorTemperatureObject(self, sensor)
-
- elif sensor.type == "voltage":
- yield SensorVoltageObject(self, sensor)
-
- elif sensor.type == "fan":
- yield SensorFanObject(self, sensor)
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2015 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import logging
-import os
-
-log = logging.getLogger("collecty.util")
-
-from .constants import *
-
-def get_network_interfaces():
- """
- Returns all real network interfaces
- """
- for interface in os.listdir("/sys/class/net"):
- # Skip some unwanted interfaces.
- if interface == "lo" or interface.startswith("mon."):
- continue
-
- path = os.path.join("/sys/class/net", interface)
- if not os.path.isdir(path):
- continue
-
- yield interface
-
-def make_interval(interval):
- intervals = {
- None : "-3h",
- "hour" : "-1h",
- "day" : "-25h",
- "month": "-30d",
- "week" : "-360h",
- "year" : "-365d",
- }
-
- try:
- return intervals[interval]
- except KeyError:
- return "end-%s" % interval
-
-def guess_format(filename):
- """
- Returns the best format by filename extension
- """
- parts = filename.split(".")
-
- if parts:
- # The extension is the last part
- extension = parts[-1]
-
- # Image formats are all uppercase
- extension = extension.upper()
-
- if extension in SUPPORTED_IMAGE_FORMATS:
- return extension
-
- # Otherwise fall back to the default format
- return DEFAULT_IMAGE_FORMAT
-
-class ProcNetSnmpParser(object):
- """
- This class parses /proc/net/snmp{,6} and allows
- easy access to the values.
- """
- def __init__(self, intf=None):
- self.intf = intf
- self._data = {}
-
- if not self.intf:
- self._data.update(self._parse())
-
- self._data.update(self._parse6())
-
- def _parse(self):
- res = {}
-
- with open("/proc/net/snmp") as f:
- keys = {}
-
- for line in f.readlines():
- line = line.strip()
-
- # Stop after an empty line
- if not line:
- break
-
- type, values = line.split(": ", 1)
-
- # Check if the keys are already known
- if type in keys:
- values = (int(v) for v in values.split())
- res[type] = dict(zip(keys[type], values))
-
- # Otherwise remember the keys
- else:
- keys[type] = values.split()
-
- return res
-
- def _parse6(self):
- res = {}
-
- fn = "/proc/net/snmp6"
- if self.intf:
- fn = os.path.join("/proc/net/dev_snmp6", self.intf)
-
- with open(fn) as f:
- for line in f.readlines():
- key, val = line.split()
-
- try:
- type, key = key.split("6", 1)
- except ValueError:
- continue
-
- type += "6"
- val = int(val)
-
- try:
- res[type][key] = val
- except KeyError:
- res[type] = { key : val }
-
- return res
-
- def get(self, proto, key):
- """
- Retrieves a value from the internally
-		parsed dictionary read from /proc/net/snmp.
- """
- try:
- return self._data[proto][key]
- except KeyError:
- pass
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2012 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import argparse
-import collecty
-import os
-import sys
-
-from collecty.i18n import _
-
-class CLI(object):
- def parse_cli(self):
- parser = argparse.ArgumentParser(
- description=_("Collecty Client")
- )
- subparsers = parser.add_subparsers(help="sub-command help")
-
- # generate-graph
- parser_generate_graph = subparsers.add_parser(
- "generate-graph", help=_("Generate a graph image"),
- )
- parser_generate_graph.add_argument(
- "--filename", help=_("filename"), required=True,
- )
- parser_generate_graph.add_argument(
- "--format", help=_("image format"),
- )
- parser_generate_graph.add_argument(
- "--interval", help=_("interval"),
- )
- parser_generate_graph.add_argument(
- "--object", help=_("Object identifier"), default="default",
- )
- parser_generate_graph.add_argument(
- "--template", help=_("The graph template identifier"), required=True,
- )
- parser_generate_graph.add_argument(
- "--timezone", help=_("Generate the graph with timestamps plotted for the given timezone"),
- default=os.environ.get("TZ", "UTC"),
- )
- parser_generate_graph.add_argument(
- "--locale", help=_("Generate the graph with this locale"),
- default=os.environ.get("LANG", "en_GB.utf8"),
- )
- # Dimensions
- parser_generate_graph.add_argument(
- "--height", type=int, default=0, help=_("Height of the generated image"),
- )
- parser_generate_graph.add_argument(
- "--width", type=int, default=0, help=_("Width of the generated image"),
- )
- parser_generate_graph.set_defaults(func=self._generate_graph)
-
- # last-update
- parser_last_update = subparsers.add_parser(
- "last-update", help=_("Fetch the last dataset in the database"),
- )
- parser_last_update.add_argument(
- "--template", help=_("The graph template identifier"), required=True,
- )
- parser_last_update.add_argument(
- "--object", help=_("Object identifier"), default="default",
- )
- parser_last_update.set_defaults(func=self._last_update)
-
- # list-templates
- parser_list_templates = subparsers.add_parser(
- "list-templates", help=_("Lists all graph templates"),
- )
- parser_list_templates.set_defaults(func=self._list_templates)
-
- # backup
- backup = subparsers.add_parser(
- "backup", help=_("Backup all RRD data"),
- )
- backup.add_argument(
- "filename", nargs="?", help=_("Filename"),
- )
- backup.set_defaults(func=self._backup)
-
- # version
- parser_version = subparsers.add_parser(
- "version", help=_("Show version"),
- )
- parser_version.set_defaults(func=self._version)
-
- args = parser.parse_args()
-
- # Print usage if no action was given
-		if "func" not in args:
- parser.print_usage()
- sys.exit(2)
-
- return args
-
- def run(self):
- # Parse command line arguments
- args = self.parse_cli()
-
- # Initialise client
- self.client = collecty.Collecty()
-
- # Call function
- ret = args.func(args)
-
- # Return with exit code
- if ret:
- sys.exit(ret)
-
- # Otherwise just exit
- sys.exit(0)
-
- def _backup(self, args):
- print(_("Backing up..."))
-
- self.client.backup(args.filename)
-
- def _generate_graph(self, args):
- kwargs = {
- "format" : args.format or collecty.util.guess_format(args.filename),
- "object_id" : args.object,
- "locale" : args.locale,
- "timezone" : args.timezone,
- }
-
- if args.height or args.width:
- kwargs.update({
- "height" : args.height or 0,
- "width" : args.width or 0,
- })
-
- if args.interval:
- kwargs["interval"] = args.interval
-
- # Generate the graph image
- graph = self.client.generate_graph(args.template, **kwargs)
-
- # Add some useful information
- info = self.client.graph_info(args.template, **kwargs)
- if info:
- graph.update(info)
-
- # Write file to disk
- with open(args.filename, "wb") as f:
- f.write(graph["image"])
-
- print(_("Title : %(title)s (%(template)s - %(object_id)s)") % graph)
- print(_("Image size : %(image_width)sx%(image_height)spx") % graph)
-
- def _last_update(self, args):
- last_update = self.client.last_update(args.template, object_id=args.object)
-
- print(_("Last update: %s") % last_update.get("timestamp"))
-
- dataset = last_update.get("dataset")
- for k, v in dataset.items():
- print("%16s = %s" % (k, v))
-
- def _list_templates(self, args):
- templates = self.client.list_templates()
-
- for t in sorted(templates):
- print(t)
-
- def _version(self, args):
- version = self.client.version()
-
- print(version)
-
-
-def main():
- # Run the command line interface
- c = CLI()
- c.run()
-
-main()
+++ /dev/null
-#!/usr/bin/python3
-###############################################################################
-# #
-# collecty - A system statistics collection daemon for IPFire #
-# Copyright (C) 2020 IPFire development team #
-# #
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU General Public License as published by #
-# the Free Software Foundation, either version 3 of the License, or #
-# (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU General Public License for more details. #
-# #
-# You should have received a copy of the GNU General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-# #
-###############################################################################
-
-import argparse
-import collecty.daemon
-
-from collecty.i18n import _
-
-def main():
- parser = argparse.ArgumentParser(
- description=_("Collecty Daemon"),
- )
-
- # Global configuration flags
- parser.add_argument("--debug", action="store_true",
- help=_("Enable debug output"),
- )
-
- # Parse CLI arguments
- args = parser.parse_args()
-
- # Initialise the daemon
- daemon = collecty.daemon.Daemon(debug=args.debug)
-
- # Run it
- try:
- daemon.run()
- except KeyboardInterrupt:
- pass
-
-# Call main function
-main()