]> git.ipfire.org Git - collecty.git/blame - src/collecty/daemon.py
daemon: Drop our own scheduler and use sched
[collecty.git] / src / collecty / daemon.py
CommitLineData
f37913e8 1#!/usr/bin/python3
73db5226
MT
2###############################################################################
3# #
4# collecty - A system statistics collection daemon for IPFire #
5# Copyright (C) 2012 IPFire development team #
6# #
7# This program is free software: you can redistribute it and/or modify #
8# it under the terms of the GNU General Public License as published by #
9# the Free Software Foundation, either version 3 of the License, or #
10# (at your option) any later version. #
11# #
12# This program is distributed in the hope that it will be useful, #
13# but WITHOUT ANY WARRANTY; without even the implied warranty of #
14# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
15# GNU General Public License for more details. #
16# #
17# You should have received a copy of the GNU General Public License #
18# along with this program. If not, see <http://www.gnu.org/licenses/>. #
19# #
20###############################################################################
21
16b84672 22import logging
682b512d 23import os
f37913e8 24import queue
72364063 25import rrdtool
6e603f14 26import sched
73db5226 27import signal
72364063 28import time
73db5226 29
f37913e8 30from . import bus
429ba506 31from . import locales
f37913e8 32from . import plugins
73db5226 33
f37913e8
MT
34from .constants import *
35from .i18n import _
73db5226 36
73db5226
MT
# Module-wide logger; child loggers (e.g. "collecty.queue") inherit its level
log = logging.getLogger("collecty")
class Collecty(object):
	"""
	The collecty daemon.

	Owns the plugin instances, a sched-based scheduler that triggers
	periodic collection runs and write-queue commits, the write queue
	that batches collected data before it is written to the RRD
	databases, and the D-Bus interface that serves graph requests.
	"""

	# The default interval, when all data is written to disk.
	COMMIT_INTERVAL = 300

	def __init__(self, debug=False):
		self.debug = debug

		# Reset timezone to UTC
		# rrdtool is reading that from the environment
		os.environ["TZ"] = "UTC"

		# Enable debug logging when running in debug mode
		if self.debug:
			log.setLevel(logging.DEBUG)

		self.plugins = []

		# Create the scheduler and queue the first commit event
		self.scheduler = sched.scheduler()
		self._schedule_commit()

		# The write queue holds all collected pieces of data which
		# will be written to disk later.
		self.write_queue = WriteQueue(self)

		# Create a thread that connects to dbus and processes requests we
		# get from there.
		self.bus = bus.Bus(self)

		# Add all plugins
		for plugin in plugins.get():
			self.add_plugin(plugin)

		log.debug(_("Collecty successfully initialized with %s plugins") \
			% len(self.plugins))

		log.debug(_("Supported locales: %s") % ", ".join(locales.get_supported_locales()))

	def add_plugin(self, plugin_class):
		"""
		Instantiates plugin_class and registers it for collection.

		If the plugin cannot be initialised, the error is logged and
		the daemon carries on with the remaining plugins.
		"""
		# Try initialising a new plugin. If that fails, we will log the
		# error and try to go on.
		try:
			plugin = plugin_class(self)

		# Catch initialisation errors broadly, but not with a bare
		# except so that SystemExit/KeyboardInterrupt still propagate.
		except Exception:
			log.critical(_("Plugin %s could not be initialised") % plugin_class, exc_info=True)
			return

		self.plugins.append(plugin)

		# Schedule the plugin to collect
		self._schedule_plugin(plugin)

	@property
	def templates(self):
		"""
		Yields all graph templates of all registered plugins
		"""
		for plugin in self.plugins:
			for template in plugin.templates:
				yield template

	def _schedule_plugin(self, plugin):
		"""
		Schedules a collection event for the given plugin
		"""
		log.debug("Scheduling plugin %s for executing in %ss" % (plugin, plugin.interval))

		self.scheduler.enter(
			plugin.interval, plugin.priority, self._collect, (plugin,),
		)

	def _schedule_commit(self):
		"""
		Schedules the next commit of the write queue.

		Priority -1 lets the commit run before any collection event
		that happens to be due at the same time.
		"""
		log.debug("Scheduling commit in %ss" % self.COMMIT_INTERVAL)

		self.scheduler.enter(
			self.COMMIT_INTERVAL, -1, self._commit,
		)

	def _collect(self, plugin, **kwargs):
		"""
		Called for each plugin when it is time to collect some data
		"""
		log.debug("Collection started for %s" % plugin)

		# Add the next collection event to the scheduler first, so that
		# a crashing plugin cannot unschedule itself permanently
		self._schedule_plugin(plugin)

		# Run collection; never let a plugin failure kill the scheduler
		try:
			plugin.collect()

		except Exception:
			log.error("Unhandled exception in %s" % plugin, exc_info=True)

	def _commit(self):
		"""
		Called when all data should be committed to disk
		"""
		# Schedule the next commit
		self._schedule_commit()

		# Write everything in the queue
		self.write_queue.commit()

	def run(self):
		"""
		Main loop of the daemon. Blocks until the scheduler runs out
		of events or the process is interrupted.
		"""
		# Register signal handlers.
		self.register_signal_handler()

		# Cannot do anything if no plugins have been initialised
		if not self.plugins:
			log.critical(_("No plugins have been initialised"))
			return

		# Start the bus
		self.bus.start()

		# Run the scheduler
		try:
			self.scheduler.run()
		except KeyboardInterrupt:
			pass

		# Stop the bus thread
		self.bus.shutdown()

		# Write all collected data to disk before ending the main thread
		self.write_queue.commit()

		log.debug(_("Main thread exited"))

	def shutdown(self):
		log.info(_("Received shutdown signal"))

	def register_signal_handler(self):
		"""
		Installs signal_handler for TERM, INT and USR1
		"""
		for s in (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1):
			log.debug(_("Registering signal %d") % s)

			signal.signal(s, self.signal_handler)

	def signal_handler(self, sig, *args, **kwargs):
		"""
		Dispatches received signals: TERM/INT shut the daemon down,
		USR1 forces an immediate commit of the write queue.
		"""
		log.info(_("Caught signal %d") % sig)

		if sig in (signal.SIGTERM, signal.SIGINT):
			# Shutdown this application.
			self.shutdown()

		elif sig == signal.SIGUSR1:
			# Commit all data.
			self.write_queue.commit()

	def get_plugin_from_template(self, template_name):
		"""
		Returns the plugin that provides the template with the given
		name, or None if no registered plugin has it.
		"""
		for plugin in self.plugins:
			if any(t.name == template_name for t in plugin.templates):
				return plugin

	def generate_graph(self, template_name, *args, **kwargs):
		"""
		Renders the graph for template_name via the owning plugin.

		Raises RuntimeError if no plugin provides the template.
		"""
		plugin = self.get_plugin_from_template(template_name)
		if not plugin:
			raise RuntimeError("Could not find template %s" % template_name)

		return plugin.generate_graph(template_name, *args, **kwargs)

	def graph_info(self, template_name, *args, **kwargs):
		"""
		Returns meta information about the graph for template_name.

		Raises RuntimeError if no plugin provides the template.
		"""
		plugin = self.get_plugin_from_template(template_name)
		if not plugin:
			raise RuntimeError("Could not find template %s" % template_name)

		return plugin.graph_info(template_name, *args, **kwargs)

	def last_update(self, template_name, *args, **kwargs):
		"""
		Returns the time of the last update of the database behind
		template_name.

		Raises RuntimeError if no plugin provides the template.
		"""
		plugin = self.get_plugin_from_template(template_name)
		if not plugin:
			raise RuntimeError("Could not find template %s" % template_name)

		return plugin.last_update(*args, **kwargs)
class WriteQueue(object):
	"""
	Buffers collected results and writes them to the RRD databases
	in batches, one rrdtool update call per file.
	"""

	def __init__(self, collecty):
		self.collecty = collecty

		self.log = logging.getLogger("collecty.queue")

		# Priority queue so that items come out in chronological
		# order (QueueObject orders by its time attribute)
		self._queue = queue.PriorityQueue()

		self.log.debug(_("Initialised write queue"))

	def add(self, object, time, data):
		"""
		Queues a single measurement for the RRD file of the given
		object, collected at the given time.

		NOTE: the parameter names shadow the object/time builtins;
		they are kept for backwards compatibility with keyword callers.
		"""
		result = QueueObject(object.file, time, data)
		self._queue.put(result)

	def commit(self):
		"""
		Flushes the read data to disk.
		"""
		# There is nothing to do if the queue is empty
		if self._queue.empty():
			self.log.debug(_("No data to commit"))
			return

		time_start = time.time()

		self.log.debug(_("Submitting data to the databases..."))

		# Get all objects from the queue and group them by the RRD file
		# to commit them all at once
		batches = {}
		while not self._queue.empty():
			result = self._queue.get()
			batches.setdefault(result.file, []).append(result)

		# Write the collected data to disk
		for filename, results in batches.items():
			self._commit_file(filename, results)

		duration = time.time() - time_start
		self.log.debug(_("Emptied write queue in %.2fs") % duration)

	def _commit_file(self, filename, results):
		"""
		Writes all given results to one RRD database with a single
		rrdtool update call.
		"""
		self.log.debug(_("Committing %(counter)s entries to %(filename)s") \
			% { "counter" : len(results), "filename" : filename })

		for result in results:
			self.log.debug(" %s: %s" % (result.time, result.data))

		try:
			rrdtool.update(filename, *["%s" % r for r in results])

		# Catch operational errors like unreadable/unwritable RRD databases
		# or those where the format has changed. The collected data will be lost.
		except rrdtool.OperationalError as e:
			self.log.critical(_("Could not update RRD database %s: %s") \
				% (filename, e))

	def commit_file(self, filename):
		"""
		Commits all data that is in the write queue for the given
		RRD database.
		"""
		results, others = [], []

		# We will have to walk through the entire queue since we cannot
		# ready any items selectively. Everything that belongs to our
		# transaction is kept. Everything else will be put back into the
		# queue.
		while not self._queue.empty():
			result = self._queue.get()

			if result.file == filename:
				results.append(result)
			else:
				others.append(result)

		# Put back all items that did not match
		for result in others:
			self._queue.put(result)

		# Write all matching items to disk
		if results:
			self._commit_file(filename, results)
MT
304
class QueueObject(object):
	"""
	A single measurement waiting in the write queue.

	Stringifies to the "<epoch seconds>:<data>" format that rrdtool
	update expects, and orders by collection time so that a priority
	queue hands out the oldest entry first.
	"""

	def __init__(self, file, time, data):
		# Path of the RRD database this entry belongs to
		self.file = file
		# Collection time (a datetime object — presumably UTC since the
		# daemon sets TZ=UTC; confirm against the collecting plugin)
		self.time = time
		# rrdtool-formatted value string
		self.data = data

	def __str__(self):
		# strftime("%s") is an undocumented glibc extension (it is not
		# a portable format code and ignores tzinfo on aware datetimes);
		# compute the epoch timestamp explicitly instead.
		return "%d:%s" % (self.time.timestamp(), self.data)

	def __lt__(self, other):
		# Order by collection time (oldest first) for the priority queue
		return self.time < other.time