]> git.ipfire.org Git - collecty.git/blame - src/collecty/daemon.py
daemon: Add plugins to scheduler when the daemon launches
[collecty.git] / src / collecty / daemon.py
CommitLineData
f37913e8 1#!/usr/bin/python3
73db5226
MT
2###############################################################################
3# #
4# collecty - A system statistics collection daemon for IPFire #
5# Copyright (C) 2012 IPFire development team #
6# #
7# This program is free software: you can redistribute it and/or modify #
8# it under the terms of the GNU General Public License as published by #
9# the Free Software Foundation, either version 3 of the License, or #
10# (at your option) any later version. #
11# #
12# This program is distributed in the hope that it will be useful, #
13# but WITHOUT ANY WARRANTY; without even the implied warranty of #
14# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
15# GNU General Public License for more details. #
16# #
17# You should have received a copy of the GNU General Public License #
18# along with this program. If not, see <http://www.gnu.org/licenses/>. #
19# #
20###############################################################################
21
16b84672 22import logging
682b512d 23import os
f37913e8 24import queue
72364063 25import rrdtool
6e603f14 26import sched
73db5226 27import signal
72364063 28import time
73db5226 29
f37913e8
MT
30from . import bus
31from . import plugins
73db5226 32
f37913e8
MT
33from .constants import *
34from .i18n import _
73db5226 35
73db5226
MT
# Module-level logger shared by the whole collecty daemon
log = logging.getLogger("collecty")
class Collecty(object):
	"""
		The main daemon class.

		Owns the plugin instances, the scheduler that fires periodic
		collection and commit events, the write queue that buffers
		collected results, and the D-Bus service that answers graph
		requests.
	"""

	# The default interval, when all data is written to disk.
	COMMIT_INTERVAL = 300

	def __init__(self, debug=False):
		self.debug = debug

		# Reset timezone to UTC
		# rrdtool is reading that from the environment
		os.environ["TZ"] = "UTC"

		# Enable debug logging when running in debug mode
		if self.debug:
			log.setLevel(logging.DEBUG)

		self.plugins = []

		# Create the scheduler and arm the first commit event
		self.scheduler = sched.scheduler()
		self._schedule_commit()

		# The write queue holds all collected pieces of data which
		# will be written to disk later.
		self.write_queue = WriteQueue(self)

		# Create a thread that connects to dbus and processes requests we
		# get from there.
		self.bus = bus.Bus(self)

		# Add all plugins
		for plugin in plugins.get():
			self.add_plugin(plugin)

		log.debug(_("Collecty successfully initialized with %s plugins") \
			% len(self.plugins))

	def add_plugin(self, plugin_class):
		"""
			Instantiates plugin_class and registers the instance.

			If initialisation fails, the error is logged and the plugin
			is skipped so one broken plugin cannot take down the daemon.
		"""
		try:
			plugin = plugin_class(self)

		# Catch Exception (not a bare except) so that e.g.
		# KeyboardInterrupt still propagates
		except Exception:
			log.critical(_("Plugin %s could not be initialised") % plugin_class, exc_info=True)
			return

		self.plugins.append(plugin)

	@property
	def templates(self):
		"""
			Iterates over the graph templates of all registered plugins
		"""
		for plugin in self.plugins:
			for template in plugin.templates:
				yield template

	def _schedule_plugin(self, plugin):
		"""
			Schedules a collection event for the given plugin
		"""
		log.debug("Scheduling plugin %s for executing in %ss" % (plugin, plugin.interval))

		self.scheduler.enter(
			plugin.interval, plugin.priority, self._collect, (plugin,),
		)

	def _schedule_commit(self):
		"""
			Schedules the next commit of the write queue
		"""
		log.debug("Scheduling commit in %ss" % self.COMMIT_INTERVAL)

		# Priority -1 makes the commit win over any collection event
		# that is due at the same time
		self.scheduler.enter(
			self.COMMIT_INTERVAL, -1, self._commit,
		)

	def _collect(self, plugin, **kwargs):
		"""
			Called for each plugin when it is time to collect some data
		"""
		log.debug("Collection started for %s" % plugin)

		# Re-arm the timer first, so a crashing collection run cannot
		# stop all future runs of this plugin
		self._schedule_plugin(plugin)

		# Run collection
		try:
			plugin.collect()

		except Exception:
			log.error("Unhandled exception in %s" % plugin, exc_info=True)

	def _commit(self):
		"""
			Called when all data should be committed to disk
		"""
		# Schedule the next commit
		self._schedule_commit()

		# Write everything in the queue
		self.write_queue.commit()

	def run(self):
		"""
			Runs the main loop of the daemon until it is interrupted
		"""
		# Register signal handlers.
		self.register_signal_handler()

		# Start the bus
		self.bus.start()

		# Add all plugins to the scheduler
		for plugin in self.plugins:
			self._schedule_plugin(plugin)

		# Run the scheduler
		try:
			self.scheduler.run()
		except KeyboardInterrupt:
			pass

		# Stop the bus thread
		self.bus.shutdown()

		# Write all collected data to disk before ending the main thread
		self.write_queue.commit()

		log.debug(_("Main thread exited"))

	def shutdown(self):
		# NOTE(review): this only logs; the scheduler keeps running until
		# interrupted — confirm whether pending events should be cancelled here
		log.info(_("Received shutdown signal"))

	def register_signal_handler(self):
		"""
			Installs self.signal_handler for all signals we care about
		"""
		for s in (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1):
			log.debug(_("Registering signal %d") % s)

			signal.signal(s, self.signal_handler)

	def signal_handler(self, sig, *args, **kwargs):
		log.info(_("Caught signal %d") % sig)

		if sig in (signal.SIGTERM, signal.SIGINT):
			# Shutdown this application.
			self.shutdown()

		elif sig == signal.SIGUSR1:
			# Commit all data.
			self.write_queue.commit()

	def get_plugin_from_template(self, template_name):
		"""
			Returns the plugin that provides the template with the given
			name, or None if no plugin matches
		"""
		for plugin in self.plugins:
			# any() short-circuits on the first matching template
			if any(t.name == template_name for t in plugin.templates):
				return plugin

	def generate_graph(self, template_name, *args, **kwargs):
		"""
			Renders the graph for the given template
		"""
		plugin = self.get_plugin_from_template(template_name)
		if not plugin:
			raise RuntimeError("Could not find template %s" % template_name)

		return plugin.generate_graph(template_name, *args, **kwargs)

	def graph_info(self, template_name, *args, **kwargs):
		"""
			Returns meta information about the graph of the given template
		"""
		plugin = self.get_plugin_from_template(template_name)
		if not plugin:
			raise RuntimeError("Could not find template %s" % template_name)

		return plugin.graph_info(template_name, *args, **kwargs)

	def last_update(self, template_name, *args, **kwargs):
		"""
			Returns the time of the last update of the database behind
			the given template
		"""
		plugin = self.get_plugin_from_template(template_name)
		if not plugin:
			raise RuntimeError("Could not find template %s" % template_name)

		return plugin.last_update(*args, **kwargs)
72364063 208
6e603f14
MT
class WriteQueue(object):
	"""
		Buffers collected results and flushes them to the RRD
		databases in batches, one rrdtool update call per file.
	"""

	def __init__(self, collecty):
		self.collecty = collecty

		self.log = logging.getLogger("collecty.queue")

		# A priority queue, so that the oldest results come out first
		self._queue = queue.PriorityQueue()

		self.log.debug(_("Initialised write queue"))

	def add(self, object, time, data):
		"""
			Queues one collected value for the RRD file of the given object
		"""
		result = QueueObject(object.file, time, data)
		self._queue.put(result)

	def commit(self):
		"""
			Flushes the read data to disk.
		"""
		# There is nothing to do if the queue is empty
		if self._queue.empty():
			self.log.debug(_("No data to commit"))
			return

		time_start = time.time()

		self.log.debug(_("Submitting data to the databases..."))

		# Get all objects from the queue and group them by the RRD file
		# to commit them all at once
		batches = {}
		while not self._queue.empty():
			result = self._queue.get()
			batches.setdefault(result.file, []).append(result)

		# Write the collected data to disk
		for filename, results in batches.items():
			self._commit_file(filename, results)

		duration = time.time() - time_start
		self.log.debug(_("Emptied write queue in %.2fs") % duration)

	def _commit_file(self, filename, results):
		"""
			Writes all given results for one RRD database in a single
			rrdtool update call
		"""
		self.log.debug(_("Committing %(counter)s entries to %(filename)s") \
			% { "counter" : len(results), "filename" : filename })

		for result in results:
			self.log.debug(" %s: %s" % (result.time, result.data))

		try:
			rrdtool.update(filename, *["%s" % r for r in results])

		# Catch operational errors like unreadable/unwritable RRD databases
		# or those where the format has changed. The collected data will be lost.
		except rrdtool.OperationalError as e:
			self.log.critical(_("Could not update RRD database %s: %s") \
				% (filename, e))

	def commit_file(self, filename):
		"""
			Commits all data that is in the write queue for the given
			RRD database.
		"""
		results, others = [], []

		# We will have to walk through the entire queue since we cannot
		# read any items selectively. Everything that belongs to our
		# transaction is kept. Everything else will be put back into the
		# queue.
		while not self._queue.empty():
			result = self._queue.get()

			if result.file == filename:
				results.append(result)
			else:
				others.append(result)

		# Put back all items that did not match
		for result in others:
			self._queue.put(result)

		# Write everything else to disk
		if results:
			self._commit_file(filename, results)
296
72364063
MT
297
class QueueObject(object):
	"""
		A single measurement waiting in the write queue.

		Orders by timestamp so that a priority queue releases the
		oldest results first.
	"""

	def __init__(self, file, time, data):
		self.file = file
		self.time = time
		self.data = data

	def __str__(self):
		# Serialise in the "<epoch>:<value>" form that rrdtool update expects
		timestamp = self.time.strftime("%s")
		return "%s:%s" % (timestamp, self.data)

	def __lt__(self, other):
		# Only the timestamp decides the ordering
		return self.time < other.time