]>
git.ipfire.org Git - oddments/collecty.git/blob - src/collecty/daemon.py
644ca3737a12ab2283e3a438d11c1ee15bc4acef
2 ###############################################################################
4 # collecty - A system statistics collection daemon for IPFire #
5 # Copyright (C) 2012 IPFire development team #
7 # This program is free software: you can redistribute it and/or modify #
8 # it under the terms of the GNU General Public License as published by #
9 # the Free Software Foundation, either version 3 of the License, or #
10 # (at your option) any later version. #
12 # This program is distributed in the hope that it will be useful, #
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of #
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
15 # GNU General Public License for more details. #
17 # You should have received a copy of the GNU General Public License #
18 # along with this program. If not, see <http://www.gnu.org/licenses/>. #
20 ###############################################################################
24 import multiprocessing
36 from .constants
import *
39 log
= logging
.getLogger("collecty")
41 class Collecty(object):
42 # The default interval, when all data is written to disk.
47 def __init__(self
, debug
=False):
50 # Reset timezone to UTC
51 # rrdtool is reading that from the environment
52 os
.environ
["TZ"] = "UTC"
54 # Enable debug logging when running in debug mode
56 log
.setLevel(logging
.DEBUG
)
60 # Indicates whether this process should be running or not.
63 # The write queue holds all collected pieces of data which
64 # will be written to disk later.
65 self
.write_queue
= WriteQueue(self
, self
.SUBMIT_INTERVAL
)
67 # Create worker threads
68 self
.worker_threads
= self
.create_worker_threads()
70 self
._timer
_queue
= queue
.PriorityQueue()
71 self
._worker
_queue
= queue
.Queue()
73 # Create a thread that connects to dbus and processes requests we
75 self
.bus
= bus
.Bus(self
)
78 for plugin
in plugins
.get():
79 self
.add_plugin(plugin
)
81 log
.debug(_("Collecty successfully initialized with %s plugins") \
84 log
.debug(_("Supported locales: %s") % ", ".join(locales
.get_supported_locales()))
86 def add_plugin(self
, plugin_class
):
87 # Try initialising a new plugin. If that fails, we will log the
88 # error and try to go on.
90 plugin
= plugin_class(self
)
92 log
.critical(_("Plugin %s could not be initialised") % plugin_class
, exc_info
=True)
95 self
.plugins
.append(plugin
)
99 for plugin
in self
.plugins
:
100 for template
in plugin
.templates
:
104 # Register signal handlers.
105 self
.register_signal_handler()
107 # Cannot do anything if no plugins have been initialised
109 log
.critical(_("No plugins have been initialised"))
115 # Initialise the timer queue
116 self
.initialise_timer_queue()
118 # Start worker threads
119 for w
in self
.worker_threads
:
122 # Run the write queue thread
123 self
.write_queue
.start()
125 # Regularly submit all data to disk.
128 # Try processing one event from the queue. If that succeeded
129 # we will retry immediately.
130 if self
.process_timer_queue():
133 # Otherwise we will sleep for a bit
134 time
.sleep(self
.HEARTBEAT
)
136 # Log warnings if the worker queue is filling up
137 queue_size
= self
._worker
_queue
.qsize()
139 log
.warning(_("Worker queue is filling up with %s events") % queue_size
)
141 except KeyboardInterrupt:
145 # Wait until all worker threads are finished
146 for w
in self
.worker_threads
:
149 # Stop the bus thread
152 # Write all collected data to disk before ending the main thread
153 self
.write_queue
.shutdown()
155 log
.debug(_("Main thread exited"))
161 log
.info(_("Received shutdown signal"))
164 # Propagating shutdown to all threads.
165 for w
in self
.worker_threads
:
def register_signal_handler(self):
	"""
		Install self.signal_handler for every signal this daemon
		reacts to (SIGTERM, SIGINT and SIGUSR1).
	"""
	for signum in (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1):
		log.debug(_("Registering signal %d") % signum)
		signal.signal(signum, self.signal_handler)
174 def signal_handler(self
, sig
, *args
, **kwargs
):
175 log
.info(_("Caught signal %d") % sig
)
177 if sig
in (signal
.SIGTERM
, signal
.SIGINT
):
178 # Shutdown this application.
181 elif sig
== signal
.SIGUSR1
:
183 self
.write_queue
.commit()
185 def get_plugin_from_template(self
, template_name
):
186 for plugin
in self
.plugins
:
187 if not template_name
in [t
.name
for t
in plugin
.templates
]:
192 def generate_graph(self
, template_name
, *args
, **kwargs
):
193 plugin
= self
.get_plugin_from_template(template_name
)
195 raise RuntimeError("Could not find template %s" % template_name
)
197 return plugin
.generate_graph(template_name
, *args
, **kwargs
)
199 def graph_info(self
, template_name
, *args
, **kwargs
):
200 plugin
= self
.get_plugin_from_template(template_name
)
202 raise RuntimeError("Could not find template %s" % template_name
)
204 return plugin
.graph_info(template_name
, *args
, **kwargs
)
206 def last_update(self
, template_name
, *args
, **kwargs
):
207 plugin
= self
.get_plugin_from_template(template_name
)
209 raise RuntimeError("Could not find template %s" % template_name
)
211 return plugin
.last_update(*args
, **kwargs
)
213 def create_worker_threads(self
, num
=None):
215 Creates a number of worker threads
217 # If no number of threads is given, we will create as many as we have
218 # active processor cores but never less than two.
220 num
= max(multiprocessing
.cpu_count(), 2)
224 for id in range(num
):
225 worker_thread
= WorkerThread(self
, id)
226 worker_threads
.append(worker_thread
)
228 return worker_threads
def initialise_timer_queue(self):
	"""
		Seed the timer queue with one timer per registered plugin.
	"""
	timers = [PluginTimer(plugin) for plugin in self.plugins]
	for timer in timers:
		self._timer_queue.put(timer)
236 def process_timer_queue(self
):
237 # Take the item from the timer queue that is to be due first
238 timer
= self
._timer
_queue
.get()
241 # If the timer event is to be executed, we will put the plugin
242 # into the worker queue and reset the timer
244 self
._worker
_queue
.put(timer
.plugin
)
245 timer
.reset_deadline()
249 # Put the timer back into the timer queue.
250 self
._timer
_queue
.put(timer
)
253 class WorkerThread(threading
.Thread
):
256 def __init__(self
, collecty
, id):
257 threading
.Thread
.__init
__(self
)
260 self
.log
= logging
.getLogger("collecty.worker")
262 self
.collecty
= collecty
265 self
.log
.debug(_("Worker thread %s has been initialised") % self
.id)
270 The queue this thread is getting events from
272 return self
.collecty
._worker
_queue
275 self
.log
.debug(_("Worker thread %s has been started") % self
.id)
280 plugin
= self
.queue
.get(block
=True, timeout
=self
.HEARTBEAT
)
282 # If the queue has been empty we just retry
286 # Execute the collect operation for this plugin
289 self
.log
.debug(_("Worker thread %s has been terminated") % self
.id)
295 class WriteQueue(threading
.Thread
):
296 def __init__(self
, collecty
, submit_interval
):
297 threading
.Thread
.__init
__(self
)
300 self
.collecty
= collecty
302 self
.log
= logging
.getLogger("collecty.queue")
304 self
.timer
= plugins
.Timer(submit_interval
)
305 self
._queue
= queue
.PriorityQueue()
307 self
.log
.debug(_("Initialised write queue"))
310 self
.log
.debug(_("Write queue process started"))
317 # Wait until the timer has successfully elapsed.
318 if self
.timer
.wait():
322 self
.log
.debug(_("Write queue process stopped"))
328 # Wait until all data has been written.
def add(self, object, time, data):
	"""
		Enqueue one collected sample, keyed by the given object's
		RRD file, for later writing to disk.
	"""
	# NOTE(review): the `object` parameter shadows the builtin; kept
	# for interface compatibility with existing callers.
	self._queue.put(QueueObject(object.file, time, data))
337 Flushes the read data to disk.
339 # There is nothing to do if the queue is empty
340 if self
._queue
.empty():
341 self
.log
.debug(_("No data to commit"))
344 time_start
= time
.time()
346 self
.log
.debug(_("Submitting data to the databases..."))
348 # Get all objects from the queue and group them by the RRD file
349 # to commit them all at once
351 while not self
._queue
.empty():
352 result
= self
._queue
.get()
355 results
[result
.file].append(result
)
357 results
[result
.file] = [result
]
359 # Write the collected data to disk
360 for filename
, results
in list(results
.items()):
361 self
._commit
_file
(filename
, results
)
363 duration
= time
.time() - time_start
364 self
.log
.debug(_("Emptied write queue in %.2fs") % duration
)
366 def _commit_file(self
, filename
, results
):
367 self
.log
.debug(_("Committing %(counter)s entries to %(filename)s") \
368 % { "counter" : len(results
), "filename" : filename
})
370 for result
in results
:
371 self
.log
.debug(" %s: %s" % (result
.time
, result
.data
))
374 rrdtool
.update(filename
, *["%s" % r
for r
in results
])
376 # Catch operational errors like unreadable/unwritable RRD databases
377 # or those where the format has changed. The collected data will be lost.
378 except rrdtool
.OperationalError
as e
:
379 self
.log
.critical(_("Could not update RRD database %s: %s") \
382 def commit_file(self
, filename
):
384 Commits all data that is in the write queue for the given
387 results
, others
= [], []
389 # We will have to walk through the entire queue since we cannot
390 # ready any items selectively. Everything that belongs to our
391 # transaction is kept. Everything else will be put back into the
393 while not self
._queue
.empty():
394 result
= self
._queue
.get()
396 if result
.file == filename
:
397 results
.append(result
)
399 others
.append(result
)
401 # Put back all items that did not match
402 for result
in others
:
403 self
._queue
.put(result
)
405 # Write everything else to disk
407 self
._commit
_file
(filename
, results
)
410 class QueueObject(object):
411 def __init__(self
, file, time
, data
):
417 return "%s:%s" % (self
.time
.strftime("%s"), self
.data
)
def __lt__(self, other):
	# Older samples sort first, so the priority queue hands out
	# queued data in chronological order.
	if self.time < other.time:
		return True
	return False
423 class PluginTimer(object):
424 def __init__(self
, plugin
):
427 self
.deadline
= datetime
.datetime
.utcnow()
430 return "<%s %s>" % (self
.__class
__.__name
__, self
.deadline
)
def __lt__(self, other):
	# The timer with the earliest deadline is the smallest, so it
	# is returned first from the priority queue.
	if self.deadline < other.deadline:
		return True
	return False
def reset_deadline(self):
	"""
		Push the deadline one plugin interval into the future,
		measured from now (UTC, naive — matching the rest of the
		timer arithmetic in this file).
	"""
	interval = datetime.timedelta(seconds=self.plugin.interval)
	self.deadline = datetime.datetime.utcnow() + interval
440 return datetime
.datetime
.utcnow() >= self
.deadline