# src/collecty/daemon.py (from oddments/collecty.git)
###############################################################################
#                                                                             #
# collecty - A system statistics collection daemon for IPFire                 #
# Copyright (C) 2012 IPFire development team                                  #
#                                                                             #
# This program is free software: you can redistribute it and/or modify        #
# it under the terms of the GNU General Public License as published by        #
# the Free Software Foundation, either version 3 of the License, or           #
# (at your option) any later version.                                         #
#                                                                             #
# This program is distributed in the hope that it will be useful,             #
# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
# GNU General Public License for more details.                                #
#                                                                             #
# You should have received a copy of the GNU General Public License           #
# along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
#                                                                             #
###############################################################################
import logging
import os
import queue
import sched
import signal
import tarfile
import tempfile
import threading
import time

import rrdtool

from . import bus
from . import plugins

from .constants import *
from .i18n import _

# Module-level logger shared by the whole daemon
log = logging.getLogger("collecty")
class Collecty(object):
    """
    The main daemon object.

    It schedules periodic collection runs for every loaded plugin,
    buffers the collected data in a write queue and periodically
    commits that queue to the RRD databases on disk.
    """

    # The default interval, when all data is written to disk.
    # NOTE(review): value reconstructed from upstream collecty — confirm.
    COMMIT_INTERVAL = 300

    def __init__(self, debug=False):
        self.debug = debug

        # Reset timezone to UTC
        # rrdtool is reading that from the environment
        os.environ["TZ"] = "UTC"

        # Enable debug logging when running in debug mode
        if self.debug:
            log.setLevel(logging.DEBUG)

        # All successfully initialised plugin instances
        self.plugins = []

        # Create the scheduler and schedule the first commit
        self.scheduler = sched.scheduler()
        self._schedule_commit()

        # The write queue holds all collected pieces of data which
        # will be written to disk later.
        self.write_queue = WriteQueue(self)

        # Create a thread that connects to dbus and processes requests we
        # receive from there.
        self.bus = bus.Bus(self)

        log.debug(_("Collecty successfully initialized"))

    def add_plugin(self, plugin_class):
        """Instantiate *plugin_class* and schedule its first collection run."""
        # Try initialising a new plugin. If that fails, we will log the
        # error and try to go on.
        try:
            plugin = plugin_class(self)
        except Exception:
            log.critical(_("Plugin %s could not be initialised") % plugin_class,
                exc_info=True)
            return

        self.plugins.append(plugin)

        # Collect immediately (interval=0 puts the first run at the
        # front of the scheduler)
        self._schedule_plugin(plugin, interval=0)

    @property
    def templates(self):
        """Return all templates provided by all loaded plugins."""
        templates = []

        for plugin in self.plugins:
            for template in plugin.templates:
                templates.append(template)

        return templates

    def _schedule_plugin(self, plugin, interval=None):
        """
        Schedules a collection event for the given plugin
        """
        log.debug("Scheduling plugin %s for executing in %ss" % (plugin, plugin.interval))

        self.scheduler.enter(
            # Use the plugin's own interval unless an override was given
            plugin.interval if interval is None else interval,
            plugin.priority, self._collect, (plugin,),
        )

    def _schedule_commit(self):
        """Schedule the next flush of the write queue."""
        log.debug("Scheduling commit in %ss" % self.COMMIT_INTERVAL)

        self.scheduler.enter(
            # Priority -1 runs the commit before any plugin collections
            self.COMMIT_INTERVAL, -1, self._commit,
        )

    def _collect(self, plugin, **kwargs):
        """
        Called for each plugin when it is time to collect some data
        """
        log.debug("Collection started for %s" % plugin)

        # Add the next collection event to the scheduler
        self._schedule_plugin(plugin)

        # Run the collection. A failing plugin must never take down the
        # scheduler loop, so any exception is logged and swallowed.
        # NOTE(review): this try block was missing from the scrape and has
        # been reconstructed — confirm message text against upstream.
        try:
            plugin.collect()
        except Exception:
            log.error(_("An unhandled exception occured in plugin %s") % plugin,
                exc_info=True)

    def _commit(self):
        """
        Called when all data should be committed to disk
        """
        # Schedule the next commit
        self._schedule_commit()

        # Write everything in the queue
        self.write_queue.commit()

    def run(self):
        """Main loop: run the scheduler until interrupted, then clean up."""
        # Register signal handlers.
        self.register_signal_handler()

        # Start the bus thread so requests can be served while collecting
        # NOTE(review): reconstructed — confirm method name against upstream.
        self.bus.start()

        # Initialise all available plugins
        for plugin in plugins.get():
            self.add_plugin(plugin)

        # Run the scheduler; Ctrl-C simply ends the loop
        try:
            self.scheduler.run()
        except KeyboardInterrupt:
            pass

        # Stop the bus thread
        # NOTE(review): reconstructed — confirm method name against upstream.
        self.bus.shutdown()

        # Write all collected data to disk before ending the main thread
        self.write_queue.commit()

        log.debug(_("Main thread exited"))

    def shutdown(self):
        """Called on SIGTERM/SIGINT to end the daemon."""
        log.info(_("Received shutdown signal"))

    def register_signal_handler(self):
        """Install signal_handler() for all signals this daemon reacts to."""
        for s in (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1):
            log.debug(_("Registering signal %d") % s)

            signal.signal(s, self.signal_handler)

    def signal_handler(self, sig, *args, **kwargs):
        """Dispatch a received POSIX signal to the matching action."""
        log.info(_("Caught signal %d") % sig)

        if sig in (signal.SIGTERM, signal.SIGINT):
            # Shutdown this application.
            self.shutdown()

        elif sig == signal.SIGUSR1:
            # SIGUSR1 forces an immediate flush of the write queue
            self.write_queue.commit()

    def get_plugin_from_template(self, template_name):
        """Return the plugin that provides *template_name*, or None."""
        for plugin in self.plugins:
            # Skip plugins that do not provide this template
            if not template_name in [t.name for t in plugin.templates]:
                continue

            return plugin

    def generate_graph(self, template_name, *args, **kwargs):
        """Render a graph for *template_name* via its owning plugin."""
        plugin = self.get_plugin_from_template(template_name)
        if not plugin:
            raise RuntimeError("Could not find template %s" % template_name)

        return plugin.generate_graph(template_name, *args, **kwargs)

    def graph_info(self, template_name, *args, **kwargs):
        """Return metadata about the graph for *template_name*."""
        plugin = self.get_plugin_from_template(template_name)
        if not plugin:
            raise RuntimeError("Could not find template %s" % template_name)

        return plugin.graph_info(template_name, *args, **kwargs)

    def last_update(self, template_name, *args, **kwargs):
        """Return the last update of the plugin owning *template_name*."""
        plugin = self.get_plugin_from_template(template_name)
        if not plugin:
            raise RuntimeError("Could not find template %s" % template_name)

        return plugin.last_update(*args, **kwargs)

    def backup(self, filename):
        """Dump all RRD databases into a gzip-compressed tar archive."""
        # Write all data to disk first
        self.write_queue.commit()

        log.info(_("Backing up to %s..." % filename))

        # Opening a compressed tar file with will have all files added to it
        with tarfile.open(filename, mode="w:gz") as archive:
            for path, directories, files in os.walk(DATABASE_DIR):
                for file in files:
                    # Skip any non-RRD files
                    if not file.endswith(".rrd"):
                        continue

                    # Compose the full file path
                    file = os.path.join(path, file)

                    log.debug(_("Adding %s to backup...") % file)

                    # Dump the database to a portable plain-text snapshot
                    # and archive that instead of the raw binary .rrd
                    with tempfile.NamedTemporaryFile() as t:
                        rrdtool.dump(file, t.name)

                        # Add the file to the archive, stripping the
                        # database directory prefix from the member name
                        archive.add(
                            t.name, arcname=file[len(DATABASE_DIR):],
                        )

        log.info(_("Backup finished"))
class WriteQueue(object):
    """
    Thread-safe buffer between the collection plugins and the RRD
    databases on disk.

    Collected results are queued (ordered by time via QueueObject.__lt__)
    and later written out in batches, grouped by target RRD file.
    """

    def __init__(self, collecty):
        self.collecty = collecty

        self.log = logging.getLogger("collecty.queue")

        # Lock to make this class thread-safe
        self._lock = threading.Lock()

        # Orders queued results chronologically (QueueObject.__lt__)
        self._queue = queue.PriorityQueue()

        self.log.debug(_("Initialised write queue"))

    def add(self, object, time, data):
        """Queue one collected data point for *object*'s RRD file."""
        result = QueueObject(object.file, time, data)

        with self._lock:
            self._queue.put(result)

    def commit(self):
        """
        Flushes the read data to disk.
        """
        # There is nothing to do if the queue is empty
        if self._queue.empty():
            self.log.debug(_("No data to commit"))
            return

        time_start = time.time()

        self.log.debug(_("Submitting data to the databases..."))

        # Get all objects from the queue and group them by the RRD file
        # to commit them all at once
        results = {}

        with self._lock:
            while not self._queue.empty():
                result = self._queue.get()

                # EAFP: append to an existing group or start a new one
                try:
                    results[result.file].append(result)
                except KeyError:
                    results[result.file] = [result]

        # Write the collected data to disk
        for filename, results in list(results.items()):
            self._commit_file(filename, results)

        duration = time.time() - time_start
        self.log.debug(_("Emptied write queue in %.2fs") % duration)

    def _commit_file(self, filename, results):
        """Write one batch of results that all target the same RRD file."""
        self.log.debug(_("Committing %(counter)s entries to %(filename)s") \
            % { "counter" : len(results), "filename" : filename })

        for result in results:
            self.log.debug("  %s: %s" % (result.time, result.data))

        try:
            # QueueObject.__str__ yields the "<timestamp>:<data>" form
            # that rrdtool.update() expects
            rrdtool.update(filename, *["%s" % r for r in results])

        # Catch operational errors like unreadable/unwritable RRD databases
        # or those where the format has changed. The collected data will be lost.
        except rrdtool.OperationalError as e:
            self.log.critical(_("Could not update RRD database %s: %s") \
                % (filename, e))

    def commit_file(self, filename):
        """
        Commits all data that is in the write queue for the given
        RRD database only.
        """
        results, others = [], []

        # We will have to walk through the entire queue since we cannot
        # ready any items selectively. Everything that belongs to our
        # transaction is kept. Everything else will be put back into the
        # queue afterwards.
        with self._lock:
            while not self._queue.empty():
                result = self._queue.get()

                if result.file == filename:
                    results.append(result)
                else:
                    others.append(result)

            # Put back all items that did not match
            for result in others:
                self._queue.put(result)

        # Write everything else to disk
        if results:
            self._commit_file(filename, results)
class QueueObject(object):
    """
    One collected data point bound for an RRD database file.

    Instances order by collection time so that a PriorityQueue yields
    them chronologically.
    """

    def __init__(self, file, time, data):
        # NOTE(review): attribute assignments reconstructed — the scrape
        # lost the __init__ body, but file/time/data are read elsewhere.
        self.file = file   # path of the target RRD database
        self.time = time   # datetime when the data was collected
        self.data = data   # payload string passed to rrdtool.update()

    def __str__(self):
        # rrdtool.update() expects "<unix timestamp>:<data>"
        return "%s:%s" % (self.time.strftime("%s"), self.data)

    def __lt__(self, other):
        return self.time < other.time