]>
git.ipfire.org Git - collecty.git/blob - src/collecty/daemon.py
2 ###############################################################################
4 # collecty - A system statistics collection daemon for IPFire #
5 # Copyright (C) 2012 IPFire development team #
7 # This program is free software: you can redistribute it and/or modify #
8 # it under the terms of the GNU General Public License as published by #
9 # the Free Software Foundation, either version 3 of the License, or #
10 # (at your option) any later version. #
12 # This program is distributed in the hope that it will be useful, #
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of #
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
15 # GNU General Public License for more details. #
17 # You should have received a copy of the GNU General Public License #
18 # along with this program. If not, see <http://www.gnu.org/licenses/>. #
20 ###############################################################################
31 from constants
import *
35 log
= logging
.getLogger("collecty")
class Collecty(object):
    """
    The collecty daemon: initialises all plugins, runs their collector
    threads, and periodically lets the write queue flush collected data
    to disk.

    NOTE(review): this block was reconstructed from a garbled listing with
    missing lines; inferred parts are marked below — confirm against the
    upstream repository.
    """

    # The default interval, when all data is written to disk.
    # NOTE(review): value was not visible in the reviewed listing — confirm.
    SUBMIT_INTERVAL = 300

    # Main-loop wakeup interval in seconds.
    # NOTE(review): value was not visible in the reviewed listing — confirm.
    HEARTBEAT = 1

    def __init__(self, debug=False):
        self.debug = debug

        # Enable debug logging when running in debug mode
        if self.debug:
            log.setLevel(logging.DEBUG)

        # All successfully initialised plugin instances.
        self.plugins = []

        # Indicates whether this process should be running or not.
        self.running = True

        # The write queue holds all collected pieces of data which
        # will be written to disk later.
        self.write_queue = WriteQueue(self, self.SUBMIT_INTERVAL)

        # Create a thread that connects to dbus and processes requests we
        # receive from it.
        self.bus = bus.Bus(self)

        # Initialise every plugin the registry knows about.
        for plugin in plugins.get():
            self.add_plugin(plugin)

        log.debug(_("Collecty successfully initialized with %s plugins") \
            % len(self.plugins))

    def add_plugin(self, plugin_class):
        # Try initialising a new plugin. If that fails, we will log the
        # error and try to go on.
        try:
            plugin = plugin_class(self)
        except Exception:
            log.critical(_("Plugin %s could not be initialised") % plugin_class, exc_info=True)
            # Without this return, `plugin` would be unbound below.
            return

        self.plugins.append(plugin)

    @property
    def templates(self):
        # Iterate over the graph templates of every initialised plugin.
        # NOTE(review): the enclosing definition of this nested loop was
        # missing from the listing; reconstructed as an iterator — confirm.
        for plugin in self.plugins:
            for template in plugin.templates:
                yield template

    def run(self):
        # Register signal handlers.
        self.register_signal_handler()

        # NOTE(review): a bus start step presumably lived here (the
        # matching "Stop the bus thread" appears below) — confirm.
        self.bus.start()

        # Start all data source threads.
        for p in self.plugins:
            p.start()

        # Run the write queue thread
        self.write_queue.start()

        # Regularly submit all data to disk.
        while self.running:
            try:
                time.sleep(self.HEARTBEAT)
            except KeyboardInterrupt:
                self.shutdown()
                break

        # Wait until all plugins are finished.
        # NOTE(review): reconstructed as join(); the original statement
        # was not visible — confirm.
        for p in self.plugins:
            p.join()

        # Stop the bus thread
        self.bus.shutdown()

        # Write all collected data to disk before ending the main thread
        self.write_queue.shutdown()

        log.debug(_("Main thread exited"))

    def shutdown(self):
        log.info(_("Received shutdown signal"))

        # Stop the main loop in run().
        self.running = False

        # Propagating shutdown to all threads.
        for p in self.plugins:
            p.shutdown()

    def register_signal_handler(self):
        # Route termination and user signals to signal_handler().
        for s in (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1):
            log.debug(_("Registering signal %d") % s)

            signal.signal(s, self.signal_handler)

    def signal_handler(self, sig, *args, **kwargs):
        log.info(_("Caught signal %d") % sig)

        if sig in (signal.SIGTERM, signal.SIGINT):
            # Shutdown this application.
            self.shutdown()

        elif sig == signal.SIGUSR1:
            # SIGUSR1 forces an immediate flush of the write queue.
            self.write_queue.commit()

    def get_plugin_from_template(self, template_name):
        """
        Return the first plugin providing a template with the given
        name, or None when no plugin does.
        """
        for plugin in self.plugins:
            if template_name in (t.name for t in plugin.templates):
                return plugin

        return None

    def generate_graph(self, template_name, *args, **kwargs):
        """
        Render a graph for the named template, delegating to the plugin
        that owns it. Raises RuntimeError for an unknown template.
        """
        plugin = self.get_plugin_from_template(template_name)
        if not plugin:
            raise RuntimeError("Could not find template %s" % template_name)

        return plugin.generate_graph(template_name, *args, **kwargs)
class WriteQueue(threading.Thread):
    """
    Background thread that buffers collected samples in a priority
    queue and periodically writes them, grouped per RRD file, to disk.

    NOTE(review): reconstructed from a garbled listing with missing
    lines; inferred parts are marked below — confirm against upstream.
    """

    def __init__(self, collecty, submit_interval):
        threading.Thread.__init__(self)
        # NOTE(review): daemonising the thread was not visible in the
        # listing but shutdown() joins explicitly — confirm.
        self.daemon = True

        self.collecty = collecty

        self.log = logging.getLogger("collecty.queue")
        self.log.propagate = True

        # Timer that elapses every submit_interval seconds.
        self.timer = plugins.Timer(submit_interval)

        # Samples are ordered by time (QueueObject comparisons).
        self._queue = queue.PriorityQueue()

        self.log.debug(_("Initialised write queue"))

    def run(self):
        self.log.debug(_("Write queue process started"))
        self.running = True

        while self.running:
            # NOTE(review): a timer reset presumably happened here;
            # the statement was not visible — confirm.
            self.timer.reset()

            # Wait until the timer has successfully elapsed.
            if self.timer.wait():
                self.commit()

        # Flush whatever is left before the thread ends.
        self.commit()

        self.log.debug(_("Write queue process stopped"))

    def shutdown(self):
        # Stop the loop in run() and abort any pending timer wait.
        self.running = False
        self.timer.cancel()

        # Wait until all data has been written.
        self.join()

    def add(self, object, time, data):
        """
        Queue one sample (time, data) for the RRD file of *object*.
        """
        result = QueueObject(object.file, time, data)
        self._queue.put(result)

    def commit(self):
        """
            Flushes the read data to disk.
        """
        # There is nothing to do if the queue is empty
        if self._queue.empty():
            self.log.debug(_("No data to commit"))
            return

        time_start = time.time()

        self.log.debug(_("Submitting data to the databases..."))

        # Get all objects from the queue and group them by the RRD file
        # to commit them all at once
        batches = {}
        while not self._queue.empty():
            result = self._queue.get()
            # setdefault replaces the original try/except-KeyError
            # grouping; same behaviour, one lookup path.
            batches.setdefault(result.file, []).append(result)

        # Write the collected data to disk.  A distinct loop variable
        # avoids shadowing the dict being iterated (original reused the
        # same name for both).
        for filename, file_results in batches.items():
            self._commit_file(filename, file_results)

        duration = time.time() - time_start
        self.log.debug(_("Emptied write queue in %.2fs") % duration)

    def _commit_file(self, filename, results):
        self.log.debug(_("Committing %(counter)s entries to %(filename)s") \
            % { "counter" : len(results), "filename" : filename })

        for result in results:
            self.log.debug(" %s: %s" % (result.time, result.data))

        # Each QueueObject stringifies to "<timestamp>:<data>" which is
        # exactly the update format rrdtool expects.
        rrdtool.update(filename, *["%s" % r for r in results])
class QueueObject(object):
    """
    One buffered sample: the target RRD file, the sample time and the
    formatted data string.  Instances order by time so that a
    PriorityQueue yields the oldest sample first.
    """

    def __init__(self, file, time, data):
        self.file = file
        self.time = time
        self.data = data

    def __str__(self):
        # rrdtool update format: "<timestamp>:<data>".  strftime("%s")
        # (epoch seconds) is a glibc extension — not portable to all
        # platforms.
        return "%s:%s" % (self.time.strftime("%s"), self.data)

    # BUG FIX: the original defined __cmp__ via the builtin cmp(), both
    # of which were removed in Python 3 (this file uses the Python 3
    # `queue` module).  PriorityQueue ordering needs __lt__, otherwise
    # inserting a second item raises TypeError.
    def __eq__(self, other):
        return self.time == other.time

    def __lt__(self, other):
        return self.time < other.time