]> git.ipfire.org Git - thirdparty/lldpd.git/blob - src/event.c
dist: provide a complete changelog
[thirdparty/lldpd.git] / src / event.c
1 /*
2 * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include "lldpd.h"
18
19 #include <unistd.h>
20 #include <signal.h>
21 #include <event2/event.h>
22
23 static void
24 levent_log_cb(int severity, const char *msg)
25 {
26 switch (severity) {
27 case _EVENT_LOG_DEBUG: log_debug("libevent[debug]: %s", msg); break;
28 case _EVENT_LOG_MSG: log_info ("libevent[info]: %s", msg); break;
29 case _EVENT_LOG_WARN: log_warnx("libevent[warn]: %s", msg); break;
30 case _EVENT_LOG_ERR: log_warnx("libevent[error]: %s", msg); break;
31 }
32 }
33
/* One watched file descriptor: a libevent read event plus list linkage.
   Used both for SNMP FDs and per-interface receive FDs. */
struct lldpd_events {
	TAILQ_ENTRY(lldpd_events) next;	/* Next tracked FD in the list */
	struct event *ev;		/* Read event registered with libevent */
};
TAILQ_HEAD(ev_l, lldpd_events);

/* The `g_snmp_fds` and `h_recv` members are stored as opaque pointers;
   these accessors cast them back to the `struct ev_l *` list head. */
#define levent_snmp_fds(cfg) ((struct ev_l*)(cfg)->g_snmp_fds)
#define levent_hardware_fds(hardware) ((struct ev_l*)(hardware)->h_recv)
42
43 #ifdef USE_SNMP
44 #include <net-snmp/net-snmp-config.h>
45 #include <net-snmp/net-snmp-includes.h>
46 #include <net-snmp/agent/net-snmp-agent-includes.h>
47 #include <net-snmp/agent/snmp_vars.h>
48
49 static void levent_snmp_update(struct lldpd *);
50
51 /*
52 * Callback function when we have something to read from SNMP.
53 *
54 * This function is called because we have a read event on one SNMP
55 * file descriptor. When need to call snmp_read() on it.
56 */
57 static void
58 levent_snmp_read(evutil_socket_t fd, short what, void *arg)
59 {
60 struct lldpd *cfg = arg;
61 fd_set fdset;
62 (void)what;
63 FD_ZERO(&fdset);
64 FD_SET(fd, &fdset);
65 snmp_read(&fdset);
66 levent_snmp_update(cfg);
67 }
68
69 /*
70 * Callback function for a SNMP timeout.
71 *
72 * A SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
73 */
74 static void
75 levent_snmp_timeout(evutil_socket_t fd, short what, void *arg)
76 {
77 struct lldpd *cfg = arg;
78 (void)what; (void)fd;
79 snmp_timeout();
80 run_alarms();
81 levent_snmp_update(cfg);
82 }
83
84 /*
85 * Watch a new SNMP FD.
86 *
87 * @param base The libevent base we are working on.
88 * @param fd The file descriptor we want to watch.
89 *
90 * The file descriptor is appended to the list of file descriptors we
91 * want to watch.
92 */
93 static void
94 levent_snmp_add_fd(struct lldpd *cfg, int fd)
95 {
96 struct event_base *base = cfg->g_base;
97 struct lldpd_events *snmpfd = calloc(1, sizeof(struct lldpd_events));
98 if (!snmpfd) {
99 LLOG_WARN("unable to allocate memory for new SNMP event");
100 return;
101 }
102 evutil_make_socket_nonblocking(fd);
103 if ((snmpfd->ev = event_new(base, fd,
104 EV_READ | EV_PERSIST,
105 levent_snmp_read,
106 cfg)) == NULL) {
107 LLOG_WARNX("unable to allocate a new SNMP event for FD %d", fd);
108 free(snmpfd);
109 return;
110 }
111 if (event_add(snmpfd->ev, NULL) == -1) {
112 LLOG_WARNX("unable to schedule new SNMP event for FD %d", fd);
113 event_free(snmpfd->ev);
114 free(snmpfd);
115 return;
116 }
117 TAILQ_INSERT_TAIL(levent_snmp_fds(cfg), snmpfd, next);
118 }
119
/*
 * Update SNMP event loop.
 *
 * New events are added and some others are removed. This function
 * should be called every time a SNMP event happens: either when
 * handling a SNMP packet, a SNMP timeout or when sending a SNMP
 * packet. This function keeps libevent in sync with Net-SNMP.
 *
 * @param cfg The lldpd configuration (carries the FD list and timer).
 */
static void
levent_snmp_update(struct lldpd *cfg)
{
	int maxfd = 0;
	int block = 1;
	fd_set fdset;
	struct timeval timeout;
	/* Event count from the previous call; used to log only on change. */
	static int howmany = 0;
	int added = 0, removed = 0, current = 0;
	struct lldpd_events *snmpfd, *snmpfd_next;

	/* snmp_select_info() can be tricky to understand. We set `block` to
	   1 to mean that we don't request a timeout. snmp_select_info()
	   will reset `block` to 0 if it wants us to set up a timeout. In
	   this timeout, `snmp_timeout()` should be invoked.

	   Each FD in `fdset` will need to be watched for reading. If one of
	   them becomes active, `snmp_read()` should be called on it.
	*/

	FD_ZERO(&fdset);
	snmp_select_info(&maxfd, &fdset, &timeout, &block);

	/* We need to untrack any event whose FD is not in `fdset`
	   anymore. `maxfd` is one past the highest FD Net-SNMP wants
	   watched, so any tracked FD at or above it is certainly stale. */
	for (snmpfd = TAILQ_FIRST(levent_snmp_fds(cfg));
	     snmpfd;
	     snmpfd = snmpfd_next) {
		snmpfd_next = TAILQ_NEXT(snmpfd, next);
		if (event_get_fd(snmpfd->ev) >= maxfd ||
		    (!FD_ISSET(event_get_fd(snmpfd->ev), &fdset))) {
			event_free(snmpfd->ev);
			TAILQ_REMOVE(levent_snmp_fds(cfg), snmpfd, next);
			free(snmpfd);
			removed++;
		} else {
			/* Already tracked: clear it from `fdset` so that
			   only genuinely new FDs remain below. */
			FD_CLR(event_get_fd(snmpfd->ev), &fdset);
			current++;
		}
	}

	/* Invariant: FDs still set in `fdset` are not in the list of tracked FDs */
	for (int fd = 0; fd < maxfd; fd++) {
		if (FD_ISSET(fd, &fdset)) {
			levent_snmp_add_fd(cfg, fd);
			added++;
		}
	}
	current += added;
	if (howmany != current) {
		LLOG_DEBUG("added %d events, removed %d events, total of %d events",
		    added, removed, current);
		howmany = current;
	}

	/* If needed, handle timeout. When `block` stayed 1, Net-SNMP
	   requested no timeout and the timer is (re)armed with a NULL
	   timeval; otherwise it fires after `timeout`. */
	if (evtimer_add(cfg->g_snmp_timeout, block?NULL:&timeout) == -1)
		LLOG_WARNX("unable to schedule timeout function for SNMP");
}
189 #endif /* USE_SNMP */
190
/* Per-connection state for one control-socket client. Allocated in
   levent_ctl_accept(); freed in levent_ctl_recv() when the connection
   is torn down. */
struct lldpd_one_client {
	struct lldpd *cfg;	/* Daemon configuration, passed to handlers */
	struct event *ev;	/* Persistent read event on the client socket */
};
195
196 static void
197 levent_ctl_recv(evutil_socket_t fd, short what, void *arg)
198 {
199 struct lldpd_one_client *client = arg;
200 enum hmsg_type type;
201 void *buffer = NULL;
202 int n;
203 (void)what;
204
205 if ((n = ctl_msg_recv(fd, &type, &buffer)) == -1 ||
206 client_handle_client(client->cfg, fd, type, buffer, n) == -1) {
207 close(fd);
208 event_free(client->ev);
209 free(client);
210 }
211 free(buffer);
212 }
213
214 static void
215 levent_ctl_accept(evutil_socket_t fd, short what, void *arg)
216 {
217 struct lldpd *cfg = arg;
218 struct lldpd_one_client *client = NULL;
219 int s;
220 (void)what;
221
222 if ((s = accept(fd, NULL, NULL)) == -1) {
223 LLOG_WARN("unable to accept connection from socket");
224 return;
225 }
226 client = calloc(1, sizeof(struct lldpd_one_client));
227 if (!client) {
228 LLOG_WARNX("unable to allocate memory for new client");
229 goto accept_failed;
230 }
231 client->cfg = cfg;
232 evutil_make_socket_nonblocking(s);
233 if ((client->ev = event_new(cfg->g_base, s,
234 EV_READ | EV_PERSIST,
235 levent_ctl_recv,
236 client)) == NULL) {
237 LLOG_WARNX("unable to allocate a new event for new client");
238 goto accept_failed;
239 }
240 if (event_add(client->ev, NULL) == -1) {
241 LLOG_WARNX("unable to schedule new event for new client");
242 goto accept_failed;
243 }
244 return;
245 accept_failed:
246 if (client && client->ev) event_free(client->ev);
247 free(client);
248 close(s);
249 }
250
251 static void
252 levent_dump(evutil_socket_t fd, short what, void *arg)
253 {
254 struct event_base *base = arg;
255 (void)fd; (void)what;
256 event_base_dump_events(base, stderr);
257 }
258 static void
259 levent_stop(evutil_socket_t fd, short what, void *arg)
260 {
261 struct event_base *base = arg;
262 (void)fd; (void)what;
263 event_base_loopbreak(base);
264 }
265
266 static void
267 levent_update_and_send(evutil_socket_t fd, short what, void *arg)
268 {
269 struct lldpd *cfg = arg;
270 struct timeval tv = {cfg->g_delay, 0};
271 (void)fd; (void)what;
272 lldpd_loop(cfg);
273 event_add(cfg->g_main_loop, &tv);
274 }
275
/*
 * One-time libevent setup: create the base, initialize SNMP support,
 * schedule the main transmit loop, watch the control socket and
 * install signal handlers. Fatal on any unrecoverable failure.
 */
static void
levent_init(struct lldpd *cfg)
{
	/* Setup libevent */
	event_set_log_callback(levent_log_cb);
	if (!(cfg->g_base = event_base_new()))
		fatalx("unable to create a new libevent base");
	LLOG_INFO("libevent %s initialized with %s method",
	    event_get_version(),
	    event_base_get_method(cfg->g_base));

	/* Setup SNMP */
#ifdef USE_SNMP
	if (cfg->g_snmp) {
		agent_init(cfg, cfg->g_snmp_agentx);
		/* The timer is created here but armed later by
		   levent_snmp_update(). */
		cfg->g_snmp_timeout = evtimer_new(cfg->g_base,
		    levent_snmp_timeout,
		    cfg);
		if (!cfg->g_snmp_timeout)
			fatalx("unable to setup timeout function for SNMP");
		if ((cfg->g_snmp_fds =
			malloc(sizeof(struct ev_l))) == NULL)
			fatalx("unable to allocate memory for SNMP events");
		TAILQ_INIT(levent_snmp_fds(cfg));
	}
#endif

	/* Setup the main loop; levent_update_and_send() reschedules
	   itself every cfg->g_delay seconds. */
	if (!(cfg->g_main_loop = event_new(cfg->g_base, -1, 0,
		    levent_update_and_send,
		    cfg)))
		fatalx("unable to setup main timer");
	/* Fire the main loop once immediately. */
	event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

	/* Setup unix socket */
	evutil_make_socket_nonblocking(cfg->g_ctl);
	if ((cfg->g_ctl_event = event_new(cfg->g_base, cfg->g_ctl,
		    EV_READ|EV_PERSIST, levent_ctl_accept, cfg)) == NULL)
		fatalx("unable to setup control socket event");
	event_add(cfg->g_ctl_event, NULL);

	/* Signals */
	/* NOTE(review): evsignal_new() may return NULL on allocation
	   failure; the results are passed to evsignal_add() unchecked. */
	evsignal_add(evsignal_new(cfg->g_base, SIGUSR1,
		levent_dump, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGHUP,
		levent_stop, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGINT,
		levent_stop, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGTERM,
		levent_stop, cfg->g_base),
	    NULL);
}
331
/* Initialize libevent and run the event loop until a break/exit is
   requested (SIGHUP/SIGINT/SIGTERM via levent_stop()). */
void
levent_loop(struct lldpd *cfg)
{
	levent_init(cfg);

	/* libevent loop: dispatched one round at a time (EVLOOP_ONCE) so
	   SNMP state can be resynchronized between iterations. */
	do {
		if (event_base_got_break(cfg->g_base) ||
		    event_base_got_exit(cfg->g_base))
			break;
#ifdef USE_SNMP
		if (cfg->g_snmp) {
			/* We don't use delegated requests (requests
			   whose answer is delayed). However, we keep
			   the call here in case we use it some
			   day. We don't call run_alarms() here. We do
			   it on timeout only. */
			netsnmp_check_outstanding_agent_requests();
			levent_snmp_update(cfg);
		}
#endif
	} while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

#ifdef USE_SNMP
	if (cfg->g_snmp)
		agent_shutdown();
#endif /* USE_SNMP */

}
362
363 static void
364 levent_hardware_recv(evutil_socket_t fd, short what, void *arg)
365 {
366 struct lldpd_hardware *hardware = arg;
367 struct lldpd *cfg = hardware->h_cfg;
368 (void)what;
369 lldpd_recv(cfg, hardware, fd);
370 }
371
372 void
373 levent_hardware_init(struct lldpd_hardware *hardware)
374 {
375 if ((hardware->h_recv =
376 malloc(sizeof(struct ev_l))) == NULL) {
377 LLOG_WARNX("unable to allocate memory for %s",
378 hardware->h_ifname);
379 return;
380 }
381 TAILQ_INIT(levent_hardware_fds(hardware));
382 }
383
384 void
385 levent_hardware_add_fd(struct lldpd_hardware *hardware, int fd)
386 {
387 struct lldpd_events *hfd = NULL;
388 if (!hardware->h_recv) return;
389
390 hfd = calloc(1, sizeof(struct lldpd_events));
391 if (!hfd) {
392 LLOG_WARNX("unable to allocate new event for %s",
393 hardware->h_ifname);
394 return;
395 }
396 evutil_make_socket_nonblocking(fd);
397 if ((hfd->ev = event_new(hardware->h_cfg->g_base, fd,
398 EV_READ | EV_PERSIST,
399 levent_hardware_recv,
400 hardware)) == NULL) {
401 LLOG_WARNX("unable to allocate a new event for %s",
402 hardware->h_ifname);
403 free(hfd);
404 return;
405 }
406 if (event_add(hfd->ev, NULL) == -1) {
407 LLOG_WARNX("unable to schedule new event for %s",
408 hardware->h_ifname);
409 event_free(hfd->ev);
410 free(hfd);
411 return;
412 }
413 TAILQ_INSERT_TAIL(levent_hardware_fds(hardware), hfd, next);
414 }
415
416 void
417 levent_hardware_release(struct lldpd_hardware *hardware)
418 {
419 struct lldpd_events *ev, *ev_next;
420 if (!hardware->h_recv) return;
421
422 for (ev = TAILQ_FIRST(levent_hardware_fds(hardware));
423 ev;
424 ev = ev_next) {
425 ev_next = TAILQ_NEXT(ev, next);
426 /* We may close several time the same FD. This is harmless. */
427 close(event_get_fd(ev->ev));
428 event_free(ev->ev);
429 TAILQ_REMOVE(levent_hardware_fds(hardware), ev, next);
430 free(ev);
431 }
432 free(levent_hardware_fds(hardware));
433 }