/* src/daemon/event.c — lldpd event loop (libevent glue) */
4b292b55 1/* -*- mode: c; c-file-style: "openbsd" -*- */
d6e889b6
VB
2/*
3 * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx>
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "lldpd.h"
bdfe4193 19#include "trace.h"
d6e889b6
VB
20
21#include <unistd.h>
22#include <signal.h>
e0478a46 23#include <errno.h>
864a7bd5 24#include <time.h>
bec75f84 25#include <fcntl.h>
d7460a6f
ALG
26#if defined(__clang__)
27#pragma clang diagnostic push
28#pragma clang diagnostic ignored "-Wdocumentation"
29#endif
d6e889b6 30#include <event2/event.h>
e0478a46
VB
31#include <event2/bufferevent.h>
32#include <event2/buffer.h>
d7460a6f
ALG
33#if defined(__clang__)
34#pragma clang diagnostic pop
35#endif
d6e889b6 36
b0b8841b
ST
37#define EVENT_BUFFER 1024
38
d6e889b6
VB
39static void
40levent_log_cb(int severity, const char *msg)
41{
42 switch (severity) {
26fa5d17
VB
43 case _EVENT_LOG_DEBUG: log_debug("libevent", "%s", msg); break;
44 case _EVENT_LOG_MSG: log_info ("libevent", "%s", msg); break;
45 case _EVENT_LOG_WARN: log_warnx("libevent", "%s", msg); break;
46 case _EVENT_LOG_ERR: log_warnx("libevent", "%s", msg); break;
d6e889b6
VB
47 }
48}
49
d6e889b6
VB
/* One watched file descriptor, kept in a tail queue. The same structure
 * backs both the SNMP descriptor list (cfg->g_snmp_fds) and each port's
 * receive descriptor list (hardware->h_recv). */
struct lldpd_events {
	TAILQ_ENTRY(lldpd_events) next;
	struct event *ev;	/* The registered libevent read event */
};
TAILQ_HEAD(ev_l, lldpd_events);

/* Cast the opaque storage pointers back to their event-list type. */
#define levent_snmp_fds(cfg) ((struct ev_l*)(cfg)->g_snmp_fds)
#define levent_hardware_fds(hardware) ((struct ev_l*)(hardware)->h_recv)
58
59#ifdef USE_SNMP
60#include <net-snmp/net-snmp-config.h>
61#include <net-snmp/net-snmp-includes.h>
62#include <net-snmp/agent/net-snmp-agent-includes.h>
63#include <net-snmp/agent/snmp_vars.h>
64
59c32cf0
VB
65/* Compatibility with older versions of NetSNMP */
66#ifndef HAVE_SNMP_SELECT_INFO2
67# define netsnmp_large_fd_set fd_set
68# define snmp_read2 snmp_read
69# define snmp_select_info2 snmp_select_info
70# define netsnmp_large_fd_set_init(...)
71# define netsnmp_large_fd_set_cleanup(...)
72# define NETSNMP_LARGE_FD_SET FD_SET
73# define NETSNMP_LARGE_FD_CLR FD_CLR
74# define NETSNMP_LARGE_FD_ZERO FD_ZERO
75# define NETSNMP_LARGE_FD_ISSET FD_ISSET
76#else
77# include <net-snmp/library/large_fd_set.h>
78#endif
79
d6e889b6
VB
/* Re-sync libevent with NetSNMP's descriptor set; defined below. */
static void levent_snmp_update(struct lldpd *);

/*
 * Callback function when we have something to read from SNMP.
 *
 * This function is called because we have a read event on one SNMP
 * file descriptor. We need to call snmp_read() on it.
 */
static void
levent_snmp_read(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	netsnmp_large_fd_set fdset;
	(void)what;
	/* snmp_read2() takes an FD set: build one that contains only the
	 * descriptor that just became readable. */
	netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
	NETSNMP_LARGE_FD_ZERO(&fdset);
	NETSNMP_LARGE_FD_SET(fd, &fdset);
	snmp_read2(&fdset);
	/* Reading may have opened/closed SNMP sessions: refresh the set of
	 * descriptors libevent watches. */
	levent_snmp_update(cfg);
}
100
101/*
102 * Callback function for a SNMP timeout.
103 *
104 * A SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
105 */
106static void
107levent_snmp_timeout(evutil_socket_t fd, short what, void *arg)
108{
d6e889b6 109 struct lldpd *cfg = arg;
5fd6695c 110 (void)what; (void)fd;
d6e889b6 111 snmp_timeout();
219c432e 112 run_alarms();
d6e889b6
VB
113 levent_snmp_update(cfg);
114}
115
116/*
117 * Watch a new SNMP FD.
118 *
119 * @param base The libevent base we are working on.
120 * @param fd The file descriptor we want to watch.
121 *
122 * The file descriptor is appended to the list of file descriptors we
123 * want to watch.
124 */
125static void
126levent_snmp_add_fd(struct lldpd *cfg, int fd)
127{
128 struct event_base *base = cfg->g_base;
129 struct lldpd_events *snmpfd = calloc(1, sizeof(struct lldpd_events));
130 if (!snmpfd) {
6f8925be 131 log_warn("event", "unable to allocate memory for new SNMP event");
d6e889b6
VB
132 return;
133 }
bec75f84 134 levent_make_socket_nonblocking(fd);
d6e889b6
VB
135 if ((snmpfd->ev = event_new(base, fd,
136 EV_READ | EV_PERSIST,
137 levent_snmp_read,
138 cfg)) == NULL) {
6f8925be 139 log_warnx("event", "unable to allocate a new SNMP event for FD %d", fd);
d6e889b6
VB
140 free(snmpfd);
141 return;
142 }
143 if (event_add(snmpfd->ev, NULL) == -1) {
6f8925be 144 log_warnx("event", "unable to schedule new SNMP event for FD %d", fd);
d6e889b6
VB
145 event_free(snmpfd->ev);
146 free(snmpfd);
147 return;
148 }
149 TAILQ_INSERT_TAIL(levent_snmp_fds(cfg), snmpfd, next);
150}
151
152/*
153 * Update SNMP event loop.
154 *
155 * New events are added and some other are removed. This function
156 * should be called every time a SNMP event happens: either when
157 * handling a SNMP packet, a SNMP timeout or when sending a SNMP
158 * packet. This function will keep libevent in sync with NetSNMP.
159 *
160 * @param base The libevent base we are working on.
161 */
/*
 * Update SNMP event loop.
 *
 * New events are added and some other are removed. This function
 * should be called every time a SNMP event happens: either when
 * handling a SNMP packet, a SNMP timeout or when sending a SNMP
 * packet. This function will keep libevent in sync with NetSNMP.
 */
static void
levent_snmp_update(struct lldpd *cfg)
{
	int maxfd = 0;
	int block = 1;
	struct timeval timeout;
	static int howmany = 0;	/* Remember the last count to log only on change */
	int added = 0, removed = 0, current = 0;
	struct lldpd_events *snmpfd, *snmpfd_next;

	/* snmp_select_info() can be tricky to understand. We set `block` to
	   1 to means that we don't request a timeout. snmp_select_info()
	   will reset `block` to 0 if it wants us to setup a timeout. In
	   this timeout, `snmp_timeout()` should be invoked.

	   Each FD in `fdset` will need to be watched for reading. If one of
	   them become active, `snmp_read()` should be called on it.
	*/

	netsnmp_large_fd_set fdset;
	netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
	NETSNMP_LARGE_FD_ZERO(&fdset);
	snmp_select_info2(&maxfd, &fdset, &timeout, &block);

	/* We need to untrack any event whose FD is not in `fdset`
	   anymore */
	for (snmpfd = TAILQ_FIRST(levent_snmp_fds(cfg));
	     snmpfd;
	     snmpfd = snmpfd_next) {
		snmpfd_next = TAILQ_NEXT(snmpfd, next);
		if (event_get_fd(snmpfd->ev) >= maxfd ||
		    (!NETSNMP_LARGE_FD_ISSET(event_get_fd(snmpfd->ev), &fdset))) {
			event_free(snmpfd->ev);
			TAILQ_REMOVE(levent_snmp_fds(cfg), snmpfd, next);
			free(snmpfd);
			removed++;
		} else {
			/* Already tracked: clear it so the loop below only
			 * sees FDs that still need a new event. */
			NETSNMP_LARGE_FD_CLR(event_get_fd(snmpfd->ev), &fdset);
			current++;
		}
	}

	/* Invariant: FD in `fdset` are not in list of FD */
	for (int fd = 0; fd < maxfd; fd++) {
		if (NETSNMP_LARGE_FD_ISSET(fd, &fdset)) {
			levent_snmp_add_fd(cfg, fd);
			added++;
		}
	}
	current += added;
	if (howmany != current) {
		log_debug("event", "added %d events, removed %d events, total of %d events",
		    added, removed, current);
		howmany = current;
	}

	/* If needed, handle timeout */
	if (evtimer_add(cfg->g_snmp_timeout, block?NULL:&timeout) == -1)
		log_warnx("event", "unable to schedule timeout function for SNMP");

	netsnmp_large_fd_set_cleanup(&fdset);
}
224#endif /* USE_SNMP */
225
/* One connected control-socket (Unix socket) client. */
struct lldpd_one_client {
	TAILQ_ENTRY(lldpd_one_client) next;	/* Linkage in lldpd_clients */
	struct lldpd *cfg;	/* Back-pointer to daemon state */
	struct bufferevent *bev;	/* Buffered I/O over the client socket */
	int subscribed; /* Is this client subscribed to changes? */
};
/* Global list of currently connected clients. */
TAILQ_HEAD(, lldpd_one_client) lldpd_clients;
233
234static void
235levent_ctl_free_client(struct lldpd_one_client *client)
236{
237 if (client && client->bev) bufferevent_free(client->bev);
238 if (client) {
239 TAILQ_REMOVE(&lldpd_clients, client, next);
240 free(client);
241 }
242}
257db885 243
e0478a46 244static ssize_t
4e90a9e0 245levent_ctl_send(struct lldpd_one_client *client, int type, void *data, size_t len)
e0478a46 246{
e0478a46
VB
247 struct bufferevent *bev = client->bev;
248 struct hmsg_header hdr = { .len = len, .type = type };
249 bufferevent_disable(bev, EV_WRITE);
250 if (bufferevent_write(bev, &hdr, sizeof(struct hmsg_header)) == -1 ||
251 (len > 0 && bufferevent_write(bev, data, len) == -1)) {
6f8925be 252 log_warnx("event", "unable to create answer to client");
4e90a9e0 253 levent_ctl_free_client(client);
e0478a46
VB
254 return -1;
255 }
256 bufferevent_enable(bev, EV_WRITE);
257 return len;
258}
259
4e90a9e0
VB
/*
 * Broadcast a neighbor change to every subscribed control client.
 *
 * @param ifname   Name of the interface the neighbor was seen on.
 * @param state    Kind of change (project-defined constant).
 * @param neighbor The affected remote port.
 *
 * The serialized message is built lazily on the first subscribed client
 * and reused for all the others, then freed once.
 */
void
levent_ctl_notify(char *ifname, int state, struct lldpd_port *neighbor)
{
	struct lldpd_one_client *client, *client_next;
	struct lldpd_neighbor_change neigh = {
		.ifname = ifname,
		.state = state,
		.neighbor = neighbor
	};
	void *output = NULL;
	ssize_t output_len = 0;

	/* Don't use TAILQ_FOREACH, the client may be deleted in case of errors. */
	log_debug("control", "notify clients of neighbor changes");
	for (client = TAILQ_FIRST(&lldpd_clients);
	     client;
	     client = client_next) {
		client_next = TAILQ_NEXT(client, next);
		if (!client->subscribed) continue;

		if (output == NULL) {
			/* Ugly hack: we don't want to transmit a list of
			 * ports. We patch the port to avoid this: save the
			 * p_entries linkage, zero it for serialization, then
			 * restore it. */
			TAILQ_ENTRY(lldpd_port) backup_p_entries;
			memcpy(&backup_p_entries, &neighbor->p_entries,
			    sizeof(backup_p_entries));
			memset(&neighbor->p_entries, 0,
			    sizeof(backup_p_entries));
			output_len = lldpd_neighbor_change_serialize(&neigh, &output);
			memcpy(&neighbor->p_entries, &backup_p_entries,
			    sizeof(backup_p_entries));

			if (output_len <= 0) {
				log_warnx("event", "unable to serialize changed neighbor");
				return;
			}
		}

		levent_ctl_send(client, NOTIFICATION, output, output_len);
	}

	free(output);
}
303
304static ssize_t
305levent_ctl_send_cb(void *out, int type, void *data, size_t len)
306{
307 struct lldpd_one_client *client = out;
308 return levent_ctl_send(client, type, data, len);
309}
310
/*
 * Read callback for a control-socket client.
 *
 * Messages are framed as a struct hmsg_header followed by hdr.len bytes
 * of payload. While a frame is incomplete the function returns without
 * draining; libevent will call it again when more bytes arrive.
 * Oversized frames and protocol errors disconnect the client.
 */
static void
levent_ctl_recv(struct bufferevent *bev, void *ptr)
{
	struct lldpd_one_client *client = ptr;
	struct evbuffer *buffer = bufferevent_get_input(bev);
	size_t buffer_len = evbuffer_get_length(buffer);
	struct hmsg_header hdr;
	void *data = NULL;

	log_debug("control", "receive data on Unix socket");
	if (buffer_len < sizeof(struct hmsg_header))
		return;		/* Not enough data yet */
	/* Peek at the header without consuming it. */
	if (evbuffer_copyout(buffer, &hdr,
		sizeof(struct hmsg_header)) != sizeof(struct hmsg_header)) {
		log_warnx("event", "not able to read header");
		return;
	}
	if (hdr.len > HMSG_MAX_SIZE) {
		log_warnx("event", "message received is too large");
		goto recv_error;
	}

	if (buffer_len < hdr.len + sizeof(struct hmsg_header))
		return;		/* Not enough data yet */
	if (hdr.len > 0 && (data = malloc(hdr.len)) == NULL) {
		log_warnx("event", "not enough memory");
		goto recv_error;
	}
	/* Frame complete: consume header, then payload. */
	evbuffer_drain(buffer, sizeof(struct hmsg_header));
	if (hdr.len > 0) evbuffer_remove(buffer, data, hdr.len);

	/* Currently, we should not receive notification acknowledgment. But if
	 * we receive one, we can discard it. */
	if (hdr.len == 0 && hdr.type == NOTIFICATION) return;
	if (client_handle_client(client->cfg,
		levent_ctl_send_cb, client,
		hdr.type, data, hdr.len,
		&client->subscribed) == -1) goto recv_error;
	free(data);
	return;

recv_error:
	free(data);
	levent_ctl_free_client(client);
}
356
357static void
358levent_ctl_event(struct bufferevent *bev, short events, void *ptr)
359{
360 struct lldpd_one_client *client = ptr;
361 if (events & BEV_EVENT_ERROR) {
6f8925be 362 log_warnx("event", "an error occurred with client: %s",
e0478a46 363 evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
4e90a9e0 364 levent_ctl_free_client(client);
e0478a46 365 } else if (events & BEV_EVENT_EOF) {
6f8925be 366 log_debug("event", "client has been disconnected");
4e90a9e0 367 levent_ctl_free_client(client);
d6e889b6 368 }
d6e889b6
VB
369}
370
/*
 * Accept a new connection on the control Unix socket and wrap it into a
 * bufferevent-backed client structure appended to lldpd_clients.
 */
static void
levent_ctl_accept(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct lldpd_one_client *client = NULL;
	int s;
	(void)what;

	log_debug("control", "accept a new connection");
	if ((s = accept(fd, NULL, NULL)) == -1) {
		log_warn("event", "unable to accept connection from socket");
		return;
	}
	client = calloc(1, sizeof(struct lldpd_one_client));
	if (!client) {
		log_warnx("event", "unable to allocate memory for new client");
		close(s);
		goto accept_failed;
	}
	client->cfg = cfg;
	levent_make_socket_nonblocking(s);
	/* Inserted before the bufferevent is created so that
	 * levent_ctl_free_client() can unconditionally unlink it. */
	TAILQ_INSERT_TAIL(&lldpd_clients, client, next);
	if ((client->bev = bufferevent_socket_new(cfg->g_base, s,
		    BEV_OPT_CLOSE_ON_FREE)) == NULL) {
		log_warnx("event", "unable to allocate a new buffer event for new client");
		close(s);
		goto accept_failed;
	}
	bufferevent_setcb(client->bev,
	    levent_ctl_recv, NULL, levent_ctl_event,
	    client);
	bufferevent_enable(client->bev, EV_READ | EV_WRITE);
	log_debug("event", "new client accepted");
	/* coverity[leaked_handle]
	   s has been saved by bufferevent_socket_new */
	return;
accept_failed:
	levent_ctl_free_client(client);
}
410
327b1d62
VB
/*
 * Health-check on the socket to the privileged monitor process.
 *
 * Fired when the descriptor becomes readable. A zero-byte read probes
 * the socket state without consuming data; if the monitor has died, the
 * event loop is broken so lldpd exits.
 */
static void
levent_priv(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	ssize_t n;
	int err;
	char one;
	(void)what;
	/* Check if we have some data available. We need to pass the socket in
	 * non-blocking mode to be able to run the check without disruption. */
	levent_make_socket_nonblocking(fd);
	n = read(fd, &one, 0); err = errno;
	levent_make_socket_blocking(fd);

	switch (n) {
	case -1:
		if (err == EAGAIN || err == EWOULDBLOCK)
			/* No data, all good */
			return;
		log_warnx("event", "unable to poll monitor process, exit");
		break;
	case 0:
		log_warnx("event", "monitor process has terminated, exit");
		break;
	default:
		/* Unfortunately, dead code, if we have data, we have requested
		 * 0 byte, so we will fall in the previous case. It seems safer
		 * to ask for 0 byte than asking for 1 byte. In the later case,
		 * if we have to speak with the monitor again before exiting, we
		 * would be out of sync. */
		log_warnx("event", "received unexpected data from monitor process, exit");
		break;
	}
	event_base_loopbreak(base);
}
446
d6e889b6
VB
447static void
448levent_dump(evutil_socket_t fd, short what, void *arg)
449{
d6e889b6 450 struct event_base *base = arg;
5fd6695c 451 (void)fd; (void)what;
6f8925be 452 log_debug("event", "dumping all events");
d6e889b6
VB
453 event_base_dump_events(base, stderr);
454}
455static void
456levent_stop(evutil_socket_t fd, short what, void *arg)
457{
d6e889b6 458 struct event_base *base = arg;
5fd6695c 459 (void)fd; (void)what;
d6e889b6
VB
460 event_base_loopbreak(base);
461}
462
463static void
464levent_update_and_send(evutil_socket_t fd, short what, void *arg)
465{
d6e889b6 466 struct lldpd *cfg = arg;
579bedd5 467 struct timeval tv = { cfg->g_config.c_tx_interval, 0 };
5fd6695c 468 (void)fd; (void)what;
d6e889b6 469 lldpd_loop(cfg);
579bedd5
VB
470 if (cfg->g_iface_event != NULL)
471 tv.tv_sec *= 20;
d6e889b6
VB
472 event_add(cfg->g_main_loop, &tv);
473}
474
e681c859
VB
475void
476levent_update_now(struct lldpd *cfg)
477{
478 if (cfg->g_main_loop)
479 event_active(cfg->g_main_loop, EV_TIMEOUT, 1);
480}
481
47287a61
VB
482void
483levent_send_now(struct lldpd *cfg)
484{
959a54d4
VB
485 struct lldpd_hardware *hardware;
486 TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries)
487 event_active(hardware->h_timer, EV_TIMEOUT, 1);
47287a61
VB
488}
489
d6e889b6
VB
/*
 * One-time setup of the whole event machinery: libevent base, SNMP
 * agent, main update timer, control Unix socket, monitor-process watch
 * and signal handlers. Any unrecoverable failure is fatal.
 */
static void
levent_init(struct lldpd *cfg)
{
	/* Setup libevent */
	log_debug("event", "initialize libevent");
	event_set_log_callback(levent_log_cb);
	if (!(cfg->g_base = event_base_new()))
		fatalx("event", "unable to create a new libevent base");
	log_info("event", "libevent %s initialized with %s method",
	    event_get_version(),
	    event_base_get_method(cfg->g_base));

	/* Setup SNMP */
#ifdef USE_SNMP
	if (cfg->g_snmp) {
		agent_init(cfg, cfg->g_snmp_agentx);
		cfg->g_snmp_timeout = evtimer_new(cfg->g_base,
		    levent_snmp_timeout,
		    cfg);
		if (!cfg->g_snmp_timeout)
			fatalx("event", "unable to setup timeout function for SNMP");
		if ((cfg->g_snmp_fds =
			malloc(sizeof(struct ev_l))) == NULL)
			fatalx("event", "unable to allocate memory for SNMP events");
		TAILQ_INIT(levent_snmp_fds(cfg));
	}
#endif

	/* Setup loop that will run every X seconds. */
	log_debug("event", "register loop timer");
	if (!(cfg->g_main_loop = event_new(cfg->g_base, -1, 0,
		    levent_update_and_send,
		    cfg)))
		fatalx("event", "unable to setup main timer");
	/* Fire once right away; the callback re-arms itself. */
	event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

	/* Setup unix socket */
	struct event *ctl_event;
	log_debug("event", "register Unix socket");
	TAILQ_INIT(&lldpd_clients);
	levent_make_socket_nonblocking(cfg->g_ctl);
	if ((ctl_event = event_new(cfg->g_base, cfg->g_ctl,
		    EV_READ|EV_PERSIST, levent_ctl_accept, cfg)) == NULL)
		fatalx("event", "unable to setup control socket event");
	event_add(ctl_event, NULL);

	/* Somehow monitor the monitor process */
	struct event *monitor_event;
	log_debug("event", "monitor the monitor process");
	if ((monitor_event = event_new(cfg->g_base, priv_fd(PRIV_UNPRIVILEGED),
		    EV_READ|EV_PERSIST, levent_priv, cfg->g_base)) == NULL)
		fatalx("event", "unable to monitor monitor process");
	event_add(monitor_event, NULL);

	/* Signals */
	log_debug("event", "register signals");
	evsignal_add(evsignal_new(cfg->g_base, SIGUSR1,
		levent_dump, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGINT,
		levent_stop, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGTERM,
		levent_stop, cfg->g_base),
	    NULL);
}
556
/* Initialize libevent and start the event loop */
void
levent_loop(struct lldpd *cfg)
{
	levent_init(cfg);
	/* One synchronous run before entering the loop. */
	lldpd_loop(cfg);
#ifdef USE_SNMP
	if (cfg->g_snmp) levent_snmp_update(cfg);
#endif

	/* libevent loop */
	do {
		TRACE(LLDPD_EVENT_LOOP());
		/* Honor loopbreak/loopexit requested by callbacks. */
		if (event_base_got_break(cfg->g_base) ||
		    event_base_got_exit(cfg->g_base))
			break;
	} while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

#ifdef USE_SNMP
	if (cfg->g_snmp)
		agent_shutdown();
#endif /* USE_SNMP */

}
581
582static void
583levent_hardware_recv(evutil_socket_t fd, short what, void *arg)
584{
d6e889b6
VB
585 struct lldpd_hardware *hardware = arg;
586 struct lldpd *cfg = hardware->h_cfg;
5fd6695c 587 (void)what;
6f8925be
VB
588 log_debug("event", "received something for %s",
589 hardware->h_ifname);
d6e889b6 590 lldpd_recv(cfg, hardware, fd);
3333d2a8 591 levent_schedule_cleanup(cfg);
d6e889b6
VB
592}
593
594void
595levent_hardware_init(struct lldpd_hardware *hardware)
596{
6f8925be 597 log_debug("event", "initialize events for %s", hardware->h_ifname);
d6e889b6
VB
598 if ((hardware->h_recv =
599 malloc(sizeof(struct ev_l))) == NULL) {
6f8925be 600 log_warnx("event", "unable to allocate memory for %s",
d6e889b6
VB
601 hardware->h_ifname);
602 return;
603 }
604 TAILQ_INIT(levent_hardware_fds(hardware));
605}
606
607void
608levent_hardware_add_fd(struct lldpd_hardware *hardware, int fd)
609{
5fd6695c 610 struct lldpd_events *hfd = NULL;
d6e889b6
VB
611 if (!hardware->h_recv) return;
612
5fd6695c 613 hfd = calloc(1, sizeof(struct lldpd_events));
d6e889b6 614 if (!hfd) {
6f8925be 615 log_warnx("event", "unable to allocate new event for %s",
d6e889b6
VB
616 hardware->h_ifname);
617 return;
618 }
bec75f84 619 levent_make_socket_nonblocking(fd);
d6e889b6
VB
620 if ((hfd->ev = event_new(hardware->h_cfg->g_base, fd,
621 EV_READ | EV_PERSIST,
622 levent_hardware_recv,
623 hardware)) == NULL) {
6f8925be 624 log_warnx("event", "unable to allocate a new event for %s",
d6e889b6
VB
625 hardware->h_ifname);
626 free(hfd);
627 return;
628 }
629 if (event_add(hfd->ev, NULL) == -1) {
6f8925be 630 log_warnx("event", "unable to schedule new event for %s",
d6e889b6
VB
631 hardware->h_ifname);
632 event_free(hfd->ev);
633 free(hfd);
634 return;
635 }
636 TAILQ_INSERT_TAIL(levent_hardware_fds(hardware), hfd, next);
637}
638
639void
640levent_hardware_release(struct lldpd_hardware *hardware)
641{
5fd6695c 642 struct lldpd_events *ev, *ev_next;
dbfa89c6
VB
643 if (hardware->h_timer) {
644 event_free(hardware->h_timer);
645 hardware->h_timer = NULL;
646 }
d6e889b6
VB
647 if (!hardware->h_recv) return;
648
6f8925be 649 log_debug("event", "release events for %s", hardware->h_ifname);
d6e889b6
VB
650 for (ev = TAILQ_FIRST(levent_hardware_fds(hardware));
651 ev;
652 ev = ev_next) {
653 ev_next = TAILQ_NEXT(ev, next);
654 /* We may close several time the same FD. This is harmless. */
655 close(event_get_fd(ev->ev));
656 event_free(ev->ev);
657 TAILQ_REMOVE(levent_hardware_fds(hardware), ev, next);
658 free(ev);
659 }
660 free(levent_hardware_fds(hardware));
661}
0484f180
VB
662
663static void
664levent_iface_trigger(evutil_socket_t fd, short what, void *arg)
665{
666 struct lldpd *cfg = arg;
4f670a1e 667 log_debug("event",
0484f180
VB
668 "triggering update of all interfaces");
669 lldpd_update_localports(cfg);
670}
671
/*
 * Read callback for the interface-change notification socket.
 *
 * Without a platform callback (g_iface_cb), the message content is
 * irrelevant: drain and discard everything available. Otherwise, let
 * the callback consume the socket. Either way, a full interface update
 * is scheduled one second later so a burst of notifications is
 * coalesced into a single refresh.
 */
static void
levent_iface_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	char buffer[EVENT_BUFFER];
	int n;

	if (cfg->g_iface_cb == NULL) {
		/* Discard the message */
		while (1) {
			n = read(fd, buffer, sizeof(buffer));
			/* Socket is non-blocking: EAGAIN means drained. */
			if (n == -1 &&
			    (errno == EWOULDBLOCK ||
				errno == EAGAIN)) break;
			if (n == -1) {
				log_warn("event",
				    "unable to receive interface change notification message");
				return;
			}
			if (n == 0) {
				log_warnx("event",
				    "end of file reached while getting interface change notification message");
				return;
			}
		}
	} else {
		cfg->g_iface_cb(cfg);
	}

	/* Schedule local port update. We don't run it right away because we may
	 * receive a batch of events like this. */
	struct timeval one_sec = {1, 0};
	TRACE(LLDPD_INTERFACES_NOTIFICATION());
	log_debug("event",
	    "received notification change, schedule an update of all interfaces in one second");
	if (cfg->g_iface_timer_event == NULL) {
		if ((cfg->g_iface_timer_event = evtimer_new(cfg->g_base,
			    levent_iface_trigger, cfg)) == NULL) {
			log_warnx("event",
			    "unable to create a new event to trigger interface update");
			return;
		}
	}
	if (evtimer_add(cfg->g_iface_timer_event, &one_sec) == -1) {
		log_warnx("event",
		    "unable to schedule interface updates");
		return;
	}
}
721
aa313f2a 722int
0484f180
VB
723levent_iface_subscribe(struct lldpd *cfg, int socket)
724{
725 log_debug("event", "subscribe to interface changes from socket %d",
726 socket);
bec75f84 727 levent_make_socket_nonblocking(socket);
0484f180
VB
728 cfg->g_iface_event = event_new(cfg->g_base, socket,
729 EV_READ | EV_PERSIST, levent_iface_recv, cfg);
730 if (cfg->g_iface_event == NULL) {
731 log_warnx("event",
732 "unable to allocate a new event for interface changes");
aa313f2a 733 return -1;
0484f180
VB
734 }
735 if (event_add(cfg->g_iface_event, NULL) == -1) {
736 log_warnx("event",
737 "unable to schedule new interface changes event");
738 event_free(cfg->g_iface_event);
739 cfg->g_iface_event = NULL;
aa313f2a 740 return -1;
0484f180 741 }
aa313f2a 742 return 0;
0484f180 743}
579bedd5 744
3333d2a8
VB
745static void
746levent_trigger_cleanup(evutil_socket_t fd, short what, void *arg)
747{
748 struct lldpd *cfg = arg;
749 lldpd_cleanup(cfg);
750}
751
752void
753levent_schedule_cleanup(struct lldpd *cfg)
754{
755 log_debug("event", "schedule next cleanup");
756 if (cfg->g_cleanup_timer != NULL) {
757 event_free(cfg->g_cleanup_timer);
758 }
759 cfg->g_cleanup_timer = evtimer_new(cfg->g_base, levent_trigger_cleanup, cfg);
760 if (cfg->g_cleanup_timer == NULL) {
761 log_warnx("event",
762 "unable to allocate a new event for cleanup tasks");
763 return;
764 }
765
766 /* Compute the next TTL event */
767 struct timeval tv = { LOCAL_CHASSIS(cfg)->c_ttl, 0 };
768 time_t now = time(NULL);
769 time_t next;
770 struct lldpd_hardware *hardware;
771 struct lldpd_port *port;
772 TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) {
773 TAILQ_FOREACH(port, &hardware->h_rports, p_entries) {
6afb24c2 774 if (now >= port->p_lastupdate + port->p_chassis->c_ttl) {
5a215d4b 775 tv.tv_sec = 0;
6afb24c2
VB
776 log_debug("event", "immediate cleanup on port %s (%ld, %d, %ld)",
777 hardware->h_ifname, now, port->p_chassis->c_ttl, port->p_lastupdate);
5a215d4b
VB
778 break;
779 }
3333d2a8 780 next = port->p_chassis->c_ttl - (now - port->p_lastupdate);
5a215d4b 781 if (next < tv.tv_sec)
3333d2a8
VB
782 tv.tv_sec = next;
783 }
784 }
785
786 log_debug("event", "next cleanup in %ld seconds",
787 (long)tv.tv_sec);
788 if (event_add(cfg->g_cleanup_timer, &tv) == -1) {
789 log_warnx("event",
790 "unable to schedula cleanup task");
791 event_free(cfg->g_cleanup_timer);
792 cfg->g_cleanup_timer = NULL;
793 return;
794 }
795}
796
579bedd5
VB
/*
 * Transmit timer callback for one port: send an LLDP PDU and re-arm the
 * timer for the next transmission. With LLDP-MED fast-start active
 * (h_tx_fast > 0), the shorter c_tx_fast_interval is used until the
 * fast-transmit counter runs out.
 */
static void
levent_send_pdu(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd_hardware *hardware = arg;
	int tx_interval = hardware->h_cfg->g_config.c_tx_interval;

	log_debug("event", "trigger sending PDU for port %s",
	    hardware->h_ifname);
	lldpd_send(hardware);

#ifdef ENABLE_LLDPMED
	if (hardware->h_tx_fast > 0)
		hardware->h_tx_fast--;

	if (hardware->h_tx_fast > 0)
		tx_interval = hardware->h_cfg->g_config.c_tx_fast_interval;
#endif

	struct timeval tv = { tx_interval, 0 };
	if (event_add(hardware->h_timer, &tv) == -1) {
		log_warnx("event", "unable to re-register timer event for port %s",
		    hardware->h_ifname);
		/* Timer is dropped; h_timer becomes NULL so callers must
		 * check before activating it. */
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
		return;
	}
}
824
825void
826levent_schedule_pdu(struct lldpd_hardware *hardware)
827{
828 log_debug("event", "schedule sending PDU on %s",
829 hardware->h_ifname);
830 if (hardware->h_timer == NULL) {
831 hardware->h_timer = evtimer_new(hardware->h_cfg->g_base,
832 levent_send_pdu, hardware);
833 if (hardware->h_timer == NULL) {
834 log_warnx("event", "unable to schedule PDU sending for port %s",
835 hardware->h_ifname);
836 return;
837 }
838 }
839
840 struct timeval tv = { 0, 0 };
841 if (event_add(hardware->h_timer, &tv) == -1) {
842 log_warnx("event", "unable to register timer event for port %s",
843 hardware->h_ifname);
844 event_free(hardware->h_timer);
845 hardware->h_timer = NULL;
846 return;
847 }
848}
bec75f84
VB
849
850int
851levent_make_socket_nonblocking(int fd)
852{
853 int flags;
854 if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
855 log_warn("event", "fcntl(%d, F_GETFL)", fd);
856 return -1;
857 }
858 if (flags & O_NONBLOCK) return 0;
859 if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
860 log_warn("event", "fcntl(%d, F_SETFL)", fd);
861 return -1;
862 }
863 return 0;
864}
327b1d62
VB
865
866int
867levent_make_socket_blocking(int fd)
868{
869 int flags;
870 if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
871 log_warn("event", "fcntl(%d, F_GETFL)", fd);
872 return -1;
873 }
874 if (!(flags & O_NONBLOCK)) return 0;
875 if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) {
876 log_warn("event", "fcntl(%d, F_SETFL)", fd);
877 return -1;
878 }
879 return 0;
880}