/* -*- mode: c; c-file-style: "openbsd" -*- */
/*
 * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "lldpd.h"
#include "trace.h"

#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <time.h>
#include <fcntl.h>
#if defined(__clang__)
#  pragma clang diagnostic push
#  pragma clang diagnostic ignored "-Wdocumentation"
#endif
#include <event2/event.h>
#include <event2/bufferevent.h>
#include <event2/buffer.h>
#if defined(__clang__)
#  pragma clang diagnostic pop
#endif

#define EVENT_BUFFER 1024

static void
levent_log_cb(int severity, const char *msg)
{
    switch (severity) {
    case _EVENT_LOG_DEBUG:
        log_debug("libevent", "%s", msg);
        break;
    case _EVENT_LOG_MSG:
        log_info("libevent", "%s", msg);
        break;
    case _EVENT_LOG_WARN:
        log_warnx("libevent", "%s", msg);
        break;
    case _EVENT_LOG_ERR:
        log_warnx("libevent", "%s", msg);
        break;
    }
}

struct lldpd_events {
    TAILQ_ENTRY(lldpd_events) next;
    struct event *ev;
};
TAILQ_HEAD(ev_l, lldpd_events);

#define levent_snmp_fds(cfg) ((struct ev_l *)(cfg)->g_snmp_fds)
#define levent_hardware_fds(hardware) ((struct ev_l *)(hardware)->h_recv)

#ifdef USE_SNMP
#  include <net-snmp/net-snmp-config.h>
#  include <net-snmp/net-snmp-includes.h>
#  include <net-snmp/agent/net-snmp-agent-includes.h>
#  include <net-snmp/agent/snmp_vars.h>

/* Compatibility with older versions of NetSNMP */
#  ifndef HAVE_SNMP_SELECT_INFO2
#    define netsnmp_large_fd_set fd_set
#    define snmp_read2 snmp_read
#    define snmp_select_info2 snmp_select_info
#    define netsnmp_large_fd_set_init(...)
#    define netsnmp_large_fd_set_cleanup(...)
#    define NETSNMP_LARGE_FD_SET FD_SET
#    define NETSNMP_LARGE_FD_CLR FD_CLR
#    define NETSNMP_LARGE_FD_ZERO FD_ZERO
#    define NETSNMP_LARGE_FD_ISSET FD_ISSET
#  else
#    include <net-snmp/library/large_fd_set.h>
#  endif

static void levent_snmp_update(struct lldpd *);

/*
 * Callback function when we have something to read from SNMP.
 *
 * This function is called because we have a read event on one SNMP
 * file descriptor. We need to call snmp_read() on it.
 */
static void
levent_snmp_read(evutil_socket_t fd, short what, void *arg)
{
    struct lldpd *cfg = arg;
    netsnmp_large_fd_set fdset;
    (void)what;
    netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
    NETSNMP_LARGE_FD_ZERO(&fdset);
    NETSNMP_LARGE_FD_SET(fd, &fdset);
    snmp_read2(&fdset);
    levent_snmp_update(cfg);
}

/*
 * Callback function for an SNMP timeout.
 *
 * An SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
 */
static void
levent_snmp_timeout(evutil_socket_t fd, short what, void *arg)
{
    struct lldpd *cfg = arg;
    (void)what;
    (void)fd;
    snmp_timeout();
    run_alarms();
    levent_snmp_update(cfg);
}

/*
 * Watch a new SNMP FD.
 *
 * @param cfg The lldpd configuration, providing the libevent base we work on.
 * @param fd  The file descriptor we want to watch.
 *
 * The file descriptor is appended to the list of file descriptors we
 * want to watch.
 */
static void
levent_snmp_add_fd(struct lldpd *cfg, int fd)
{
    struct event_base *base = cfg->g_base;
    struct lldpd_events *snmpfd = calloc(1, sizeof(struct lldpd_events));
    if (!snmpfd) {
        log_warn("event", "unable to allocate memory for new SNMP event");
        return;
    }
    levent_make_socket_nonblocking(fd);
    if ((snmpfd->ev = event_new(base, fd, EV_READ | EV_PERSIST, levent_snmp_read,
             cfg)) == NULL) {
        log_warnx("event", "unable to allocate a new SNMP event for FD %d", fd);
        free(snmpfd);
        return;
    }
    if (event_add(snmpfd->ev, NULL) == -1) {
        log_warnx("event", "unable to schedule new SNMP event for FD %d", fd);
        event_free(snmpfd->ev);
        free(snmpfd);
        return;
    }
    TAILQ_INSERT_TAIL(levent_snmp_fds(cfg), snmpfd, next);
}

/*
 * Update the SNMP event loop.
 *
 * New events are added and some others are removed. This function
 * should be called every time an SNMP event happens: either when
 * handling an SNMP packet, an SNMP timeout, or when sending an SNMP
 * packet. This function will keep libevent in sync with NetSNMP.
 *
 * @param cfg The lldpd configuration, providing the libevent base we work on.
 */
static void
levent_snmp_update(struct lldpd *cfg)
{
    int maxfd = 0;
    int block = 1;
    struct timeval timeout;
    static int howmany = 0;
    int added = 0, removed = 0, current = 0;
    struct lldpd_events *snmpfd, *snmpfd_next;

    /* snmp_select_info() can be tricky to understand. We set `block` to
       1 to mean that we don't request a timeout. snmp_select_info()
       will reset `block` to 0 if it wants us to set up a timeout. In
       this timeout, `snmp_timeout()` should be invoked.

       Each FD in `fdset` will need to be watched for reading. If one of
       them becomes active, `snmp_read()` should be called on it.
    */

    netsnmp_large_fd_set fdset;
    netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
    NETSNMP_LARGE_FD_ZERO(&fdset);
    snmp_select_info2(&maxfd, &fdset, &timeout, &block);

    /* We need to untrack any event whose FD is not in `fdset`
       anymore */
    for (snmpfd = TAILQ_FIRST(levent_snmp_fds(cfg)); snmpfd; snmpfd = snmpfd_next) {
        snmpfd_next = TAILQ_NEXT(snmpfd, next);
        if (event_get_fd(snmpfd->ev) >= maxfd ||
            (!NETSNMP_LARGE_FD_ISSET(event_get_fd(snmpfd->ev), &fdset))) {
            event_free(snmpfd->ev);
            TAILQ_REMOVE(levent_snmp_fds(cfg), snmpfd, next);
            free(snmpfd);
            removed++;
        } else {
            NETSNMP_LARGE_FD_CLR(event_get_fd(snmpfd->ev), &fdset);
            current++;
        }
    }

    /* Invariant: FDs still set in `fdset` are not in the list of tracked FDs */
    for (int fd = 0; fd < maxfd; fd++) {
        if (NETSNMP_LARGE_FD_ISSET(fd, &fdset)) {
            levent_snmp_add_fd(cfg, fd);
            added++;
        }
    }
    current += added;
    if (howmany != current) {
        log_debug("event",
            "added %d events, removed %d events, total of %d events", added,
            removed, current);
        howmany = current;
    }

    /* If needed, handle timeout */
    if (evtimer_add(cfg->g_snmp_timeout, block ? NULL : &timeout) == -1)
        log_warnx("event", "unable to schedule timeout function for SNMP");

    netsnmp_large_fd_set_cleanup(&fdset);
}
#endif /* USE_SNMP */

struct lldpd_one_client {
    TAILQ_ENTRY(lldpd_one_client) next;
    struct lldpd *cfg;
    struct bufferevent *bev;
    int subscribed; /* Is this client subscribed to changes? */
};
TAILQ_HEAD(, lldpd_one_client) lldpd_clients;

static void
levent_ctl_free_client(struct lldpd_one_client *client)
{
    if (client && client->bev) bufferevent_free(client->bev);
    if (client) {
        TAILQ_REMOVE(&lldpd_clients, client, next);
        free(client);
    }
}

static void
levent_ctl_close_clients()
{
    struct lldpd_one_client *client, *client_next;
    for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) {
        client_next = TAILQ_NEXT(client, next);
        levent_ctl_free_client(client);
    }
}

static ssize_t
levent_ctl_send(struct lldpd_one_client *client, int type, void *data, size_t len)
{
    struct bufferevent *bev = client->bev;
    struct hmsg_header hdr = { .len = len, .type = type };
    bufferevent_disable(bev, EV_WRITE);
    if (bufferevent_write(bev, &hdr, sizeof(struct hmsg_header)) == -1 ||
        (len > 0 && bufferevent_write(bev, data, len) == -1)) {
        log_warnx("event", "unable to create answer to client");
        levent_ctl_free_client(client);
        return -1;
    }
    bufferevent_enable(bev, EV_WRITE);
    return len;
}

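/* Notify all subscribed clients of a neighbor change on the given interface. */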
void
levent_ctl_notify(char *ifname, int state, struct lldpd_port *neighbor)
{
    struct lldpd_one_client *client, *client_next;
    struct lldpd_neighbor_change neigh = { .ifname = ifname,
        .state = state,
        .neighbor = neighbor };
    void *output = NULL;
    ssize_t output_len = 0;

    /* Don't use TAILQ_FOREACH, the client may be deleted in case of errors. */
    log_debug("control", "notify clients of neighbor changes");
    for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) {
        client_next = TAILQ_NEXT(client, next);
        if (!client->subscribed) continue;

        if (output == NULL) {
            /* Ugly hack: we don't want to transmit a list of
             * ports. We patch the port to avoid this. */
            TAILQ_ENTRY(lldpd_port) backup_p_entries;
            memcpy(&backup_p_entries, &neighbor->p_entries,
                sizeof(backup_p_entries));
            memset(&neighbor->p_entries, 0, sizeof(backup_p_entries));
            output_len = lldpd_neighbor_change_serialize(&neigh, &output);
            memcpy(&neighbor->p_entries, &backup_p_entries,
                sizeof(backup_p_entries));

            if (output_len <= 0) {
                log_warnx("event",
                    "unable to serialize changed neighbor");
                return;
            }
        }

        levent_ctl_send(client, NOTIFICATION, output, output_len);
    }

    free(output);
}

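/* Send callback handed to client_handle_client(): forwards replies back to
 * the originating client through levent_ctl_send(). */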
static ssize_t
levent_ctl_send_cb(void *out, int type, void *data, size_t len)
{
    struct lldpd_one_client *client = out;
    return levent_ctl_send(client, type, data, len);
}

static void
levent_ctl_recv(struct bufferevent *bev, void *ptr)
{
    struct lldpd_one_client *client = ptr;
    struct evbuffer *buffer = bufferevent_get_input(bev);
    size_t buffer_len = evbuffer_get_length(buffer);
    struct hmsg_header hdr;
    void *data = NULL;

    log_debug("control", "receive data on Unix socket");
    if (buffer_len < sizeof(struct hmsg_header)) return; /* Not enough data yet */
    if (evbuffer_copyout(buffer, &hdr, sizeof(struct hmsg_header)) !=
        sizeof(struct hmsg_header)) {
        log_warnx("event", "not able to read header");
        return;
    }
    if (hdr.len > HMSG_MAX_SIZE) {
        log_warnx("event", "message received is too large");
        goto recv_error;
    }

    if (buffer_len < hdr.len + sizeof(struct hmsg_header))
        return; /* Not enough data yet */
    if (hdr.len > 0 && (data = malloc(hdr.len)) == NULL) {
        log_warnx("event", "not enough memory");
        goto recv_error;
    }
    evbuffer_drain(buffer, sizeof(struct hmsg_header));
    if (hdr.len > 0) evbuffer_remove(buffer, data, hdr.len);

    /* Currently, we should not receive notification acknowledgment. But if
     * we receive one, we can discard it. */
    if (hdr.len == 0 && hdr.type == NOTIFICATION) return;
    if (client_handle_client(client->cfg, levent_ctl_send_cb, client, hdr.type,
            data, hdr.len, &client->subscribed) == -1)
        goto recv_error;
    free(data);
    return;

recv_error:
    free(data);
    levent_ctl_free_client(client);
}

static void
levent_ctl_event(struct bufferevent *bev, short events, void *ptr)
{
    struct lldpd_one_client *client = ptr;
    if (events & BEV_EVENT_ERROR) {
        log_warnx("event", "an error occurred with client: %s",
            evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
        levent_ctl_free_client(client);
    } else if (events & BEV_EVENT_EOF) {
        log_debug("event", "client has been disconnected");
        levent_ctl_free_client(client);
    }
}

static void
levent_ctl_accept(evutil_socket_t fd, short what, void *arg)
{
    struct lldpd *cfg = arg;
    struct lldpd_one_client *client = NULL;
    int s;
    (void)what;

    log_debug("control", "accept a new connection");
    if ((s = accept(fd, NULL, NULL)) == -1) {
        log_warn("event", "unable to accept connection from socket");
        return;
    }
    client = calloc(1, sizeof(struct lldpd_one_client));
    if (!client) {
        log_warnx("event", "unable to allocate memory for new client");
        close(s);
        goto accept_failed;
    }
    client->cfg = cfg;
    levent_make_socket_nonblocking(s);
    TAILQ_INSERT_TAIL(&lldpd_clients, client, next);
    if ((client->bev = bufferevent_socket_new(cfg->g_base, s,
             BEV_OPT_CLOSE_ON_FREE)) == NULL) {
        log_warnx("event",
            "unable to allocate a new buffer event for new client");
        close(s);
        goto accept_failed;
    }
    bufferevent_setcb(client->bev, levent_ctl_recv, NULL, levent_ctl_event, client);
    bufferevent_enable(client->bev, EV_READ | EV_WRITE);
    log_debug("event", "new client accepted");
    /* coverity[leaked_handle]
       s has been saved by bufferevent_socket_new */
    return;
accept_failed:
    levent_ctl_free_client(client);
}

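/* Readable event on the socket shared with the privileged monitor process.
 * Readable data or EOF means the monitor has terminated or misbehaved, so
 * the event loop is stopped. */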
static void
levent_priv(evutil_socket_t fd, short what, void *arg)
{
    struct event_base *base = arg;
    ssize_t n;
    int err;
    char one;
    (void)what;
    /* Check if we have some data available. We need to put the socket in
     * non-blocking mode to be able to run the check without disruption. */
    levent_make_socket_nonblocking(fd);
    n = read(fd, &one, 1);
    err = errno;
    levent_make_socket_blocking(fd);

    switch (n) {
    case -1:
        if (err == EAGAIN || err == EWOULDBLOCK) /* No data, all good */
            return;
        log_warnx("event", "unable to poll monitor process, exit");
        break;
    case 0:
        log_warnx("event", "monitor process has terminated, exit");
        break;
    default:
        /* This is a bit unsafe as we are now out-of-sync with the
         * monitor. It would be safer to request 0 bytes, but some OSes
         * (illumos) take the shortcut of returning 0 bytes when asked
         * for 0 bytes. */
        log_warnx("event",
            "received unexpected data from monitor process, exit");
        break;
    }
    event_base_loopbreak(base);
}

static void
levent_dump(evutil_socket_t fd, short what, void *arg)
{
    struct event_base *base = arg;
    (void)fd;
    (void)what;
    log_debug("event", "dumping all events");
    event_base_dump_events(base, stderr);
}
static void
levent_stop(evutil_socket_t fd, short what, void *arg)
{
    struct event_base *base = arg;
    (void)fd;
    (void)what;
    event_base_loopbreak(base);
}

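/* Main loop callback: run lldpd_loop() and re-arm the timer. The next run is
 * scheduled after the configured TX interval, multiplied by 20 when interface
 * change events are available, and never less than 30 seconds. */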
static void
levent_update_and_send(evutil_socket_t fd, short what, void *arg)
{
    struct lldpd *cfg = arg;
    struct timeval tv;
    long interval_ms = cfg->g_config.c_tx_interval;

    (void)fd;
    (void)what;
    lldpd_loop(cfg);
    if (cfg->g_iface_event != NULL) interval_ms *= 20;
    if (interval_ms < 30000) interval_ms = 30000;
    tv.tv_sec = interval_ms / 1000;
    tv.tv_usec = (interval_ms % 1000) * 1000;
    event_add(cfg->g_main_loop, &tv);
}

void
levent_update_now(struct lldpd *cfg)
{
    if (cfg->g_main_loop) event_active(cfg->g_main_loop, EV_TIMEOUT, 1);
}

void
levent_send_now(struct lldpd *cfg)
{
    struct lldpd_hardware *hardware;
    TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
        if (hardware->h_timer)
            event_active(hardware->h_timer, EV_TIMEOUT, 1);
        else
            log_warnx("event", "BUG: no timer present for interface %s",
                hardware->h_ifname);
    }
}

static void
levent_init(struct lldpd *cfg)
{
    /* Set up libevent */
    log_debug("event", "initialize libevent");
    event_set_log_callback(levent_log_cb);
    if (!(cfg->g_base = event_base_new()))
        fatalx("event", "unable to create a new libevent base");
    log_info("event", "libevent %s initialized with %s method", event_get_version(),
        event_base_get_method(cfg->g_base));

    /* Set up SNMP */
#ifdef USE_SNMP
    if (cfg->g_snmp) {
        agent_init(cfg, cfg->g_snmp_agentx);
        cfg->g_snmp_timeout =
            evtimer_new(cfg->g_base, levent_snmp_timeout, cfg);
        if (!cfg->g_snmp_timeout)
            fatalx("event", "unable to setup timeout function for SNMP");
        if ((cfg->g_snmp_fds = malloc(sizeof(struct ev_l))) == NULL)
            fatalx("event", "unable to allocate memory for SNMP events");
        TAILQ_INIT(levent_snmp_fds(cfg));
    }
#endif

    /* Set up the loop that will run every X seconds. */
    log_debug("event", "register loop timer");
    if (!(cfg->g_main_loop =
            event_new(cfg->g_base, -1, 0, levent_update_and_send, cfg)))
        fatalx("event", "unable to setup main timer");
    event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

    /* Set up the Unix socket */
    struct event *ctl_event;
    log_debug("event", "register Unix socket");
    TAILQ_INIT(&lldpd_clients);
    levent_make_socket_nonblocking(cfg->g_ctl);
    if ((ctl_event = event_new(cfg->g_base, cfg->g_ctl, EV_READ | EV_PERSIST,
             levent_ctl_accept, cfg)) == NULL)
        fatalx("event", "unable to setup control socket event");
    event_add(ctl_event, NULL);

    /* Somehow monitor the monitor process */
    struct event *monitor_event;
    log_debug("event", "monitor the monitor process");
    if ((monitor_event = event_new(cfg->g_base, priv_fd(PRIV_UNPRIVILEGED),
             EV_READ | EV_PERSIST, levent_priv, cfg->g_base)) == NULL)
        fatalx("event", "unable to monitor monitor process");
    event_add(monitor_event, NULL);

    /* Signals */
    log_debug("event", "register signals");
    evsignal_add(evsignal_new(cfg->g_base, SIGUSR1, levent_dump, cfg->g_base),
        NULL);
    evsignal_add(evsignal_new(cfg->g_base, SIGINT, levent_stop, cfg->g_base), NULL);
    evsignal_add(evsignal_new(cfg->g_base, SIGTERM, levent_stop, cfg->g_base),
        NULL);
}

/* Initialize libevent and start the event loop */
void
levent_loop(struct lldpd *cfg)
{
    levent_init(cfg);
    lldpd_loop(cfg);
#ifdef USE_SNMP
    if (cfg->g_snmp) levent_snmp_update(cfg);
#endif

    /* libevent loop */
    do {
        TRACE(LLDPD_EVENT_LOOP());
        if (event_base_got_break(cfg->g_base) ||
            event_base_got_exit(cfg->g_base))
            break;
    } while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

    if (cfg->g_iface_timer_event != NULL) event_free(cfg->g_iface_timer_event);

#ifdef USE_SNMP
    if (cfg->g_snmp) agent_shutdown();
#endif /* USE_SNMP */

    levent_ctl_close_clients();
}

/* Release libevent resources */
void
levent_shutdown(struct lldpd *cfg)
{
    if (cfg->g_iface_event) event_free(cfg->g_iface_event);
    if (cfg->g_cleanup_timer) event_free(cfg->g_cleanup_timer);
    event_base_free(cfg->g_base);
}

static void
levent_hardware_recv(evutil_socket_t fd, short what, void *arg)
{
    struct lldpd_hardware *hardware = arg;
    struct lldpd *cfg = hardware->h_cfg;
    (void)what;
    log_debug("event", "received something for %s", hardware->h_ifname);
    lldpd_recv(cfg, hardware, fd);
    levent_schedule_cleanup(cfg);
}

void
levent_hardware_init(struct lldpd_hardware *hardware)
{
    log_debug("event", "initialize events for %s", hardware->h_ifname);
    if ((hardware->h_recv = malloc(sizeof(struct ev_l))) == NULL) {
        log_warnx("event", "unable to allocate memory for %s",
            hardware->h_ifname);
        return;
    }
    TAILQ_INIT(levent_hardware_fds(hardware));
}

void
levent_hardware_add_fd(struct lldpd_hardware *hardware, int fd)
{
    struct lldpd_events *hfd = NULL;
    if (!hardware->h_recv) return;

    hfd = calloc(1, sizeof(struct lldpd_events));
    if (!hfd) {
        log_warnx("event", "unable to allocate new event for %s",
            hardware->h_ifname);
        return;
    }
    levent_make_socket_nonblocking(fd);
    if ((hfd->ev = event_new(hardware->h_cfg->g_base, fd, EV_READ | EV_PERSIST,
             levent_hardware_recv, hardware)) == NULL) {
        log_warnx("event", "unable to allocate a new event for %s",
            hardware->h_ifname);
        free(hfd);
        return;
    }
    if (event_add(hfd->ev, NULL) == -1) {
        log_warnx("event", "unable to schedule new event for %s",
            hardware->h_ifname);
        event_free(hfd->ev);
        free(hfd);
        return;
    }
    TAILQ_INSERT_TAIL(levent_hardware_fds(hardware), hfd, next);
}

void
levent_hardware_release(struct lldpd_hardware *hardware)
{
    struct lldpd_events *ev, *ev_next;
    if (hardware->h_timer) {
        event_free(hardware->h_timer);
        hardware->h_timer = NULL;
    }
    if (!hardware->h_recv) return;

    log_debug("event", "release events for %s", hardware->h_ifname);
    for (ev = TAILQ_FIRST(levent_hardware_fds(hardware)); ev; ev = ev_next) {
        ev_next = TAILQ_NEXT(ev, next);
        /* We may close the same FD several times. This is harmless. */
        close(event_get_fd(ev->ev));
        event_free(ev->ev);
        TAILQ_REMOVE(levent_hardware_fds(hardware), ev, next);
        free(ev);
    }
    free(levent_hardware_fds(hardware));
}

static void
levent_iface_trigger(evutil_socket_t fd, short what, void *arg)
{
    struct lldpd *cfg = arg;
    log_debug("event", "triggering update of all interfaces");
    lldpd_update_localports(cfg);
}

static void
levent_iface_recv(evutil_socket_t fd, short what, void *arg)
{
    struct lldpd *cfg = arg;
    char buffer[EVENT_BUFFER];
    int n;

    if (cfg->g_iface_cb == NULL) {
        /* Discard the message */
        while (1) {
            n = read(fd, buffer, sizeof(buffer));
            if (n == -1 && (errno == EWOULDBLOCK || errno == EAGAIN)) break;
            if (n == -1) {
                log_warn("event",
                    "unable to receive interface change notification message");
                return;
            }
            if (n == 0) {
                log_warnx("event",
                    "end of file reached while getting interface change notification message");
                return;
            }
        }
    } else {
        cfg->g_iface_cb(cfg);
    }

    /* Schedule local port update. We don't run it right away because we may
     * receive a batch of events like this. */
    struct timeval one_sec = { 1, 0 };
    TRACE(LLDPD_INTERFACES_NOTIFICATION());
    log_debug("event",
        "received notification change, schedule an update of all interfaces in one second");
    if (cfg->g_iface_timer_event == NULL) {
        if ((cfg->g_iface_timer_event = evtimer_new(cfg->g_base,
                 levent_iface_trigger, cfg)) == NULL) {
            log_warnx("event",
                "unable to create a new event to trigger interface update");
            return;
        }
    }
    if (evtimer_add(cfg->g_iface_timer_event, &one_sec) == -1) {
        log_warnx("event", "unable to schedule interface updates");
        return;
    }
}

int
levent_iface_subscribe(struct lldpd *cfg, int socket)
{
    log_debug("event", "subscribe to interface changes from socket %d", socket);
    levent_make_socket_nonblocking(socket);
    cfg->g_iface_event = event_new(cfg->g_base, socket, EV_READ | EV_PERSIST,
        levent_iface_recv, cfg);
    if (cfg->g_iface_event == NULL) {
        log_warnx("event",
            "unable to allocate a new event for interface changes");
        return -1;
    }
    if (event_add(cfg->g_iface_event, NULL) == -1) {
        log_warnx("event", "unable to schedule new interface changes event");
        event_free(cfg->g_iface_event);
        cfg->g_iface_event = NULL;
        return -1;
    }
    return 0;
}

static void
levent_trigger_cleanup(evutil_socket_t fd, short what, void *arg)
{
    struct lldpd *cfg = arg;
    lldpd_cleanup(cfg);
}

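/* (Re)arm the cleanup timer so lldpd_cleanup() runs when the next remote
 * port is due to expire according to its TTL. */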
void
levent_schedule_cleanup(struct lldpd *cfg)
{
    log_debug("event", "schedule next cleanup");
    if (cfg->g_cleanup_timer != NULL) {
        event_free(cfg->g_cleanup_timer);
    }
    cfg->g_cleanup_timer = evtimer_new(cfg->g_base, levent_trigger_cleanup, cfg);
    if (cfg->g_cleanup_timer == NULL) {
        log_warnx("event", "unable to allocate a new event for cleanup tasks");
        return;
    }

    /* Compute the next TTL event */
    struct timeval tv = { cfg->g_config.c_ttl, 0 };
    time_t now = time(NULL);
    time_t next;
    struct lldpd_hardware *hardware;
    struct lldpd_port *port;
    TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
        TAILQ_FOREACH (port, &hardware->h_rports, p_entries) {
            if (now >= port->p_lastupdate + port->p_ttl) {
                tv.tv_sec = 0;
                log_debug("event",
                    "immediate cleanup on port %s (%lld, %d, %lld)",
                    hardware->h_ifname, (long long)now, port->p_ttl,
                    (long long)port->p_lastupdate);
                break;
            }
            next = port->p_ttl - (now - port->p_lastupdate);
            if (next < tv.tv_sec) tv.tv_sec = next;
        }
    }

    log_debug("event", "next cleanup in %ld seconds", (long)tv.tv_sec);
    if (event_add(cfg->g_cleanup_timer, &tv) == -1) {
        log_warnx("event", "unable to schedule cleanup task");
        event_free(cfg->g_cleanup_timer);
        cfg->g_cleanup_timer = NULL;
        return;
    }
}

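/* Transmit an LLDPDU on a port, then re-arm the port TX timer. When the
 * LLDP-MED fast-start counter (h_tx_fast) is still positive, the shorter
 * fast transmit interval is used instead of the regular one. */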
static void
levent_send_pdu(evutil_socket_t fd, short what, void *arg)
{
    struct lldpd_hardware *hardware = arg;
    int tx_interval = hardware->h_cfg->g_config.c_tx_interval;

    log_debug("event", "trigger sending PDU for port %s", hardware->h_ifname);
    lldpd_send(hardware);

#ifdef ENABLE_LLDPMED
    if (hardware->h_tx_fast > 0) hardware->h_tx_fast--;

    if (hardware->h_tx_fast > 0)
        tx_interval = hardware->h_cfg->g_config.c_tx_fast_interval * 1000;
#endif

    struct timeval tv;
    tv.tv_sec = tx_interval / 1000;
    tv.tv_usec = (tx_interval % 1000) * 1000;
    if (event_add(hardware->h_timer, &tv) == -1) {
        log_warnx("event", "unable to re-register timer event for port %s",
            hardware->h_ifname);
        event_free(hardware->h_timer);
        hardware->h_timer = NULL;
        return;
    }
}

void
levent_schedule_pdu(struct lldpd_hardware *hardware)
{
    log_debug("event", "schedule sending PDU on %s", hardware->h_ifname);
    if (hardware->h_timer == NULL) {
        hardware->h_timer =
            evtimer_new(hardware->h_cfg->g_base, levent_send_pdu, hardware);
        if (hardware->h_timer == NULL) {
            log_warnx("event", "unable to schedule PDU sending for port %s",
                hardware->h_ifname);
            return;
        }
    }

    struct timeval tv = { 0, 0 };
    if (event_add(hardware->h_timer, &tv) == -1) {
        log_warnx("event", "unable to register timer event for port %s",
            hardware->h_ifname);
        event_free(hardware->h_timer);
        hardware->h_timer = NULL;
        return;
    }
}

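/* Helpers toggling O_NONBLOCK on a file descriptor. Both return 0 on success
 * (including when the flag is already in the requested state) and -1 when
 * fcntl() fails. */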
int
levent_make_socket_nonblocking(int fd)
{
    int flags;
    if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
        log_warn("event", "fcntl(%d, F_GETFL)", fd);
        return -1;
    }
    if (flags & O_NONBLOCK) return 0;
    if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
        log_warn("event", "fcntl(%d, F_SETFL)", fd);
        return -1;
    }
    return 0;
}

int
levent_make_socket_blocking(int fd)
{
    int flags;
    if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
        log_warn("event", "fcntl(%d, F_GETFL)", fd);
        return -1;
    }
    if (!(flags & O_NONBLOCK)) return 0;
    if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) {
        log_warn("event", "fcntl(%d, F_SETFL)", fd);
        return -1;
    }
    return 0;
}

#ifdef HOST_OS_LINUX
/* Receive and log error from a socket when there is suspicion of an error. */
void
levent_recv_error(int fd, const char *source)
{
    do {
        ssize_t n;
        char buf[1024] = {};
        struct msghdr msg = { .msg_control = buf,
            .msg_controllen = sizeof(buf) };
        if ((n = recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT)) <= 0) {
            return;
        }
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        if (cmsg == NULL)
            log_warnx("event", "received unknown error on %s", source);
        else
            log_warnx("event", "received error (level=%d/type=%d) on %s",
                cmsg->cmsg_level, cmsg->cmsg_type, source);
    } while (1);
}
#endif