/* -*- mode: c; c-file-style: "openbsd" -*- */
/*
 * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "lldpd.h"
#include "trace.h"

#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <time.h>
#include <fcntl.h>
#if defined(__clang__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wdocumentation"
#endif
#include <event2/event.h>
#include <event2/bufferevent.h>
#include <event2/buffer.h>
#if defined(__clang__)
# pragma clang diagnostic pop
#endif

#define EVENT_BUFFER 1024
static void
levent_log_cb(int severity, const char *msg)
{
	switch (severity) {
	case _EVENT_LOG_DEBUG:
		log_debug("libevent", "%s", msg);
		break;
	case _EVENT_LOG_MSG:
		log_info("libevent", "%s", msg);
		break;
	case _EVENT_LOG_WARN:
		log_warnx("libevent", "%s", msg);
		break;
	case _EVENT_LOG_ERR:
		log_warnx("libevent", "%s", msg);
		break;
	}
}
struct lldpd_events {
	TAILQ_ENTRY(lldpd_events) next;
	struct event *ev;
};
TAILQ_HEAD(ev_l, lldpd_events);

#define levent_snmp_fds(cfg) ((struct ev_l *)(cfg)->g_snmp_fds)
#define levent_hardware_fds(hardware) ((struct ev_l *)(hardware)->h_recv)
#ifdef USE_SNMP
# include <net-snmp/net-snmp-config.h>
# include <net-snmp/net-snmp-includes.h>
# include <net-snmp/agent/net-snmp-agent-includes.h>
# include <net-snmp/agent/snmp_vars.h>

/* Compatibility with older versions of NetSNMP */
# ifndef HAVE_SNMP_SELECT_INFO2
#  define netsnmp_large_fd_set fd_set
#  define snmp_read2 snmp_read
#  define snmp_select_info2 snmp_select_info
#  define netsnmp_large_fd_set_init(...)
#  define netsnmp_large_fd_set_cleanup(...)
#  define NETSNMP_LARGE_FD_SET FD_SET
#  define NETSNMP_LARGE_FD_CLR FD_CLR
#  define NETSNMP_LARGE_FD_ZERO FD_ZERO
#  define NETSNMP_LARGE_FD_ISSET FD_ISSET
# else
#  include <net-snmp/library/large_fd_set.h>
# endif

static void levent_snmp_update(struct lldpd *);
/**
 * Callback function when we have something to read from SNMP.
 *
 * This function is called because we have a read event on one SNMP
 * file descriptor. We need to call snmp_read() on it.
 */
static void
levent_snmp_read(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	netsnmp_large_fd_set fdset;
	(void)what;
	netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
	NETSNMP_LARGE_FD_ZERO(&fdset);
	NETSNMP_LARGE_FD_SET(fd, &fdset);
	snmp_read2(&fdset);
	levent_snmp_update(cfg);
}
/**
 * Callback function for an SNMP timeout.
 *
 * An SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
 */
static void
levent_snmp_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	(void)fd;
	(void)what;
	snmp_timeout();
	levent_snmp_update(cfg);
}
/**
 * Watch a new SNMP FD.
 *
 * @param cfg The lldpd configuration (it provides the libevent base).
 * @param fd  The file descriptor we want to watch.
 *
 * The file descriptor is appended to the list of file descriptors we
 * are watching.
 */
static void
levent_snmp_add_fd(struct lldpd *cfg, int fd)
{
	struct event_base *base = cfg->g_base;
	struct lldpd_events *snmpfd = calloc(1, sizeof(struct lldpd_events));
	if (snmpfd == NULL) {
		log_warn("event", "unable to allocate memory for new SNMP event");
		return;
	}
	levent_make_socket_nonblocking(fd);
	if ((snmpfd->ev = event_new(base, fd, EV_READ | EV_PERSIST, levent_snmp_read,
		 cfg)) == NULL) {
		log_warnx("event", "unable to allocate a new SNMP event for FD %d", fd);
		free(snmpfd);
		return;
	}
	if (event_add(snmpfd->ev, NULL) == -1) {
		log_warnx("event", "unable to schedule new SNMP event for FD %d", fd);
		event_free(snmpfd->ev);
		free(snmpfd);
		return;
	}
	TAILQ_INSERT_TAIL(levent_snmp_fds(cfg), snmpfd, next);
}
/**
 * Update the SNMP event loop.
 *
 * New events are added and some others are removed. This function
 * should be called every time an SNMP event happens: when handling an
 * SNMP packet or an SNMP timeout, or when sending an SNMP packet. It
 * keeps libevent in sync with NetSNMP.
 *
 * @param cfg The lldpd configuration (it provides the libevent base).
 */
static void
levent_snmp_update(struct lldpd *cfg)
{
	int maxfd = 0;
	int block = 1;
	struct timeval timeout;
	static int howmany = 0;
	int added = 0, removed = 0, current = 0;
	struct lldpd_events *snmpfd, *snmpfd_next;

	/* snmp_select_info() can be tricky to understand. We set `block` to
	   1 to mean that we don't request a timeout. snmp_select_info()
	   will reset `block` to 0 if it wants us to set up a timeout. In
	   this timeout, `snmp_timeout()` should be invoked.

	   Each FD in `fdset` needs to be watched for reading. If one of
	   them becomes active, `snmp_read()` should be called on it.
	*/

	netsnmp_large_fd_set fdset;
	netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
	NETSNMP_LARGE_FD_ZERO(&fdset);
	snmp_select_info2(&maxfd, &fdset, &timeout, &block);

	/* We need to untrack any event whose FD is not in `fdset`
	   anymore. */
	for (snmpfd = TAILQ_FIRST(levent_snmp_fds(cfg)); snmpfd; snmpfd = snmpfd_next) {
		snmpfd_next = TAILQ_NEXT(snmpfd, next);
		if (event_get_fd(snmpfd->ev) >= maxfd ||
		    (!NETSNMP_LARGE_FD_ISSET(event_get_fd(snmpfd->ev), &fdset))) {
			event_free(snmpfd->ev);
			TAILQ_REMOVE(levent_snmp_fds(cfg), snmpfd, next);
			free(snmpfd);
			removed++;
		} else {
			/* Clear this FD from `fdset` so it is not added a
			   second time below. */
			NETSNMP_LARGE_FD_CLR(event_get_fd(snmpfd->ev), &fdset);
			current++;
		}
	}

	/* Invariant: FDs left in `fdset` are not in the list of tracked FDs. */
	for (int fd = 0; fd < maxfd; fd++) {
		if (NETSNMP_LARGE_FD_ISSET(fd, &fdset)) {
			levent_snmp_add_fd(cfg, fd);
			added++;
		}
	}
	current += added;
	if (howmany != current) {
		log_debug("event",
		    "added %d events, removed %d events, total of %d events", added,
		    removed, current);
		howmany = current;
	}

	/* If needed, handle timeout */
	if (evtimer_add(cfg->g_snmp_timeout, block ? NULL : &timeout) == -1)
		log_warnx("event", "unable to schedule timeout function for SNMP");

	netsnmp_large_fd_set_cleanup(&fdset);
}
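/*
 * For context: the function above replaces the classic synchronous
 * Net-SNMP polling loop, which looks roughly like this (a sketch, not
 * lldpd code):
 *
 *	int maxfd = 0, block = 1;
 *	fd_set fdset;
 *	struct timeval timeout;
 *	FD_ZERO(&fdset);
 *	snmp_select_info(&maxfd, &fdset, &timeout, &block);
 *	if (select(maxfd, &fdset, NULL, NULL, block ? NULL : &timeout) > 0)
 *		snmp_read(&fdset);
 *	else
 *		snmp_timeout();
 *
 * Here, each descriptor returned by snmp_select_info2() becomes a
 * persistent EV_READ event and the requested timeout becomes the
 * `cfg->g_snmp_timeout` timer, so libevent plays the role of select().
 */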
#endif /* USE_SNMP */
struct lldpd_one_client {
	TAILQ_ENTRY(lldpd_one_client) next;
	struct lldpd *cfg;
	struct bufferevent *bev;
	int subscribed; /* Is this client subscribed to changes? */
};
TAILQ_HEAD(, lldpd_one_client) lldpd_clients;
static void
levent_ctl_free_client(struct lldpd_one_client *client)
{
	if (client && client->bev) bufferevent_free(client->bev);
	if (client) {
		TAILQ_REMOVE(&lldpd_clients, client, next);
		free(client);
	}
}
static void
levent_ctl_close_clients(void)
{
	struct lldpd_one_client *client, *client_next;
	for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) {
		client_next = TAILQ_NEXT(client, next);
		levent_ctl_free_client(client);
	}
}
static ssize_t
levent_ctl_send(struct lldpd_one_client *client, int type, void *data, size_t len)
{
	struct bufferevent *bev = client->bev;
	struct hmsg_header hdr = { .len = len, .type = type };
	bufferevent_disable(bev, EV_WRITE);
	if (bufferevent_write(bev, &hdr, sizeof(struct hmsg_header)) == -1 ||
	    (len > 0 && bufferevent_write(bev, data, len) == -1)) {
		log_warnx("event", "unable to create answer to client");
		levent_ctl_free_client(client);
		return -1;
	}
	bufferevent_enable(bev, EV_WRITE);
	return len;
}
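/*
 * The control protocol is a simple length-prefixed framing (a sketch of
 * the layout; the exact field definitions live in `struct hmsg_header`):
 *
 *	+--------------------+--------------------------+
 *	| struct hmsg_header | payload (hdr.len bytes)  |
 *	+--------------------+--------------------------+
 *
 * levent_ctl_send() writes the header, then the payload.
 * levent_ctl_recv() below mirrors this: it first peeks at a complete
 * header, then waits until `hdr.len` more bytes are buffered before
 * dispatching the message.
 */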
void
levent_ctl_notify(char *ifname, int state, struct lldpd_port *neighbor)
{
	struct lldpd_one_client *client, *client_next;
	struct lldpd_neighbor_change neigh = { .ifname = ifname,
		.state = state,
		.neighbor = neighbor };
	void *output = NULL;
	ssize_t output_len = 0;

	/* Don't use TAILQ_FOREACH, the client may be deleted in case of errors. */
	log_debug("control", "notify clients of neighbor changes");
	for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) {
		client_next = TAILQ_NEXT(client, next);
		if (!client->subscribed) continue;

		if (output == NULL) {
			/* Ugly hack: we don't want to transmit a list of
			 * ports. We patch the port to avoid this. */
			TAILQ_ENTRY(lldpd_port) backup_p_entries;
			memcpy(&backup_p_entries, &neighbor->p_entries,
			    sizeof(backup_p_entries));
			memset(&neighbor->p_entries, 0, sizeof(backup_p_entries));
			output_len = lldpd_neighbor_change_serialize(&neigh, &output);
			memcpy(&neighbor->p_entries, &backup_p_entries,
			    sizeof(backup_p_entries));

			if (output_len <= 0) {
				log_warnx("event",
				    "unable to serialize changed neighbor");
				return;
			}
		}

		levent_ctl_send(client, NOTIFICATION, output, output_len);
	}

	free(output);
}
static ssize_t
levent_ctl_send_cb(void *out, int type, void *data, size_t len)
{
	struct lldpd_one_client *client = out;
	return levent_ctl_send(client, type, data, len);
}
static void
levent_ctl_recv(struct bufferevent *bev, void *ptr)
{
	struct lldpd_one_client *client = ptr;
	struct evbuffer *buffer = bufferevent_get_input(bev);
	size_t buffer_len = evbuffer_get_length(buffer);
	struct hmsg_header hdr;
	void *data = NULL;

	log_debug("control", "receive data on Unix socket");
	if (buffer_len < sizeof(struct hmsg_header)) return; /* Not enough data yet */
	if (evbuffer_copyout(buffer, &hdr, sizeof(struct hmsg_header)) !=
	    sizeof(struct hmsg_header)) {
		log_warnx("event", "not able to read header");
		return;
	}
	if (hdr.len > HMSG_MAX_SIZE) {
		log_warnx("event", "message received is too large");
		goto recv_error;
	}
	if (buffer_len < hdr.len + sizeof(struct hmsg_header))
		return; /* Not enough data yet */
	if (hdr.len > 0 && (data = malloc(hdr.len)) == NULL) {
		log_warnx("event", "not enough memory");
		goto recv_error;
	}
	evbuffer_drain(buffer, sizeof(struct hmsg_header));
	if (hdr.len > 0) evbuffer_remove(buffer, data, hdr.len);

	/* Currently, we should not receive notification acknowledgment. But if
	 * we receive one, we can discard it. */
	if (hdr.len == 0 && hdr.type == NOTIFICATION) return;
	if (client_handle_client(client->cfg, levent_ctl_send_cb, client, hdr.type,
		data, hdr.len, &client->subscribed) == -1)
		goto recv_error;
	free(data);
	return;

recv_error:
	free(data);
	levent_ctl_free_client(client);
}
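/*
 * Worked example of the reassembly above: if a client sends a header
 * announcing a 100-byte payload but the kernel delivers it in two
 * chunks, the read callback fires twice. The first run sees
 * buffer_len < sizeof(struct hmsg_header) + 100 and returns without
 * draining anything; the second run drains the header and removes the
 * full 100-byte payload in one go.
 */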
static void
levent_ctl_event(struct bufferevent *bev, short events, void *ptr)
{
	struct lldpd_one_client *client = ptr;
	(void)bev;
	if (events & BEV_EVENT_ERROR) {
		log_warnx("event", "an error occurred with client: %s",
		    evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
		levent_ctl_free_client(client);
	} else if (events & BEV_EVENT_EOF) {
		log_debug("event", "client has been disconnected");
		levent_ctl_free_client(client);
	}
}
static void
levent_ctl_accept(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct lldpd_one_client *client = NULL;
	int s;
	(void)what;

	log_debug("control", "accept a new connection");
	if ((s = accept(fd, NULL, NULL)) == -1) {
		log_warn("event", "unable to accept connection from socket");
		return;
	}
	client = calloc(1, sizeof(struct lldpd_one_client));
	if (!client) {
		log_warnx("event", "unable to allocate memory for new client");
		close(s);
		goto accept_failed;
	}
	client->cfg = cfg;
	levent_make_socket_nonblocking(s);
	TAILQ_INSERT_TAIL(&lldpd_clients, client, next);
	if ((client->bev = bufferevent_socket_new(cfg->g_base, s,
		 BEV_OPT_CLOSE_ON_FREE)) == NULL) {
		log_warnx("event",
		    "unable to allocate a new buffer event for new client");
		close(s);
		goto accept_failed;
	}
	bufferevent_setcb(client->bev, levent_ctl_recv, NULL, levent_ctl_event, client);
	bufferevent_enable(client->bev, EV_READ | EV_WRITE);
	log_debug("event", "new client accepted");
	/* coverity[leaked_handle]
	   s has been saved by bufferevent_socket_new */
	return;

accept_failed:
	levent_ctl_free_client(client);
}
static void
levent_priv(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	ssize_t n;
	int err;
	char one;
	(void)what;

	/* Check if we have some data available. We need to pass the socket in
	 * non-blocking mode to be able to run the check without disruption. */
	levent_make_socket_nonblocking(fd);
	n = read(fd, &one, 1);
	err = errno;
	levent_make_socket_blocking(fd);

	switch (n) {
	case -1:
		if (err == EAGAIN || err == EWOULDBLOCK) /* No data, all good */
			return;
		log_warnx("event", "unable to poll monitor process, exit");
		break;
	case 0:
		log_warnx("event", "monitor process has terminated, exit");
		break;
	default:
		/* This is a bit unsafe as we are now out-of-sync with the
		 * monitor. It would be safer to request 0 byte, but some OS
		 * (illumos) seem to take the shortcut that by asking 0 byte,
		 * we can just return 0 byte. */
		log_warnx("event",
		    "received unexpected data from monitor process, exit");
		break;
	}
	event_base_loopbreak(base);
}
static void
levent_dump(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd;
	(void)what;
	log_debug("event", "dumping all events");
	event_base_dump_events(base, stderr);
}
static void
levent_stop(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd;
	(void)what;
	event_base_loopbreak(base);
}
static void
levent_update_and_send(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct timeval tv;
	long interval_ms = cfg->g_config.c_tx_interval;

	(void)fd;
	(void)what;
	lldpd_loop(cfg);
	if (cfg->g_iface_event != NULL) interval_ms *= 20;
	if (interval_ms < 30000) interval_ms = 30000;
	tv.tv_sec = interval_ms / 1000;
	tv.tv_usec = (interval_ms % 1000) * 1000;
	event_add(cfg->g_main_loop, &tv);
}
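/*
 * Arithmetic example, assuming the default 30000 ms transmit interval:
 * with kernel interface notifications available (g_iface_event is set),
 * interval_ms becomes 600000, so the full refresh runs every 10
 * minutes; without them, the 30000 ms floor keeps it at 30 seconds.
 */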
void
levent_update_now(struct lldpd *cfg)
{
	if (cfg->g_main_loop) event_active(cfg->g_main_loop, EV_TIMEOUT, 1);
}
void
levent_send_now(struct lldpd *cfg)
{
	struct lldpd_hardware *hardware;
	TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
		if (hardware->h_timer)
			event_active(hardware->h_timer, EV_TIMEOUT, 1);
		else
			log_warnx("event", "BUG: no timer present for interface %s",
			    hardware->h_ifname);
	}
}
static void
levent_init(struct lldpd *cfg)
{
	/* Set up libevent */
	log_debug("event", "initialize libevent");
	event_set_log_callback(levent_log_cb);
	if (!(cfg->g_base = event_base_new()))
		fatalx("event", "unable to create a new libevent base");
	log_info("event", "libevent %s initialized with %s method", event_get_version(),
	    event_base_get_method(cfg->g_base));

#ifdef USE_SNMP
	if (cfg->g_snmp) {
		agent_init(cfg, cfg->g_snmp_agentx);
		cfg->g_snmp_timeout =
		    evtimer_new(cfg->g_base, levent_snmp_timeout, cfg);
		if (!cfg->g_snmp_timeout)
			fatalx("event", "unable to setup timeout function for SNMP");
		if ((cfg->g_snmp_fds = malloc(sizeof(struct ev_l))) == NULL)
			fatalx("event", "unable to allocate memory for SNMP events");
		TAILQ_INIT(levent_snmp_fds(cfg));
	}
#endif

	/* Setup loop that will run every X seconds. */
	log_debug("event", "register loop timer");
	if (!(cfg->g_main_loop =
		    event_new(cfg->g_base, -1, 0, levent_update_and_send, cfg)))
		fatalx("event", "unable to setup main timer");
	event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

	/* Set up unix socket */
	struct event *ctl_event;
	log_debug("event", "register Unix socket");
	TAILQ_INIT(&lldpd_clients);
	levent_make_socket_nonblocking(cfg->g_ctl);
	if ((ctl_event = event_new(cfg->g_base, cfg->g_ctl, EV_READ | EV_PERSIST,
		 levent_ctl_accept, cfg)) == NULL)
		fatalx("event", "unable to setup control socket event");
	event_add(ctl_event, NULL);

	/* Somehow monitor the monitor process */
	struct event *monitor_event;
	log_debug("event", "monitor the monitor process");
	if ((monitor_event = event_new(cfg->g_base, priv_fd(PRIV_UNPRIVILEGED),
		 EV_READ | EV_PERSIST, levent_priv, cfg->g_base)) == NULL)
		fatalx("event", "unable to monitor monitor process");
	event_add(monitor_event, NULL);

	/* Signals */
	log_debug("event", "register signals");
	evsignal_add(evsignal_new(cfg->g_base, SIGUSR1, levent_dump, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGINT, levent_stop, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGTERM, levent_stop, cfg->g_base),
	    NULL);
}
/* Initialize libevent and start the event loop */
void
levent_loop(struct lldpd *cfg)
{
	levent_init(cfg);
	lldpd_loop(cfg);
#ifdef USE_SNMP
	if (cfg->g_snmp) levent_snmp_update(cfg);
#endif

	/* libevent loop */
	do {
		TRACE(LLDPD_EVENT_LOOP());
		if (event_base_got_break(cfg->g_base) ||
		    event_base_got_exit(cfg->g_base))
			break;
	} while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

	if (cfg->g_iface_timer_event != NULL) event_free(cfg->g_iface_timer_event);

#ifdef USE_SNMP
	if (cfg->g_snmp) agent_shutdown();
#endif /* USE_SNMP */

	levent_ctl_close_clients();
}
/* Release libevent resources */
void
levent_shutdown(struct lldpd *cfg)
{
	if (cfg->g_iface_event) event_free(cfg->g_iface_event);
	if (cfg->g_cleanup_timer) event_free(cfg->g_cleanup_timer);
	event_base_free(cfg->g_base);
}
static void
levent_hardware_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd_hardware *hardware = arg;
	struct lldpd *cfg = hardware->h_cfg;
	(void)what;
	log_debug("event", "received something for %s", hardware->h_ifname);
	lldpd_recv(cfg, hardware, fd);
	levent_schedule_cleanup(cfg);
}
void
levent_hardware_init(struct lldpd_hardware *hardware)
{
	log_debug("event", "initialize events for %s", hardware->h_ifname);
	if ((hardware->h_recv = malloc(sizeof(struct ev_l))) == NULL) {
		log_warnx("event", "unable to allocate memory for %s",
		    hardware->h_ifname);
		return;
	}
	TAILQ_INIT(levent_hardware_fds(hardware));
}
void
levent_hardware_add_fd(struct lldpd_hardware *hardware, int fd)
{
	struct lldpd_events *hfd = NULL;
	if (!hardware->h_recv) return;

	hfd = calloc(1, sizeof(struct lldpd_events));
	if (hfd == NULL) {
		log_warnx("event", "unable to allocate new event for %s",
		    hardware->h_ifname);
		return;
	}
	levent_make_socket_nonblocking(fd);
	if ((hfd->ev = event_new(hardware->h_cfg->g_base, fd, EV_READ | EV_PERSIST,
		 levent_hardware_recv, hardware)) == NULL) {
		log_warnx("event", "unable to allocate a new event for %s",
		    hardware->h_ifname);
		free(hfd);
		return;
	}
	if (event_add(hfd->ev, NULL) == -1) {
		log_warnx("event", "unable to schedule new event for %s",
		    hardware->h_ifname);
		event_free(hfd->ev);
		free(hfd);
		return;
	}
	TAILQ_INSERT_TAIL(levent_hardware_fds(hardware), hfd, next);
}
void
levent_hardware_release(struct lldpd_hardware *hardware)
{
	struct lldpd_events *ev, *ev_next;
	if (hardware->h_timer) {
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
	}
	if (!hardware->h_recv) return;

	log_debug("event", "release events for %s", hardware->h_ifname);
	for (ev = TAILQ_FIRST(levent_hardware_fds(hardware)); ev; ev = ev_next) {
		ev_next = TAILQ_NEXT(ev, next);
		/* We may close the same FD several times. This is harmless. */
		close(event_get_fd(ev->ev));
		event_free(ev->ev);
		TAILQ_REMOVE(levent_hardware_fds(hardware), ev, next);
		free(ev);
	}
	free(levent_hardware_fds(hardware));
}
static void
levent_iface_trigger(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	(void)fd;
	(void)what;
	log_debug("event", "triggering update of all interfaces");
	lldpd_update_localports(cfg);
}
static void
levent_iface_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	char buffer[EVENT_BUFFER];
	ssize_t n;
	(void)what;

	if (cfg->g_iface_cb == NULL) {
		/* Discard the message */
		while (1) {
			n = read(fd, buffer, sizeof(buffer));
			if (n == -1 && (errno == EWOULDBLOCK || errno == EAGAIN)) break;
			if (n == -1) {
				log_warn("event",
				    "unable to receive interface change notification message");
				return;
			}
			if (n == 0) {
				log_warnx("event",
				    "end of file reached while getting interface change notification message");
				return;
			}
		}
	} else {
		cfg->g_iface_cb(cfg);
	}

	/* Schedule local port update. We don't run it right away because we may
	 * receive a batch of events like this. */
	struct timeval one_sec = { 1, 0 };
	TRACE(LLDPD_INTERFACES_NOTIFICATION());
	log_debug("event",
	    "received notification change, schedule an update of all interfaces in one second");
	if (cfg->g_iface_timer_event == NULL) {
		if ((cfg->g_iface_timer_event = evtimer_new(cfg->g_base,
			 levent_iface_trigger, cfg)) == NULL) {
			log_warnx("event",
			    "unable to create a new event to trigger interface update");
			return;
		}
	}
	if (evtimer_add(cfg->g_iface_timer_event, &one_sec) == -1) {
		log_warnx("event", "unable to schedule interface updates");
	}
}
void
levent_iface_subscribe(struct lldpd *cfg, int socket)
{
	log_debug("event", "subscribe to interface changes from socket %d", socket);
	levent_make_socket_nonblocking(socket);
	cfg->g_iface_event = event_new(cfg->g_base, socket, EV_READ | EV_PERSIST,
	    levent_iface_recv, cfg);
	if (cfg->g_iface_event == NULL) {
		log_warnx("event",
		    "unable to allocate a new event for interface changes");
		return;
	}
	if (event_add(cfg->g_iface_event, NULL) == -1) {
		log_warnx("event", "unable to schedule new interface changes event");
		event_free(cfg->g_iface_event);
		cfg->g_iface_event = NULL;
	}
}
static void
levent_trigger_cleanup(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	(void)fd;
	(void)what;
	lldpd_cleanup(cfg);
}
void
levent_schedule_cleanup(struct lldpd *cfg)
{
	log_debug("event", "schedule next cleanup");
	if (cfg->g_cleanup_timer != NULL) {
		event_free(cfg->g_cleanup_timer);
	}
	cfg->g_cleanup_timer = evtimer_new(cfg->g_base, levent_trigger_cleanup, cfg);
	if (cfg->g_cleanup_timer == NULL) {
		log_warnx("event", "unable to allocate a new event for cleanup tasks");
		return;
	}

	/* Compute the next TTL event */
	struct timeval tv = { cfg->g_config.c_ttl, 0 };
	time_t now = time(NULL);
	time_t next;
	struct lldpd_hardware *hardware;
	struct lldpd_port *port;
	TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
		TAILQ_FOREACH (port, &hardware->h_rports, p_entries) {
			if (now >= port->p_lastupdate + port->p_ttl) {
				tv.tv_sec = 0;
				tv.tv_usec = 0;
				log_debug("event",
				    "immediate cleanup on port %s (%lld, %d, %lld)",
				    hardware->h_ifname, (long long)now, port->p_ttl,
				    (long long)port->p_lastupdate);
				break;
			}
			next = port->p_ttl - (now - port->p_lastupdate);
			if (next < tv.tv_sec) tv.tv_sec = next;
		}
	}

	log_debug("event", "next cleanup in %ld seconds", (long)tv.tv_sec);
	if (event_add(cfg->g_cleanup_timer, &tv) == -1) {
		log_warnx("event", "unable to schedule cleanup task");
		event_free(cfg->g_cleanup_timer);
		cfg->g_cleanup_timer = NULL;
	}
}
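/*
 * Worked example: with c_ttl = 120 and a single neighbor whose
 * p_ttl = 120 was last updated 100 seconds ago,
 * next = 120 - (now - p_lastupdate) = 20, so the cleanup timer fires
 * in 20 seconds. A neighbor already past its TTL zeroes the timeout
 * instead, triggering lldpd_cleanup() on the next loop iteration.
 */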
static void
levent_send_pdu(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd_hardware *hardware = arg;
	int tx_interval = hardware->h_cfg->g_config.c_tx_interval;
	(void)fd;
	(void)what;
	log_debug("event", "trigger sending PDU for port %s", hardware->h_ifname);
	lldpd_send(hardware);

#ifdef ENABLE_LLDPMED
	if (hardware->h_tx_fast > 0) hardware->h_tx_fast--;

	if (hardware->h_tx_fast > 0)
		tx_interval = hardware->h_cfg->g_config.c_tx_fast_interval * 1000;
#endif

	struct timeval tv;
	tv.tv_sec = tx_interval / 1000;
	tv.tv_usec = (tx_interval % 1000) * 1000;
	if (event_add(hardware->h_timer, &tv) == -1) {
		log_warnx("event", "unable to re-register timer event for port %s",
		    hardware->h_ifname);
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
	}
}
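/*
 * A sketch of the LLDP-MED fast-start arithmetic above: with
 * c_tx_fast_interval = 1 (seconds, converted to 1000 ms) and
 * h_tx_fast = 4 when a new neighbor appears, the counter is
 * decremented on each send, so the next three PDUs are scheduled
 * 1000 ms apart; once h_tx_fast reaches zero, the timer falls back to
 * the regular c_tx_interval.
 */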
void
levent_schedule_pdu(struct lldpd_hardware *hardware)
{
	log_debug("event", "schedule sending PDU on %s", hardware->h_ifname);
	if (hardware->h_timer == NULL) {
		hardware->h_timer =
		    evtimer_new(hardware->h_cfg->g_base, levent_send_pdu, hardware);
		if (hardware->h_timer == NULL) {
			log_warnx("event", "unable to schedule PDU sending for port %s",
			    hardware->h_ifname);
			return;
		}
	}

	struct timeval tv = { 0, 0 };
	if (event_add(hardware->h_timer, &tv) == -1) {
		log_warnx("event", "unable to register timer event for port %s",
		    hardware->h_ifname);
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
	}
}
int
levent_make_socket_nonblocking(int fd)
{
	int flags;
	if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
		log_warn("event", "fcntl(%d, F_GETFL)", fd);
		return -1;
	}
	if (flags & O_NONBLOCK) return 0;
	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
		log_warn("event", "fcntl(%d, F_SETFL)", fd);
		return -1;
	}
	return 0;
}
int
levent_make_socket_blocking(int fd)
{
	int flags;
	if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
		log_warn("event", "fcntl(%d, F_GETFL)", fd);
		return -1;
	}
	if (!(flags & O_NONBLOCK)) return 0;
	if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) {
		log_warn("event", "fcntl(%d, F_SETFL)", fd);
		return -1;
	}
	return 0;
}
/* Receive and log error from a socket when there is suspicion of an error. */
void
levent_recv_error(int fd, const char *source)
{
	do {
		ssize_t n;
		char buf[1024] = {};
		struct msghdr msg = { .msg_control = buf,
			.msg_controllen = sizeof(buf) };
		if ((n = recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT)) <= 0) {
			return;
		}
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
		if (cmsg == NULL)
			log_warnx("event", "received unknown error on %s", source);
		else
			log_warnx("event", "received error (level=%d/type=%d) on %s",
			    cmsg->cmsg_level, cmsg->cmsg_type, source);
	} while (1);
}
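/*
 * Typical use (a sketch; the actual callers live in the OS-specific
 * interface code): after a suspicious failure on a raw socket, drain
 * and log the queued error rather than leaving it pending:
 *
 *	if (send(fd, pdu, len, 0) == -1)
 *		levent_recv_error(fd, "raw socket");
 */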