]>
Commit | Line | Data |
---|---|---|
4b292b55 | 1 | /* -*- mode: c; c-file-style: "openbsd" -*- */ |
d6e889b6 VB |
2 | /* |
3 | * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx> | |
4 | * | |
5 | * Permission to use, copy, modify, and/or distribute this software for any | |
6 | * purpose with or without fee is hereby granted, provided that the above | |
7 | * copyright notice and this permission notice appear in all copies. | |
8 | * | |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
16 | */ | |
17 | ||
18 | #include "lldpd.h" | |
bdfe4193 | 19 | #include "trace.h" |
d6e889b6 VB |
20 | |
21 | #include <unistd.h> | |
22 | #include <signal.h> | |
e0478a46 | 23 | #include <errno.h> |
864a7bd5 | 24 | #include <time.h> |
bec75f84 | 25 | #include <fcntl.h> |
d7460a6f ALG |
26 | #if defined(__clang__) |
27 | #pragma clang diagnostic push | |
28 | #pragma clang diagnostic ignored "-Wdocumentation" | |
29 | #endif | |
d6e889b6 | 30 | #include <event2/event.h> |
e0478a46 VB |
31 | #include <event2/bufferevent.h> |
32 | #include <event2/buffer.h> | |
d7460a6f ALG |
33 | #if defined(__clang__) |
34 | #pragma clang diagnostic pop | |
35 | #endif | |
d6e889b6 | 36 | |
b0b8841b ST |
37 | #define EVENT_BUFFER 1024 |
38 | ||
d6e889b6 VB |
39 | static void |
40 | levent_log_cb(int severity, const char *msg) | |
41 | { | |
42 | switch (severity) { | |
26fa5d17 VB |
43 | case _EVENT_LOG_DEBUG: log_debug("libevent", "%s", msg); break; |
44 | case _EVENT_LOG_MSG: log_info ("libevent", "%s", msg); break; | |
45 | case _EVENT_LOG_WARN: log_warnx("libevent", "%s", msg); break; | |
46 | case _EVENT_LOG_ERR: log_warnx("libevent", "%s", msg); break; | |
d6e889b6 VB |
47 | } |
48 | } | |
49 | ||
/* One watched file descriptor. These structures are chained in a tail
 * queue: either the global list of SNMP file descriptors or the list of
 * reception file descriptors of one hardware interface. */
struct lldpd_events {
	TAILQ_ENTRY(lldpd_events) next;	/* Chaining in the owning list */
	struct event *ev;		/* Libevent event watching the FD */
};
TAILQ_HEAD(ev_l, lldpd_events);

/* Accessors for the opaque event lists stored in the daemon
 * configuration and in each hardware interface. */
#define levent_snmp_fds(cfg) ((struct ev_l*)(cfg)->g_snmp_fds)
#define levent_hardware_fds(hardware) ((struct ev_l*)(hardware)->h_recv)
58 | ||
59 | #ifdef USE_SNMP | |
60 | #include <net-snmp/net-snmp-config.h> | |
61 | #include <net-snmp/net-snmp-includes.h> | |
62 | #include <net-snmp/agent/net-snmp-agent-includes.h> | |
63 | #include <net-snmp/agent/snmp_vars.h> | |
64 | ||
59c32cf0 VB |
65 | /* Compatibility with older versions of NetSNMP */ |
66 | #ifndef HAVE_SNMP_SELECT_INFO2 | |
67 | # define netsnmp_large_fd_set fd_set | |
68 | # define snmp_read2 snmp_read | |
69 | # define snmp_select_info2 snmp_select_info | |
70 | # define netsnmp_large_fd_set_init(...) | |
71 | # define netsnmp_large_fd_set_cleanup(...) | |
72 | # define NETSNMP_LARGE_FD_SET FD_SET | |
73 | # define NETSNMP_LARGE_FD_CLR FD_CLR | |
74 | # define NETSNMP_LARGE_FD_ZERO FD_ZERO | |
75 | # define NETSNMP_LARGE_FD_ISSET FD_ISSET | |
76 | #else | |
77 | # include <net-snmp/library/large_fd_set.h> | |
78 | #endif | |
79 | ||
d6e889b6 VB |
80 | static void levent_snmp_update(struct lldpd *); |
81 | ||
82 | /* | |
83 | * Callback function when we have something to read from SNMP. | |
84 | * | |
85 | * This function is called because we have a read event on one SNMP | |
86 | * file descriptor. When need to call snmp_read() on it. | |
87 | */ | |
88 | static void | |
89 | levent_snmp_read(evutil_socket_t fd, short what, void *arg) | |
90 | { | |
d6e889b6 | 91 | struct lldpd *cfg = arg; |
59c32cf0 | 92 | netsnmp_large_fd_set fdset; |
5fd6695c | 93 | (void)what; |
59c32cf0 VB |
94 | netsnmp_large_fd_set_init(&fdset, FD_SETSIZE); |
95 | NETSNMP_LARGE_FD_ZERO(&fdset); | |
96 | NETSNMP_LARGE_FD_SET(fd, &fdset); | |
97 | snmp_read2(&fdset); | |
d6e889b6 VB |
98 | levent_snmp_update(cfg); |
99 | } | |
100 | ||
/*
 * Callback function for a SNMP timeout.
 *
 * A SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
 */
static void
levent_snmp_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	(void)what; (void)fd;
	snmp_timeout();
	/* Also run any alarms (periodic tasks) NetSNMP has registered. */
	run_alarms();
	/* Timeout handling may have changed the FDs to watch: keep
	 * libevent in sync with NetSNMP. */
	levent_snmp_update(cfg);
}
115 | ||
/*
 * Watch a new SNMP FD.
 *
 * @param cfg The lldpd configuration (provides the libevent base).
 * @param fd  The file descriptor we want to watch.
 *
 * The file descriptor is appended to the list of file descriptors we
 * want to watch. On any allocation or scheduling failure, the FD is
 * simply not watched (a warning is logged).
 */
static void
levent_snmp_add_fd(struct lldpd *cfg, int fd)
{
	struct event_base *base = cfg->g_base;
	struct lldpd_events *snmpfd = calloc(1, sizeof(struct lldpd_events));
	if (!snmpfd) {
		log_warn("event", "unable to allocate memory for new SNMP event");
		return;
	}
	/* The FD is polled by libevent: it must not block on read. */
	levent_make_socket_nonblocking(fd);
	if ((snmpfd->ev = event_new(base, fd,
		    EV_READ | EV_PERSIST,
		    levent_snmp_read,
		    cfg)) == NULL) {
		log_warnx("event", "unable to allocate a new SNMP event for FD %d", fd);
		free(snmpfd);
		return;
	}
	if (event_add(snmpfd->ev, NULL) == -1) {
		log_warnx("event", "unable to schedule new SNMP event for FD %d", fd);
		event_free(snmpfd->ev);
		free(snmpfd);
		return;
	}
	TAILQ_INSERT_TAIL(levent_snmp_fds(cfg), snmpfd, next);
}
151 | ||
/*
 * Update SNMP event loop.
 *
 * New events are added and some other are removed. This function
 * should be called every time a SNMP event happens: either when
 * handling a SNMP packet, a SNMP timeout or when sending a SNMP
 * packet. This function will keep libevent in sync with NetSNMP.
 *
 * @param cfg The lldpd configuration (provides the libevent base).
 */
static void
levent_snmp_update(struct lldpd *cfg)
{
	int maxfd = 0;
	int block = 1;
	struct timeval timeout;	/* Only valid when snmp_select_info2() clears `block` */
	static int howmany = 0;	/* Previously-watched FD count, for change logging */
	int added = 0, removed = 0, current = 0;
	struct lldpd_events *snmpfd, *snmpfd_next;

	/* snmp_select_info() can be tricky to understand. We set `block` to
	   1 to means that we don't request a timeout. snmp_select_info()
	   will reset `block` to 0 if it wants us to setup a timeout. In
	   this timeout, `snmp_timeout()` should be invoked.

	   Each FD in `fdset` will need to be watched for reading. If one of
	   them become active, `snmp_read()` should be called on it.
	*/

	netsnmp_large_fd_set fdset;
	netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
	NETSNMP_LARGE_FD_ZERO(&fdset);
	snmp_select_info2(&maxfd, &fdset, &timeout, &block);

	/* We need to untrack any event whose FD is not in `fdset`
	   anymore */
	for (snmpfd = TAILQ_FIRST(levent_snmp_fds(cfg));
	     snmpfd;
	     snmpfd = snmpfd_next) {
		snmpfd_next = TAILQ_NEXT(snmpfd, next);
		if (event_get_fd(snmpfd->ev) >= maxfd ||
		    (!NETSNMP_LARGE_FD_ISSET(event_get_fd(snmpfd->ev), &fdset))) {
			event_free(snmpfd->ev);
			TAILQ_REMOVE(levent_snmp_fds(cfg), snmpfd, next);
			free(snmpfd);
			removed++;
		} else {
			/* Still wanted: clear it from `fdset` so the loop
			   below only adds genuinely new FDs. */
			NETSNMP_LARGE_FD_CLR(event_get_fd(snmpfd->ev), &fdset);
			current++;
		}
	}

	/* Invariant: FD in `fdset` are not in list of FD */
	for (int fd = 0; fd < maxfd; fd++) {
		if (NETSNMP_LARGE_FD_ISSET(fd, &fdset)) {
			levent_snmp_add_fd(cfg, fd);
			added++;
		}
	}
	current += added;
	if (howmany != current) {
		log_debug("event", "added %d events, removed %d events, total of %d events",
		    added, removed, current);
		howmany = current;
	}

	/* If needed, handle timeout. When `block` stayed 1, no timeout was
	   requested and the timer is added without one. */
	if (evtimer_add(cfg->g_snmp_timeout, block?NULL:&timeout) == -1)
		log_warnx("event", "unable to schedule timeout function for SNMP");

	netsnmp_large_fd_set_cleanup(&fdset);
}
224 | #endif /* USE_SNMP */ | |
225 | ||
/* State for one client connected to the control Unix socket. */
struct lldpd_one_client {
	TAILQ_ENTRY(lldpd_one_client) next;	/* Chaining in `lldpd_clients` */
	struct lldpd *cfg;		/* Back-pointer to the daemon configuration */
	struct bufferevent *bev;	/* Buffered socket to the client */
	int subscribed; /* Is this client subscribed to changes? */
};
TAILQ_HEAD(, lldpd_one_client) lldpd_clients;
233 | ||
234 | static void | |
235 | levent_ctl_free_client(struct lldpd_one_client *client) | |
236 | { | |
237 | if (client && client->bev) bufferevent_free(client->bev); | |
238 | if (client) { | |
239 | TAILQ_REMOVE(&lldpd_clients, client, next); | |
240 | free(client); | |
241 | } | |
242 | } | |
257db885 | 243 | |
762419b6 VB |
244 | static void |
245 | levent_ctl_close_clients() | |
246 | { | |
247 | struct lldpd_one_client *client, *client_next; | |
248 | for (client = TAILQ_FIRST(&lldpd_clients); | |
249 | client; | |
250 | client = client_next) { | |
251 | client_next = TAILQ_NEXT(client, next); | |
252 | levent_ctl_free_client(client); | |
253 | } | |
254 | } | |
255 | ||
e0478a46 | 256 | static ssize_t |
4e90a9e0 | 257 | levent_ctl_send(struct lldpd_one_client *client, int type, void *data, size_t len) |
e0478a46 | 258 | { |
e0478a46 VB |
259 | struct bufferevent *bev = client->bev; |
260 | struct hmsg_header hdr = { .len = len, .type = type }; | |
261 | bufferevent_disable(bev, EV_WRITE); | |
262 | if (bufferevent_write(bev, &hdr, sizeof(struct hmsg_header)) == -1 || | |
263 | (len > 0 && bufferevent_write(bev, data, len) == -1)) { | |
6f8925be | 264 | log_warnx("event", "unable to create answer to client"); |
4e90a9e0 | 265 | levent_ctl_free_client(client); |
e0478a46 VB |
266 | return -1; |
267 | } | |
268 | bufferevent_enable(bev, EV_WRITE); | |
269 | return len; | |
270 | } | |
271 | ||
4e90a9e0 VB |
/*
 * Notify all subscribed clients of a neighbor change.
 *
 * @param ifname   Name of the interface the change happened on.
 * @param state    Kind of change (see lldpd_neighbor_change).
 * @param neighbor The affected neighbor port.
 */
void
levent_ctl_notify(char *ifname, int state, struct lldpd_port *neighbor)
{
	struct lldpd_one_client *client, *client_next;
	struct lldpd_neighbor_change neigh = {
		.ifname = ifname,
		.state = state,
		.neighbor = neighbor
	};
	void *output = NULL;
	ssize_t output_len = 0;

	/* Don't use TAILQ_FOREACH, the client may be deleted in case of errors. */
	log_debug("control", "notify clients of neighbor changes");
	for (client = TAILQ_FIRST(&lldpd_clients);
	     client;
	     client = client_next) {
		client_next = TAILQ_NEXT(client, next);
		if (!client->subscribed) continue;

		/* Serialize lazily: at most once, and only if at least one
		   client is actually subscribed. */
		if (output == NULL) {
			/* Ugly hack: we don't want to transmit a list of
			 * ports. We patch the port to avoid this. */
			TAILQ_ENTRY(lldpd_port) backup_p_entries;
			memcpy(&backup_p_entries, &neighbor->p_entries,
			    sizeof(backup_p_entries));
			memset(&neighbor->p_entries, 0,
			    sizeof(backup_p_entries));
			output_len = lldpd_neighbor_change_serialize(&neigh, &output);
			/* Restore the original list linkage immediately. */
			memcpy(&neighbor->p_entries, &backup_p_entries,
			    sizeof(backup_p_entries));

			if (output_len <= 0) {
				log_warnx("event", "unable to serialize changed neighbor");
				return;
			}
		}

		levent_ctl_send(client, NOTIFICATION, output, output_len);
	}

	free(output);
}
315 | ||
316 | static ssize_t | |
317 | levent_ctl_send_cb(void *out, int type, void *data, size_t len) | |
318 | { | |
319 | struct lldpd_one_client *client = out; | |
320 | return levent_ctl_send(client, type, data, len); | |
321 | } | |
322 | ||
d6e889b6 | 323 | static void |
e0478a46 | 324 | levent_ctl_recv(struct bufferevent *bev, void *ptr) |
d6e889b6 | 325 | { |
e0478a46 VB |
326 | struct lldpd_one_client *client = ptr; |
327 | struct evbuffer *buffer = bufferevent_get_input(bev); | |
328 | size_t buffer_len = evbuffer_get_length(buffer); | |
329 | struct hmsg_header hdr; | |
330 | void *data = NULL; | |
331 | ||
6f8925be | 332 | log_debug("control", "receive data on Unix socket"); |
e0478a46 VB |
333 | if (buffer_len < sizeof(struct hmsg_header)) |
334 | return; /* Not enough data yet */ | |
335 | if (evbuffer_copyout(buffer, &hdr, | |
336 | sizeof(struct hmsg_header)) != sizeof(struct hmsg_header)) { | |
6f8925be | 337 | log_warnx("event", "not able to read header"); |
e0478a46 VB |
338 | return; |
339 | } | |
82374540 | 340 | if (hdr.len > HMSG_MAX_SIZE) { |
6f8925be | 341 | log_warnx("event", "message received is too large"); |
e0478a46 VB |
342 | goto recv_error; |
343 | } | |
d6e889b6 | 344 | |
e0478a46 VB |
345 | if (buffer_len < hdr.len + sizeof(struct hmsg_header)) |
346 | return; /* Not enough data yet */ | |
347 | if (hdr.len > 0 && (data = malloc(hdr.len)) == NULL) { | |
6f8925be | 348 | log_warnx("event", "not enough memory"); |
e0478a46 VB |
349 | goto recv_error; |
350 | } | |
351 | evbuffer_drain(buffer, sizeof(struct hmsg_header)); | |
352 | if (hdr.len > 0) evbuffer_remove(buffer, data, hdr.len); | |
4e90a9e0 VB |
353 | |
354 | /* Currently, we should not receive notification acknowledgment. But if | |
355 | * we receive one, we can discard it. */ | |
356 | if (hdr.len == 0 && hdr.type == NOTIFICATION) return; | |
e0478a46 | 357 | if (client_handle_client(client->cfg, |
4e90a9e0 VB |
358 | levent_ctl_send_cb, client, |
359 | hdr.type, data, hdr.len, | |
360 | &client->subscribed) == -1) goto recv_error; | |
e0478a46 VB |
361 | free(data); |
362 | return; | |
363 | ||
364 | recv_error: | |
365 | free(data); | |
4e90a9e0 | 366 | levent_ctl_free_client(client); |
e0478a46 VB |
367 | } |
368 | ||
369 | static void | |
370 | levent_ctl_event(struct bufferevent *bev, short events, void *ptr) | |
371 | { | |
372 | struct lldpd_one_client *client = ptr; | |
373 | if (events & BEV_EVENT_ERROR) { | |
6f8925be | 374 | log_warnx("event", "an error occurred with client: %s", |
e0478a46 | 375 | evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR())); |
4e90a9e0 | 376 | levent_ctl_free_client(client); |
e0478a46 | 377 | } else if (events & BEV_EVENT_EOF) { |
6f8925be | 378 | log_debug("event", "client has been disconnected"); |
4e90a9e0 | 379 | levent_ctl_free_client(client); |
d6e889b6 | 380 | } |
d6e889b6 VB |
381 | } |
382 | ||
/* Accept callback on the control Unix socket: set up a new client. */
static void
levent_ctl_accept(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct lldpd_one_client *client = NULL;
	int s;
	(void)what;

	log_debug("control", "accept a new connection");
	if ((s = accept(fd, NULL, NULL)) == -1) {
		log_warn("event", "unable to accept connection from socket");
		return;
	}
	client = calloc(1, sizeof(struct lldpd_one_client));
	if (!client) {
		log_warnx("event", "unable to allocate memory for new client");
		close(s);
		goto accept_failed;
	}
	client->cfg = cfg;
	levent_make_socket_nonblocking(s);
	/* Insert into the list before creating the bufferevent so the
	   error path (levent_ctl_free_client) can always unlink it. */
	TAILQ_INSERT_TAIL(&lldpd_clients, client, next);
	if ((client->bev = bufferevent_socket_new(cfg->g_base, s,
		    BEV_OPT_CLOSE_ON_FREE)) == NULL) {
		log_warnx("event", "unable to allocate a new buffer event for new client");
		close(s);
		goto accept_failed;
	}
	bufferevent_setcb(client->bev,
	    levent_ctl_recv, NULL, levent_ctl_event,
	    client);
	bufferevent_enable(client->bev, EV_READ | EV_WRITE);
	log_debug("event", "new client accepted");
	/* coverity[leaked_handle]
	   s has been saved by bufferevent_socket_new */
	return;
accept_failed:
	levent_ctl_free_client(client);
}
422 | ||
327b1d62 VB |
/*
 * Check that the monitor (privileged) process is still alive.
 *
 * Called when the socket to the monitor becomes readable. A zero-byte
 * read probes the socket state without consuming data we may still need
 * to exchange with the monitor. If the monitor is gone or misbehaving,
 * the event loop is stopped.
 */
static void
levent_priv(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	ssize_t n;
	int err;
	char one;
	(void)what;
	/* Check if we have some data available. We need to pass the socket in
	 * non-blocking mode to be able to run the check without disruption. */
	levent_make_socket_nonblocking(fd);
	n = read(fd, &one, 0); err = errno;	/* save errno before next call */
	levent_make_socket_blocking(fd);

	switch (n) {
	case -1:
		if (err == EAGAIN || err == EWOULDBLOCK)
			/* No data, all good */
			return;
		log_warnx("event", "unable to poll monitor process, exit");
		break;
	case 0:
		log_warnx("event", "monitor process has terminated, exit");
		break;
	default:
		/* Unfortunately, dead code, if we have data, we have requested
		 * 0 byte, so we will fall in the previous case. It seems safer
		 * to ask for 0 byte than asking for 1 byte. In the later case,
		 * if we have to speak with the monitor again before exiting, we
		 * would be out of sync. */
		log_warnx("event", "received unexpected data from monitor process, exit");
		break;
	}
	event_base_loopbreak(base);
}
458 | ||
d6e889b6 VB |
/* SIGUSR1 handler: dump all registered libevent events to stderr for
 * debugging. */
static void
levent_dump(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd; (void)what;
	log_debug("event", "dumping all events");
	event_base_dump_events(base, stderr);
}
/* SIGINT/SIGTERM handler: break out of the event loop. */
static void
levent_stop(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd; (void)what;
	event_base_loopbreak(base);
}
474 | ||
/* Main loop timer callback: run lldpd_loop() and re-arm the timer. */
static void
levent_update_and_send(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct timeval tv = { cfg->g_config.c_tx_interval, 0 };
	(void)fd; (void)what;
	lldpd_loop(cfg);
	/* When kernel interface notifications are available
	 * (g_iface_event set), polling can be much less frequent. */
	if (cfg->g_iface_event != NULL)
		tv.tv_sec *= 20;
	event_add(cfg->g_main_loop, &tv);
}
486 | ||
e681c859 VB |
487 | void |
488 | levent_update_now(struct lldpd *cfg) | |
489 | { | |
490 | if (cfg->g_main_loop) | |
491 | event_active(cfg->g_main_loop, EV_TIMEOUT, 1); | |
492 | } | |
493 | ||
47287a61 VB |
494 | void |
495 | levent_send_now(struct lldpd *cfg) | |
496 | { | |
959a54d4 | 497 | struct lldpd_hardware *hardware; |
126970da VB |
498 | TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { |
499 | if (hardware->h_timer) | |
500 | event_active(hardware->h_timer, EV_TIMEOUT, 1); | |
501 | else | |
502 | log_warnx("event", "BUG: no timer present for interface %s", | |
503 | hardware->h_ifname); | |
504 | } | |
47287a61 VB |
505 | } |
506 | ||
d6e889b6 VB |
/*
 * Set up the whole libevent machinery: the base, SNMP events, the main
 * loop timer, the control Unix socket, the monitor-process watchdog and
 * the signal handlers. Any failure here is fatal.
 */
static void
levent_init(struct lldpd *cfg)
{
	/* Setup libevent */
	log_debug("event", "initialize libevent");
	event_set_log_callback(levent_log_cb);
	if (!(cfg->g_base = event_base_new()))
		fatalx("event", "unable to create a new libevent base");
	log_info("event", "libevent %s initialized with %s method",
	    event_get_version(),
	    event_base_get_method(cfg->g_base));

	/* Setup SNMP */
#ifdef USE_SNMP
	if (cfg->g_snmp) {
		agent_init(cfg, cfg->g_snmp_agentx);
		cfg->g_snmp_timeout = evtimer_new(cfg->g_base,
		    levent_snmp_timeout,
		    cfg);
		if (!cfg->g_snmp_timeout)
			fatalx("event", "unable to setup timeout function for SNMP");
		if ((cfg->g_snmp_fds =
			malloc(sizeof(struct ev_l))) == NULL)
			fatalx("event", "unable to allocate memory for SNMP events");
		TAILQ_INIT(levent_snmp_fds(cfg));
	}
#endif

	/* Setup loop that will run every X seconds. */
	log_debug("event", "register loop timer");
	if (!(cfg->g_main_loop = event_new(cfg->g_base, -1, 0,
		    levent_update_and_send,
		    cfg)))
		fatalx("event", "unable to setup main timer");
	/* Run the main loop once immediately on startup. */
	event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

	/* Setup unix socket */
	struct event *ctl_event;
	log_debug("event", "register Unix socket");
	TAILQ_INIT(&lldpd_clients);
	levent_make_socket_nonblocking(cfg->g_ctl);
	if ((ctl_event = event_new(cfg->g_base, cfg->g_ctl,
		    EV_READ|EV_PERSIST, levent_ctl_accept, cfg)) == NULL)
		fatalx("event", "unable to setup control socket event");
	event_add(ctl_event, NULL);

	/* Somehow monitor the monitor process */
	struct event *monitor_event;
	log_debug("event", "monitor the monitor process");
	if ((monitor_event = event_new(cfg->g_base, priv_fd(PRIV_UNPRIVILEGED),
		    EV_READ|EV_PERSIST, levent_priv, cfg->g_base)) == NULL)
		fatalx("event", "unable to monitor monitor process");
	event_add(monitor_event, NULL);

	/* Signals */
	log_debug("event", "register signals");
	evsignal_add(evsignal_new(cfg->g_base, SIGUSR1,
		levent_dump, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGINT,
		levent_stop, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGTERM,
		levent_stop, cfg->g_base),
	    NULL);
}
573 | ||
/* Initialize libevent and start the event loop */
void
levent_loop(struct lldpd *cfg)
{
	levent_init(cfg);
	lldpd_loop(cfg);
#ifdef USE_SNMP
	if (cfg->g_snmp) levent_snmp_update(cfg);
#endif

	/* libevent loop: run one iteration at a time so break/exit
	   requests are honored between iterations. */
	do {
		TRACE(LLDPD_EVENT_LOOP());
		if (event_base_got_break(cfg->g_base) ||
		    event_base_got_exit(cfg->g_base))
			break;
	} while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

	/* Tear down the deferred interface-update timer, if it was ever
	   created (see levent_iface_recv). */
	if (cfg->g_iface_timer_event != NULL)
		event_free(cfg->g_iface_timer_event);

#ifdef USE_SNMP
	if (cfg->g_snmp)
		agent_shutdown();
#endif /* USE_SNMP */

	levent_ctl_close_clients();
}
602 | ||
603 | /* Release libevent resources */ | |
604 | void | |
605 | levent_shutdown(struct lldpd *cfg) | |
606 | { | |
762419b6 VB |
607 | if (cfg->g_iface_event) |
608 | event_free(cfg->g_iface_event); | |
609 | if (cfg->g_cleanup_timer) | |
610 | event_free(cfg->g_cleanup_timer); | |
f144d837 | 611 | event_base_free(cfg->g_base); |
d6e889b6 VB |
612 | } |
613 | ||
614 | static void | |
615 | levent_hardware_recv(evutil_socket_t fd, short what, void *arg) | |
616 | { | |
d6e889b6 VB |
617 | struct lldpd_hardware *hardware = arg; |
618 | struct lldpd *cfg = hardware->h_cfg; | |
5fd6695c | 619 | (void)what; |
6f8925be VB |
620 | log_debug("event", "received something for %s", |
621 | hardware->h_ifname); | |
d6e889b6 | 622 | lldpd_recv(cfg, hardware, fd); |
3333d2a8 | 623 | levent_schedule_cleanup(cfg); |
d6e889b6 VB |
624 | } |
625 | ||
/* Allocate and initialize the reception-event list of an interface.
 * On allocation failure, h_recv stays NULL and later calls to
 * levent_hardware_add_fd() become no-ops. */
void
levent_hardware_init(struct lldpd_hardware *hardware)
{
	log_debug("event", "initialize events for %s", hardware->h_ifname);
	if ((hardware->h_recv =
		malloc(sizeof(struct ev_l))) == NULL) {
		log_warnx("event", "unable to allocate memory for %s",
		    hardware->h_ifname);
		return;
	}
	TAILQ_INIT(levent_hardware_fds(hardware));
}
638 | ||
/* Watch a new reception FD for an interface. Mirrors
 * levent_snmp_add_fd(): the FD is made non-blocking, wrapped in a
 * persistent read event and appended to the interface's event list.
 * No-op if levent_hardware_init() previously failed. */
void
levent_hardware_add_fd(struct lldpd_hardware *hardware, int fd)
{
	struct lldpd_events *hfd = NULL;
	if (!hardware->h_recv) return;

	hfd = calloc(1, sizeof(struct lldpd_events));
	if (!hfd) {
		log_warnx("event", "unable to allocate new event for %s",
		    hardware->h_ifname);
		return;
	}
	levent_make_socket_nonblocking(fd);
	if ((hfd->ev = event_new(hardware->h_cfg->g_base, fd,
		    EV_READ | EV_PERSIST,
		    levent_hardware_recv,
		    hardware)) == NULL) {
		log_warnx("event", "unable to allocate a new event for %s",
		    hardware->h_ifname);
		free(hfd);
		return;
	}
	if (event_add(hfd->ev, NULL) == -1) {
		log_warnx("event", "unable to schedule new event for %s",
		    hardware->h_ifname);
		event_free(hfd->ev);
		free(hfd);
		return;
	}
	TAILQ_INSERT_TAIL(levent_hardware_fds(hardware), hfd, next);
}
670 | ||
/* Release all events attached to an interface: its transmit timer and
 * every reception event (closing the underlying FDs). */
void
levent_hardware_release(struct lldpd_hardware *hardware)
{
	struct lldpd_events *ev, *ev_next;
	if (hardware->h_timer) {
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
	}
	if (!hardware->h_recv) return;

	log_debug("event", "release events for %s", hardware->h_ifname);
	for (ev = TAILQ_FIRST(levent_hardware_fds(hardware));
	     ev;
	     ev = ev_next) {
		ev_next = TAILQ_NEXT(ev, next);
		/* We may close several time the same FD. This is harmless. */
		close(event_get_fd(ev->ev));
		event_free(ev->ev);
		TAILQ_REMOVE(levent_hardware_fds(hardware), ev, next);
		free(ev);
	}
	free(levent_hardware_fds(hardware));
}
0484f180 VB |
694 | |
695 | static void | |
696 | levent_iface_trigger(evutil_socket_t fd, short what, void *arg) | |
697 | { | |
698 | struct lldpd *cfg = arg; | |
4f670a1e | 699 | log_debug("event", |
0484f180 VB |
700 | "triggering update of all interfaces"); |
701 | lldpd_update_localports(cfg); | |
702 | } | |
703 | ||
/*
 * Read callback for the interface-change notification socket.
 *
 * Without a platform callback (g_iface_cb), the kernel messages are
 * simply drained; otherwise the callback consumes them. Either way, a
 * one-second one-shot timer is (re)armed to batch a local-port update.
 */
static void
levent_iface_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	char buffer[EVENT_BUFFER];
	int n;

	if (cfg->g_iface_cb == NULL) {
		/* Discard the message */
		while (1) {
			n = read(fd, buffer, sizeof(buffer));
			if (n == -1 &&
			    (errno == EWOULDBLOCK ||
				errno == EAGAIN)) break;
			if (n == -1) {
				log_warn("event",
				    "unable to receive interface change notification message");
				return;
			}
			if (n == 0) {
				log_warnx("event",
				    "end of file reached while getting interface change notification message");
				return;
			}
		}
	} else {
		cfg->g_iface_cb(cfg);
	}

	/* Schedule local port update. We don't run it right away because we may
	 * receive a batch of events like this. */
	struct timeval one_sec = {1, 0};
	TRACE(LLDPD_INTERFACES_NOTIFICATION());
	log_debug("event",
	    "received notification change, schedule an update of all interfaces in one second");
	if (cfg->g_iface_timer_event == NULL) {
		/* Lazily create the debounce timer; freed in levent_loop(). */
		if ((cfg->g_iface_timer_event = evtimer_new(cfg->g_base,
			    levent_iface_trigger, cfg)) == NULL) {
			log_warnx("event",
			    "unable to create a new event to trigger interface update");
			return;
		}
	}
	if (evtimer_add(cfg->g_iface_timer_event, &one_sec) == -1) {
		log_warnx("event",
		    "unable to schedule interface updates");
		return;
	}
}
753 | ||
aa313f2a | 754 | int |
0484f180 VB |
755 | levent_iface_subscribe(struct lldpd *cfg, int socket) |
756 | { | |
757 | log_debug("event", "subscribe to interface changes from socket %d", | |
758 | socket); | |
0fa2254b VB |
759 | if (cfg->g_iface_cb == NULL) |
760 | levent_make_socket_nonblocking(socket); | |
0484f180 VB |
761 | cfg->g_iface_event = event_new(cfg->g_base, socket, |
762 | EV_READ | EV_PERSIST, levent_iface_recv, cfg); | |
763 | if (cfg->g_iface_event == NULL) { | |
764 | log_warnx("event", | |
765 | "unable to allocate a new event for interface changes"); | |
aa313f2a | 766 | return -1; |
0484f180 VB |
767 | } |
768 | if (event_add(cfg->g_iface_event, NULL) == -1) { | |
769 | log_warnx("event", | |
770 | "unable to schedule new interface changes event"); | |
771 | event_free(cfg->g_iface_event); | |
772 | cfg->g_iface_event = NULL; | |
aa313f2a | 773 | return -1; |
0484f180 | 774 | } |
aa313f2a | 775 | return 0; |
0484f180 | 776 | } |
579bedd5 | 777 | |
3333d2a8 VB |
778 | static void |
779 | levent_trigger_cleanup(evutil_socket_t fd, short what, void *arg) | |
780 | { | |
781 | struct lldpd *cfg = arg; | |
782 | lldpd_cleanup(cfg); | |
783 | } | |
784 | ||
785 | void | |
786 | levent_schedule_cleanup(struct lldpd *cfg) | |
787 | { | |
788 | log_debug("event", "schedule next cleanup"); | |
789 | if (cfg->g_cleanup_timer != NULL) { | |
790 | event_free(cfg->g_cleanup_timer); | |
791 | } | |
792 | cfg->g_cleanup_timer = evtimer_new(cfg->g_base, levent_trigger_cleanup, cfg); | |
793 | if (cfg->g_cleanup_timer == NULL) { | |
794 | log_warnx("event", | |
795 | "unable to allocate a new event for cleanup tasks"); | |
796 | return; | |
797 | } | |
798 | ||
799 | /* Compute the next TTL event */ | |
71b0f981 | 800 | struct timeval tv = { cfg->g_config.c_ttl, 0 }; |
3333d2a8 VB |
801 | time_t now = time(NULL); |
802 | time_t next; | |
803 | struct lldpd_hardware *hardware; | |
804 | struct lldpd_port *port; | |
805 | TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { | |
806 | TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { | |
78346c89 | 807 | if (now >= port->p_lastupdate + port->p_ttl) { |
5a215d4b | 808 | tv.tv_sec = 0; |
408c3e02 VB |
809 | log_debug("event", "immediate cleanup on port %s (%lld, %d, %lld)", |
810 | hardware->h_ifname, | |
811 | (long long)now, | |
78346c89 | 812 | port->p_ttl, |
408c3e02 | 813 | (long long)port->p_lastupdate); |
5a215d4b VB |
814 | break; |
815 | } | |
78346c89 | 816 | next = port->p_ttl - (now - port->p_lastupdate); |
5a215d4b | 817 | if (next < tv.tv_sec) |
3333d2a8 VB |
818 | tv.tv_sec = next; |
819 | } | |
820 | } | |
821 | ||
822 | log_debug("event", "next cleanup in %ld seconds", | |
823 | (long)tv.tv_sec); | |
824 | if (event_add(cfg->g_cleanup_timer, &tv) == -1) { | |
825 | log_warnx("event", | |
826 | "unable to schedula cleanup task"); | |
827 | event_free(cfg->g_cleanup_timer); | |
828 | cfg->g_cleanup_timer = NULL; | |
829 | return; | |
830 | } | |
831 | } | |
832 | ||
579bedd5 VB |
833 | static void |
834 | levent_send_pdu(evutil_socket_t fd, short what, void *arg) | |
835 | { | |
836 | struct lldpd_hardware *hardware = arg; | |
be511d00 | 837 | int tx_interval = hardware->h_cfg->g_config.c_tx_interval; |
b9de0ca6 | 838 | |
579bedd5 VB |
839 | log_debug("event", "trigger sending PDU for port %s", |
840 | hardware->h_ifname); | |
841 | lldpd_send(hardware); | |
842 | ||
be511d00 | 843 | #ifdef ENABLE_LLDPMED |
b9de0ca6 | 844 | if (hardware->h_tx_fast > 0) |
845 | hardware->h_tx_fast--; | |
846 | ||
847 | if (hardware->h_tx_fast > 0) | |
848 | tx_interval = hardware->h_cfg->g_config.c_tx_fast_interval; | |
be511d00 VB |
849 | #endif |
850 | ||
b9de0ca6 | 851 | struct timeval tv = { tx_interval, 0 }; |
579bedd5 VB |
852 | if (event_add(hardware->h_timer, &tv) == -1) { |
853 | log_warnx("event", "unable to re-register timer event for port %s", | |
854 | hardware->h_ifname); | |
855 | event_free(hardware->h_timer); | |
856 | hardware->h_timer = NULL; | |
857 | return; | |
858 | } | |
859 | } | |
860 | ||
861 | void | |
862 | levent_schedule_pdu(struct lldpd_hardware *hardware) | |
863 | { | |
864 | log_debug("event", "schedule sending PDU on %s", | |
865 | hardware->h_ifname); | |
866 | if (hardware->h_timer == NULL) { | |
867 | hardware->h_timer = evtimer_new(hardware->h_cfg->g_base, | |
868 | levent_send_pdu, hardware); | |
869 | if (hardware->h_timer == NULL) { | |
870 | log_warnx("event", "unable to schedule PDU sending for port %s", | |
871 | hardware->h_ifname); | |
872 | return; | |
873 | } | |
874 | } | |
875 | ||
876 | struct timeval tv = { 0, 0 }; | |
877 | if (event_add(hardware->h_timer, &tv) == -1) { | |
878 | log_warnx("event", "unable to register timer event for port %s", | |
879 | hardware->h_ifname); | |
880 | event_free(hardware->h_timer); | |
881 | hardware->h_timer = NULL; | |
882 | return; | |
883 | } | |
884 | } | |
bec75f84 VB |
885 | |
886 | int | |
887 | levent_make_socket_nonblocking(int fd) | |
888 | { | |
889 | int flags; | |
890 | if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) { | |
891 | log_warn("event", "fcntl(%d, F_GETFL)", fd); | |
892 | return -1; | |
893 | } | |
894 | if (flags & O_NONBLOCK) return 0; | |
895 | if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) { | |
896 | log_warn("event", "fcntl(%d, F_SETFL)", fd); | |
897 | return -1; | |
898 | } | |
899 | return 0; | |
900 | } | |
327b1d62 VB |
901 | |
902 | int | |
903 | levent_make_socket_blocking(int fd) | |
904 | { | |
905 | int flags; | |
906 | if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) { | |
907 | log_warn("event", "fcntl(%d, F_GETFL)", fd); | |
908 | return -1; | |
909 | } | |
910 | if (!(flags & O_NONBLOCK)) return 0; | |
911 | if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) { | |
912 | log_warn("event", "fcntl(%d, F_SETFL)", fd); | |
913 | return -1; | |
914 | } | |
915 | return 0; | |
916 | } | |
6c3697f2 VB |
917 | |
#ifdef HOST_OS_LINUX
/* Drain and log all pending errors queued on a socket's error queue
 * (MSG_ERRQUEUE).  Called when an error is suspected on `fd`; `source`
 * names the socket in log messages.  Returns once the queue is empty or
 * recvmsg() fails. */
void
levent_recv_error(int fd, const char *source)
{
	for (;;) {
		char control[1024] = {};
		struct msghdr msg = {
			.msg_control = control,
			.msg_controllen = sizeof(control)
		};
		if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) <= 0)
			return;
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
		if (cmsg == NULL)
			log_warnx("event", "received unknown error on %s",
			    source);
		else
			log_warnx("event", "received error (level=%d/type=%d) on %s",
			    cmsg->cmsg_level, cmsg->cmsg_type, source);
	}
}
#endif