]>
Commit | Line | Data |
---|---|---|
1 | /* -*- mode: c; c-file-style: "openbsd" -*- */ | |
2 | /* | |
3 | * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx> | |
4 | * | |
5 | * Permission to use, copy, modify, and/or distribute this software for any | |
6 | * purpose with or without fee is hereby granted, provided that the above | |
7 | * copyright notice and this permission notice appear in all copies. | |
8 | * | |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
16 | */ | |
17 | ||
18 | #include "lldpd.h" | |
19 | #include "trace.h" | |
20 | ||
21 | #include <unistd.h> | |
22 | #include <signal.h> | |
23 | #include <errno.h> | |
24 | #include <time.h> | |
25 | #include <fcntl.h> | |
26 | #if defined(__clang__) | |
27 | # pragma clang diagnostic push | |
28 | # pragma clang diagnostic ignored "-Wdocumentation" | |
29 | #endif | |
30 | #include <event2/event.h> | |
31 | #include <event2/bufferevent.h> | |
32 | #include <event2/buffer.h> | |
33 | #if defined(__clang__) | |
34 | # pragma clang diagnostic pop | |
35 | #endif | |
36 | ||
37 | #define EVENT_BUFFER 1024 | |
38 | ||
39 | static void | |
40 | levent_log_cb(int severity, const char *msg) | |
41 | { | |
42 | switch (severity) { | |
43 | case _EVENT_LOG_DEBUG: | |
44 | log_debug("libevent", "%s", msg); | |
45 | break; | |
46 | case _EVENT_LOG_MSG: | |
47 | log_info("libevent", "%s", msg); | |
48 | break; | |
49 | case _EVENT_LOG_WARN: | |
50 | log_warnx("libevent", "%s", msg); | |
51 | break; | |
52 | case _EVENT_LOG_ERR: | |
53 | log_warnx("libevent", "%s", msg); | |
54 | break; | |
55 | } | |
56 | } | |
57 | ||
/* One watched file descriptor, kept in a tail queue. The same structure
 * serves both SNMP descriptors and per-interface receive descriptors. */
struct lldpd_events {
	TAILQ_ENTRY(lldpd_events) next;	/* Linkage in the owning list */
	struct event *ev;		/* libevent read event for the FD */
};
TAILQ_HEAD(ev_l, lldpd_events);

/* Accessors casting the opaque storage pointers back to event lists. */
#define levent_snmp_fds(cfg) ((struct ev_l *)(cfg)->g_snmp_fds)
#define levent_hardware_fds(hardware) ((struct ev_l *)(hardware)->h_recv)
66 | ||
67 | #ifdef USE_SNMP | |
68 | # include <net-snmp/net-snmp-config.h> | |
69 | # include <net-snmp/net-snmp-includes.h> | |
70 | # include <net-snmp/agent/net-snmp-agent-includes.h> | |
71 | # include <net-snmp/agent/snmp_vars.h> | |
72 | ||
73 | /* Compatibility with older versions of NetSNMP */ | |
74 | # ifndef HAVE_SNMP_SELECT_INFO2 | |
75 | # define netsnmp_large_fd_set fd_set | |
76 | # define snmp_read2 snmp_read | |
77 | # define snmp_select_info2 snmp_select_info | |
78 | # define netsnmp_large_fd_set_init(...) | |
79 | # define netsnmp_large_fd_set_cleanup(...) | |
80 | # define NETSNMP_LARGE_FD_SET FD_SET | |
81 | # define NETSNMP_LARGE_FD_CLR FD_CLR | |
82 | # define NETSNMP_LARGE_FD_ZERO FD_ZERO | |
83 | # define NETSNMP_LARGE_FD_ISSET FD_ISSET | |
84 | # else | |
85 | # include <net-snmp/library/large_fd_set.h> | |
86 | # endif | |
87 | ||
88 | static void levent_snmp_update(struct lldpd *); | |
89 | ||
/*
 * Callback function when we have something to read from SNMP.
 *
 * This function is called because we have a read event on one SNMP
 * file descriptor. We need to call snmp_read() on it.
 */
static void
levent_snmp_read(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	netsnmp_large_fd_set fdset;
	(void)what;
	/* snmp_read2() takes an FD set; build one holding only the ready FD. */
	netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
	NETSNMP_LARGE_FD_ZERO(&fdset);
	NETSNMP_LARGE_FD_SET(fd, &fdset);
	snmp_read2(&fdset);
	/* NetSNMP may have opened or closed sessions; resync libevent. */
	levent_snmp_update(cfg);
}
108 | ||
/*
 * Callback function for a SNMP timeout.
 *
 * A SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
 */
static void
levent_snmp_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	(void)what;
	(void)fd;
	snmp_timeout();	/* Let NetSNMP retransmit/expire pending requests */
	run_alarms();	/* Run any due SNMP alarms */
	/* Timeout handling may change the FD set or next timeout; resync. */
	levent_snmp_update(cfg);
}
124 | ||
/*
 * Watch a new SNMP FD.
 *
 * @param cfg The lldpd configuration (provides the libevent base and the
 *            list of watched SNMP descriptors).
 * @param fd  The file descriptor we want to watch.
 *
 * The file descriptor is appended to the list of file descriptors we
 * want to watch. All failures are logged and leave no partial state.
 */
static void
levent_snmp_add_fd(struct lldpd *cfg, int fd)
{
	struct event_base *base = cfg->g_base;
	struct lldpd_events *snmpfd = calloc(1, sizeof(struct lldpd_events));
	if (!snmpfd) {
		log_warn("event", "unable to allocate memory for new SNMP event");
		return;
	}
	levent_make_socket_nonblocking(fd);
	/* Persistent read event dispatching to levent_snmp_read() */
	if ((snmpfd->ev = event_new(base, fd, EV_READ | EV_PERSIST, levent_snmp_read,
		 cfg)) == NULL) {
		log_warnx("event", "unable to allocate a new SNMP event for FD %d", fd);
		free(snmpfd);
		return;
	}
	if (event_add(snmpfd->ev, NULL) == -1) {
		log_warnx("event", "unable to schedule new SNMP event for FD %d", fd);
		event_free(snmpfd->ev);
		free(snmpfd);
		return;
	}
	TAILQ_INSERT_TAIL(levent_snmp_fds(cfg), snmpfd, next);
}
158 | ||
/*
 * Update SNMP event loop.
 *
 * New events are added and some other are removed. This function
 * should be called every time a SNMP event happens: either when
 * handling a SNMP packet, a SNMP timeout or when sending a SNMP
 * packet. This function will keep libevent in sync with NetSNMP.
 *
 * @param cfg The lldpd configuration (owns the libevent base and the
 *            list of watched SNMP file descriptors).
 */
static void
levent_snmp_update(struct lldpd *cfg)
{
	int maxfd = 0;
	int block = 1;
	struct timeval timeout;
	static int howmany = 0;	/* Event count from previous run, for logging */
	int added = 0, removed = 0, current = 0;
	struct lldpd_events *snmpfd, *snmpfd_next;

	/* snmp_select_info() can be tricky to understand. We set `block` to
	   1 to means that we don't request a timeout. snmp_select_info()
	   will reset `block` to 0 if it wants us to set up a timeout. In
	   this timeout, `snmp_timeout()` should be invoked.

	   Each FD in `fdset` will need to be watched for reading. If one of
	   them become active, `snmp_read()` should be called on it.
	*/

	netsnmp_large_fd_set fdset;
	netsnmp_large_fd_set_init(&fdset, FD_SETSIZE);
	NETSNMP_LARGE_FD_ZERO(&fdset);
	snmp_select_info2(&maxfd, &fdset, &timeout, &block);

	/* We need to untrack any event whose FD is not in `fdset`
	   anymore */
	for (snmpfd = TAILQ_FIRST(levent_snmp_fds(cfg)); snmpfd; snmpfd = snmpfd_next) {
		snmpfd_next = TAILQ_NEXT(snmpfd, next);
		if (event_get_fd(snmpfd->ev) >= maxfd ||
		    (!NETSNMP_LARGE_FD_ISSET(event_get_fd(snmpfd->ev), &fdset))) {
			event_free(snmpfd->ev);
			TAILQ_REMOVE(levent_snmp_fds(cfg), snmpfd, next);
			free(snmpfd);
			removed++;
		} else {
			/* Still wanted by NetSNMP: clear it from `fdset` so
			   the loop below only adds genuinely new FDs. */
			NETSNMP_LARGE_FD_CLR(event_get_fd(snmpfd->ev), &fdset);
			current++;
		}
	}

	/* Invariant: FD in `fdset` are not in list of FD */
	for (int fd = 0; fd < maxfd; fd++) {
		if (NETSNMP_LARGE_FD_ISSET(fd, &fdset)) {
			levent_snmp_add_fd(cfg, fd);
			added++;
		}
	}
	current += added;
	if (howmany != current) {
		log_debug("event",
		    "added %d events, removed %d events, total of %d events", added,
		    removed, current);
		howmany = current;
	}

	/* If needed, handle timeout */
	if (evtimer_add(cfg->g_snmp_timeout, block ? NULL : &timeout) == -1)
		log_warnx("event", "unable to schedule timeout function for SNMP");

	netsnmp_large_fd_set_cleanup(&fdset);
}
230 | #endif /* USE_SNMP */ | |
231 | ||
/* State for one client connected to the control Unix socket. */
struct lldpd_one_client {
	TAILQ_ENTRY(lldpd_one_client) next;	/* Linkage in lldpd_clients */
	struct lldpd *cfg;	/* Back-pointer to the daemon configuration */
	struct bufferevent *bev;	/* Buffered I/O on the client socket */
	int subscribed; /* Is this client subscribed to changes? */
};
TAILQ_HEAD(, lldpd_one_client) lldpd_clients;
239 | ||
240 | static void | |
241 | levent_ctl_free_client(struct lldpd_one_client *client) | |
242 | { | |
243 | if (client && client->bev) bufferevent_free(client->bev); | |
244 | if (client) { | |
245 | TAILQ_REMOVE(&lldpd_clients, client, next); | |
246 | free(client); | |
247 | } | |
248 | } | |
249 | ||
250 | static void | |
251 | levent_ctl_close_clients() | |
252 | { | |
253 | struct lldpd_one_client *client, *client_next; | |
254 | for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) { | |
255 | client_next = TAILQ_NEXT(client, next); | |
256 | levent_ctl_free_client(client); | |
257 | } | |
258 | } | |
259 | ||
/*
 * Send one framed message (header + payload) to a client.
 *
 * On write failure the client is destroyed, so the caller must not
 * touch `client` again after a negative return.
 *
 * @return `len` on success, -1 on error (client freed).
 */
static ssize_t
levent_ctl_send(struct lldpd_one_client *client, int type, void *data, size_t len)
{
	struct bufferevent *bev = client->bev;
	struct hmsg_header hdr = { .len = len, .type = type };
	/* Hold write events while queueing so header and payload are
	 * flushed together. */
	bufferevent_disable(bev, EV_WRITE);
	if (bufferevent_write(bev, &hdr, sizeof(struct hmsg_header)) == -1 ||
	    (len > 0 && bufferevent_write(bev, data, len) == -1)) {
		log_warnx("event", "unable to create answer to client");
		levent_ctl_free_client(client);
		return -1;
	}
	bufferevent_enable(bev, EV_WRITE);
	return len;
}
275 | ||
/*
 * Notify subscribed control clients of a neighbor change.
 *
 * The change is serialized lazily — once, when the first subscribed
 * client is reached — and the same payload is sent to every subscriber.
 *
 * @param ifname   Interface on which the change happened.
 * @param state    Change indicator (semantics defined by
 *                 struct lldpd_neighbor_change).
 * @param neighbor Neighbor port involved; temporarily patched during
 *                 serialization, restored before returning.
 */
void
levent_ctl_notify(char *ifname, int state, struct lldpd_port *neighbor)
{
	struct lldpd_one_client *client, *client_next;
	struct lldpd_neighbor_change neigh = { .ifname = ifname,
		.state = state,
		.neighbor = neighbor };
	void *output = NULL;
	ssize_t output_len = 0;

	/* Don't use TAILQ_FOREACH, the client may be deleted in case of errors. */
	log_debug("control", "notify clients of neighbor changes");
	for (client = TAILQ_FIRST(&lldpd_clients); client; client = client_next) {
		client_next = TAILQ_NEXT(client, next);
		if (!client->subscribed) continue;

		if (output == NULL) {
			/* Ugly hack: we don't want to transmit a list of
			 * ports. We patch the port to avoid this. */
			TAILQ_ENTRY(lldpd_port) backup_p_entries;
			memcpy(&backup_p_entries, &neighbor->p_entries,
			    sizeof(backup_p_entries));
			memset(&neighbor->p_entries, 0, sizeof(backup_p_entries));
			output_len = lldpd_neighbor_change_serialize(&neigh, &output);
			/* Restore the original list linkage */
			memcpy(&neighbor->p_entries, &backup_p_entries,
			    sizeof(backup_p_entries));

			if (output_len <= 0) {
				log_warnx("event",
				    "unable to serialize changed neighbor");
				return;
			}
		}

		levent_ctl_send(client, NOTIFICATION, output, output_len);
	}

	free(output);
}
315 | ||
316 | static ssize_t | |
317 | levent_ctl_send_cb(void *out, int type, void *data, size_t len) | |
318 | { | |
319 | struct lldpd_one_client *client = out; | |
320 | return levent_ctl_send(client, type, data, len); | |
321 | } | |
322 | ||
/*
 * Read callback for a control-socket client.
 *
 * Messages are framed as a `struct hmsg_header` followed by `hdr.len`
 * payload bytes. While a frame is incomplete the function returns
 * without draining, and libevent calls back when more data arrives.
 * Protocol errors destroy the client.
 */
static void
levent_ctl_recv(struct bufferevent *bev, void *ptr)
{
	struct lldpd_one_client *client = ptr;
	struct evbuffer *buffer = bufferevent_get_input(bev);
	size_t buffer_len = evbuffer_get_length(buffer);
	struct hmsg_header hdr;
	void *data = NULL;

	log_debug("control", "receive data on Unix socket");
	if (buffer_len < sizeof(struct hmsg_header)) return; /* Not enough data yet */
	/* Peek at the header without consuming it */
	if (evbuffer_copyout(buffer, &hdr, sizeof(struct hmsg_header)) !=
	    sizeof(struct hmsg_header)) {
		log_warnx("event", "not able to read header");
		return;
	}
	if (hdr.len > HMSG_MAX_SIZE) {
		log_warnx("event", "message received is too large");
		goto recv_error;
	}

	if (buffer_len < hdr.len + sizeof(struct hmsg_header))
		return; /* Not enough data yet */
	if (hdr.len > 0 && (data = malloc(hdr.len)) == NULL) {
		log_warnx("event", "not enough memory");
		goto recv_error;
	}
	/* Full frame available: consume the header, then the payload */
	evbuffer_drain(buffer, sizeof(struct hmsg_header));
	if (hdr.len > 0) evbuffer_remove(buffer, data, hdr.len);

	/* Currently, we should not receive notification acknowledgment. But if
	 * we receive one, we can discard it. */
	if (hdr.len == 0 && hdr.type == NOTIFICATION) return;
	if (client_handle_client(client->cfg, levent_ctl_send_cb, client, hdr.type,
		data, hdr.len, &client->subscribed) == -1)
		goto recv_error;
	free(data);
	return;

recv_error:
	free(data);
	levent_ctl_free_client(client);
}
366 | ||
367 | static void | |
368 | levent_ctl_event(struct bufferevent *bev, short events, void *ptr) | |
369 | { | |
370 | struct lldpd_one_client *client = ptr; | |
371 | if (events & BEV_EVENT_ERROR) { | |
372 | log_warnx("event", "an error occurred with client: %s", | |
373 | evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR())); | |
374 | levent_ctl_free_client(client); | |
375 | } else if (events & BEV_EVENT_EOF) { | |
376 | log_debug("event", "client has been disconnected"); | |
377 | levent_ctl_free_client(client); | |
378 | } | |
379 | } | |
380 | ||
/*
 * Accept a new connection on the control Unix socket and attach a
 * buffered client to it.
 */
static void
levent_ctl_accept(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct lldpd_one_client *client = NULL;
	int s;
	(void)what;

	log_debug("control", "accept a new connection");
	if ((s = accept(fd, NULL, NULL)) == -1) {
		log_warn("event", "unable to accept connection from socket");
		return;
	}
	client = calloc(1, sizeof(struct lldpd_one_client));
	if (!client) {
		log_warnx("event", "unable to allocate memory for new client");
		close(s);
		goto accept_failed;
	}
	client->cfg = cfg;
	levent_make_socket_nonblocking(s);
	/* Insert before creating the buffer event: the failure path calls
	 * levent_ctl_free_client(), which unlinks from this list. */
	TAILQ_INSERT_TAIL(&lldpd_clients, client, next);
	/* BEV_OPT_CLOSE_ON_FREE: freeing the buffer event also closes `s` */
	if ((client->bev = bufferevent_socket_new(cfg->g_base, s,
		 BEV_OPT_CLOSE_ON_FREE)) == NULL) {
		log_warnx("event",
		    "unable to allocate a new buffer event for new client");
		close(s);
		goto accept_failed;
	}
	bufferevent_setcb(client->bev, levent_ctl_recv, NULL, levent_ctl_event, client);
	bufferevent_enable(client->bev, EV_READ | EV_WRITE);
	log_debug("event", "new client accepted");
	/* coverity[leaked_handle]
	   s has been saved by bufferevent_socket_new */
	return;
accept_failed:
	levent_ctl_free_client(client);
}
419 | ||
/*
 * Check the monitor (privileged) process for liveness.
 *
 * Performs a non-blocking 1-byte read on the privilege-separation
 * socket. No data pending means the monitor is alive; EOF, a read
 * error, or unexpected data means we must exit, so the event loop is
 * broken.
 */
static void
levent_priv(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	ssize_t n;
	int err;
	char one;
	(void)what;
	/* Check if we have some data available. We need to pass the socket in
	 * non-blocking mode to be able to run the check without disruption. */
	levent_make_socket_nonblocking(fd);
	n = read(fd, &one, 1);
	err = errno;	/* Save errno before the next fcntl() can clobber it */
	levent_make_socket_blocking(fd);

	switch (n) {
	case -1:
		if (err == EAGAIN || err == EWOULDBLOCK) /* No data, all good */
			return;
		log_warnx("event", "unable to poll monitor process, exit");
		break;
	case 0:
		log_warnx("event", "monitor process has terminated, exit");
		break;
	default:
		/* This is a bit unsafe as we are now out-of-sync with the
		 * monitor. It would be safer to request 0 byte, but some OS
		 * (illumos) seem to take the shortcut that by asking 0 byte,
		 * we can just return 0 byte. */
		log_warnx("event",
		    "received unexpected data from monitor process, exit");
		break;
	}
	event_base_loopbreak(base);
}
455 | ||
456 | static void | |
457 | levent_dump(evutil_socket_t fd, short what, void *arg) | |
458 | { | |
459 | struct event_base *base = arg; | |
460 | (void)fd; | |
461 | (void)what; | |
462 | log_debug("event", "dumping all events"); | |
463 | event_base_dump_events(base, stderr); | |
464 | } | |
465 | static void | |
466 | levent_stop(evutil_socket_t fd, short what, void *arg) | |
467 | { | |
468 | struct event_base *base = arg; | |
469 | (void)fd; | |
470 | (void)what; | |
471 | event_base_loopbreak(base); | |
472 | } | |
473 | ||
/*
 * Run one iteration of the main loop and re-arm its timer.
 *
 * The base interval is c_tx_interval (milliseconds). When interface
 * change notifications are available (g_iface_event set), polling can
 * be much rarer (x20); the interval is floored at 30 seconds.
 */
static void
levent_update_and_send(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	struct timeval tv;
	long interval_ms = cfg->g_config.c_tx_interval;

	(void)fd;
	(void)what;
	lldpd_loop(cfg);
	if (cfg->g_iface_event != NULL) interval_ms *= 20;
	if (interval_ms < 30000) interval_ms = 30000;
	tv.tv_sec = interval_ms / 1000;
	tv.tv_usec = (interval_ms % 1000) * 1000;
	event_add(cfg->g_main_loop, &tv);
}
490 | ||
491 | void | |
492 | levent_update_now(struct lldpd *cfg) | |
493 | { | |
494 | if (cfg->g_main_loop) event_active(cfg->g_main_loop, EV_TIMEOUT, 1); | |
495 | } | |
496 | ||
/*
 * Trigger an immediate PDU transmission on every interface by firing
 * each interface's transmit timer.
 */
void
levent_send_now(struct lldpd *cfg)
{
	struct lldpd_hardware *hardware;
	TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
		if (hardware->h_timer)
			event_active(hardware->h_timer, EV_TIMEOUT, 1);
		else
			/* levent_schedule_pdu() should have created the timer */
			log_warnx("event", "BUG: no timer present for interface %s",
			    hardware->h_ifname);
	}
}
509 | ||
/*
 * One-time setup of the event machinery: libevent base, optional SNMP
 * agent, main loop timer, control Unix socket, monitor-process watchdog
 * and signal handlers. Any failure here is fatal.
 */
static void
levent_init(struct lldpd *cfg)
{
	/* Set up libevent */
	log_debug("event", "initialize libevent");
	event_set_log_callback(levent_log_cb);
	if (!(cfg->g_base = event_base_new()))
		fatalx("event", "unable to create a new libevent base");
	log_info("event", "libevent %s initialized with %s method", event_get_version(),
	    event_base_get_method(cfg->g_base));

	/* Set up SNMP */
#ifdef USE_SNMP
	if (cfg->g_snmp) {
		agent_init(cfg, cfg->g_snmp_agentx);
		cfg->g_snmp_timeout =
		    evtimer_new(cfg->g_base, levent_snmp_timeout, cfg);
		if (!cfg->g_snmp_timeout)
			fatalx("event", "unable to setup timeout function for SNMP");
		if ((cfg->g_snmp_fds = malloc(sizeof(struct ev_l))) == NULL)
			fatalx("event", "unable to allocate memory for SNMP events");
		TAILQ_INIT(levent_snmp_fds(cfg));
	}
#endif

	/* Setup loop that will run every X seconds. */
	log_debug("event", "register loop timer");
	if (!(cfg->g_main_loop =
		    event_new(cfg->g_base, -1, 0, levent_update_and_send, cfg)))
		fatalx("event", "unable to setup main timer");
	/* Fire the main loop right away for its first run */
	event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

	/* Set up unix socket */
	struct event *ctl_event;
	log_debug("event", "register Unix socket");
	TAILQ_INIT(&lldpd_clients);
	levent_make_socket_nonblocking(cfg->g_ctl);
	if ((ctl_event = event_new(cfg->g_base, cfg->g_ctl, EV_READ | EV_PERSIST,
		 levent_ctl_accept, cfg)) == NULL)
		fatalx("event", "unable to setup control socket event");
	event_add(ctl_event, NULL);

	/* Somehow monitor the monitor process */
	struct event *monitor_event;
	log_debug("event", "monitor the monitor process");
	if ((monitor_event = event_new(cfg->g_base, priv_fd(PRIV_UNPRIVILEGED),
		 EV_READ | EV_PERSIST, levent_priv, cfg->g_base)) == NULL)
		fatalx("event", "unable to monitor monitor process");
	event_add(monitor_event, NULL);

	/* Signals */
	log_debug("event", "register signals");
	evsignal_add(evsignal_new(cfg->g_base, SIGUSR1, levent_dump, cfg->g_base),
	    NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGINT, levent_stop, cfg->g_base), NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGTERM, levent_stop, cfg->g_base),
	    NULL);
}
568 | ||
/* Initialize libevent and start the event loop */
void
levent_loop(struct lldpd *cfg)
{
	levent_init(cfg);
	lldpd_loop(cfg);	/* Initial pass before entering the loop */
#ifdef USE_SNMP
	if (cfg->g_snmp) levent_snmp_update(cfg);
#endif

	/* libevent loop: one iteration at a time so we can check for
	 * break/exit requests between iterations. */
	do {
		TRACE(LLDPD_EVENT_LOOP());
		if (event_base_got_break(cfg->g_base) ||
		    event_base_got_exit(cfg->g_base))
			break;
	} while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

	if (cfg->g_iface_timer_event != NULL) event_free(cfg->g_iface_timer_event);

#ifdef USE_SNMP
	if (cfg->g_snmp) agent_shutdown();
#endif /* USE_SNMP */

	levent_ctl_close_clients();
}
595 | ||
/* Release libevent resources */
void
levent_shutdown(struct lldpd *cfg)
{
	/* Free the remaining global events, then the base itself. */
	if (cfg->g_iface_event) event_free(cfg->g_iface_event);
	if (cfg->g_cleanup_timer) event_free(cfg->g_cleanup_timer);
	event_base_free(cfg->g_base);
}
604 | ||
/* Read callback for an interface FD: process the received frame and
 * reschedule the cleanup task (new neighbor data may change the next
 * expiry). */
static void
levent_hardware_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd_hardware *hardware = arg;
	struct lldpd *cfg = hardware->h_cfg;
	(void)what;
	log_debug("event", "received something for %s", hardware->h_ifname);
	lldpd_recv(cfg, hardware, fd);
	levent_schedule_cleanup(cfg);
}
615 | ||
616 | void | |
617 | levent_hardware_init(struct lldpd_hardware *hardware) | |
618 | { | |
619 | log_debug("event", "initialize events for %s", hardware->h_ifname); | |
620 | if ((hardware->h_recv = malloc(sizeof(struct ev_l))) == NULL) { | |
621 | log_warnx("event", "unable to allocate memory for %s", | |
622 | hardware->h_ifname); | |
623 | return; | |
624 | } | |
625 | TAILQ_INIT(levent_hardware_fds(hardware)); | |
626 | } | |
627 | ||
/*
 * Watch an additional file descriptor for an interface.
 *
 * The FD is made non-blocking and a persistent read event dispatching
 * to levent_hardware_recv() is registered, then appended to the
 * interface's receive list. No-op if levent_hardware_init() did not
 * succeed first (h_recv is NULL).
 */
void
levent_hardware_add_fd(struct lldpd_hardware *hardware, int fd)
{
	struct lldpd_events *hfd = NULL;
	if (!hardware->h_recv) return;

	hfd = calloc(1, sizeof(struct lldpd_events));
	if (!hfd) {
		log_warnx("event", "unable to allocate new event for %s",
		    hardware->h_ifname);
		return;
	}
	levent_make_socket_nonblocking(fd);
	if ((hfd->ev = event_new(hardware->h_cfg->g_base, fd, EV_READ | EV_PERSIST,
		 levent_hardware_recv, hardware)) == NULL) {
		log_warnx("event", "unable to allocate a new event for %s",
		    hardware->h_ifname);
		free(hfd);
		return;
	}
	if (event_add(hfd->ev, NULL) == -1) {
		log_warnx("event", "unable to schedule new event for %s",
		    hardware->h_ifname);
		event_free(hfd->ev);
		free(hfd);
		return;
	}
	TAILQ_INSERT_TAIL(levent_hardware_fds(hardware), hfd, next);
}
657 | ||
658 | void | |
659 | levent_hardware_release(struct lldpd_hardware *hardware) | |
660 | { | |
661 | struct lldpd_events *ev, *ev_next; | |
662 | if (hardware->h_timer) { | |
663 | event_free(hardware->h_timer); | |
664 | hardware->h_timer = NULL; | |
665 | } | |
666 | if (!hardware->h_recv) return; | |
667 | ||
668 | log_debug("event", "release events for %s", hardware->h_ifname); | |
669 | for (ev = TAILQ_FIRST(levent_hardware_fds(hardware)); ev; ev = ev_next) { | |
670 | ev_next = TAILQ_NEXT(ev, next); | |
671 | /* We may close several time the same FD. This is harmless. */ | |
672 | close(event_get_fd(ev->ev)); | |
673 | event_free(ev->ev); | |
674 | TAILQ_REMOVE(levent_hardware_fds(hardware), ev, next); | |
675 | free(ev); | |
676 | } | |
677 | free(levent_hardware_fds(hardware)); | |
678 | } | |
679 | ||
680 | static void | |
681 | levent_iface_trigger(evutil_socket_t fd, short what, void *arg) | |
682 | { | |
683 | struct lldpd *cfg = arg; | |
684 | log_debug("event", "triggering update of all interfaces"); | |
685 | lldpd_update_localports(cfg); | |
686 | } | |
687 | ||
/*
 * Read callback for the interface-change notification socket.
 *
 * Either hands the notification to the registered callback
 * (g_iface_cb) or drains and discards the pending messages. Either
 * way, a full local-port update is scheduled one second later so that
 * a burst of notifications collapses into a single update.
 */
static void
levent_iface_recv(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd *cfg = arg;
	char buffer[EVENT_BUFFER];
	int n;

	if (cfg->g_iface_cb == NULL) {
		/* Discard the message */
		while (1) {
			n = read(fd, buffer, sizeof(buffer));
			if (n == -1 && (errno == EWOULDBLOCK || errno == EAGAIN)) break;
			if (n == -1) {
				log_warn("event",
				    "unable to receive interface change notification message");
				return;
			}
			if (n == 0) {
				log_warnx("event",
				    "end of file reached while getting interface change notification message");
				return;
			}
		}
	} else {
		cfg->g_iface_cb(cfg);
	}

	/* Schedule local port update. We don't run it right away because we may
	 * receive a batch of events like this. */
	struct timeval one_sec = { 1, 0 };
	TRACE(LLDPD_INTERFACES_NOTIFICATION());
	log_debug("event",
	    "received notification change, schedule an update of all interfaces in one second");
	if (cfg->g_iface_timer_event == NULL) {
		/* Created lazily on first notification */
		if ((cfg->g_iface_timer_event = evtimer_new(cfg->g_base,
			 levent_iface_trigger, cfg)) == NULL) {
			log_warnx("event",
			    "unable to create a new event to trigger interface update");
			return;
		}
	}
	if (evtimer_add(cfg->g_iface_timer_event, &one_sec) == -1) {
		log_warnx("event", "unable to schedule interface updates");
		return;
	}
}
734 | ||
/*
 * Subscribe to interface change notifications delivered on `socket`.
 *
 * Registers a persistent read event dispatching to levent_iface_recv().
 *
 * @return 0 on success, -1 on failure (socket left unwatched,
 *         g_iface_event reset to NULL).
 */
int
levent_iface_subscribe(struct lldpd *cfg, int socket)
{
	log_debug("event", "subscribe to interface changes from socket %d", socket);
	levent_make_socket_nonblocking(socket);
	cfg->g_iface_event = event_new(cfg->g_base, socket, EV_READ | EV_PERSIST,
	    levent_iface_recv, cfg);
	if (cfg->g_iface_event == NULL) {
		log_warnx("event",
		    "unable to allocate a new event for interface changes");
		return -1;
	}
	if (event_add(cfg->g_iface_event, NULL) == -1) {
		log_warnx("event", "unable to schedule new interface changes event");
		event_free(cfg->g_iface_event);
		cfg->g_iface_event = NULL;
		return -1;
	}
	return 0;
}
755 | ||
756 | static void | |
757 | levent_trigger_cleanup(evutil_socket_t fd, short what, void *arg) | |
758 | { | |
759 | struct lldpd *cfg = arg; | |
760 | lldpd_cleanup(cfg); | |
761 | } | |
762 | ||
/*
 * (Re)schedule the cleanup task.
 *
 * Replaces any pending cleanup timer with a new one firing when the
 * next remote port is due to expire: at most c_ttl seconds from now,
 * and immediately if some port has already outlived its TTL.
 */
void
levent_schedule_cleanup(struct lldpd *cfg)
{
	log_debug("event", "schedule next cleanup");
	if (cfg->g_cleanup_timer != NULL) {
		event_free(cfg->g_cleanup_timer);
	}
	cfg->g_cleanup_timer = evtimer_new(cfg->g_base, levent_trigger_cleanup, cfg);
	if (cfg->g_cleanup_timer == NULL) {
		log_warnx("event", "unable to allocate a new event for cleanup tasks");
		return;
	}

	/* Compute the next TTL event */
	struct timeval tv = { cfg->g_config.c_ttl, 0 };
	time_t now = time(NULL);
	time_t next;
	struct lldpd_hardware *hardware;
	struct lldpd_port *port;
	TAILQ_FOREACH (hardware, &cfg->g_hardware, h_entries) {
		TAILQ_FOREACH (port, &hardware->h_rports, p_entries) {
			if (now >= port->p_lastupdate + port->p_ttl) {
				/* Already expired: clean up right away */
				tv.tv_sec = 0;
				log_debug("event",
				    "immediate cleanup on port %s (%lld, %d, %lld)",
				    hardware->h_ifname, (long long)now, port->p_ttl,
				    (long long)port->p_lastupdate);
				break;
			}
			/* Seconds until this port expires; keep the minimum */
			next = port->p_ttl - (now - port->p_lastupdate);
			if (next < tv.tv_sec) tv.tv_sec = next;
		}
	}

	log_debug("event", "next cleanup in %ld seconds", (long)tv.tv_sec);
	if (event_add(cfg->g_cleanup_timer, &tv) == -1) {
		log_warnx("event", "unable to schedule cleanup task");
		event_free(cfg->g_cleanup_timer);
		cfg->g_cleanup_timer = NULL;
		return;
	}
}
805 | ||
/*
 * Timer callback: send a PDU on one interface and re-arm its timer.
 *
 * The regular period is c_tx_interval (milliseconds). With LLDP-MED,
 * while in fast-transmit mode (h_tx_fast > 0) the counter is
 * decremented and c_tx_fast_interval (converted to ms) is used instead.
 */
static void
levent_send_pdu(evutil_socket_t fd, short what, void *arg)
{
	struct lldpd_hardware *hardware = arg;
	int tx_interval = hardware->h_cfg->g_config.c_tx_interval;

	log_debug("event", "trigger sending PDU for port %s", hardware->h_ifname);
	lldpd_send(hardware);

#ifdef ENABLE_LLDPMED
	if (hardware->h_tx_fast > 0) hardware->h_tx_fast--;

	if (hardware->h_tx_fast > 0)
		tx_interval = hardware->h_cfg->g_config.c_tx_fast_interval * 1000;
#endif

	struct timeval tv;
	tv.tv_sec = tx_interval / 1000;
	tv.tv_usec = (tx_interval % 1000) * 1000;
	if (event_add(hardware->h_timer, &tv) == -1) {
		log_warnx("event", "unable to re-register timer event for port %s",
		    hardware->h_ifname);
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
		return;
	}
}
833 | ||
/*
 * Schedule an immediate PDU transmission for an interface.
 *
 * Lazily creates the interface's transmit timer and arms it with a
 * zero timeout; subsequent transmissions are re-armed by
 * levent_send_pdu() itself.
 */
void
levent_schedule_pdu(struct lldpd_hardware *hardware)
{
	log_debug("event", "schedule sending PDU on %s", hardware->h_ifname);
	if (hardware->h_timer == NULL) {
		hardware->h_timer =
		    evtimer_new(hardware->h_cfg->g_base, levent_send_pdu, hardware);
		if (hardware->h_timer == NULL) {
			log_warnx("event", "unable to schedule PDU sending for port %s",
			    hardware->h_ifname);
			return;
		}
	}

	/* Zero timeout: fire on the next loop iteration */
	struct timeval tv = { 0, 0 };
	if (event_add(hardware->h_timer, &tv) == -1) {
		log_warnx("event", "unable to register timer event for port %s",
		    hardware->h_ifname);
		event_free(hardware->h_timer);
		hardware->h_timer = NULL;
		return;
	}
}
857 | ||
858 | int | |
859 | levent_make_socket_nonblocking(int fd) | |
860 | { | |
861 | int flags; | |
862 | if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) { | |
863 | log_warn("event", "fcntl(%d, F_GETFL)", fd); | |
864 | return -1; | |
865 | } | |
866 | if (flags & O_NONBLOCK) return 0; | |
867 | if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) { | |
868 | log_warn("event", "fcntl(%d, F_SETFL)", fd); | |
869 | return -1; | |
870 | } | |
871 | return 0; | |
872 | } | |
873 | ||
874 | int | |
875 | levent_make_socket_blocking(int fd) | |
876 | { | |
877 | int flags; | |
878 | if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) { | |
879 | log_warn("event", "fcntl(%d, F_GETFL)", fd); | |
880 | return -1; | |
881 | } | |
882 | if (!(flags & O_NONBLOCK)) return 0; | |
883 | if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) { | |
884 | log_warn("event", "fcntl(%d, F_SETFL)", fd); | |
885 | return -1; | |
886 | } | |
887 | return 0; | |
888 | } | |
889 | ||
#ifdef HOST_OS_LINUX
/* Receive and log error from a socket when there is suspicion of an error.
 *
 * Drains the socket's MSG_ERRQUEUE without blocking, logging the
 * ancillary message level/type of each queued error, and returns once
 * the queue is empty. */
void
levent_recv_error(int fd, const char *source)
{
	do {
		ssize_t n;
		char buf[1024] = {};
		struct msghdr msg = { .msg_control = buf,
			.msg_controllen = sizeof(buf) };
		if ((n = recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT)) <= 0) {
			return;	/* Queue drained (or real error); done */
		}
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
		if (cmsg == NULL)
			log_warnx("event", "received unknown error on %s", source);
		else
			log_warnx("event", "received error (level=%d/type=%d) on %s",
			    cmsg->cmsg_level, cmsg->cmsg_type, source);
	} while (1);
}
#endif