/*
 * Copyright (c) 2012 Vincent Bernat <bernat@luffy.cx>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
21 #include <event2/event.h>
/**
 * Route libevent's internal log messages into our own logging facility.
 *
 * @param severity libevent severity constant (_EVENT_LOG_DEBUG..ERR).
 * @param msg      Message text from libevent.
 *
 * NOTE(review): reconstructed from a garbled extraction; the `switch`
 * frame and return type were restored from context — verify upstream.
 */
static void
levent_log_cb(int severity, const char *msg)
{
	switch (severity) {
	case _EVENT_LOG_DEBUG:
		log_debug("libevent[debug]: %s", msg);
		break;
	case _EVENT_LOG_MSG:
		log_info ("libevent[info]: %s", msg);
		break;
	case _EVENT_LOG_WARN:
		log_warnx("libevent[warn]: %s", msg);
		break;
	case _EVENT_LOG_ERR:
		log_warnx("libevent[error]: %s", msg);
		break;
	default:
		/* Unknown severity: ignore rather than crash. */
		break;
	}
}
/* One watched file descriptor, wrapped in a libevent event and chained
 * in a tail queue. Used both for SNMP FDs and per-hardware receive FDs.
 * NOTE(review): the struct header and the `ev` member were not visible in
 * the extraction; they are implied by `snmpfd->ev` / `hfd->ev` usage below. */
struct lldpd_events {
	TAILQ_ENTRY(lldpd_events) next; /* Linkage in the owning list. */
	struct event *ev;               /* The libevent event watching the FD. */
};
TAILQ_HEAD(ev_l, lldpd_events);

/* The opaque `void *` storage in the config/hardware structures is really
 * a `struct ev_l` list head; these macros do the cast in one place. */
#define levent_snmp_fds(cfg) ((struct ev_l*)(cfg)->g_snmp_fds)
#define levent_hardware_fds(hardware) ((struct ev_l*)(hardware)->h_recv)
44 #include <net-snmp/net-snmp-config.h>
45 #include <net-snmp/net-snmp-includes.h>
46 #include <net-snmp/agent/net-snmp-agent-includes.h>
47 #include <net-snmp/agent/snmp_vars.h>
/* Forward declaration: the read/timeout callbacks below must resync the
 * watched FD set with NetSNMP after every SNMP activity. */
static void levent_snmp_update(struct lldpd *);
52 * Callback function when we have something to read from SNMP.
54 * This function is called because we have a read event on one SNMP
55 * file descriptor. When need to call snmp_read() on it.
58 levent_snmp_read(evutil_socket_t fd
, short what
, void *arg
)
60 struct lldpd
*cfg
= arg
;
66 levent_snmp_update(cfg
);
70 * Callback function for a SNMP timeout.
72 * A SNMP timeout has occurred. Call `snmp_timeout()` to handle it.
75 levent_snmp_timeout(evutil_socket_t fd
, short what
, void *arg
)
77 struct lldpd
*cfg
= arg
;
81 levent_snmp_update(cfg
);
85 * Watch a new SNMP FD.
87 * @param base The libevent base we are working on.
88 * @param fd The file descriptor we want to watch.
90 * The file descriptor is appended to the list of file descriptors we
94 levent_snmp_add_fd(struct lldpd
*cfg
, int fd
)
96 struct event_base
*base
= cfg
->g_base
;
97 struct lldpd_events
*snmpfd
= calloc(1, sizeof(struct lldpd_events
));
99 LLOG_WARN("unable to allocate memory for new SNMP event");
102 evutil_make_socket_nonblocking(fd
);
103 if ((snmpfd
->ev
= event_new(base
, fd
,
104 EV_READ
| EV_PERSIST
,
107 LLOG_WARNX("unable to allocate a new SNMP event for FD %d", fd
);
111 if (event_add(snmpfd
->ev
, NULL
) == -1) {
112 LLOG_WARNX("unable to schedule new SNMP event for FD %d", fd
);
113 event_free(snmpfd
->ev
);
117 TAILQ_INSERT_TAIL(levent_snmp_fds(cfg
), snmpfd
, next
);
121 * Update SNMP event loop.
123 * New events are added and some other are removed. This function
124 * should be called every time a SNMP event happens: either when
125 * handling a SNMP packet, a SNMP timeout or when sending a SNMP
126 * packet. This function will keep libevent in sync with NetSNMP.
128 * @param base The libevent base we are working on.
131 levent_snmp_update(struct lldpd
*cfg
)
136 struct timeval timeout
;
137 static int howmany
= 0;
138 int added
= 0, removed
= 0, current
= 0;
139 struct lldpd_events
*snmpfd
, *snmpfd_next
;
141 /* snmp_select_info() can be tricky to understand. We set `block` to
142 1 to means that we don't request a timeout. snmp_select_info()
143 will reset `block` to 0 if it wants us to setup a timeout. In
144 this timeout, `snmp_timeout()` should be invoked.
146 Each FD in `fdset` will need to be watched for reading. If one of
147 them become active, `snmp_read()` should be called on it.
151 snmp_select_info(&maxfd
, &fdset
, &timeout
, &block
);
153 /* We need to untrack any event whose FD is not in `fdset`
155 for (snmpfd
= TAILQ_FIRST(levent_snmp_fds(cfg
));
157 snmpfd
= snmpfd_next
) {
158 snmpfd_next
= TAILQ_NEXT(snmpfd
, next
);
159 if (event_get_fd(snmpfd
->ev
) >= maxfd
||
160 (!FD_ISSET(event_get_fd(snmpfd
->ev
), &fdset
))) {
161 event_free(snmpfd
->ev
);
162 TAILQ_REMOVE(levent_snmp_fds(cfg
), snmpfd
, next
);
166 FD_CLR(event_get_fd(snmpfd
->ev
), &fdset
);
171 /* Invariant: FD in `fdset` are not in list of FD */
172 for (int fd
= 0; fd
< maxfd
; fd
++) {
173 if (FD_ISSET(fd
, &fdset
)) {
174 levent_snmp_add_fd(cfg
, fd
);
179 if (howmany
!= current
) {
180 LLOG_DEBUG("added %d events, removed %d events, total of %d events",
181 added
, removed
, current
);
185 /* If needed, handle timeout */
186 if (evtimer_add(cfg
->g_snmp_timeout
, block
?NULL
:&timeout
) == -1)
187 LLOG_WARNX("unable to schedule timeout function for SNMP");
189 #endif /* USE_SNMP */
/* State for one connected control-socket client.
 * NOTE(review): members were missing from the extraction; restored from
 * `client->cfg` / `client->ev` usage in the callbacks below. */
struct lldpd_one_client {
	struct lldpd *cfg; /* Back-pointer to the daemon configuration. */
	struct event *ev;  /* Read event on the client's socket. */
};
197 levent_ctl_recv(evutil_socket_t fd
, short what
, void *arg
)
199 struct lldpd_one_client
*client
= arg
;
205 if ((n
= ctl_msg_recv(fd
, &type
, &buffer
)) == -1 ||
206 client_handle_client(client
->cfg
, fd
, type
, buffer
, n
) == -1) {
208 event_free(client
->ev
);
215 levent_ctl_accept(evutil_socket_t fd
, short what
, void *arg
)
217 struct lldpd
*cfg
= arg
;
218 struct lldpd_one_client
*client
= NULL
;
222 if ((s
= accept(fd
, NULL
, NULL
)) == -1) {
223 LLOG_WARN("unable to accept connection from socket");
226 client
= calloc(1, sizeof(struct lldpd_one_client
));
228 LLOG_WARNX("unable to allocate memory for new client");
232 evutil_make_socket_nonblocking(s
);
233 if ((client
->ev
= event_new(cfg
->g_base
, s
,
234 EV_READ
| EV_PERSIST
,
237 LLOG_WARNX("unable to allocate a new event for new client");
240 if (event_add(client
->ev
, NULL
) == -1) {
241 LLOG_WARNX("unable to schedule new event for new client");
246 if (client
&& client
->ev
) event_free(client
->ev
);
/**
 * Signal callback: dump all pending libevent events to stderr.
 * Wired to SIGUSR1 in levent_init(); `arg` is the event base.
 */
static void
levent_dump(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd; (void)what;
	event_base_dump_events(base, stderr);
}
/**
 * Signal callback: break out of the event loop.
 * Wired to SIGHUP/SIGINT/SIGTERM in levent_init(); `arg` is the event base.
 */
static void
levent_stop(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd; (void)what;
	event_base_loopbreak(base);
}
267 levent_update_and_send(evutil_socket_t fd
, short what
, void *arg
)
269 struct lldpd
*cfg
= arg
;
270 struct timeval tv
= {cfg
->g_delay
, 0};
271 (void)fd
; (void)what
;
273 event_add(cfg
->g_main_loop
, &tv
);
277 levent_init(struct lldpd
*cfg
)
280 event_set_log_callback(levent_log_cb
);
281 if (!(cfg
->g_base
= event_base_new()))
282 fatalx("unable to create a new libevent base");
283 LLOG_INFO("libevent %s initialized with %s method",
285 event_base_get_method(cfg
->g_base
));
290 agent_init(cfg
, cfg
->g_snmp_agentx
);
291 cfg
->g_snmp_timeout
= evtimer_new(cfg
->g_base
,
294 if (!cfg
->g_snmp_timeout
)
295 fatalx("unable to setup timeout function for SNMP");
296 if ((cfg
->g_snmp_fds
=
297 malloc(sizeof(struct ev_l
))) == NULL
)
298 fatalx("unable to allocate memory for SNMP events");
299 TAILQ_INIT(levent_snmp_fds(cfg
));
303 /* Setup loop that will run every 30 seconds. */
304 if (!(cfg
->g_main_loop
= event_new(cfg
->g_base
, -1, 0,
305 levent_update_and_send
,
307 fatalx("unable to setup main timer");
308 event_active(cfg
->g_main_loop
, EV_TIMEOUT
, 1);
310 /* Setup unix socket */
311 evutil_make_socket_nonblocking(cfg
->g_ctl
);
312 if ((cfg
->g_ctl_event
= event_new(cfg
->g_base
, cfg
->g_ctl
,
313 EV_READ
|EV_PERSIST
, levent_ctl_accept
, cfg
)) == NULL
)
314 fatalx("unable to setup control socket event");
315 event_add(cfg
->g_ctl_event
, NULL
);
318 evsignal_add(evsignal_new(cfg
->g_base
, SIGUSR1
,
319 levent_dump
, cfg
->g_base
),
321 evsignal_add(evsignal_new(cfg
->g_base
, SIGHUP
,
322 levent_stop
, cfg
->g_base
),
324 evsignal_add(evsignal_new(cfg
->g_base
, SIGINT
,
325 levent_stop
, cfg
->g_base
),
327 evsignal_add(evsignal_new(cfg
->g_base
, SIGTERM
,
328 levent_stop
, cfg
->g_base
),
/* Initialize libevent and start the event loop */
/**
 * Main entry point: set everything up, then loop one libevent iteration
 * at a time until a break/exit is requested.
 *
 * NOTE(review): the levent_init() call, the do/while frame and the
 * post-loop SNMP shutdown were missing from the extraction and restored
 * from context — verify upstream.
 */
void
levent_loop(struct lldpd *cfg)
{
	levent_init(cfg);

	/* libevent loop */
	do {
		if (event_base_got_break(cfg->g_base) ||
		    event_base_got_exit(cfg->g_base))
			break;
#ifdef USE_SNMP
		if (cfg->g_snmp) {
			/* We don't use delegated requests (request
			   whose answer is delayed). However, we keep
			   the call here in case we use it some
			   day. We don't call run_alarms() here. We do
			   it on timeout only. */
			netsnmp_check_outstanding_agent_requests();
			levent_snmp_update(cfg);
		}
#endif
	} while (event_base_loop(cfg->g_base, EVLOOP_ONCE) == 0);

#ifdef USE_SNMP
	if (cfg->g_snmp)
		agent_shutdown();
#endif /* USE_SNMP */
}
364 levent_hardware_recv(evutil_socket_t fd
, short what
, void *arg
)
366 struct lldpd_hardware
*hardware
= arg
;
367 struct lldpd
*cfg
= hardware
->h_cfg
;
369 lldpd_recv(cfg
, hardware
, fd
);
373 levent_hardware_init(struct lldpd_hardware
*hardware
)
375 if ((hardware
->h_recv
=
376 malloc(sizeof(struct ev_l
))) == NULL
) {
377 LLOG_WARNX("unable to allocate memory for %s",
381 TAILQ_INIT(levent_hardware_fds(hardware
));
385 levent_hardware_add_fd(struct lldpd_hardware
*hardware
, int fd
)
387 struct lldpd_events
*hfd
= NULL
;
388 if (!hardware
->h_recv
) return;
390 hfd
= calloc(1, sizeof(struct lldpd_events
));
392 LLOG_WARNX("unable to allocate new event for %s",
396 evutil_make_socket_nonblocking(fd
);
397 if ((hfd
->ev
= event_new(hardware
->h_cfg
->g_base
, fd
,
398 EV_READ
| EV_PERSIST
,
399 levent_hardware_recv
,
400 hardware
)) == NULL
) {
401 LLOG_WARNX("unable to allocate a new event for %s",
406 if (event_add(hfd
->ev
, NULL
) == -1) {
407 LLOG_WARNX("unable to schedule new event for %s",
413 TAILQ_INSERT_TAIL(levent_hardware_fds(hardware
), hfd
, next
);
417 levent_hardware_release(struct lldpd_hardware
*hardware
)
419 struct lldpd_events
*ev
, *ev_next
;
420 if (!hardware
->h_recv
) return;
422 for (ev
= TAILQ_FIRST(levent_hardware_fds(hardware
));
425 ev_next
= TAILQ_NEXT(ev
, next
);
426 /* We may close several time the same FD. This is harmless. */
427 close(event_get_fd(ev
->ev
));
429 TAILQ_REMOVE(levent_hardware_fds(hardware
), ev
, next
);
432 free(levent_hardware_fds(hardware
));