/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both of poll and epoll
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both of poll and kqueue
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
#include <sys/event.h>
#endif /* CONFIG_ELOOP_KQUEUE */
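
/*
 * Exactly one I/O multiplexing backend is compiled in: poll(2), epoll(7) or
 * kqueue(2) when the corresponding CONFIG_ELOOP_* define is set, and
 * select(2) by default when none of them is defined.
 */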

struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	int kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}


static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
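

/*
 * Typical lifecycle around these entry points (illustrative sketch only;
 * my_read_cb is a hypothetical application handler, not part of this file):
 *
 *	eloop_init();
 *	eloop_register_read_sock(sock, my_read_cb, NULL, NULL);
 *	eloop_run();
 *	eloop_destroy();
 */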

#ifdef CONFIG_ELOOP_EPOLL
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	int filter;
	struct kevent ke;

	switch (type) {
	case EVENT_TYPE_READ:
		filter = EVFILT_READ;
		break;
	case EVENT_TYPE_WRITE:
		filter = EVFILT_WRITE;
		break;
	default:
		filter = 0;
	}
	EV_SET(&ke, sock, filter, EV_ADD, 0, 0, NULL);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_KQUEUE */


static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	int next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;

		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;

		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (new_max_sock >= eloop.max_fd) {
		next = eloop.max_fd == 0 ? 16 : eloop.max_fd * 2;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	if (new_max_sock > eloop.max_sock)
		eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

	return 0;
}
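

/*
 * Drop a previously registered socket from the table and from the backend
 * (epoll/kqueue) state; a socket that is not found is silently ignored.
 */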
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, 0, EV_DELETE, 0, 0, NULL);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}


static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
	int i, r;

	r = 0;
	for (i = 0; i < table->count && table->table; i++) {
		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
			r = -1;
	}
	return r;
}

#endif /* CONFIG_ELOOP_KQUEUE */
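

/*
 * Re-registering everything matters mainly for kqueue: a kqueue descriptor
 * is not inherited across fork(), so a forked child must rebuild its event
 * registrations against a fresh kqueue before calling eloop_run().
 */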
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL)
		return;

	for (i = 0; i < table->count && table->table; i++) {
		wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
			   "sock=%d eloop_data=%p user_data=%p "
			   "handler=%p",
			   table->table[i].sock,
			   table->table[i].eloop_data,
			   table->table[i].user_data,
			   table->table[i].handler);
		wpa_trace_dump_funcname("eloop unregistered socket "
					"handler",
					table->table[i].handler);
		wpa_trace_dump("eloop sock", &table->table[i]);
	}
	os_free(table->table);
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table * eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}
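

/*
 * Timeouts are kept in a dl_list sorted by absolute expiry time, so the head
 * of the list is always the next timeout to fire and eloop_run() only needs
 * to look at the first entry.
 */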
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume a timeout this long is
		 * effectively infinite, i.e., it would never fire.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
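

/*
 * A periodic event can be built by re-arming the timeout from its own
 * handler (illustrative sketch; my_periodic is a hypothetical callback):
 *
 *	static void my_periodic(void *eloop_ctx, void *timeout_ctx)
 *	{
 *		eloop_register_timeout(1, 0, my_periodic, eloop_ctx,
 *				       timeout_ctx);
 *	}
 */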


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);

	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);

	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
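

/*
 * A handler registered via eloop_register_signal_terminate() typically just
 * calls eloop_terminate() so that eloop_run() returns cleanly on SIGINT or
 * SIGTERM (illustrative sketch; terminate_cb is a hypothetical callback):
 *
 *	static void terminate_cb(int sig, void *ctx)
 *	{
 *		eloop_terminate();
 *	}
 */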
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kevent"
#endif /* CONFIG_ELOOP_KQUEUE */
				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;

		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}


int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll here, but that would require four system calls:
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and close() for
	 * the epoll fd. select() performs better for this one-shot wait.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */