/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */
16 #ifdef CONFIG_ELOOP_POLL
19 #endif /* CONFIG_ELOOP_POLL */
26 eloop_sock_handler handler
;
32 struct eloop_timeout
{
37 eloop_timeout_handler handler
;
46 eloop_signal_handler handler
;
50 struct eloop_sock_table
{
52 struct eloop_sock
*table
;
59 int count
; /* sum of all table counts */
60 #ifdef CONFIG_ELOOP_POLL
61 int max_pollfd_map
; /* number of pollfds_map currently allocated */
62 int max_poll_fds
; /* number of pollfds currently allocated */
63 struct pollfd
*pollfds
;
64 struct pollfd
**pollfds_map
;
65 #endif /* CONFIG_ELOOP_POLL */
66 struct eloop_sock_table readers
;
67 struct eloop_sock_table writers
;
68 struct eloop_sock_table exceptions
;
70 struct dl_list timeout
;
73 struct eloop_signal
*signals
;
75 int pending_terminate
;
78 int reader_table_changed
;
81 static struct eloop_data eloop
;
/*
 * SIGSEGV handler used only under WPA_TRACE: dump the trace information
 * before terminating so that the crash location is visible in the log.
 */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
92 static void eloop_trace_sock_add_ref(struct eloop_sock_table
*table
)
95 if (table
== NULL
|| table
->table
== NULL
)
97 for (i
= 0; i
< table
->count
; i
++) {
98 wpa_trace_add_ref(&table
->table
[i
], eloop
,
99 table
->table
[i
].eloop_data
);
100 wpa_trace_add_ref(&table
->table
[i
], user
,
101 table
->table
[i
].user_data
);
106 static void eloop_trace_sock_remove_ref(struct eloop_sock_table
*table
)
109 if (table
== NULL
|| table
->table
== NULL
)
111 for (i
= 0; i
< table
->count
; i
++) {
112 wpa_trace_remove_ref(&table
->table
[i
], eloop
,
113 table
->table
[i
].eloop_data
);
114 wpa_trace_remove_ref(&table
->table
[i
], user
,
115 table
->table
[i
].user_data
);
119 #else /* WPA_TRACE */
121 #define eloop_trace_sock_add_ref(table) do { } while (0)
122 #define eloop_trace_sock_remove_ref(table) do { } while (0)
124 #endif /* WPA_TRACE */
129 os_memset(&eloop
, 0, sizeof(eloop
));
130 dl_list_init(&eloop
.timeout
);
132 signal(SIGSEGV
, eloop_sigsegv_handler
);
133 #endif /* WPA_TRACE */
138 static int eloop_sock_table_add_sock(struct eloop_sock_table
*table
,
139 int sock
, eloop_sock_handler handler
,
140 void *eloop_data
, void *user_data
)
142 struct eloop_sock
*tmp
;
145 if (sock
> eloop
.max_sock
)
148 new_max_sock
= eloop
.max_sock
;
153 #ifdef CONFIG_ELOOP_POLL
154 if (new_max_sock
>= eloop
.max_pollfd_map
) {
155 struct pollfd
**nmap
;
156 nmap
= os_realloc(eloop
.pollfds_map
, sizeof(struct pollfd
*) *
157 (new_max_sock
+ 50));
161 eloop
.max_pollfd_map
= new_max_sock
+ 50;
162 eloop
.pollfds_map
= nmap
;
165 if (eloop
.count
+ 1 > eloop
.max_poll_fds
) {
167 int nmax
= eloop
.count
+ 1 + 50;
168 n
= os_realloc(eloop
.pollfds
, sizeof(struct pollfd
) * nmax
);
172 eloop
.max_poll_fds
= nmax
;
175 #endif /* CONFIG_ELOOP_POLL */
177 eloop_trace_sock_remove_ref(table
);
178 tmp
= (struct eloop_sock
*)
179 os_realloc(table
->table
,
180 (table
->count
+ 1) * sizeof(struct eloop_sock
));
184 tmp
[table
->count
].sock
= sock
;
185 tmp
[table
->count
].eloop_data
= eloop_data
;
186 tmp
[table
->count
].user_data
= user_data
;
187 tmp
[table
->count
].handler
= handler
;
188 wpa_trace_record(&tmp
[table
->count
]);
191 eloop
.max_sock
= new_max_sock
;
194 eloop_trace_sock_add_ref(table
);
200 static void eloop_sock_table_remove_sock(struct eloop_sock_table
*table
,
205 if (table
== NULL
|| table
->table
== NULL
|| table
->count
== 0)
208 for (i
= 0; i
< table
->count
; i
++) {
209 if (table
->table
[i
].sock
== sock
)
212 if (i
== table
->count
)
214 eloop_trace_sock_remove_ref(table
);
215 if (i
!= table
->count
- 1) {
216 os_memmove(&table
->table
[i
], &table
->table
[i
+ 1],
217 (table
->count
- i
- 1) *
218 sizeof(struct eloop_sock
));
223 eloop_trace_sock_add_ref(table
);
227 #ifdef CONFIG_ELOOP_POLL
/*
 * Look up the pollfd entry for a file descriptor via the fd-indexed map.
 * Returns NULL when fd is out of the map's range [0, mx) or has no entry.
 */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd,
				   int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}
237 static int eloop_sock_table_set_fds(struct eloop_sock_table
*readers
,
238 struct eloop_sock_table
*writers
,
239 struct eloop_sock_table
*exceptions
,
240 struct pollfd
*pollfds
,
241 struct pollfd
**pollfds_map
,
249 /* Clear pollfd lookup map. It will be re-populated below. */
250 os_memset(pollfds_map
, 0, sizeof(struct pollfd
*) * max_pollfd_map
);
252 if (readers
&& readers
->table
) {
253 for (i
= 0; i
< readers
->count
; i
++) {
254 fd
= readers
->table
[i
].sock
;
255 assert(fd
>= 0 && fd
< max_pollfd_map
);
256 pollfds
[nxt
].fd
= fd
;
257 pollfds
[nxt
].events
= POLLIN
;
258 pollfds
[nxt
].revents
= 0;
259 pollfds_map
[fd
] = &(pollfds
[nxt
]);
264 if (writers
&& writers
->table
) {
265 for (i
= 0; i
< writers
->count
; i
++) {
267 * See if we already added this descriptor, update it
270 fd
= writers
->table
[i
].sock
;
271 assert(fd
>= 0 && fd
< max_pollfd_map
);
272 pfd
= pollfds_map
[fd
];
274 pfd
= &(pollfds
[nxt
]);
277 pollfds
[i
].revents
= 0;
278 pollfds_map
[fd
] = pfd
;
281 pfd
->events
|= POLLOUT
;
286 * Exceptions are always checked when using poll, but I suppose it's
287 * possible that someone registered a socket *only* for exception
288 * handling. Set the POLLIN bit in this case.
290 if (exceptions
&& exceptions
->table
) {
291 for (i
= 0; i
< exceptions
->count
; i
++) {
293 * See if we already added this descriptor, just use it
296 fd
= exceptions
->table
[i
].sock
;
297 assert(fd
>= 0 && fd
< max_pollfd_map
);
298 pfd
= pollfds_map
[fd
];
300 pfd
= &(pollfds
[nxt
]);
301 pfd
->events
= POLLIN
;
303 pollfds
[i
].revents
= 0;
304 pollfds_map
[fd
] = pfd
;
314 static int eloop_sock_table_dispatch_table(struct eloop_sock_table
*table
,
315 struct pollfd
**pollfds_map
,
322 if (!table
|| !table
->table
)
326 for (i
= 0; i
< table
->count
; i
++) {
327 pfd
= find_pollfd(pollfds_map
, table
->table
[i
].sock
,
332 if (!(pfd
->revents
& revents
))
335 table
->table
[i
].handler(table
->table
[i
].sock
,
336 table
->table
[i
].eloop_data
,
337 table
->table
[i
].user_data
);
/*
 * Dispatch all three socket tables after poll(): readers (POLLIN plus
 * error/hangup conditions), then writers (POLLOUT), then exceptions
 * (POLLERR | POLLHUP). Dispatching stops early if a handler changed the
 * socket tables, because the pollfd entries may no longer match.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
365 #else /* CONFIG_ELOOP_POLL */
367 static void eloop_sock_table_set_fds(struct eloop_sock_table
*table
,
374 if (table
->table
== NULL
)
377 for (i
= 0; i
< table
->count
; i
++)
378 FD_SET(table
->table
[i
].sock
, fds
);
382 static void eloop_sock_table_dispatch(struct eloop_sock_table
*table
,
387 if (table
== NULL
|| table
->table
== NULL
)
391 for (i
= 0; i
< table
->count
; i
++) {
392 if (FD_ISSET(table
->table
[i
].sock
, fds
)) {
393 table
->table
[i
].handler(table
->table
[i
].sock
,
394 table
->table
[i
].eloop_data
,
395 table
->table
[i
].user_data
);
402 #endif /* CONFIG_ELOOP_POLL */
405 static void eloop_sock_table_destroy(struct eloop_sock_table
*table
)
409 for (i
= 0; i
< table
->count
&& table
->table
; i
++) {
410 wpa_printf(MSG_INFO
, "ELOOP: remaining socket: "
411 "sock=%d eloop_data=%p user_data=%p "
413 table
->table
[i
].sock
,
414 table
->table
[i
].eloop_data
,
415 table
->table
[i
].user_data
,
416 table
->table
[i
].handler
);
417 wpa_trace_dump_funcname("eloop unregistered socket "
419 table
->table
[i
].handler
);
420 wpa_trace_dump("eloop sock", &table
->table
[i
]);
422 os_free(table
->table
);
427 int eloop_register_read_sock(int sock
, eloop_sock_handler handler
,
428 void *eloop_data
, void *user_data
)
430 return eloop_register_sock(sock
, EVENT_TYPE_READ
, handler
,
431 eloop_data
, user_data
);
435 void eloop_unregister_read_sock(int sock
)
437 eloop_unregister_sock(sock
, EVENT_TYPE_READ
);
441 static struct eloop_sock_table
*eloop_get_sock_table(eloop_event_type type
)
444 case EVENT_TYPE_READ
:
445 return &eloop
.readers
;
446 case EVENT_TYPE_WRITE
:
447 return &eloop
.writers
;
448 case EVENT_TYPE_EXCEPTION
:
449 return &eloop
.exceptions
;
456 int eloop_register_sock(int sock
, eloop_event_type type
,
457 eloop_sock_handler handler
,
458 void *eloop_data
, void *user_data
)
460 struct eloop_sock_table
*table
;
462 table
= eloop_get_sock_table(type
);
463 return eloop_sock_table_add_sock(table
, sock
, handler
,
464 eloop_data
, user_data
);
468 void eloop_unregister_sock(int sock
, eloop_event_type type
)
470 struct eloop_sock_table
*table
;
472 table
= eloop_get_sock_table(type
);
473 eloop_sock_table_remove_sock(table
, sock
);
477 int eloop_register_timeout(unsigned int secs
, unsigned int usecs
,
478 eloop_timeout_handler handler
,
479 void *eloop_data
, void *user_data
)
481 struct eloop_timeout
*timeout
, *tmp
;
484 timeout
= os_zalloc(sizeof(*timeout
));
487 if (os_get_time(&timeout
->time
) < 0) {
491 now_sec
= timeout
->time
.sec
;
492 timeout
->time
.sec
+= secs
;
493 if (timeout
->time
.sec
< now_sec
) {
495 * Integer overflow - assume long enough timeout to be assumed
496 * to be infinite, i.e., the timeout would never happen.
498 wpa_printf(MSG_DEBUG
, "ELOOP: Too long timeout (secs=%u) to "
499 "ever happen - ignore it", secs
);
503 timeout
->time
.usec
+= usecs
;
504 while (timeout
->time
.usec
>= 1000000) {
506 timeout
->time
.usec
-= 1000000;
508 timeout
->eloop_data
= eloop_data
;
509 timeout
->user_data
= user_data
;
510 timeout
->handler
= handler
;
511 wpa_trace_add_ref(timeout
, eloop
, eloop_data
);
512 wpa_trace_add_ref(timeout
, user
, user_data
);
513 wpa_trace_record(timeout
);
515 /* Maintain timeouts in order of increasing time */
516 dl_list_for_each(tmp
, &eloop
.timeout
, struct eloop_timeout
, list
) {
517 if (os_time_before(&timeout
->time
, &tmp
->time
)) {
518 dl_list_add(tmp
->list
.prev
, &timeout
->list
);
522 dl_list_add_tail(&eloop
.timeout
, &timeout
->list
);
528 static void eloop_remove_timeout(struct eloop_timeout
*timeout
)
530 dl_list_del(&timeout
->list
);
531 wpa_trace_remove_ref(timeout
, eloop
, timeout
->eloop_data
);
532 wpa_trace_remove_ref(timeout
, user
, timeout
->user_data
);
537 int eloop_cancel_timeout(eloop_timeout_handler handler
,
538 void *eloop_data
, void *user_data
)
540 struct eloop_timeout
*timeout
, *prev
;
543 dl_list_for_each_safe(timeout
, prev
, &eloop
.timeout
,
544 struct eloop_timeout
, list
) {
545 if (timeout
->handler
== handler
&&
546 (timeout
->eloop_data
== eloop_data
||
547 eloop_data
== ELOOP_ALL_CTX
) &&
548 (timeout
->user_data
== user_data
||
549 user_data
== ELOOP_ALL_CTX
)) {
550 eloop_remove_timeout(timeout
);
559 int eloop_is_timeout_registered(eloop_timeout_handler handler
,
560 void *eloop_data
, void *user_data
)
562 struct eloop_timeout
*tmp
;
564 dl_list_for_each(tmp
, &eloop
.timeout
, struct eloop_timeout
, list
) {
565 if (tmp
->handler
== handler
&&
566 tmp
->eloop_data
== eloop_data
&&
567 tmp
->user_data
== user_data
)
575 #ifndef CONFIG_NATIVE_WINDOWS
576 static void eloop_handle_alarm(int sig
)
578 wpa_printf(MSG_ERROR
, "eloop: could not process SIGINT or SIGTERM in "
579 "two seconds. Looks like there\n"
580 "is a bug that ends up in a busy loop that "
581 "prevents clean shutdown.\n"
582 "Killing program forcefully.\n");
585 #endif /* CONFIG_NATIVE_WINDOWS */
588 static void eloop_handle_signal(int sig
)
592 #ifndef CONFIG_NATIVE_WINDOWS
593 if ((sig
== SIGINT
|| sig
== SIGTERM
) && !eloop
.pending_terminate
) {
594 /* Use SIGALRM to break out from potential busy loops that
595 * would not allow the program to be killed. */
596 eloop
.pending_terminate
= 1;
597 signal(SIGALRM
, eloop_handle_alarm
);
600 #endif /* CONFIG_NATIVE_WINDOWS */
603 for (i
= 0; i
< eloop
.signal_count
; i
++) {
604 if (eloop
.signals
[i
].sig
== sig
) {
605 eloop
.signals
[i
].signaled
++;
612 static void eloop_process_pending_signals(void)
616 if (eloop
.signaled
== 0)
620 if (eloop
.pending_terminate
) {
621 #ifndef CONFIG_NATIVE_WINDOWS
623 #endif /* CONFIG_NATIVE_WINDOWS */
624 eloop
.pending_terminate
= 0;
627 for (i
= 0; i
< eloop
.signal_count
; i
++) {
628 if (eloop
.signals
[i
].signaled
) {
629 eloop
.signals
[i
].signaled
= 0;
630 eloop
.signals
[i
].handler(eloop
.signals
[i
].sig
,
631 eloop
.signals
[i
].user_data
);
637 int eloop_register_signal(int sig
, eloop_signal_handler handler
,
640 struct eloop_signal
*tmp
;
642 tmp
= (struct eloop_signal
*)
643 os_realloc(eloop
.signals
,
644 (eloop
.signal_count
+ 1) *
645 sizeof(struct eloop_signal
));
649 tmp
[eloop
.signal_count
].sig
= sig
;
650 tmp
[eloop
.signal_count
].user_data
= user_data
;
651 tmp
[eloop
.signal_count
].handler
= handler
;
652 tmp
[eloop
.signal_count
].signaled
= 0;
653 eloop
.signal_count
++;
655 signal(sig
, eloop_handle_signal
);
661 int eloop_register_signal_terminate(eloop_signal_handler handler
,
664 int ret
= eloop_register_signal(SIGINT
, handler
, user_data
);
666 ret
= eloop_register_signal(SIGTERM
, handler
, user_data
);
671 int eloop_register_signal_reconfig(eloop_signal_handler handler
,
674 #ifdef CONFIG_NATIVE_WINDOWS
676 #else /* CONFIG_NATIVE_WINDOWS */
677 return eloop_register_signal(SIGHUP
, handler
, user_data
);
678 #endif /* CONFIG_NATIVE_WINDOWS */
684 #ifdef CONFIG_ELOOP_POLL
687 #else /* CONFIG_ELOOP_POLL */
688 fd_set
*rfds
, *wfds
, *efds
;
690 #endif /* CONFIG_ELOOP_POLL */
692 struct os_time tv
, now
;
694 #ifndef CONFIG_ELOOP_POLL
695 rfds
= os_malloc(sizeof(*rfds
));
696 wfds
= os_malloc(sizeof(*wfds
));
697 efds
= os_malloc(sizeof(*efds
));
698 if (rfds
== NULL
|| wfds
== NULL
|| efds
== NULL
)
700 #endif /* CONFIG_ELOOP_POLL */
702 while (!eloop
.terminate
&&
703 (!dl_list_empty(&eloop
.timeout
) || eloop
.readers
.count
> 0 ||
704 eloop
.writers
.count
> 0 || eloop
.exceptions
.count
> 0)) {
705 struct eloop_timeout
*timeout
;
706 timeout
= dl_list_first(&eloop
.timeout
, struct eloop_timeout
,
710 if (os_time_before(&now
, &timeout
->time
))
711 os_time_sub(&timeout
->time
, &now
, &tv
);
713 tv
.sec
= tv
.usec
= 0;
714 #ifdef CONFIG_ELOOP_POLL
715 timeout_ms
= tv
.sec
* 1000 + tv
.usec
/ 1000;
716 #else /* CONFIG_ELOOP_POLL */
718 _tv
.tv_usec
= tv
.usec
;
719 #endif /* CONFIG_ELOOP_POLL */
722 #ifdef CONFIG_ELOOP_POLL
723 num_poll_fds
= eloop_sock_table_set_fds(
724 &eloop
.readers
, &eloop
.writers
, &eloop
.exceptions
,
725 eloop
.pollfds
, eloop
.pollfds_map
,
726 eloop
.max_pollfd_map
);
727 res
= poll(eloop
.pollfds
, num_poll_fds
,
728 timeout
? timeout_ms
: -1);
730 if (res
< 0 && errno
!= EINTR
&& errno
!= 0) {
734 #else /* CONFIG_ELOOP_POLL */
735 eloop_sock_table_set_fds(&eloop
.readers
, rfds
);
736 eloop_sock_table_set_fds(&eloop
.writers
, wfds
);
737 eloop_sock_table_set_fds(&eloop
.exceptions
, efds
);
738 res
= select(eloop
.max_sock
+ 1, rfds
, wfds
, efds
,
739 timeout
? &_tv
: NULL
);
740 if (res
< 0 && errno
!= EINTR
&& errno
!= 0) {
744 #endif /* CONFIG_ELOOP_POLL */
745 eloop_process_pending_signals();
747 /* check if some registered timeouts have occurred */
748 timeout
= dl_list_first(&eloop
.timeout
, struct eloop_timeout
,
752 if (!os_time_before(&now
, &timeout
->time
)) {
753 void *eloop_data
= timeout
->eloop_data
;
754 void *user_data
= timeout
->user_data
;
755 eloop_timeout_handler handler
=
757 eloop_remove_timeout(timeout
);
758 handler(eloop_data
, user_data
);
766 #ifdef CONFIG_ELOOP_POLL
767 eloop_sock_table_dispatch(&eloop
.readers
, &eloop
.writers
,
768 &eloop
.exceptions
, eloop
.pollfds_map
,
769 eloop
.max_pollfd_map
);
770 #else /* CONFIG_ELOOP_POLL */
771 eloop_sock_table_dispatch(&eloop
.readers
, rfds
);
772 eloop_sock_table_dispatch(&eloop
.writers
, wfds
);
773 eloop_sock_table_dispatch(&eloop
.exceptions
, efds
);
774 #endif /* CONFIG_ELOOP_POLL */
778 #ifndef CONFIG_ELOOP_POLL
782 #endif /* CONFIG_ELOOP_POLL */
787 void eloop_terminate(void)
793 void eloop_destroy(void)
795 struct eloop_timeout
*timeout
, *prev
;
799 dl_list_for_each_safe(timeout
, prev
, &eloop
.timeout
,
800 struct eloop_timeout
, list
) {
802 sec
= timeout
->time
.sec
- now
.sec
;
803 usec
= timeout
->time
.usec
- now
.usec
;
804 if (timeout
->time
.usec
< now
.usec
) {
808 wpa_printf(MSG_INFO
, "ELOOP: remaining timeout: %d.%06d "
809 "eloop_data=%p user_data=%p handler=%p",
810 sec
, usec
, timeout
->eloop_data
, timeout
->user_data
,
812 wpa_trace_dump_funcname("eloop unregistered timeout handler",
814 wpa_trace_dump("eloop timeout", timeout
);
815 eloop_remove_timeout(timeout
);
817 eloop_sock_table_destroy(&eloop
.readers
);
818 eloop_sock_table_destroy(&eloop
.writers
);
819 eloop_sock_table_destroy(&eloop
.exceptions
);
820 os_free(eloop
.signals
);
822 #ifdef CONFIG_ELOOP_POLL
823 os_free(eloop
.pollfds
);
824 os_free(eloop
.pollfds_map
);
825 #endif /* CONFIG_ELOOP_POLL */
829 int eloop_terminated(void)
831 return eloop
.terminate
;
835 void eloop_wait_for_read_sock(int sock
)
837 #ifdef CONFIG_ELOOP_POLL
843 os_memset(&pfd
, 0, sizeof(pfd
));
848 #else /* CONFIG_ELOOP_POLL */
856 select(sock
+ 1, &rfds
, NULL
, NULL
, NULL
);
857 #endif /* CONFIG_ELOOP_POLL */