/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */
#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */
26 eloop_sock_handler handler
;
32 struct eloop_timeout
{
34 struct os_reltime time
;
37 eloop_timeout_handler handler
;
46 eloop_signal_handler handler
;
/* Dynamic array of registered sockets of one event type */
struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	int changed; /* set when table is modified during dispatch */
};
59 int count
; /* sum of all table counts */
60 #ifdef CONFIG_ELOOP_POLL
61 int max_pollfd_map
; /* number of pollfds_map currently allocated */
62 int max_poll_fds
; /* number of pollfds currently allocated */
63 struct pollfd
*pollfds
;
64 struct pollfd
**pollfds_map
;
65 #endif /* CONFIG_ELOOP_POLL */
66 struct eloop_sock_table readers
;
67 struct eloop_sock_table writers
;
68 struct eloop_sock_table exceptions
;
70 struct dl_list timeout
;
73 struct eloop_signal
*signals
;
75 int pending_terminate
;
78 int reader_table_changed
;
81 static struct eloop_data eloop
;
86 static void eloop_sigsegv_handler(int sig
)
88 wpa_trace_show("eloop SIGSEGV");
92 static void eloop_trace_sock_add_ref(struct eloop_sock_table
*table
)
95 if (table
== NULL
|| table
->table
== NULL
)
97 for (i
= 0; i
< table
->count
; i
++) {
98 wpa_trace_add_ref(&table
->table
[i
], eloop
,
99 table
->table
[i
].eloop_data
);
100 wpa_trace_add_ref(&table
->table
[i
], user
,
101 table
->table
[i
].user_data
);
106 static void eloop_trace_sock_remove_ref(struct eloop_sock_table
*table
)
109 if (table
== NULL
|| table
->table
== NULL
)
111 for (i
= 0; i
< table
->count
; i
++) {
112 wpa_trace_remove_ref(&table
->table
[i
], eloop
,
113 table
->table
[i
].eloop_data
);
114 wpa_trace_remove_ref(&table
->table
[i
], user
,
115 table
->table
[i
].user_data
);
119 #else /* WPA_TRACE */
121 #define eloop_trace_sock_add_ref(table) do { } while (0)
122 #define eloop_trace_sock_remove_ref(table) do { } while (0)
124 #endif /* WPA_TRACE */
129 os_memset(&eloop
, 0, sizeof(eloop
));
130 dl_list_init(&eloop
.timeout
);
132 signal(SIGSEGV
, eloop_sigsegv_handler
);
133 #endif /* WPA_TRACE */
138 static int eloop_sock_table_add_sock(struct eloop_sock_table
*table
,
139 int sock
, eloop_sock_handler handler
,
140 void *eloop_data
, void *user_data
)
142 struct eloop_sock
*tmp
;
145 if (sock
> eloop
.max_sock
)
148 new_max_sock
= eloop
.max_sock
;
153 #ifdef CONFIG_ELOOP_POLL
154 if (new_max_sock
>= eloop
.max_pollfd_map
) {
155 struct pollfd
**nmap
;
156 nmap
= os_realloc_array(eloop
.pollfds_map
, new_max_sock
+ 50,
157 sizeof(struct pollfd
*));
161 eloop
.max_pollfd_map
= new_max_sock
+ 50;
162 eloop
.pollfds_map
= nmap
;
165 if (eloop
.count
+ 1 > eloop
.max_poll_fds
) {
167 int nmax
= eloop
.count
+ 1 + 50;
168 n
= os_realloc_array(eloop
.pollfds
, nmax
,
169 sizeof(struct pollfd
));
173 eloop
.max_poll_fds
= nmax
;
176 #endif /* CONFIG_ELOOP_POLL */
178 eloop_trace_sock_remove_ref(table
);
179 tmp
= os_realloc_array(table
->table
, table
->count
+ 1,
180 sizeof(struct eloop_sock
));
184 tmp
[table
->count
].sock
= sock
;
185 tmp
[table
->count
].eloop_data
= eloop_data
;
186 tmp
[table
->count
].user_data
= user_data
;
187 tmp
[table
->count
].handler
= handler
;
188 wpa_trace_record(&tmp
[table
->count
]);
191 eloop
.max_sock
= new_max_sock
;
194 eloop_trace_sock_add_ref(table
);
200 static void eloop_sock_table_remove_sock(struct eloop_sock_table
*table
,
205 if (table
== NULL
|| table
->table
== NULL
|| table
->count
== 0)
208 for (i
= 0; i
< table
->count
; i
++) {
209 if (table
->table
[i
].sock
== sock
)
212 if (i
== table
->count
)
214 eloop_trace_sock_remove_ref(table
);
215 if (i
!= table
->count
- 1) {
216 os_memmove(&table
->table
[i
], &table
->table
[i
+ 1],
217 (table
->count
- i
- 1) *
218 sizeof(struct eloop_sock
));
223 eloop_trace_sock_add_ref(table
);
227 #ifdef CONFIG_ELOOP_POLL
/*
 * find_pollfd - Look up the pollfd slot for a descriptor
 * Returns: pointer to the slot, or NULL if fd is out of map range
 * (entries for unregistered fds are NULL in the map)
 */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}
237 static int eloop_sock_table_set_fds(struct eloop_sock_table
*readers
,
238 struct eloop_sock_table
*writers
,
239 struct eloop_sock_table
*exceptions
,
240 struct pollfd
*pollfds
,
241 struct pollfd
**pollfds_map
,
249 /* Clear pollfd lookup map. It will be re-populated below. */
250 os_memset(pollfds_map
, 0, sizeof(struct pollfd
*) * max_pollfd_map
);
252 if (readers
&& readers
->table
) {
253 for (i
= 0; i
< readers
->count
; i
++) {
254 fd
= readers
->table
[i
].sock
;
255 assert(fd
>= 0 && fd
< max_pollfd_map
);
256 pollfds
[nxt
].fd
= fd
;
257 pollfds
[nxt
].events
= POLLIN
;
258 pollfds
[nxt
].revents
= 0;
259 pollfds_map
[fd
] = &(pollfds
[nxt
]);
264 if (writers
&& writers
->table
) {
265 for (i
= 0; i
< writers
->count
; i
++) {
267 * See if we already added this descriptor, update it
270 fd
= writers
->table
[i
].sock
;
271 assert(fd
>= 0 && fd
< max_pollfd_map
);
272 pfd
= pollfds_map
[fd
];
274 pfd
= &(pollfds
[nxt
]);
277 pollfds
[i
].revents
= 0;
278 pollfds_map
[fd
] = pfd
;
281 pfd
->events
|= POLLOUT
;
286 * Exceptions are always checked when using poll, but I suppose it's
287 * possible that someone registered a socket *only* for exception
288 * handling. Set the POLLIN bit in this case.
290 if (exceptions
&& exceptions
->table
) {
291 for (i
= 0; i
< exceptions
->count
; i
++) {
293 * See if we already added this descriptor, just use it
296 fd
= exceptions
->table
[i
].sock
;
297 assert(fd
>= 0 && fd
< max_pollfd_map
);
298 pfd
= pollfds_map
[fd
];
300 pfd
= &(pollfds
[nxt
]);
301 pfd
->events
= POLLIN
;
303 pollfds
[i
].revents
= 0;
304 pollfds_map
[fd
] = pfd
;
314 static int eloop_sock_table_dispatch_table(struct eloop_sock_table
*table
,
315 struct pollfd
**pollfds_map
,
322 if (!table
|| !table
->table
)
326 for (i
= 0; i
< table
->count
; i
++) {
327 pfd
= find_pollfd(pollfds_map
, table
->table
[i
].sock
,
332 if (!(pfd
->revents
& revents
))
335 table
->table
[i
].handler(table
->table
[i
].sock
,
336 table
->table
[i
].eloop_data
,
337 table
->table
[i
].user_data
);
/*
 * eloop_sock_table_dispatch - Dispatch all ready sockets after poll()
 *
 * Readers first, then writers, then exceptions; stops early if a handler
 * changed the tables, since the cached pollfds are then stale.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
365 #else /* CONFIG_ELOOP_POLL */
367 static void eloop_sock_table_set_fds(struct eloop_sock_table
*table
,
374 if (table
->table
== NULL
)
377 for (i
= 0; i
< table
->count
; i
++)
378 FD_SET(table
->table
[i
].sock
, fds
);
382 static void eloop_sock_table_dispatch(struct eloop_sock_table
*table
,
387 if (table
== NULL
|| table
->table
== NULL
)
391 for (i
= 0; i
< table
->count
; i
++) {
392 if (FD_ISSET(table
->table
[i
].sock
, fds
)) {
393 table
->table
[i
].handler(table
->table
[i
].sock
,
394 table
->table
[i
].eloop_data
,
395 table
->table
[i
].user_data
);
402 #endif /* CONFIG_ELOOP_POLL */
405 static void eloop_sock_table_destroy(struct eloop_sock_table
*table
)
409 for (i
= 0; i
< table
->count
&& table
->table
; i
++) {
410 wpa_printf(MSG_INFO
, "ELOOP: remaining socket: "
411 "sock=%d eloop_data=%p user_data=%p "
413 table
->table
[i
].sock
,
414 table
->table
[i
].eloop_data
,
415 table
->table
[i
].user_data
,
416 table
->table
[i
].handler
);
417 wpa_trace_dump_funcname("eloop unregistered socket "
419 table
->table
[i
].handler
);
420 wpa_trace_dump("eloop sock", &table
->table
[i
]);
422 os_free(table
->table
);
427 int eloop_register_read_sock(int sock
, eloop_sock_handler handler
,
428 void *eloop_data
, void *user_data
)
430 return eloop_register_sock(sock
, EVENT_TYPE_READ
, handler
,
431 eloop_data
, user_data
);
435 void eloop_unregister_read_sock(int sock
)
437 eloop_unregister_sock(sock
, EVENT_TYPE_READ
);
441 static struct eloop_sock_table
*eloop_get_sock_table(eloop_event_type type
)
444 case EVENT_TYPE_READ
:
445 return &eloop
.readers
;
446 case EVENT_TYPE_WRITE
:
447 return &eloop
.writers
;
448 case EVENT_TYPE_EXCEPTION
:
449 return &eloop
.exceptions
;
456 int eloop_register_sock(int sock
, eloop_event_type type
,
457 eloop_sock_handler handler
,
458 void *eloop_data
, void *user_data
)
460 struct eloop_sock_table
*table
;
462 table
= eloop_get_sock_table(type
);
463 return eloop_sock_table_add_sock(table
, sock
, handler
,
464 eloop_data
, user_data
);
468 void eloop_unregister_sock(int sock
, eloop_event_type type
)
470 struct eloop_sock_table
*table
;
472 table
= eloop_get_sock_table(type
);
473 eloop_sock_table_remove_sock(table
, sock
);
477 int eloop_register_timeout(unsigned int secs
, unsigned int usecs
,
478 eloop_timeout_handler handler
,
479 void *eloop_data
, void *user_data
)
481 struct eloop_timeout
*timeout
, *tmp
;
484 timeout
= os_zalloc(sizeof(*timeout
));
487 if (os_get_reltime(&timeout
->time
) < 0) {
491 now_sec
= timeout
->time
.sec
;
492 timeout
->time
.sec
+= secs
;
493 if (timeout
->time
.sec
< now_sec
) {
495 * Integer overflow - assume long enough timeout to be assumed
496 * to be infinite, i.e., the timeout would never happen.
498 wpa_printf(MSG_DEBUG
, "ELOOP: Too long timeout (secs=%u) to "
499 "ever happen - ignore it", secs
);
503 timeout
->time
.usec
+= usecs
;
504 while (timeout
->time
.usec
>= 1000000) {
506 timeout
->time
.usec
-= 1000000;
508 timeout
->eloop_data
= eloop_data
;
509 timeout
->user_data
= user_data
;
510 timeout
->handler
= handler
;
511 wpa_trace_add_ref(timeout
, eloop
, eloop_data
);
512 wpa_trace_add_ref(timeout
, user
, user_data
);
513 wpa_trace_record(timeout
);
515 /* Maintain timeouts in order of increasing time */
516 dl_list_for_each(tmp
, &eloop
.timeout
, struct eloop_timeout
, list
) {
517 if (os_reltime_before(&timeout
->time
, &tmp
->time
)) {
518 dl_list_add(tmp
->list
.prev
, &timeout
->list
);
522 dl_list_add_tail(&eloop
.timeout
, &timeout
->list
);
528 static void eloop_remove_timeout(struct eloop_timeout
*timeout
)
530 dl_list_del(&timeout
->list
);
531 wpa_trace_remove_ref(timeout
, eloop
, timeout
->eloop_data
);
532 wpa_trace_remove_ref(timeout
, user
, timeout
->user_data
);
537 int eloop_cancel_timeout(eloop_timeout_handler handler
,
538 void *eloop_data
, void *user_data
)
540 struct eloop_timeout
*timeout
, *prev
;
543 dl_list_for_each_safe(timeout
, prev
, &eloop
.timeout
,
544 struct eloop_timeout
, list
) {
545 if (timeout
->handler
== handler
&&
546 (timeout
->eloop_data
== eloop_data
||
547 eloop_data
== ELOOP_ALL_CTX
) &&
548 (timeout
->user_data
== user_data
||
549 user_data
== ELOOP_ALL_CTX
)) {
550 eloop_remove_timeout(timeout
);
559 int eloop_cancel_timeout_one(eloop_timeout_handler handler
,
560 void *eloop_data
, void *user_data
,
561 struct os_reltime
*remaining
)
563 struct eloop_timeout
*timeout
, *prev
;
565 struct os_reltime now
;
567 os_get_reltime(&now
);
568 remaining
->sec
= remaining
->usec
= 0;
570 dl_list_for_each_safe(timeout
, prev
, &eloop
.timeout
,
571 struct eloop_timeout
, list
) {
572 if (timeout
->handler
== handler
&&
573 (timeout
->eloop_data
== eloop_data
) &&
574 (timeout
->user_data
== user_data
)) {
576 if (os_reltime_before(&now
, &timeout
->time
))
577 os_reltime_sub(&timeout
->time
, &now
, remaining
);
578 eloop_remove_timeout(timeout
);
586 int eloop_is_timeout_registered(eloop_timeout_handler handler
,
587 void *eloop_data
, void *user_data
)
589 struct eloop_timeout
*tmp
;
591 dl_list_for_each(tmp
, &eloop
.timeout
, struct eloop_timeout
, list
) {
592 if (tmp
->handler
== handler
&&
593 tmp
->eloop_data
== eloop_data
&&
594 tmp
->user_data
== user_data
)
602 int eloop_deplete_timeout(unsigned int req_secs
, unsigned int req_usecs
,
603 eloop_timeout_handler handler
, void *eloop_data
,
606 struct os_reltime now
, requested
, remaining
;
607 struct eloop_timeout
*tmp
;
609 dl_list_for_each(tmp
, &eloop
.timeout
, struct eloop_timeout
, list
) {
610 if (tmp
->handler
== handler
&&
611 tmp
->eloop_data
== eloop_data
&&
612 tmp
->user_data
== user_data
) {
613 requested
.sec
= req_secs
;
614 requested
.usec
= req_usecs
;
615 os_get_reltime(&now
);
616 os_reltime_sub(&tmp
->time
, &now
, &remaining
);
617 if (os_reltime_before(&requested
, &remaining
)) {
618 eloop_cancel_timeout(handler
, eloop_data
,
620 eloop_register_timeout(requested
.sec
,
633 int eloop_replenish_timeout(unsigned int req_secs
, unsigned int req_usecs
,
634 eloop_timeout_handler handler
, void *eloop_data
,
637 struct os_reltime now
, requested
, remaining
;
638 struct eloop_timeout
*tmp
;
640 dl_list_for_each(tmp
, &eloop
.timeout
, struct eloop_timeout
, list
) {
641 if (tmp
->handler
== handler
&&
642 tmp
->eloop_data
== eloop_data
&&
643 tmp
->user_data
== user_data
) {
644 requested
.sec
= req_secs
;
645 requested
.usec
= req_usecs
;
646 os_get_reltime(&now
);
647 os_reltime_sub(&tmp
->time
, &now
, &remaining
);
648 if (os_reltime_before(&remaining
, &requested
)) {
649 eloop_cancel_timeout(handler
, eloop_data
,
651 eloop_register_timeout(requested
.sec
,
664 #ifndef CONFIG_NATIVE_WINDOWS
665 static void eloop_handle_alarm(int sig
)
667 wpa_printf(MSG_ERROR
, "eloop: could not process SIGINT or SIGTERM in "
668 "two seconds. Looks like there\n"
669 "is a bug that ends up in a busy loop that "
670 "prevents clean shutdown.\n"
671 "Killing program forcefully.\n");
674 #endif /* CONFIG_NATIVE_WINDOWS */
677 static void eloop_handle_signal(int sig
)
681 #ifndef CONFIG_NATIVE_WINDOWS
682 if ((sig
== SIGINT
|| sig
== SIGTERM
) && !eloop
.pending_terminate
) {
683 /* Use SIGALRM to break out from potential busy loops that
684 * would not allow the program to be killed. */
685 eloop
.pending_terminate
= 1;
686 signal(SIGALRM
, eloop_handle_alarm
);
689 #endif /* CONFIG_NATIVE_WINDOWS */
692 for (i
= 0; i
< eloop
.signal_count
; i
++) {
693 if (eloop
.signals
[i
].sig
== sig
) {
694 eloop
.signals
[i
].signaled
++;
701 static void eloop_process_pending_signals(void)
705 if (eloop
.signaled
== 0)
709 if (eloop
.pending_terminate
) {
710 #ifndef CONFIG_NATIVE_WINDOWS
712 #endif /* CONFIG_NATIVE_WINDOWS */
713 eloop
.pending_terminate
= 0;
716 for (i
= 0; i
< eloop
.signal_count
; i
++) {
717 if (eloop
.signals
[i
].signaled
) {
718 eloop
.signals
[i
].signaled
= 0;
719 eloop
.signals
[i
].handler(eloop
.signals
[i
].sig
,
720 eloop
.signals
[i
].user_data
);
726 int eloop_register_signal(int sig
, eloop_signal_handler handler
,
729 struct eloop_signal
*tmp
;
731 tmp
= os_realloc_array(eloop
.signals
, eloop
.signal_count
+ 1,
732 sizeof(struct eloop_signal
));
736 tmp
[eloop
.signal_count
].sig
= sig
;
737 tmp
[eloop
.signal_count
].user_data
= user_data
;
738 tmp
[eloop
.signal_count
].handler
= handler
;
739 tmp
[eloop
.signal_count
].signaled
= 0;
740 eloop
.signal_count
++;
742 signal(sig
, eloop_handle_signal
);
748 int eloop_register_signal_terminate(eloop_signal_handler handler
,
751 int ret
= eloop_register_signal(SIGINT
, handler
, user_data
);
753 ret
= eloop_register_signal(SIGTERM
, handler
, user_data
);
758 int eloop_register_signal_reconfig(eloop_signal_handler handler
,
761 #ifdef CONFIG_NATIVE_WINDOWS
763 #else /* CONFIG_NATIVE_WINDOWS */
764 return eloop_register_signal(SIGHUP
, handler
, user_data
);
765 #endif /* CONFIG_NATIVE_WINDOWS */
771 #ifdef CONFIG_ELOOP_POLL
774 #else /* CONFIG_ELOOP_POLL */
775 fd_set
*rfds
, *wfds
, *efds
;
777 #endif /* CONFIG_ELOOP_POLL */
779 struct os_reltime tv
, now
;
781 #ifndef CONFIG_ELOOP_POLL
782 rfds
= os_malloc(sizeof(*rfds
));
783 wfds
= os_malloc(sizeof(*wfds
));
784 efds
= os_malloc(sizeof(*efds
));
785 if (rfds
== NULL
|| wfds
== NULL
|| efds
== NULL
)
787 #endif /* CONFIG_ELOOP_POLL */
789 while (!eloop
.terminate
&&
790 (!dl_list_empty(&eloop
.timeout
) || eloop
.readers
.count
> 0 ||
791 eloop
.writers
.count
> 0 || eloop
.exceptions
.count
> 0)) {
792 struct eloop_timeout
*timeout
;
793 timeout
= dl_list_first(&eloop
.timeout
, struct eloop_timeout
,
796 os_get_reltime(&now
);
797 if (os_reltime_before(&now
, &timeout
->time
))
798 os_reltime_sub(&timeout
->time
, &now
, &tv
);
800 tv
.sec
= tv
.usec
= 0;
801 #ifdef CONFIG_ELOOP_POLL
802 timeout_ms
= tv
.sec
* 1000 + tv
.usec
/ 1000;
803 #else /* CONFIG_ELOOP_POLL */
805 _tv
.tv_usec
= tv
.usec
;
806 #endif /* CONFIG_ELOOP_POLL */
809 #ifdef CONFIG_ELOOP_POLL
810 num_poll_fds
= eloop_sock_table_set_fds(
811 &eloop
.readers
, &eloop
.writers
, &eloop
.exceptions
,
812 eloop
.pollfds
, eloop
.pollfds_map
,
813 eloop
.max_pollfd_map
);
814 res
= poll(eloop
.pollfds
, num_poll_fds
,
815 timeout
? timeout_ms
: -1);
817 if (res
< 0 && errno
!= EINTR
&& errno
!= 0) {
818 wpa_printf(MSG_INFO
, "eloop: poll: %s",
822 #else /* CONFIG_ELOOP_POLL */
823 eloop_sock_table_set_fds(&eloop
.readers
, rfds
);
824 eloop_sock_table_set_fds(&eloop
.writers
, wfds
);
825 eloop_sock_table_set_fds(&eloop
.exceptions
, efds
);
826 res
= select(eloop
.max_sock
+ 1, rfds
, wfds
, efds
,
827 timeout
? &_tv
: NULL
);
828 if (res
< 0 && errno
!= EINTR
&& errno
!= 0) {
829 wpa_printf(MSG_INFO
, "eloop: select: %s",
833 #endif /* CONFIG_ELOOP_POLL */
834 eloop_process_pending_signals();
836 /* check if some registered timeouts have occurred */
837 timeout
= dl_list_first(&eloop
.timeout
, struct eloop_timeout
,
840 os_get_reltime(&now
);
841 if (!os_reltime_before(&now
, &timeout
->time
)) {
842 void *eloop_data
= timeout
->eloop_data
;
843 void *user_data
= timeout
->user_data
;
844 eloop_timeout_handler handler
=
846 eloop_remove_timeout(timeout
);
847 handler(eloop_data
, user_data
);
855 #ifdef CONFIG_ELOOP_POLL
856 eloop_sock_table_dispatch(&eloop
.readers
, &eloop
.writers
,
857 &eloop
.exceptions
, eloop
.pollfds_map
,
858 eloop
.max_pollfd_map
);
859 #else /* CONFIG_ELOOP_POLL */
860 eloop_sock_table_dispatch(&eloop
.readers
, rfds
);
861 eloop_sock_table_dispatch(&eloop
.writers
, wfds
);
862 eloop_sock_table_dispatch(&eloop
.exceptions
, efds
);
863 #endif /* CONFIG_ELOOP_POLL */
868 #ifndef CONFIG_ELOOP_POLL
872 #endif /* CONFIG_ELOOP_POLL */
877 void eloop_terminate(void)
883 void eloop_destroy(void)
885 struct eloop_timeout
*timeout
, *prev
;
886 struct os_reltime now
;
888 os_get_reltime(&now
);
889 dl_list_for_each_safe(timeout
, prev
, &eloop
.timeout
,
890 struct eloop_timeout
, list
) {
892 sec
= timeout
->time
.sec
- now
.sec
;
893 usec
= timeout
->time
.usec
- now
.usec
;
894 if (timeout
->time
.usec
< now
.usec
) {
898 wpa_printf(MSG_INFO
, "ELOOP: remaining timeout: %d.%06d "
899 "eloop_data=%p user_data=%p handler=%p",
900 sec
, usec
, timeout
->eloop_data
, timeout
->user_data
,
902 wpa_trace_dump_funcname("eloop unregistered timeout handler",
904 wpa_trace_dump("eloop timeout", timeout
);
905 eloop_remove_timeout(timeout
);
907 eloop_sock_table_destroy(&eloop
.readers
);
908 eloop_sock_table_destroy(&eloop
.writers
);
909 eloop_sock_table_destroy(&eloop
.exceptions
);
910 os_free(eloop
.signals
);
912 #ifdef CONFIG_ELOOP_POLL
913 os_free(eloop
.pollfds
);
914 os_free(eloop
.pollfds_map
);
915 #endif /* CONFIG_ELOOP_POLL */
919 int eloop_terminated(void)
921 return eloop
.terminate
;
925 void eloop_wait_for_read_sock(int sock
)
927 #ifdef CONFIG_ELOOP_POLL
933 os_memset(&pfd
, 0, sizeof(pfd
));
938 #else /* CONFIG_ELOOP_POLL */
946 select(sock
+ 1, &rfds
, NULL
, NULL
, NULL
);
947 #endif /* CONFIG_ELOOP_POLL */