/*
2 * Event loop based on select() loop
3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
5 * This software may be distributed under the terms of the BSD license.
6 * See README for more details.
*/
/* poll(2) backend headers are only pulled in when that backend is built
 * (the #include between these guards is elided in this excerpt). */
17 #ifdef CONFIG_ELOOP_POLL
19 #endif /* CONFIG_ELOOP_POLL */
/* struct eloop_sock (opening elided): one registered socket — fd,
 * callback, and the two opaque context pointers passed back to it. */
26 eloop_sock_handler handler;
/* One pending timeout: absolute expiry time (relative clock via
 * os_reltime) plus callback; linked into eloop.timeout in expiry order. */
32 struct eloop_timeout {
34 struct os_reltime time;
37 eloop_timeout_handler handler;
/* struct eloop_signal (opening elided): one registered UNIX signal and
 * the handler invoked later from normal (non-signal) context. */
46 eloop_signal_handler handler;
/* Growable array of sockets of one event type (read/write/exception). */
50 struct eloop_sock_table {
52 struct eloop_sock *table;
/* struct eloop_data (opening elided): process-wide event loop state. */
59 int count; /* sum of all table counts */
60 #ifdef CONFIG_ELOOP_POLL
61 int max_pollfd_map; /* number of pollfds_map currently allocated */
62 int max_poll_fds; /* number of pollfds currently allocated */
63 struct pollfd *pollfds;
64 struct pollfd **pollfds_map;
65 #endif /* CONFIG_ELOOP_POLL */
66 struct eloop_sock_table readers;
67 struct eloop_sock_table writers;
68 struct eloop_sock_table exceptions;
70 struct dl_list timeout;
73 struct eloop_signal *signals;
75 int pending_terminate;
78 int reader_table_changed;
/* Single process-wide event loop instance. */
81 static struct eloop_data eloop;
/* WPA_TRACE-only SIGSEGV handler: dump a backtrace before the crash
 * (abort/exit line elided in this excerpt). */
86 static void eloop_sigsegv_handler(int sig)
88 wpa_trace_show("eloop SIGSEGV");
/*
 * Record trace references for every entry's eloop/user context pointers
 * so the memory tracer can attribute them. The matching
 * eloop_trace_sock_remove_ref() must be called before the table array is
 * realloc()ed or compacted, since the references are keyed by entry
 * address (&table->table[i]).
 */
92 static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
95 if (table == NULL || table->table == NULL)
97 for (i = 0; i < table->count; i++) {
98 wpa_trace_add_ref(&table->table[i], eloop,
99 table->table[i].eloop_data);
100 wpa_trace_add_ref(&table->table[i], user,
101 table->table[i].user_data);
/* Inverse of the above: drop per-entry trace references. */
106 static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
109 if (table == NULL || table->table == NULL)
111 for (i = 0; i < table->count; i++) {
112 wpa_trace_remove_ref(&table->table[i], eloop,
113 table->table[i].eloop_data);
114 wpa_trace_remove_ref(&table->table[i], user,
115 table->table[i].user_data);
119 #else /* WPA_TRACE */
/* Without WPA_TRACE the ref-tracking calls compile away to nothing. */
121 #define eloop_trace_sock_add_ref(table) do { } while (0)
122 #define eloop_trace_sock_remove_ref(table) do { } while (0)
124 #endif /* WPA_TRACE */
/* eloop_init() body (signature elided): zero all global loop state,
 * initialize the timeout list, and under WPA_TRACE install the SIGSEGV
 * backtrace handler. Return statement elided. */
129 os_memset(&eloop, 0, sizeof(eloop));
130 dl_list_init(&eloop.timeout);
132 signal(SIGSEGV, eloop_sigsegv_handler);
133 #endif /* WPA_TRACE */
/*
 * Register one socket in a table. Grows table->table by one entry via
 * os_realloc_array() and, for the poll() backend, grows the shared
 * pollfds/pollfds_map arrays in chunks of 50 so pollfds_map can be
 * indexed directly by fd value. Error-return paths are elided in this
 * excerpt; os_realloc_array() leaves the original buffer valid on
 * failure, so the tables stay usable.
 */
138 static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
139 int sock, eloop_sock_handler handler,
140 void *eloop_data, void *user_data)
142 struct eloop_sock *tmp;
/* Track the highest fd seen — select() needs max_sock + 1. */
145 if (sock > eloop.max_sock)
148 new_max_sock = eloop.max_sock;
153 #ifdef CONFIG_ELOOP_POLL
/* Ensure the fd-indexed lookup map covers the new descriptor. */
154 if (new_max_sock >= eloop.max_pollfd_map) {
155 struct pollfd **nmap;
156 nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
157 sizeof(struct pollfd *));
161 eloop.max_pollfd_map = new_max_sock + 50;
162 eloop.pollfds_map = nmap;
/* Ensure there is a pollfd slot for the additional socket. */
165 if (eloop.count + 1 > eloop.max_poll_fds) {
167 int nmax = eloop.count + 1 + 50;
168 n = os_realloc_array(eloop.pollfds, nmax,
169 sizeof(struct pollfd));
173 eloop.max_poll_fds = nmax;
176 #endif /* CONFIG_ELOOP_POLL */
/* Drop address-keyed trace refs before realloc may move the array;
 * re-added below once the new entry is in place. */
178 eloop_trace_sock_remove_ref(table);
179 tmp = os_realloc_array(table->table, table->count + 1,
180 sizeof(struct eloop_sock));
/* Fill the new slot (count increment elided in this excerpt). */
184 tmp[table->count].sock = sock;
185 tmp[table->count].eloop_data = eloop_data;
186 tmp[table->count].user_data = user_data;
187 tmp[table->count].handler = handler;
188 wpa_trace_record(&tmp[table->count]);
191 eloop.max_sock = new_max_sock;
194 eloop_trace_sock_add_ref(table);
/*
 * Unregister a socket: linear search by fd, then compact the array with
 * os_memmove(). Count decrement and changed-flag update are elided in
 * this excerpt. Trace refs are dropped/re-added around the move because
 * they are keyed by entry address.
 */
200 static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
205 if (table == NULL || table->table == NULL || table->count == 0)
208 for (i = 0; i < table->count; i++) {
209 if (table->table[i].sock == sock)
/* Not found — nothing to remove. */
212 if (i == table->count)
214 eloop_trace_sock_remove_ref(table);
/* Close the gap unless the match was the last entry. */
215 if (i != table->count - 1) {
216 os_memmove(&table->table[i], &table->table[i + 1],
217 (table->count - i - 1) *
218 sizeof(struct eloop_sock));
223 eloop_trace_sock_add_ref(table);
227 #ifdef CONFIG_ELOOP_POLL
/* Bounds-checked fd -> pollfd slot lookup; NULL-return path for
 * out-of-range fds is elided in this excerpt. */
229 static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
231 if (fd < mx && fd >= 0)
232 return pollfds_map[fd];
/*
 * Build the pollfd array from the three socket tables, merging sockets
 * registered for more than one event type into a single pollfd entry via
 * the fd-indexed pollfds_map. Returns the number of slots used (return
 * and nxt++ lines elided in this excerpt).
 */
237 static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
238 struct eloop_sock_table *writers,
239 struct eloop_sock_table *exceptions,
240 struct pollfd *pollfds,
241 struct pollfd **pollfds_map,
249 /* Clear pollfd lookup map. It will be re-populated below. */
250 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
252 if (readers && readers->table) {
253 for (i = 0; i < readers->count; i++) {
254 fd = readers->table[i].sock;
255 assert(fd >= 0 && fd < max_pollfd_map);
256 pollfds[nxt].fd = fd;
257 pollfds[nxt].events = POLLIN;
258 pollfds[nxt].revents = 0;
259 pollfds_map[fd] = &(pollfds[nxt]);
264 if (writers && writers->table) {
265 for (i = 0; i < writers->count; i++) {
267 * See if we already added this descriptor, update it
270 fd = writers->table[i].sock;
271 assert(fd >= 0 && fd < max_pollfd_map);
272 pfd = pollfds_map[fd];
274 pfd = &(pollfds[nxt]);
/* NOTE(review): indexing with the table index i rather than the
 * new pollfd slot looks wrong — should presumably be
 * pfd->revents = 0; verify against upstream eloop.c fix. */
277 pollfds[i].revents = 0;
278 pollfds_map[fd] = pfd;
281 pfd->events |= POLLOUT;
286 * Exceptions are always checked when using poll, but I suppose it's
287 * possible that someone registered a socket *only* for exception
288 * handling. Set the POLLIN bit in this case.
290 if (exceptions && exceptions->table) {
291 for (i = 0; i < exceptions->count; i++) {
293 * See if we already added this descriptor, just use it
296 fd = exceptions->table[i].sock;
297 assert(fd >= 0 && fd < max_pollfd_map);
298 pfd = pollfds_map[fd];
300 pfd = &(pollfds[nxt]);
301 pfd->events = POLLIN;
/* NOTE(review): same table-index-vs-slot concern as above. */
303 pollfds[i].revents = 0;
304 pollfds_map[fd] = pfd;
/*
 * Invoke handlers for every table entry whose pollfd has one of the
 * requested revents bits set. Returns nonzero when a handler modified
 * the table (changed-flag logic elided), so the caller must stop using
 * the now-stale pollfds.
 */
314 static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
315 struct pollfd **pollfds_map,
322 if (!table || !table->table)
326 for (i = 0; i < table->count; i++) {
327 pfd = find_pollfd(pollfds_map, table->table[i].sock,
332 if (!(pfd->revents & revents))
335 table->table[i].handler(table->table[i].sock,
336 table->table[i].eloop_data,
337 table->table[i].user_data);
/* Dispatch readers, then writers, then exceptions; abort as soon as a
 * handler invalidated the pollfd snapshot. */
346 static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
347 struct eloop_sock_table *writers,
348 struct eloop_sock_table *exceptions,
349 struct pollfd **pollfds_map,
352 if (eloop_sock_table_dispatch_table(readers, pollfds_map,
353 max_pollfd_map, POLLIN | POLLERR |
355 return; /* pollfds may be invalid at this point */
357 if (eloop_sock_table_dispatch_table(writers, pollfds_map,
358 max_pollfd_map, POLLOUT))
359 return; /* pollfds may be invalid at this point */
361 eloop_sock_table_dispatch_table(exceptions, pollfds_map,
362 max_pollfd_map, POLLERR | POLLHUP);
365 #else /* CONFIG_ELOOP_POLL */
/* select() backend: mark every registered fd in the given fd_set. */
367 static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
374 if (table->table == NULL)
377 for (i = 0; i < table->count; i++) {
378 assert(table->table[i].sock >= 0);
379 FD_SET(table->table[i].sock, fds);
/* select() backend: call the handler for each fd select() marked ready.
 * Handlers may (un)register sockets; the loop-abort check on the
 * table-changed flag is elided in this excerpt. */
384 static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
389 if (table == NULL || table->table == NULL)
393 for (i = 0; i < table->count; i++) {
394 if (FD_ISSET(table->table[i].sock, fds)) {
395 table->table[i].handler(table->table[i].sock,
396 table->table[i].eloop_data,
397 table->table[i].user_data);
404 #endif /* CONFIG_ELOOP_POLL */
/*
 * Free a socket table at shutdown. Any entries still registered are
 * reported as leaks (callers should have unregistered their sockets
 * before eloop_destroy()).
 */
407 static void eloop_sock_table_destroy(struct eloop_sock_table *table)
411 for (i = 0; i < table->count && table->table; i++) {
412 wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
413 "sock=%d eloop_data=%p user_data=%p "
415 table->table[i].sock,
416 table->table[i].eloop_data,
417 table->table[i].user_data,
418 table->table[i].handler);
419 wpa_trace_dump_funcname("eloop unregistered socket "
421 table->table[i].handler);
422 wpa_trace_dump("eloop sock", &table->table[i]);
424 os_free(table->table);
/* Convenience wrapper: register a socket for read events. */
429 int eloop_register_read_sock(int sock, eloop_sock_handler handler,
430 void *eloop_data, void *user_data)
432 return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
433 eloop_data, user_data);
/* Convenience wrapper: unregister a read-event socket. */
437 void eloop_unregister_read_sock(int sock)
439 eloop_unregister_sock(sock, EVENT_TYPE_READ);
/* Map an event type to its table (switch opening and NULL-returning
 * default elided in this excerpt). */
443 static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
446 case EVENT_TYPE_READ:
447 return &eloop.readers;
448 case EVENT_TYPE_WRITE:
449 return &eloop.writers;
450 case EVENT_TYPE_EXCEPTION:
451 return &eloop.exceptions;
/* Public registration entry point: pick the table for the event type
 * and delegate; returns 0 on success, -1 on failure. */
458 int eloop_register_sock(int sock, eloop_event_type type,
459 eloop_sock_handler handler,
460 void *eloop_data, void *user_data)
462 struct eloop_sock_table *table;
465 table = eloop_get_sock_table(type);
466 return eloop_sock_table_add_sock(table, sock, handler,
467 eloop_data, user_data);
/* Public unregistration entry point for any event type. */
471 void eloop_unregister_sock(int sock, eloop_event_type type)
473 struct eloop_sock_table *table;
475 table = eloop_get_sock_table(type);
476 eloop_sock_table_remove_sock(table, sock);
/*
 * Register a one-shot timeout firing secs/usecs from now. The expiry is
 * stored as an absolute os_reltime and the entry is inserted into
 * eloop.timeout keeping the list sorted by increasing expiry time.
 * Return statements and some error paths are elided in this excerpt.
 */
480 int eloop_register_timeout(unsigned int secs, unsigned int usecs,
481 eloop_timeout_handler handler,
482 void *eloop_data, void *user_data)
484 struct eloop_timeout *timeout, *tmp;
487 timeout = os_zalloc(sizeof(*timeout));
490 if (os_get_reltime(&timeout->time) < 0) {
494 now_sec = timeout->time.sec;
495 timeout->time.sec += secs;
/* Seconds wrapped around: treat as an effectively infinite timeout
 * and skip the registration. */
496 if (timeout->time.sec < now_sec) {
498 * Integer overflow - assume long enough timeout to be assumed
499 * to be infinite, i.e., the timeout would never happen.
501 wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
502 "ever happen - ignore it", secs);
/* Normalize usec into [0, 1000000) (sec increment elided). */
506 timeout->time.usec += usecs;
507 while (timeout->time.usec >= 1000000) {
509 timeout->time.usec -= 1000000;
511 timeout->eloop_data = eloop_data;
512 timeout->user_data = user_data;
513 timeout->handler = handler;
514 wpa_trace_add_ref(timeout, eloop, eloop_data);
515 wpa_trace_add_ref(timeout, user, user_data);
516 wpa_trace_record(timeout);
518 /* Maintain timeouts in order of increasing time */
519 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
520 if (os_reltime_before(&timeout->time, &tmp->time)) {
521 dl_list_add(tmp->list.prev, &timeout->list);
/* Later than every existing entry — append at the tail. */
525 dl_list_add_tail(&eloop.timeout, &timeout->list);
/* Unlink a timeout and drop its trace refs (the os_free() of the
 * entry is elided in this excerpt). */
531 static void eloop_remove_timeout(struct eloop_timeout *timeout)
533 dl_list_del(&timeout->list);
534 wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
535 wpa_trace_remove_ref(timeout, user, timeout->user_data);
/*
 * Cancel every registered timeout matching handler and contexts;
 * ELOOP_ALL_CTX acts as a wildcard for either context pointer. Uses the
 * _safe list walk because entries are removed while iterating. The
 * removed-count accumulation and return are elided in this excerpt.
 */
540 int eloop_cancel_timeout(eloop_timeout_handler handler,
541 void *eloop_data, void *user_data)
543 struct eloop_timeout *timeout, *prev;
546 dl_list_for_each_safe(timeout, prev, &eloop.timeout,
547 struct eloop_timeout, list) {
548 if (timeout->handler == handler &&
549 (timeout->eloop_data == eloop_data ||
550 eloop_data == ELOOP_ALL_CTX) &&
551 (timeout->user_data == user_data ||
552 user_data == ELOOP_ALL_CTX)) {
553 eloop_remove_timeout(timeout);
/*
 * Cancel the first exactly-matching timeout (no wildcards) and report
 * the time that was left in *remaining — zero if it was already due.
 * Return statements are elided in this excerpt.
 */
562 int eloop_cancel_timeout_one(eloop_timeout_handler handler,
563 void *eloop_data, void *user_data,
564 struct os_reltime *remaining)
566 struct eloop_timeout *timeout, *prev;
568 struct os_reltime now;
570 os_get_reltime(&now);
571 remaining->sec = remaining->usec = 0;
573 dl_list_for_each_safe(timeout, prev, &eloop.timeout,
574 struct eloop_timeout, list) {
575 if (timeout->handler == handler &&
576 (timeout->eloop_data == eloop_data) &&
577 (timeout->user_data == user_data)) {
/* Only report a remainder if the expiry is still in the future. */
579 if (os_reltime_before(&now, &timeout->time))
580 os_reltime_sub(&timeout->time, &now, remaining);
581 eloop_remove_timeout(timeout);
/* Check whether a timeout with exactly this handler/context pair is
 * pending; the 1/0 return statements are elided in this excerpt. */
589 int eloop_is_timeout_registered(eloop_timeout_handler handler,
590 void *eloop_data, void *user_data)
592 struct eloop_timeout *tmp;
594 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
595 if (tmp->handler == handler &&
596 tmp->eloop_data == eloop_data &&
597 tmp->user_data == user_data)
/*
 * Shorten an existing matching timeout: if the requested duration is
 * less than the time currently remaining, cancel and re-register with
 * the shorter duration. Return values (changed / unchanged / not found)
 * are elided in this excerpt.
 */
605 int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
606 eloop_timeout_handler handler, void *eloop_data,
609 struct os_reltime now, requested, remaining;
610 struct eloop_timeout *tmp;
612 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
613 if (tmp->handler == handler &&
614 tmp->eloop_data == eloop_data &&
615 tmp->user_data == user_data) {
616 requested.sec = req_secs;
617 requested.usec = req_usecs;
618 os_get_reltime(&now);
619 os_reltime_sub(&tmp->time, &now, &remaining);
620 if (os_reltime_before(&requested, &remaining)) {
621 eloop_cancel_timeout(handler, eloop_data,
623 eloop_register_timeout(requested.sec,
/*
 * Mirror image of eloop_deplete_timeout(): extend an existing matching
 * timeout when the requested duration is longer than what remains.
 * Return values are elided in this excerpt.
 */
637 int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
638 eloop_timeout_handler handler, void *eloop_data,
641 struct os_reltime now, requested, remaining;
642 struct eloop_timeout *tmp;
644 dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
645 if (tmp->handler == handler &&
646 tmp->eloop_data == eloop_data &&
647 tmp->user_data == user_data) {
648 requested.sec = req_secs;
649 requested.usec = req_usecs;
650 os_get_reltime(&now);
651 os_reltime_sub(&tmp->time, &now, &remaining);
652 if (os_reltime_before(&remaining, &requested)) {
653 eloop_cancel_timeout(handler, eloop_data,
655 eloop_register_timeout(requested.sec,
669 #ifndef CONFIG_NATIVE_WINDOWS
/* SIGALRM watchdog: fires if the loop failed to act on SIGINT/SIGTERM
 * within the alarm period, then force-kills the process (the exit call
 * is elided in this excerpt). */
670 static void eloop_handle_alarm(int sig)
672 wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
673 "two seconds. Looks like there\n"
674 "is a bug that ends up in a busy loop that "
675 "prevents clean shutdown.\n"
676 "Killing program forcefully.\n");
679 #endif /* CONFIG_NATIVE_WINDOWS */
/* Runs in signal context: only sets flags; the real work happens later
 * in eloop_process_pending_signals() from normal context. */
682 static void eloop_handle_signal(int sig)
686 #ifndef CONFIG_NATIVE_WINDOWS
687 if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
688 /* Use SIGALRM to break out from potential busy loops that
689 * would not allow the program to be killed. */
690 eloop.pending_terminate = 1;
691 signal(SIGALRM, eloop_handle_alarm);
694 #endif /* CONFIG_NATIVE_WINDOWS */
/* Mark the matching registered signal as raised. */
697 for (i = 0; i < eloop.signal_count; i++) {
698 if (eloop.signals[i].sig == sig) {
699 eloop.signals[i].signaled++;
/*
 * Called from the main loop (normal context) after poll()/select()
 * returns: dispatch user handlers for every signal flagged by
 * eloop_handle_signal(). The eloop.signaled reset and the alarm(0)
 * cancellation are elided in this excerpt.
 */
706 static void eloop_process_pending_signals(void)
710 if (eloop.signaled == 0)
/* Termination was requested — cancel the SIGALRM watchdog. */
714 if (eloop.pending_terminate) {
715 #ifndef CONFIG_NATIVE_WINDOWS
717 #endif /* CONFIG_NATIVE_WINDOWS */
718 eloop.pending_terminate = 0;
721 for (i = 0; i < eloop.signal_count; i++) {
722 if (eloop.signals[i].signaled) {
723 eloop.signals[i].signaled = 0;
724 eloop.signals[i].handler(eloop.signals[i].sig,
725 eloop.signals[i].user_data);
/*
 * Register a deferred handler for a UNIX signal: grow the signals array
 * and install eloop_handle_signal() via signal(). Returns 0 on success,
 * -1 on allocation failure (error path elided in this excerpt).
 */
731 int eloop_register_signal(int sig, eloop_signal_handler handler,
734 struct eloop_signal *tmp;
736 tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
737 sizeof(struct eloop_signal));
741 tmp[eloop.signal_count].sig = sig;
742 tmp[eloop.signal_count].user_data = user_data;
743 tmp[eloop.signal_count].handler = handler;
744 tmp[eloop.signal_count].signaled = 0;
745 eloop.signal_count++;
747 signal(sig, eloop_handle_signal);
/* Register the same handler for both SIGINT and SIGTERM; the combined
 * result is returned (second registration only attempted if the first
 * succeeded — check elided in this excerpt). */
753 int eloop_register_signal_terminate(eloop_signal_handler handler,
756 int ret = eloop_register_signal(SIGINT, handler, user_data);
758 ret = eloop_register_signal(SIGTERM, handler, user_data);
/* Register a reconfiguration handler; SIGHUP does not exist on native
 * Windows, so that build is a no-op (return elided). */
763 int eloop_register_signal_reconfig(eloop_signal_handler handler,
766 #ifdef CONFIG_NATIVE_WINDOWS
768 #else /* CONFIG_NATIVE_WINDOWS */
769 return eloop_register_signal(SIGHUP, handler, user_data);
770 #endif /* CONFIG_NATIVE_WINDOWS */
/*
 * eloop_run() body (signature elided): the main event loop. Each
 * iteration computes the time until the nearest timeout, blocks in
 * poll() or select(), processes deferred signals, fires all due timeout
 * handlers, and finally dispatches ready sockets. The loop exits when
 * termination is requested or nothing remains registered.
 */
776 #ifdef CONFIG_ELOOP_POLL
779 #else /* CONFIG_ELOOP_POLL */
780 fd_set *rfds, *wfds, *efds;
782 #endif /* CONFIG_ELOOP_POLL */
784 struct os_reltime tv, now;
786 #ifndef CONFIG_ELOOP_POLL
/* fd_sets are heap-allocated; FD_SETSIZE-sized objects would otherwise
 * take significant stack space. */
787 rfds = os_malloc(sizeof(*rfds));
788 wfds = os_malloc(sizeof(*wfds));
789 efds = os_malloc(sizeof(*efds));
790 if (rfds == NULL || wfds == NULL || efds == NULL)
792 #endif /* CONFIG_ELOOP_POLL */
/* Keep running while not terminated and there is anything to wait on. */
794 while (!eloop.terminate &&
795 (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
796 eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
797 struct eloop_timeout *timeout;
798 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
/* tv = time until the earliest timeout; zero if already due. */
801 os_get_reltime(&now);
802 if (os_reltime_before(&now, &timeout->time))
803 os_reltime_sub(&timeout->time, &now, &tv);
805 tv.sec = tv.usec = 0;
806 #ifdef CONFIG_ELOOP_POLL
807 timeout_ms = tv.sec * 1000 + tv.usec / 1000;
808 #else /* CONFIG_ELOOP_POLL */
810 _tv.tv_usec = tv.usec;
811 #endif /* CONFIG_ELOOP_POLL */
814 #ifdef CONFIG_ELOOP_POLL
815 num_poll_fds = eloop_sock_table_set_fds(
816 &eloop.readers, &eloop.writers, &eloop.exceptions,
817 eloop.pollfds, eloop.pollfds_map,
818 eloop.max_pollfd_map);
/* Block indefinitely (-1) when no timeout is pending. */
819 res = poll(eloop.pollfds, num_poll_fds,
820 timeout ? timeout_ms : -1);
/* EINTR (e.g. a signal arrived) is expected and not an error. */
822 if (res < 0 && errno != EINTR && errno != 0) {
823 wpa_printf(MSG_INFO, "eloop: poll: %s",
827 #else /* CONFIG_ELOOP_POLL */
828 eloop_sock_table_set_fds(&eloop.readers, rfds);
829 eloop_sock_table_set_fds(&eloop.writers, wfds);
830 eloop_sock_table_set_fds(&eloop.exceptions, efds);
831 res = select(eloop.max_sock + 1, rfds, wfds, efds,
832 timeout ? &_tv : NULL);
833 if (res < 0 && errno != EINTR && errno != 0) {
834 wpa_printf(MSG_INFO, "eloop: select: %s",
838 #endif /* CONFIG_ELOOP_POLL */
839 eloop_process_pending_signals();
841 /* check if some registered timeouts have occurred */
842 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
845 os_get_reltime(&now);
846 if (!os_reltime_before(&now, &timeout->time)) {
/* Copy the fields out before eloop_remove_timeout() frees the
 * entry, then invoke the handler. */
847 void *eloop_data = timeout->eloop_data;
848 void *user_data = timeout->user_data;
849 eloop_timeout_handler handler =
851 eloop_remove_timeout(timeout);
852 handler(eloop_data, user_data);
/* Dispatch ready sockets last; handlers may have changed tables. */
860 #ifdef CONFIG_ELOOP_POLL
861 eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
862 &eloop.exceptions, eloop.pollfds_map,
863 eloop.max_pollfd_map);
864 #else /* CONFIG_ELOOP_POLL */
865 eloop_sock_table_dispatch(&eloop.readers, rfds);
866 eloop_sock_table_dispatch(&eloop.writers, wfds);
867 eloop_sock_table_dispatch(&eloop.exceptions, efds);
868 #endif /* CONFIG_ELOOP_POLL */
873 #ifndef CONFIG_ELOOP_POLL
/* Cleanup label and os_free(rfds/wfds/efds) elided in this excerpt. */
877 #endif /* CONFIG_ELOOP_POLL */
/* Request loop exit; the eloop.terminate = 1 assignment is elided in
 * this excerpt. */
882 void eloop_terminate(void)
/*
 * Free all event loop state at shutdown. Timeouts still pending are
 * reported (with their remaining time) as leaks before being removed.
 */
888 void eloop_destroy(void)
890 struct eloop_timeout *timeout, *prev;
891 struct os_reltime now;
893 os_get_reltime(&now);
894 dl_list_for_each_safe(timeout, prev, &eloop.timeout,
895 struct eloop_timeout, list) {
/* Remaining time; the usec-borrow correction for the subtraction
 * is elided in this excerpt. */
897 sec = timeout->time.sec - now.sec;
898 usec = timeout->time.usec - now.usec;
899 if (timeout->time.usec < now.usec) {
903 wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
904 "eloop_data=%p user_data=%p handler=%p",
905 sec, usec, timeout->eloop_data, timeout->user_data,
907 wpa_trace_dump_funcname("eloop unregistered timeout handler",
909 wpa_trace_dump("eloop timeout", timeout);
910 eloop_remove_timeout(timeout);
912 eloop_sock_table_destroy(&eloop.readers);
913 eloop_sock_table_destroy(&eloop.writers);
914 eloop_sock_table_destroy(&eloop.exceptions);
915 os_free(eloop.signals);
917 #ifdef CONFIG_ELOOP_POLL
918 os_free(eloop.pollfds);
919 os_free(eloop.pollfds_map);
920 #endif /* CONFIG_ELOOP_POLL */
/* Query whether termination has been requested. */
924 int eloop_terminated(void)
926 return eloop.terminate;
/*
 * Block (outside the main loop) until the given fd becomes readable,
 * using whichever backend is compiled in. FD_ZERO/FD_SET setup and the
 * poll() call itself are elided in this excerpt.
 */
930 void eloop_wait_for_read_sock(int sock)
932 #ifdef CONFIG_ELOOP_POLL
938 os_memset(&pfd, 0, sizeof(pfd));
943 #else /* CONFIG_ELOOP_POLL */
951 select(sock + 1, &rfds, NULL, NULL, NULL);
952 #endif /* CONFIG_ELOOP_POLL */