/*
 * threads.c	request threading support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * Copyright 2000,2006  The FreeRADIUS server project
 * Copyright 2000  Alan DeKok <aland@ox.org>
 */
#include <freeradius-devel/ident.h>

#include <freeradius-devel/radiusd.h>
#include <freeradius-devel/rad_assert.h>
/*
 *	Other OSes have sem_init(); OS X doesn't.
 */
#ifdef HAVE_SEMAPHORE_H
#include <semaphore.h>
#else
#include <mach/task.h>
#include <mach/semaphore.h>

#undef sem_t
#define sem_t semaphore_t
#undef sem_init
#define sem_init(s,p,c) semaphore_create(mach_task_self(),s,SYNC_POLICY_FIFO,c)
#undef sem_wait
#define sem_wait(s) semaphore_wait(*s)
#undef sem_post
#define sem_post(s) semaphore_signal(*s)
#endif
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_OPENSSL_CRYPTO_H
#include <openssl/crypto.h>
#endif
#ifdef HAVE_OPENSSL_ERR_H
#include <openssl/err.h>
#endif
#ifdef HAVE_OPENSSL_EVP_H
#include <openssl/evp.h>
#endif
#define SEMAPHORE_LOCKED	(0)
#define SEMAPHORE_UNLOCKED	(1)
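/*
 *	A sketch of how these values are used below (illustrative
 *	only; "queue_sem" is a made-up name, not part of this file).
 *	The pool semaphore is created SEMAPHORE_LOCKED, i.e. with a
 *	count of zero, so every worker blocks in sem_wait() until the
 *	main thread queues a request and calls sem_post():
 *
 *		sem_t queue_sem;
 *		sem_init(&queue_sem, 0, SEMAPHORE_LOCKED);
 *
 *		producer:  enqueue(request); sem_post(&queue_sem);
 *		consumer:  sem_wait(&queue_sem); dequeue(&request);
 */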
#define THREAD_RUNNING		(1)
#define THREAD_CANCELLED	(2)
#define THREAD_EXITED		(3)
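/*
 *	Thread lifecycle, as implemented below: spawn_thread() creates
 *	each thread as THREAD_RUNNING.  thread_pool_manage() marks
 *	surplus or worn-out idle threads THREAD_CANCELLED, and posts
 *	the semaphore so that they wake up; a cancelled thread marks
 *	itself THREAD_EXITED on the way out, and is later reaped from
 *	the main thread via delete_thread().
 */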
#define NUM_FIFOS		RAD_LISTEN_MAX
/*
 *  A data structure which contains the information about
 *  the current state of a thread.
 *
 *  pthread_id     pthread id
 *  thread_num     server thread number, 1...number of threads
 *  semaphore      used to block the thread until a request comes in
 *  status         is the thread running or exited?
 *  request_count  the number of requests that this thread has handled
 *  timestamp      when the thread started executing.
 */
typedef struct THREAD_HANDLE {
	struct THREAD_HANDLE *prev;
	struct THREAD_HANDLE *next;
	pthread_t	     pthread_id;
	int		     thread_num;
	int		     status;
	unsigned int	     request_count;
	time_t		     timestamp;
	REQUEST		     *request;
} THREAD_HANDLE;
/*
 *	For the request queue.
 */
typedef struct request_queue_t {
	REQUEST		  *request;
	RAD_REQUEST_FUNP  fun;
} request_queue_t;
typedef struct thread_fork_t {
	pid_t		pid;
	int		status;
	int		exited;
} thread_fork_t;
/*
 *	A data structure to manage the thread pool.  There's no real
 *	need for a data structure, but it makes things conceptually
 *	easier.
 */
typedef struct THREAD_POOL {
	THREAD_HANDLE *head;
	THREAD_HANDLE *tail;

	int total_threads;
	int active_threads;	/* protected by queue_mutex */
	int max_thread_num;
	int start_threads;
	int max_threads;
	int min_spare_threads;
	int max_spare_threads;
	unsigned int max_requests_per_thread;
	unsigned long request_count;
	time_t time_last_spawned;
	int cleanup_delay;
	int spawn_flag;

#ifdef WNOHANG
	pthread_mutex_t wait_mutex;
	fr_hash_table_t *waiters;
#endif

	/*
	 *	All threads wait on this semaphore, for requests
	 *	to enter the queue.
	 */
	sem_t		semaphore;

	/*
	 *	To ensure only one thread at a time touches the queue.
	 */
	pthread_mutex_t	queue_mutex;

	int		max_queue_size;
	int		num_queued;
	fr_fifo_t	*fifo[NUM_FIFOS];
} THREAD_POOL;
static THREAD_POOL thread_pool;
static int pool_initialized = FALSE;
static time_t last_cleaned = 0;

static void thread_pool_manage(time_t now);
/*
 *	A mapping of configuration file names to internal integers.
 */
static const CONF_PARSER thread_config[] = {
	{ "start_servers",           PW_TYPE_INTEGER, 0, &thread_pool.start_threads,           "5" },
	{ "max_servers",             PW_TYPE_INTEGER, 0, &thread_pool.max_threads,             "32" },
	{ "min_spare_servers",       PW_TYPE_INTEGER, 0, &thread_pool.min_spare_threads,       "3" },
	{ "max_spare_servers",       PW_TYPE_INTEGER, 0, &thread_pool.max_spare_threads,       "10" },
	{ "max_requests_per_server", PW_TYPE_INTEGER, 0, &thread_pool.max_requests_per_thread, "0" },
	{ "cleanup_delay",           PW_TYPE_INTEGER, 0, &thread_pool.cleanup_delay,           "5" },
	{ "max_queue_size",          PW_TYPE_INTEGER, 0, &thread_pool.max_queue_size,          "65536" },
	{ NULL, -1, 0, NULL, NULL }
};
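/*
 *	These entries correspond to a "thread pool" subsection of
 *	radiusd.conf.  An illustrative example, using the compiled-in
 *	defaults from the table above:
 *
 *		thread pool {
 *			start_servers = 5
 *			max_servers = 32
 *			min_spare_servers = 3
 *			max_spare_servers = 10
 *			max_requests_per_server = 0
 *			cleanup_delay = 5
 *			max_queue_size = 65536
 *		}
 */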
#ifdef HAVE_OPENSSL_CRYPTO_H

/*
 *	If we're linking against OpenSSL, then it is the
 *	duty of the application, if it is multithreaded,
 *	to provide OpenSSL with appropriate thread id
 *	and mutex locking functions.
 *
 *	Note: this only implements static callbacks.
 *	OpenSSL does not use dynamic locking callbacks
 *	right now, but may in the future, so we will have
 *	to add them at some point.
 */
static pthread_mutex_t *ssl_mutexes = NULL;
static unsigned long ssl_id_function(void)
{
	return (unsigned long) pthread_self();
}
static void ssl_locking_function(int mode, int n, const char *file, int line)
{
	file = file;		/* -Wunused */
	line = line;		/* -Wunused */

	if (mode & CRYPTO_LOCK) {
		pthread_mutex_lock(&(ssl_mutexes[n]));
	} else {
		pthread_mutex_unlock(&(ssl_mutexes[n]));
	}
}
static int setup_ssl_mutexes(void)
{
	int i;

#ifdef HAVE_OPENSSL_EVP_H
	/*
	 *	Enable all ciphers and digests.
	 */
	OpenSSL_add_all_algorithms();
#endif

	ssl_mutexes = rad_malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t));
	if (!ssl_mutexes) {
		radlog(L_ERR, "Error allocating memory for SSL mutexes!");
		return 0;
	}

	for (i = 0; i < CRYPTO_num_locks(); i++) {
		pthread_mutex_init(&(ssl_mutexes[i]), NULL);
	}

	CRYPTO_set_id_callback(ssl_id_function);
	CRYPTO_set_locking_callback(ssl_locking_function);

	return 1;
}
#endif
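/*
 *	No teardown of these callbacks is shown here; they stay
 *	installed for the life of the process.  If a clean shutdown
 *	path were added, it should presumably clear them first, via
 *	CRYPTO_set_id_callback(NULL) and
 *	CRYPTO_set_locking_callback(NULL), BEFORE freeing ssl_mutexes,
 *	so that no thread can lock through a freed mutex.
 */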
/*
 *	We don't want to catch SIGCHLD for a host of reasons.
 *
 *	- exec_wait means that someone, somewhere, at some point,
 *	  will call waitpid(), and catch the child.
 *
 *	- SIGCHLD is delivered to a random thread, not the one that
 *	  forked the child.
 *
 *	- if another thread catches the child, we have to coordinate
 *	  with the thread doing the waiting.
 *
 *	- if we don't waitpid() for non-wait children, they'll be
 *	  zombies, and will hang around forever.
 */
#ifdef WNOHANG
static void reap_children(void)
{
	pid_t pid;
	int status;
	thread_fork_t mytf, *tf;

	pthread_mutex_lock(&thread_pool.wait_mutex);

	do {
		pid = waitpid(0, &status, WNOHANG);
		if (pid <= 0) break;

		mytf.pid = pid;
		tf = fr_hash_table_finddata(thread_pool.waiters, &mytf);
		if (!tf) continue;

		tf->status = status;
		tf->exited = 1;
	} while (fr_hash_table_num_elements(thread_pool.waiters) > 0);

	pthread_mutex_unlock(&thread_pool.wait_mutex);
}
#else
#define reap_children()
#endif /* WNOHANG */
/*
 *	Add a request to the list of waiting requests.
 *	This function gets called ONLY from the main handler thread...
 *
 *	This function should never fail.
 */
static int request_enqueue(REQUEST *request, RAD_REQUEST_FUNP fun)
{
	request_queue_t *entry;

	pthread_mutex_lock(&thread_pool.queue_mutex);

	thread_pool.request_count++;

	if (thread_pool.num_queued >= thread_pool.max_queue_size) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);

		/*
		 *	Mark the request as done.
		 */
		radlog(L_ERR, "!!! ERROR !!! The server is blocked: discarding new request %d", request->number);
		request->child_state = REQUEST_DONE;
		return 0;
	}

	entry = rad_malloc(sizeof(*entry));
	entry->request = request;
	entry->fun = fun;

	/*
	 *	Push the request onto the appropriate fifo for that
	 *	priority.
	 */
	if (!fr_fifo_push(thread_pool.fifo[request->priority],
			  entry)) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);
		radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
		request->child_state = REQUEST_DONE;
		return 0;
	}

	thread_pool.num_queued++;

	pthread_mutex_unlock(&thread_pool.queue_mutex);

	/*
	 *	There's one more request in the queue.
	 *
	 *	Note that we're not touching the queue any more, so
	 *	the semaphore post is outside of the mutex.  This also
	 *	means that when the thread wakes up and tries to lock
	 *	the mutex, it will be unlocked, and there won't be
	 *	contention.
	 */
	sem_post(&thread_pool.semaphore);

	return 1;
}
/*
 *	Remove a request from the queue.
 */
static int request_dequeue(REQUEST **request, RAD_REQUEST_FUNP *fun)
{
	RAD_LISTEN_TYPE i, start;
	request_queue_t *entry;

	pthread_mutex_lock(&thread_pool.queue_mutex);

	/*
	 *	Clear old requests from all queues.
	 *
	 *	We only do one pass over the queue, in order to
	 *	amortize the work across the child threads.  Since we
	 *	do N checks for one request de-queued, the old
	 *	requests will be quickly cleared.
	 */
	for (i = 0; i < RAD_LISTEN_MAX; i++) {
		entry = fr_fifo_peek(thread_pool.fifo[i]);
		if (!entry ||
		    (entry->request->master_state != REQUEST_STOP_PROCESSING)) {
			continue;
		}

		/*
		 *	This entry was marked to be stopped.  Acknowledge it.
		 */
		entry = fr_fifo_pop(thread_pool.fifo[i]);
		rad_assert(entry != NULL);
		entry->request->child_state = REQUEST_DONE;
		thread_pool.num_queued--;
		free(entry);
	}

	start = 0;
 retry:
	/*
	 *	Pop results from the top of the queue
	 */
	for (i = start; i < RAD_LISTEN_MAX; i++) {
		entry = fr_fifo_pop(thread_pool.fifo[i]);
		if (entry) {
			start = i;
			break;
		}
	}

	if (!entry) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);
		*request = NULL;
		*fun = NULL;
		return 0;
	}

	rad_assert(thread_pool.num_queued > 0);
	thread_pool.num_queued--;
	*request = entry->request;
	*fun = entry->fun;
	free(entry);

	rad_assert(*request != NULL);
	rad_assert((*request)->magic == REQUEST_MAGIC);
	rad_assert(*fun != NULL);

	/*
	 *	If the request has sat in the queue for too long,
	 *	kill it.
	 *
	 *	The main clean-up code can't delete the request from
	 *	the queue, and therefore won't clean it up until we
	 *	have acknowledged it as "done".
	 */
	if ((*request)->master_state == REQUEST_STOP_PROCESSING) {
		(*request)->child_state = REQUEST_DONE;
		goto retry;
	}

	/*
	 *	The thread is currently processing a request.
	 */
	thread_pool.active_threads++;

	pthread_mutex_unlock(&thread_pool.queue_mutex);

	return 1;
}
/*
 *	The main thread handler for requests.
 *
 *	Wait on the semaphore until we have it, and process the request.
 */
static void *request_handler_thread(void *arg)
{
	RAD_REQUEST_FUNP  fun;
	THREAD_HANDLE	  *self = (THREAD_HANDLE *) arg;

	/*
	 *	Loop forever, until told to exit.
	 */
	do {
		/*
		 *	Wait to be signalled.
		 */
		DEBUG2("Thread %d waiting to be assigned a request",
		       self->thread_num);
	re_wait:
		if (sem_wait(&thread_pool.semaphore) != 0) {
			/*
			 *	Interrupted system call.  Go back to
			 *	waiting, but DON'T print out any more
			 *	errors.
			 */
			if (errno == EINTR) {
				DEBUG2("Re-wait %d", self->thread_num);
				goto re_wait;
			}
			radlog(L_ERR, "Thread %d failed waiting for semaphore: %s: Exiting\n",
			       self->thread_num, strerror(errno));
			break;
		}

		DEBUG2("Thread %d got semaphore", self->thread_num);

#ifdef HAVE_OPENSSL_ERR_H
		/*
		 *	Clear the error queue for the current thread.
		 */
		ERR_clear_error();
#endif

		/*
		 *	Try to grab a request from the queue.
		 *
		 *	It may be empty, in which case we fail
		 *	gracefully.
		 */
		if (!request_dequeue(&self->request, &fun)) continue;

		self->request->child_pid = self->pthread_id;
		self->request_count++;

		DEBUG2("Thread %d handling request %d, (%d handled so far)",
		       self->thread_num, self->request->number,
		       self->request_count);

		radius_handle_request(self->request, fun);

		/*
		 *	Update the active threads.
		 */
		pthread_mutex_lock(&thread_pool.queue_mutex);
		rad_assert(thread_pool.active_threads > 0);
		thread_pool.active_threads--;
		pthread_mutex_unlock(&thread_pool.queue_mutex);
	} while (self->status != THREAD_CANCELLED);

	DEBUG2("Thread %d exiting...", self->thread_num);

#ifdef HAVE_OPENSSL_ERR_H
	/*
	 *	If we linked with OpenSSL, the application
	 *	must remove the thread's error queue before
	 *	exiting to prevent memory leaks.
	 */
	ERR_remove_state(0);
#endif

	/*
	 *	Do this as the LAST thing before exiting.
	 */
	self->request = NULL;
	self->status = THREAD_EXITED;

	return NULL;
}
/*
 *	Take a THREAD_HANDLE, delete it from the thread pool and
 *	free its resources.
 *
 *	This function is called ONLY from the main server thread,
 *	ONLY after the thread has exited.
 */
static void delete_thread(THREAD_HANDLE *handle)
{
	THREAD_HANDLE *prev;
	THREAD_HANDLE *next;

	rad_assert(handle->request == NULL);

	DEBUG2("Deleting thread %d", handle->thread_num);

	prev = handle->prev;
	next = handle->next;
	rad_assert(thread_pool.total_threads > 0);
	thread_pool.total_threads--;

	/*
	 *	Remove the handle from the list.
	 */
	if (prev == NULL) {
		rad_assert(thread_pool.head == handle);
		thread_pool.head = next;
	} else {
		prev->next = next;
	}

	if (next == NULL) {
		rad_assert(thread_pool.tail == handle);
		thread_pool.tail = prev;
	} else {
		next->prev = prev;
	}

	/*
	 *	Free the handle, now that it's no longer referenced.
	 */
	free(handle);
}
/*
 *	Spawn a new thread, and place it in the thread pool.
 *
 *	The thread is started initially in the blocked state, waiting
 *	for the semaphore.
 */
static THREAD_HANDLE *spawn_thread(time_t now)
{
	int rcode;
	THREAD_HANDLE *handle;
	pthread_attr_t attr;

	/*
	 *	Ensure that we don't spawn too many threads.
	 */
	if (thread_pool.total_threads >= thread_pool.max_threads) {
		DEBUG2("Thread spawn failed.  Maximum number of threads (%d) already running.", thread_pool.max_threads);
		return NULL;
	}

	/*
	 *	Allocate a new thread handle.
	 */
	handle = (THREAD_HANDLE *) rad_malloc(sizeof(THREAD_HANDLE));
	memset(handle, 0, sizeof(THREAD_HANDLE));
	handle->prev = NULL;
	handle->next = NULL;
	handle->thread_num = thread_pool.max_thread_num++;
	handle->request_count = 0;
	handle->status = THREAD_RUNNING;
	handle->timestamp = time(NULL);

	/*
	 *	Initialize the thread's attributes to detached.
	 *
	 *	We could call pthread_detach() later, but if the thread
	 *	exits between the create & detach calls, it will need to
	 *	be joined, which will never happen.
	 */
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

	/*
	 *	Create the thread detached, so that it cleans up its
	 *	own memory when it exits.
	 *
	 *	Note that the function returns non-zero on error, NOT
	 *	-1.  The return code is the error, and errno isn't set.
	 */
	rcode = pthread_create(&handle->pthread_id, &attr,
			       request_handler_thread, handle);
	if (rcode != 0) {
		radlog(L_ERR, "Thread create failed: %s",
		       strerror(rcode));
		return NULL;
	}
	pthread_attr_destroy(&attr);

	/*
	 *	One more thread to go into the list.
	 */
	thread_pool.total_threads++;
	DEBUG2("Thread spawned new child %d. Total threads in pool: %d",
	       handle->thread_num, thread_pool.total_threads);

	/*
	 *	Add the thread handle to the tail of the thread pool list.
	 */
	if (thread_pool.tail) {
		thread_pool.tail->next = handle;
		handle->prev = thread_pool.tail;
		thread_pool.tail = handle;
	} else {
		rad_assert(thread_pool.head == NULL);
		thread_pool.head = thread_pool.tail = handle;
	}

	/*
	 *	Update the time we last spawned a thread.
	 */
	thread_pool.time_last_spawned = now;

	/*
	 *	And return the new handle to the caller.
	 */
	return handle;
}
/*
 *	Temporary function to prevent the server from executing a SIGHUP
 *	until all threads are finished handling requests.  This returns
 *	the number of active threads to 'radiusd.c'.
 */
int total_active_threads(void)
{
	/*
	 *	We don't acquire the mutex, so this is just an estimate.
	 *	We can't return with the lock held, so there's no point
	 *	in getting the guaranteed correct value; by the time
	 *	the caller sees it, it can be wrong again.
	 */
	return thread_pool.active_threads;
}
#ifdef WNOHANG
static uint32_t pid_hash(const void *data)
{
	const thread_fork_t *tf = data;

	return fr_hash(&tf->pid, sizeof(tf->pid));
}

static int pid_cmp(const void *one, const void *two)
{
	const thread_fork_t *a = one;
	const thread_fork_t *b = two;

	return (a->pid - b->pid);
}
#endif
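/*
 *	The "waiters" hash table is keyed on the child PID via
 *	pid_hash()/pid_cmp() above, so rad_fork() can record a forked
 *	child, and rad_waitpid() can later find that child's exit
 *	status without scanning every entry.
 */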
/*
 *	Allocate the thread pool, and seed it with an initial number
 *	of threads.
 *
 *	FIXME: What to do on a SIGHUP???
 */
int thread_pool_init(CONF_SECTION *cs, int *spawn_flag)
{
	int		i, rcode;
	CONF_SECTION	*pool_cf;
	time_t		now;

	now = time(NULL);

	rad_assert(spawn_flag != NULL);
	rad_assert(*spawn_flag == TRUE);
	rad_assert(pool_initialized == FALSE); /* not called on HUP */

	pool_cf = cf_subsection_find_next(cs, NULL, "thread");
	if (!pool_cf) *spawn_flag = FALSE;

	/*
	 *	Initialize the thread pool to some reasonable values.
	 */
	memset(&thread_pool, 0, sizeof(THREAD_POOL));
	thread_pool.head = NULL;
	thread_pool.tail = NULL;
	thread_pool.total_threads = 0;
	thread_pool.max_thread_num = 1;
	thread_pool.cleanup_delay = 5;
	thread_pool.spawn_flag = *spawn_flag;

	/*
	 *	Don't bother initializing the mutexes or
	 *	creating the hash tables.  They won't be used.
	 */
	if (!*spawn_flag) return 0;

#ifdef WNOHANG
	if ((pthread_mutex_init(&thread_pool.wait_mutex,NULL) != 0)) {
		radlog(L_ERR, "FATAL: Failed to initialize wait mutex: %s",
		       strerror(errno));
		return -1;
	}

	/*
	 *	Create the hash table of child PID's.
	 */
	thread_pool.waiters = fr_hash_table_create(pid_hash,
						   pid_cmp,
						   free);
	if (!thread_pool.waiters) {
		radlog(L_ERR, "FATAL: Failed to set up wait hash");
		return -1;
	}
#endif

	if (cf_section_parse(pool_cf, NULL, thread_config) < 0) {
		return -1;
	}

	/*
	 *	Catch corner cases.
	 */
	if (thread_pool.min_spare_threads < 1)
		thread_pool.min_spare_threads = 1;
	if (thread_pool.max_spare_threads < 1)
		thread_pool.max_spare_threads = 1;
	if (thread_pool.max_spare_threads < thread_pool.min_spare_threads)
		thread_pool.max_spare_threads = thread_pool.min_spare_threads;

	/*
	 *	The pool has already been initialized.  Don't spawn
	 *	new threads, and don't forget about forked children.
	 */
	if (pool_initialized) {
		return 0;
	}

	/*
	 *	Initialize the queue of requests.
	 */
	memset(&thread_pool.semaphore, 0, sizeof(thread_pool.semaphore));
	rcode = sem_init(&thread_pool.semaphore, 0, SEMAPHORE_LOCKED);
	if (rcode != 0) {
		radlog(L_ERR, "FATAL: Failed to initialize semaphore: %s",
		       strerror(errno));
		return -1;
	}

	rcode = pthread_mutex_init(&thread_pool.queue_mutex,NULL);
	if (rcode != 0) {
		radlog(L_ERR, "FATAL: Failed to initialize queue mutex: %s",
		       strerror(errno));
		return -1;
	}

	/*
	 *	Allocate multiple fifos.
	 */
	for (i = 0; i < RAD_LISTEN_MAX; i++) {
		thread_pool.fifo[i] = fr_fifo_create(65536, NULL);
		if (!thread_pool.fifo[i]) {
			radlog(L_ERR, "FATAL: Failed to set up request fifo");
			return -1;
		}
	}

#ifdef HAVE_OPENSSL_CRYPTO_H
	/*
	 *	If we're linking with OpenSSL too, then we need
	 *	to set up the mutexes and enable the thread callbacks.
	 */
	if (!setup_ssl_mutexes()) {
		radlog(L_ERR, "FATAL: Failed to set up SSL mutexes");
		return -1;
	}
#endif

	/*
	 *	Create a number of waiting threads.
	 *
	 *	If we fail while creating them, do something intelligent.
	 */
	for (i = 0; i < thread_pool.start_threads; i++) {
		if (spawn_thread(now) == NULL) {
			return -1;
		}
	}

	DEBUG2("Thread pool initialized");
	pool_initialized = TRUE;
	return 0;
}
/*
 *	Assign a new request to a free thread.
 *
 *	If there isn't a free thread, then try to create a new one,
 *	up to the configured limits.
 */
int thread_pool_addrequest(REQUEST *request, RAD_REQUEST_FUNP fun)
{
	time_t now = request->timestamp;

	/*
	 *	We've been told not to spawn threads, so don't.
	 */
	if (!thread_pool.spawn_flag) {
		radius_handle_request(request, fun);

#ifdef WNOHANG
		/*
		 *	Requests that care about child process exit
		 *	codes have already either called
		 *	rad_waitpid(), or they've given up.
		 */
		wait(NULL);
#endif
		return 1;
	}

	/*
	 *	Add the new request to the queue.
	 */
	if (!request_enqueue(request, fun)) return 0;

	/*
	 *	If we haven't checked the number of child threads
	 *	in a while, OR if the thread pool appears to be full,
	 *	go manage it.
	 */
	if ((last_cleaned < now) ||
	    (thread_pool.active_threads == thread_pool.total_threads)) {
		thread_pool_manage(now);
	}

	return 1;
}
/*
 *	Check the min_spare_threads and max_spare_threads.
 *
 *	If there are too many or too few threads waiting, then we
 *	either create some more, or delete some.
 */
static void thread_pool_manage(time_t now)
{
	int spare;
	int i, total;
	THREAD_HANDLE *handle, *next;
	int active_threads;

	/*
	 *	We don't need a mutex lock here, as we're reading
	 *	active_threads, and not modifying it.  We want a close
	 *	approximation of the number of active threads, and this
	 *	is good enough.
	 */
	active_threads = thread_pool.active_threads;
	spare = thread_pool.total_threads - active_threads;
	if (debug_flag) {
		static int old_total = -1;
		static int old_active = -1;

		if ((old_total != thread_pool.total_threads) ||
		    (old_active != active_threads)) {
			DEBUG2("Threads: total/active/spare threads = %d/%d/%d",
			       thread_pool.total_threads, active_threads, spare);
			old_total = thread_pool.total_threads;
			old_active = active_threads;
		}
	}

	/*
	 *	If there are too few spare threads, go create some more.
	 */
	if (spare < thread_pool.min_spare_threads) {
		total = thread_pool.min_spare_threads - spare;

		DEBUG2("Threads: Spawning %d spares", total);

		/*
		 *	Create a number of spare threads.
		 */
		for (i = 0; i < total; i++) {
			handle = spawn_thread(now);
			if (handle == NULL) {
				return;
			}
		}

		return; /* there aren't too many spare threads */
	}

	/*
	 *	Only delete spare threads if we haven't already done
	 *	so this second.
	 */
	if (now == last_cleaned) {
		return;
	}
	last_cleaned = now;

	/*
	 *	Loop over the thread pool, deleting exited threads.
	 */
	for (handle = thread_pool.head; handle; handle = next) {
		next = handle->next;

		/*
		 *	Maybe we've asked the thread to exit, and it
		 *	has agreed.
		 */
		if (handle->status == THREAD_EXITED) {
			delete_thread(handle);
		}
	}

	/*
	 *	Only delete the spare threads if sufficient time has
	 *	passed since we last created one.  This helps to minimize
	 *	the amount of create/delete cycles.
	 */
	if ((now - thread_pool.time_last_spawned) < thread_pool.cleanup_delay) {
		return;
	}

	/*
	 *	If there are too many spare threads, delete one.
	 *
	 *	Note that we only delete ONE at a time, instead of
	 *	wiping out many.  This allows the excess servers to
	 *	be slowly reaped, just in case the load spike comes again.
	 */
	if (spare > thread_pool.max_spare_threads) {

		spare -= thread_pool.max_spare_threads;

		DEBUG2("Threads: deleting 1 spare out of %d spares", spare);

		/*
		 *	Walk through the thread pool, deleting the
		 *	first idle thread we come across.
		 */
		for (handle = thread_pool.head; (handle != NULL) && (spare > 0) ; handle = next) {
			next = handle->next;

			/*
			 *	If the thread is not handling a
			 *	request, but still live, then tell it
			 *	to exit.
			 *
			 *	It will eventually wake up, and realize
			 *	it's been told to commit suicide.
			 */
			if ((handle->request == NULL) &&
			    (handle->status == THREAD_RUNNING)) {
				handle->status = THREAD_CANCELLED;
				/*
				 *	Post an extra semaphore, as a
				 *	signal to wake up, and exit.
				 */
				sem_post(&thread_pool.semaphore);
				spare--;
				break;
			}
		}
	}

	/*
	 *	If the thread has handled too many requests, then make it
	 *	exit.
	 */
	if (thread_pool.max_requests_per_thread > 0) {
		for (handle = thread_pool.head; handle; handle = next) {
			next = handle->next;

			/*
			 *	Not handling a request, but otherwise
			 *	live, we can kill it.
			 */
			if ((handle->request == NULL) &&
			    (handle->status == THREAD_RUNNING) &&
			    (handle->request_count > thread_pool.max_requests_per_thread)) {
				handle->status = THREAD_CANCELLED;
				sem_post(&thread_pool.semaphore);
			}
		}
	}

	/*
	 *	Otherwise everything's kosher.  There are not too few,
	 *	or too many spare threads.  Exit happily.
	 */
	return;
}
#ifdef WNOHANG
/*
 *	Thread wrapper for fork().
 */
pid_t rad_fork(void)
{
	pid_t child_pid;

	if (!pool_initialized) return fork();

	reap_children();	/* be nice to non-wait thingies */

	if (fr_hash_table_num_elements(thread_pool.waiters) >= 1024) {
		return -1;
	}

	/*
	 *	Fork & save the PID for later reaping.
	 */
	child_pid = fork();
	if (child_pid > 0) {
		int rcode;
		thread_fork_t *tf;

		tf = rad_malloc(sizeof(*tf));
		memset(tf, 0, sizeof(*tf));

		tf->pid = child_pid;

		pthread_mutex_lock(&thread_pool.wait_mutex);
		rcode = fr_hash_table_insert(thread_pool.waiters, tf);
		pthread_mutex_unlock(&thread_pool.wait_mutex);

		if (!rcode) {
			radlog(L_ERR, "Failed to store PID, creating what will be a zombie process %d",
			       (int) child_pid);
			free(tf);
		}
	}

	/*
	 *	Return whatever we were told.
	 */
	return child_pid;
}
/*
 *	Wait 10 seconds at most for a child to exit, then give up.
 */
pid_t rad_waitpid(pid_t pid, int *status)
{
	int i;
	thread_fork_t mytf, *tf;

	if (!pool_initialized) return waitpid(pid, status, 0);

	if (pid <= 0) return -1;

	mytf.pid = pid;

	pthread_mutex_lock(&thread_pool.wait_mutex);
	tf = fr_hash_table_finddata(thread_pool.waiters, &mytf);
	pthread_mutex_unlock(&thread_pool.wait_mutex);

	if (!tf) return -1;

	for (i = 0; i < 100; i++) {
		reap_children();

		if (tf->exited) {
			*status = tf->status;

			pthread_mutex_lock(&thread_pool.wait_mutex);
			fr_hash_table_delete(thread_pool.waiters, &mytf);
			pthread_mutex_unlock(&thread_pool.wait_mutex);
			return pid;
		}
		usleep(100000);	/* sleep for 1/10 of a second */
	}

	/*
	 *	100 iterations of 1/10 of a second each: 10 seconds
	 *	have passed, so give up on the child.
	 */
	pthread_mutex_lock(&thread_pool.wait_mutex);
	fr_hash_table_delete(thread_pool.waiters, &mytf);
	pthread_mutex_unlock(&thread_pool.wait_mutex);

	return 0;
}
#else
/*
 *	No rad_fork or rad_waitpid.
 */
#endif
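/*
 *	Intended usage of the pair above (an illustrative sketch only;
 *	"run_child" is a made-up helper, not part of this file):
 *
 *		pid_t pid = rad_fork();
 *		if (pid == 0) run_child();	exec the program in the child
 *		if (pid > 0) {
 *			int status;
 *			if (rad_waitpid(pid, &status) > 0) {
 *				WEXITSTATUS(status) is now valid
 *			}
 *		}
 */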
void thread_pool_lock(void)
{
	pthread_mutex_lock(&thread_pool.queue_mutex);
}

void thread_pool_unlock(void)
{
	pthread_mutex_unlock(&thread_pool.queue_mutex);
}

void thread_pool_queue_stats(int *array)
{
	int i;

	if (pool_initialized) {
		for (i = 0; i < RAD_LISTEN_MAX; i++) {
			array[i] = fr_fifo_num_elements(thread_pool.fifo[i]);
		}
	} else {
		for (i = 0; i < RAD_LISTEN_MAX; i++) {
			array[i] = 0;
		}
	}
}
#endif /* HAVE_PTHREAD_H */