/*
 * threads.c	request threading support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * Copyright 2000,2006  The FreeRADIUS server project
 * Copyright 2000  Alan DeKok <aland@ox.org>
 */
#include <freeradius-devel/ident.h>

#include <freeradius-devel/radiusd.h>
#include <freeradius-devel/rad_assert.h>

#ifdef HAVE_PTHREAD_H
/*
 *	Other OS's have sem_init, OS X doesn't.
 */
#ifdef HAVE_SEMAPHORE_H
#include <semaphore.h>
#else
#include <mach/task.h>
#include <mach/semaphore.h>

#define sem_t semaphore_t
#define sem_init(s,p,c) semaphore_create(mach_task_self(),s,SYNC_POLICY_FIFO,c)
#define sem_wait(s) semaphore_wait(*s)
#define sem_post(s) semaphore_signal(*s)
#endif
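/*
 *	Note that in the Mach mapping above, the "pshared" argument to
 *	sem_init() is ignored: the semaphore is always process-local,
 *	which is all this file needs.
 */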
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_OPENSSL_CRYPTO_H
#include <openssl/crypto.h>
#endif
#ifdef HAVE_OPENSSL_ERR_H
#include <openssl/err.h>
#endif
#ifdef HAVE_OPENSSL_EVP_H
#include <openssl/evp.h>
#endif
#define SEMAPHORE_LOCKED	(0)
#define SEMAPHORE_UNLOCKED	(1)

#define THREAD_RUNNING		(1)
#define THREAD_CANCELLED	(2)
#define THREAD_EXITED		(3)

#define NUM_FIFOS		RAD_LISTEN_MAX
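/*
 *	There is one FIFO per request priority (one per listener type).
 *	request_enqueue() pushes onto the FIFO for request->priority, and
 *	request_dequeue() scans the FIFOs in order, so lower-numbered
 *	priorities are serviced first.
 */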
/*
 *  A data structure which contains the information about
 *  the current thread.
 *
 *  pthread_id     pthread id
 *  thread_num     server thread number, 1...number of threads
 *  semaphore      used to block the thread until a request comes in
 *  status         is the thread running or exited?
 *  request_count  the number of requests that this thread has handled
 *  timestamp      when the thread started executing.
 */
typedef struct THREAD_HANDLE {
	struct THREAD_HANDLE *prev;
	struct THREAD_HANDLE *next;
	pthread_t             pthread_id;
	int                   thread_num;
	int                   status;
	unsigned int          request_count;
	time_t                timestamp;
	REQUEST              *request;
} THREAD_HANDLE;
/*
 *	For the request queue.
 */
typedef struct request_queue_t {
	REQUEST		 *request;
	RAD_REQUEST_FUNP  fun;
} request_queue_t;
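/*
 *	Book-keeping for a child process created via rad_fork(): the
 *	child's PID and, once reap_children() has collected it, its exit
 *	status.  rad_waitpid() polls this entry rather than calling
 *	waitpid() itself.
 */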
typedef struct thread_fork_t {
	pid_t		pid;
	int		status;
	int		exited;
} thread_fork_t;
/*
 *	A data structure to manage the thread pool.  There's no real
 *	need for a data structure, but it makes things conceptually
 *	easier.
 */
typedef struct THREAD_POOL {
	THREAD_HANDLE *head;
	THREAD_HANDLE *tail;

	int total_threads;
	int active_threads;	/* protected by queue_mutex */
	int max_thread_num;

	int start_threads;
	int max_threads;
	int min_spare_threads;
	int max_spare_threads;
	unsigned int max_requests_per_thread;
	unsigned long request_count;
	time_t time_last_spawned;
	int cleanup_delay;
	int spawn_flag;

	pthread_mutex_t wait_mutex;
	fr_hash_table_t *waiters;

	/*
	 *	All threads wait on this semaphore, for requests
	 *	to enter the queue.
	 */
	sem_t semaphore;

	/*
	 *	To ensure only one thread at a time touches the queue.
	 */
	pthread_mutex_t queue_mutex;

	int max_queue_size;
	int num_queued;
	fr_fifo_t *fifo[NUM_FIFOS];
} THREAD_POOL;
static THREAD_POOL thread_pool;
static int pool_initialized = FALSE;
static time_t last_cleaned = 0;

static void thread_pool_manage(time_t now);
/*
 *	A mapping of configuration file names to internal integers.
 */
static const CONF_PARSER thread_config[] = {
	{ "start_servers",           PW_TYPE_INTEGER, 0, &thread_pool.start_threads,          "5" },
	{ "max_servers",             PW_TYPE_INTEGER, 0, &thread_pool.max_threads,             "32" },
	{ "min_spare_servers",       PW_TYPE_INTEGER, 0, &thread_pool.min_spare_threads,       "3" },
	{ "max_spare_servers",       PW_TYPE_INTEGER, 0, &thread_pool.max_spare_threads,       "10" },
	{ "max_requests_per_server", PW_TYPE_INTEGER, 0, &thread_pool.max_requests_per_thread, "0" },
	{ "cleanup_delay",           PW_TYPE_INTEGER, 0, &thread_pool.cleanup_delay,           "5" },
	{ "max_queue_size",          PW_TYPE_INTEGER, 0, &thread_pool.max_queue_size,          "65536" },
	{ NULL, -1, 0, NULL, NULL }
};
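/*
 *	For reference, the matching section of radiusd.conf looks roughly
 *	like the sketch below (the values shown are the defaults from the
 *	table above; the exact section name and layout follow the shipped
 *	configuration file):
 *
 *	thread pool {
 *		start_servers = 5
 *		max_servers = 32
 *		min_spare_servers = 3
 *		max_spare_servers = 10
 *		max_requests_per_server = 0
 *		cleanup_delay = 5
 *		max_queue_size = 65536
 *	}
 */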
#ifdef HAVE_OPENSSL_CRYPTO_H

/*
 *	If we're linking against OpenSSL, then it is the
 *	duty of the application, if it is multithreaded,
 *	to provide OpenSSL with appropriate thread id
 *	and mutex locking functions.
 *
 *	Note: this only implements static callbacks.
 *	OpenSSL does not use dynamic locking callbacks
 *	right now, but may in the future, so we will have
 *	to add them at some point.
 */
static pthread_mutex_t *ssl_mutexes = NULL;
static unsigned long ssl_id_function(void)
{
	return (unsigned long) pthread_self();
}

static void ssl_locking_function(int mode, int n, const char *file, int line)
{
	file = file; /* -Wunused */
	line = line; /* -Wunused */

	if (mode & CRYPTO_LOCK) {
		pthread_mutex_lock(&(ssl_mutexes[n]));
	} else {
		pthread_mutex_unlock(&(ssl_mutexes[n]));
	}
}
static int setup_ssl_mutexes(void)
{
	int i;

#ifdef HAVE_OPENSSL_EVP_H
	/*
	 *	Enable all ciphers and digests.
	 */
	OpenSSL_add_all_algorithms();
#endif

	ssl_mutexes = rad_malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t));
	if (!ssl_mutexes) {
		radlog(L_ERR, "Error allocating memory for SSL mutexes!");
		return 0;
	}

	for (i = 0; i < CRYPTO_num_locks(); i++) {
		pthread_mutex_init(&(ssl_mutexes[i]), NULL);
	}

	CRYPTO_set_id_callback(ssl_id_function);
	CRYPTO_set_locking_callback(ssl_locking_function);

	return 1;
}
#endif /* HAVE_OPENSSL_CRYPTO_H */
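/*
 *	Note: OpenSSL 1.1.0 and later handle locking internally, so the
 *	id and locking callbacks registered above are only needed when
 *	building against older OpenSSL versions.
 */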
/*
 *	We don't want to catch SIGCHLD for a host of reasons.
 *
 *	- exec_wait means that someone, somewhere, somewhen, will
 *	  call waitpid(), and catch the child.
 *
 *	- SIGCHLD is delivered to a random thread, not the one that
 *	  forked the child.
 *
 *	- if another thread catches the child, we have to coordinate
 *	  with the thread doing the waiting.
 *
 *	- if we don't waitpid() for non-wait children, they'll be zombies,
 *	  and will hang around forever.
 */
static void reap_children(void)
{
	pid_t pid;
	int status;
	thread_fork_t mytf, *tf;

	pthread_mutex_lock(&thread_pool.wait_mutex);

	do {
		pid = waitpid(0, &status, WNOHANG);
		if (pid <= 0) break;

		mytf.pid = pid;
		tf = fr_hash_table_finddata(thread_pool.waiters, &mytf);
		if (!tf) continue;

		tf->status = status;
		tf->exited = 1;
	} while (fr_hash_table_num_elements(thread_pool.waiters) > 0);

	pthread_mutex_unlock(&thread_pool.wait_mutex);
}
/*
 *	Add a request to the list of waiting requests.
 *	This function gets called ONLY from the main handler thread...
 *
 *	This function should never fail.
 */
static int request_enqueue(REQUEST *request, RAD_REQUEST_FUNP fun)
{
	request_queue_t *entry;

	pthread_mutex_lock(&thread_pool.queue_mutex);

	thread_pool.request_count++;

	if (thread_pool.num_queued >= thread_pool.max_queue_size) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);

		/*
		 *	Mark the request as done.
		 */
		radlog(L_ERR, "!!! ERROR !!! The server is blocked: discarding new request %d", request->number);
		request->child_state = REQUEST_DONE;
		return 0;
	}

	entry = rad_malloc(sizeof(*entry));
	entry->request = request;
	entry->fun = fun;

	/*
	 *	Push the request onto the appropriate fifo for that
	 *	priority.
	 */
	if (!fr_fifo_push(thread_pool.fifo[request->priority], entry)) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);
		radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
		request->child_state = REQUEST_DONE;
		return 0;
	}

	thread_pool.num_queued++;

	pthread_mutex_unlock(&thread_pool.queue_mutex);

	/*
	 *	There's one more request in the queue.
	 *
	 *	Note that we're not touching the queue any more, so
	 *	the semaphore post is outside of the mutex.  This also
	 *	means that when the thread wakes up and tries to lock
	 *	the mutex, it will be unlocked, and there won't be
	 *	contention.
	 */
	sem_post(&thread_pool.semaphore);

	return 1;
}
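/*
 *	Each sem_post() above pairs with one sem_wait() in
 *	request_handler_thread(): one post per queued request, plus the
 *	extra posts made by thread_pool_manage() to wake up threads that
 *	have been told to exit.
 */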
/*
 *	Remove a request from the queue.
 */
static int request_dequeue(REQUEST **request, RAD_REQUEST_FUNP *fun)
{
	RAD_LISTEN_TYPE i, start;
	request_queue_t *entry;

	pthread_mutex_lock(&thread_pool.queue_mutex);

	/*
	 *	Clear old requests from all queues.
	 *
	 *	We only do one pass over the queue, in order to
	 *	amortize the work across the child threads.  Since we
	 *	do N checks for one request de-queued, the old
	 *	requests will be quickly cleared.
	 */
	for (i = 0; i < RAD_LISTEN_MAX; i++) {
		entry = fr_fifo_peek(thread_pool.fifo[i]);
		if (!entry ||
		    (entry->request->master_state != REQUEST_STOP_PROCESSING)) {
			continue;
		}

		/*
		 *	This entry was marked to be stopped.  Acknowledge it.
		 */
		entry = fr_fifo_pop(thread_pool.fifo[i]);
		rad_assert(entry != NULL);
		entry->request->child_state = REQUEST_DONE;
		thread_pool.num_queued--;
		free(entry);
	}

	start = 0;
 retry:
	/*
	 *	Pop results from the top of the queue
	 */
	for (i = start; i < RAD_LISTEN_MAX; i++) {
		entry = fr_fifo_pop(thread_pool.fifo[i]);
		if (entry) {
			start = i;
			break;
		}
	}

	if (!entry) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);
		*request = NULL;
		*fun = NULL;
		return 0;
	}

	rad_assert(thread_pool.num_queued > 0);
	thread_pool.num_queued--;
	*request = entry->request;
	*fun = entry->fun;
	free(entry);

	rad_assert(*request != NULL);
	rad_assert((*request)->magic == REQUEST_MAGIC);
	rad_assert(*fun != NULL);

	/*
	 *	If the request has sat in the queue for too long,
	 *	kill it.
	 *
	 *	The main clean-up code can't delete the request from
	 *	the queue, and therefore won't clean it up until we
	 *	have acknowledged it as "done".
	 */
	if ((*request)->master_state == REQUEST_STOP_PROCESSING) {
		(*request)->child_state = REQUEST_DONE;
		goto retry;
	}

	/*
	 *	The thread is currently processing a request.
	 */
	thread_pool.active_threads++;

	pthread_mutex_unlock(&thread_pool.queue_mutex);

	return 1;
}
/*
 *	The main thread handler for requests.
 *
 *	Wait on the semaphore until we have it, and process the request.
 */
static void *request_handler_thread(void *arg)
{
	RAD_REQUEST_FUNP fun;
	THREAD_HANDLE *self = (THREAD_HANDLE *) arg;

	/*
	 *	Loop forever, until told to exit.
	 */
	do {
		/*
		 *	Wait to be signalled.
		 */
		DEBUG2("Thread %d waiting to be assigned a request",
		       self->thread_num);
	re_wait:
		if (sem_wait(&thread_pool.semaphore) != 0) {
			/*
			 *	Interrupted system call.  Go back to
			 *	waiting, but DON'T print out any more
			 *	errors.
			 */
			if (errno == EINTR) {
				DEBUG2("Re-wait %d", self->thread_num);
				goto re_wait;
			}
			radlog(L_ERR, "Thread %d failed waiting for semaphore: %s: Exiting\n",
			       self->thread_num, strerror(errno));
			break;
		}

		DEBUG2("Thread %d got semaphore", self->thread_num);

		/*
		 *	Try to grab a request from the queue.
		 *
		 *	It may be empty, in which case we fail
		 *	gracefully.
		 */
		if (!request_dequeue(&self->request, &fun)) continue;

		self->request->child_pid = self->pthread_id;
		self->request_count++;

		DEBUG2("Thread %d handling request %d, (%d handled so far)",
		       self->thread_num, self->request->number,
		       self->request_count);

		radius_handle_request(self->request, fun);

		/*
		 *	Update the active threads.
		 */
		pthread_mutex_lock(&thread_pool.queue_mutex);
		rad_assert(thread_pool.active_threads > 0);
		thread_pool.active_threads--;
		pthread_mutex_unlock(&thread_pool.queue_mutex);
	} while (self->status != THREAD_CANCELLED);

	DEBUG2("Thread %d exiting...", self->thread_num);

#ifdef HAVE_OPENSSL_ERR_H
	/*
	 *	If we linked with OpenSSL, the application
	 *	must remove the thread's error queue before
	 *	exiting to prevent memory leaks.
	 */
	ERR_remove_state(0);
#endif

	/*
	 *	Do this as the LAST thing before exiting.
	 */
	self->request = NULL;
	self->status = THREAD_EXITED;

	return NULL;
}
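/*
 *	Thread lifecycle: spawn_thread() creates a handle in the
 *	THREAD_RUNNING state; thread_pool_manage() may later mark it
 *	THREAD_CANCELLED; the thread itself sets THREAD_EXITED just before
 *	returning, after which delete_thread() reaps the handle.
 */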
/*
 *	Take a THREAD_HANDLE, delete it from the thread pool and
 *	free its resources.
 *
 *	This function is called ONLY from the main server thread,
 *	ONLY after the thread has exited.
 */
static void delete_thread(THREAD_HANDLE *handle)
{
	THREAD_HANDLE *prev;
	THREAD_HANDLE *next;

	rad_assert(handle->request == NULL);

	DEBUG2("Deleting thread %d", handle->thread_num);

	prev = handle->prev;
	next = handle->next;

	rad_assert(thread_pool.total_threads > 0);
	thread_pool.total_threads--;

	/*
	 *	Remove the handle from the list.
	 */
	if (prev == NULL) {
		rad_assert(thread_pool.head == handle);
		thread_pool.head = next;
	} else {
		prev->next = next;
	}

	if (next == NULL) {
		rad_assert(thread_pool.tail == handle);
		thread_pool.tail = prev;
	} else {
		next->prev = prev;
	}

	/*
	 *	Free the handle, now that it's no longer referenceable.
	 */
	free(handle);
}
/*
 *	Spawn a new thread, and place it in the thread pool.
 *
 *	The thread is started initially in the blocked state, waiting
 *	for the semaphore.
 */
static THREAD_HANDLE *spawn_thread(time_t now)
{
	int rcode;
	THREAD_HANDLE *handle;
	pthread_attr_t attr;

	/*
	 *	Ensure that we don't spawn too many threads.
	 */
	if (thread_pool.total_threads >= thread_pool.max_threads) {
		DEBUG2("Thread spawn failed.  Maximum number of threads (%d) already running.", thread_pool.max_threads);
		return NULL;
	}

	/*
	 *	Allocate a new thread handle.
	 */
	handle = (THREAD_HANDLE *) rad_malloc(sizeof(THREAD_HANDLE));
	memset(handle, 0, sizeof(THREAD_HANDLE));

	handle->prev = NULL;
	handle->next = NULL;
	handle->pthread_id = NO_SUCH_CHILD_PID;
	handle->thread_num = thread_pool.max_thread_num++;
	handle->request_count = 0;
	handle->status = THREAD_RUNNING;
	handle->timestamp = time(NULL);

	/*
	 *	Initialize the thread's attributes to detached.
	 *
	 *	We could call pthread_detach() later, but if the thread
	 *	exits between the create & detach calls, it will need to
	 *	be joined, which will never happen.
	 */
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

	/*
	 *	Create the thread detached, so that it cleans up its
	 *	own memory when it exits.
	 *
	 *	Note that the function returns non-zero on error, NOT
	 *	-1.  The return code is the error, and errno isn't set.
	 */
	rcode = pthread_create(&handle->pthread_id, &attr,
			       request_handler_thread, handle);
	if (rcode != 0) {
		radlog(L_ERR, "Thread create failed: %s",
		       strerror(rcode));
		return NULL;
	}
	pthread_attr_destroy(&attr);

	/*
	 *	One more thread to go into the list.
	 */
	thread_pool.total_threads++;
	DEBUG2("Thread spawned new child %d. Total threads in pool: %d",
	       handle->thread_num, thread_pool.total_threads);

	/*
	 *	Add the thread handle to the tail of the thread pool list.
	 */
	if (thread_pool.tail) {
		thread_pool.tail->next = handle;
		handle->prev = thread_pool.tail;
		thread_pool.tail = handle;
	} else {
		rad_assert(thread_pool.head == NULL);
		thread_pool.head = thread_pool.tail = handle;
	}

	/*
	 *	Update the time we last spawned a thread.
	 */
	thread_pool.time_last_spawned = now;

	/*
	 *	And return the new handle to the caller.
	 */
	return handle;
}
/*
 *	Temporary function to prevent server from executing a SIGHUP
 *	until all threads are finished handling requests.  This returns
 *	the number of active threads to 'radiusd.c'.
 */
int total_active_threads(void)
{
	/*
	 *	We don't acquire the mutex, so this is just an estimate.
	 *	We can't return with the lock held, so there's no point
	 *	in getting the guaranteed correct value; by the time
	 *	the caller sees it, it can be wrong again.
	 */
	return thread_pool.active_threads;
}
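/*
 *	Hash-table callbacks for thread_pool.waiters: the entries are
 *	thread_fork_t structures, keyed on the child PID.
 */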
static uint32_t pid_hash(const void *data)
{
	const thread_fork_t *tf = data;

	return fr_hash(&tf->pid, sizeof(tf->pid));
}

static int pid_cmp(const void *one, const void *two)
{
	const thread_fork_t *a = one;
	const thread_fork_t *b = two;

	return (a->pid - b->pid);
}
/*
 *	Allocate the thread pool, and seed it with an initial number
 *	of threads.
 *
 *	FIXME: What to do on a SIGHUP???
 */
int thread_pool_init(CONF_SECTION *cs, int spawn_flag)
{
	int i, rcode;
	CONF_SECTION *pool_cf;
	time_t now;

	now = time(NULL);

	/*
	 *	We're not spawning new threads, don't do
	 *	anything.
	 */
	if (!spawn_flag) return 0;

	/*
	 *	After a SIGHUP, we don't over-write the previous values.
	 */
	if (!pool_initialized) {
		/*
		 *	Initialize the thread pool to some reasonable values.
		 */
		memset(&thread_pool, 0, sizeof(THREAD_POOL));
		thread_pool.head = NULL;
		thread_pool.tail = NULL;
		thread_pool.total_threads = 0;
		thread_pool.max_thread_num = 1;
		thread_pool.cleanup_delay = 5;
		thread_pool.spawn_flag = spawn_flag;

		if ((pthread_mutex_init(&thread_pool.wait_mutex, NULL) != 0)) {
			radlog(L_ERR, "FATAL: Failed to initialize wait mutex: %s",
			       strerror(errno));
			return -1;
		}

		/*
		 *	Create the hash table of child PIDs.
		 */
		thread_pool.waiters = fr_hash_table_create(pid_hash,
							   pid_cmp,
							   free);
		if (!thread_pool.waiters) {
			radlog(L_ERR, "FATAL: Failed to set up wait hash");
			return -1;
		}
	}

	pool_cf = cf_subsection_find_next(cs, NULL, "thread");
	if (!pool_cf) {
		radlog(L_ERR, "FATAL: Attempting to start in multi-threaded mode with no thread configuration in radiusd.conf");
		return -1;
	}

	if (cf_section_parse(pool_cf, NULL, thread_config) < 0) {
		return -1;
	}

	/*
	 *	Catch corner cases.
	 */
	if (thread_pool.min_spare_threads < 1)
		thread_pool.min_spare_threads = 1;
	if (thread_pool.max_spare_threads < 1)
		thread_pool.max_spare_threads = 1;
	if (thread_pool.max_spare_threads < thread_pool.min_spare_threads)
		thread_pool.max_spare_threads = thread_pool.min_spare_threads;

	/*
	 *	The pool has already been initialized.  Don't spawn
	 *	new threads, and don't forget about forked children.
	 */
	if (pool_initialized) {
		return 0;
	}

	/*
	 *	Initialize the queue of requests.
	 */
	memset(&thread_pool.semaphore, 0, sizeof(thread_pool.semaphore));
	rcode = sem_init(&thread_pool.semaphore, 0, SEMAPHORE_LOCKED);
	if (rcode != 0) {
		radlog(L_ERR, "FATAL: Failed to initialize semaphore: %s",
		       strerror(errno));
		return -1;
	}

	rcode = pthread_mutex_init(&thread_pool.queue_mutex, NULL);
	if (rcode != 0) {
		radlog(L_ERR, "FATAL: Failed to initialize queue mutex: %s",
		       strerror(errno));
		return -1;
	}

	/*
	 *	Allocate multiple fifos.
	 */
	for (i = 0; i < RAD_LISTEN_MAX; i++) {
		thread_pool.fifo[i] = fr_fifo_create(65536, NULL);
		if (!thread_pool.fifo[i]) {
			radlog(L_ERR, "FATAL: Failed to set up request fifo");
			return -1;
		}
	}

#ifdef HAVE_OPENSSL_CRYPTO_H
	/*
	 *	If we're linking with OpenSSL too, then we need
	 *	to set up the mutexes and enable the thread callbacks.
	 */
	if (!setup_ssl_mutexes()) {
		radlog(L_ERR, "FATAL: Failed to set up SSL mutexes");
		return -1;
	}
#endif

	/*
	 *	Create a number of waiting threads.
	 *
	 *	If we fail while creating them, do something intelligent.
	 */
	for (i = 0; i < thread_pool.start_threads; i++) {
		if (spawn_thread(now) == NULL) {
			return -1;
		}
	}

	DEBUG2("Thread pool initialized");
	pool_initialized = TRUE;
	return 0;
}
/*
 *	Assign a new request to a free thread.
 *
 *	If there isn't a free thread, then try to create a new one,
 *	up to the configured limits.
 */
int thread_pool_addrequest(REQUEST *request, RAD_REQUEST_FUNP fun)
{
	time_t now = request->timestamp;

	/*
	 *	We've been told not to spawn threads, so don't.
	 */
	if (!thread_pool.spawn_flag) {
		radius_handle_request(request, fun);

#ifdef WNOHANG
		/*
		 *	Requests that care about child process exit
		 *	codes have already either called
		 *	rad_waitpid(), or they've given up.
		 */
		wait(NULL);
#endif
		return 1;
	}

	/*
	 *	Add the new request to the queue.
	 */
	if (!request_enqueue(request, fun)) return 0;

	/*
	 *	If we haven't checked the number of child threads
	 *	in a while, OR if the thread pool appears to be full,
	 *	go manage it.
	 */
	if ((last_cleaned < now) ||
	    (thread_pool.active_threads == thread_pool.total_threads)) {
		thread_pool_manage(now);
	}

	return 1;
}
/*
 *	Check the min_spare_threads and max_spare_threads.
 *
 *	If there are too many or too few threads waiting, then we
 *	either create some more, or delete some.
 */
static void thread_pool_manage(time_t now)
{
	int spare;
	int i, total;
	THREAD_HANDLE *handle, *next;
	int active_threads;

	/*
	 *	We don't need a mutex lock here, as we're reading
	 *	active_threads, and not modifying it.  We want a close
	 *	approximation of the number of active threads, and this
	 *	is good enough.
	 */
	active_threads = thread_pool.active_threads;
	spare = thread_pool.total_threads - active_threads;
	{
		static int old_total = -1;
		static int old_active = -1;

		if ((old_total != thread_pool.total_threads) ||
		    (old_active != active_threads)) {
			DEBUG2("Threads: total/active/spare threads = %d/%d/%d",
			       thread_pool.total_threads, active_threads, spare);
			old_total = thread_pool.total_threads;
			old_active = active_threads;
		}
	}

	/*
	 *	If there are too few spare threads.  Go create some more.
	 */
	if (spare < thread_pool.min_spare_threads) {
		total = thread_pool.min_spare_threads - spare;

		DEBUG2("Threads: Spawning %d spares", total);

		/*
		 *	Create a number of spare threads.
		 */
		for (i = 0; i < total; i++) {
			handle = spawn_thread(now);
			if (handle == NULL) {
				return;
			}
		}

		return; /* there aren't too many spare threads */
	}

	/*
	 *	Only delete spare threads if we haven't already done
	 *	so this second.
	 */
	if (now == last_cleaned) {
		return;
	}
	last_cleaned = now;

	/*
	 *	Loop over the thread pool, deleting exited threads.
	 */
	for (handle = thread_pool.head; handle; handle = next) {
		next = handle->next;

		/*
		 *	Maybe we've asked the thread to exit, and it
		 *	has exited.
		 */
		if (handle->status == THREAD_EXITED) {
			delete_thread(handle);
		}
	}

	/*
	 *	Only delete the spare threads if sufficient time has
	 *	passed since we last created one.  This helps to minimize
	 *	the amount of create/delete cycles.
	 */
	if ((now - thread_pool.time_last_spawned) < thread_pool.cleanup_delay) {
		return;
	}

	/*
	 *	If there are too many spare threads, delete one.
	 *
	 *	Note that we only delete ONE at a time, instead of
	 *	wiping out many.  This allows the excess servers to
	 *	be slowly reaped, just in case the load spike comes again.
	 */
	if (spare > thread_pool.max_spare_threads) {
		spare -= thread_pool.max_spare_threads;

		DEBUG2("Threads: deleting 1 spare out of %d spares", spare);

		/*
		 *	Walk through the thread pool, deleting the
		 *	first idle thread we come across.
		 */
		for (handle = thread_pool.head; (handle != NULL) && (spare > 0) ; handle = next) {
			next = handle->next;

			/*
			 *	If the thread is not handling a
			 *	request, but still live, then tell it
			 *	to exit.
			 *
			 *	It will eventually wake up, and realize
			 *	it's been told to commit suicide.
			 */
			if ((handle->request == NULL) &&
			    (handle->status == THREAD_RUNNING)) {
				handle->status = THREAD_CANCELLED;
				spare--;

				/*
				 *	Post an extra semaphore, as a
				 *	signal to wake up, and exit.
				 */
				sem_post(&thread_pool.semaphore);
				break;
			}
		}
	}

	/*
	 *	If the thread has handled too many requests, then make it
	 *	exit.
	 */
	if (thread_pool.max_requests_per_thread > 0) {
		for (handle = thread_pool.head; handle; handle = next) {
			next = handle->next;

			/*
			 *	Not handling a request, but otherwise
			 *	live, we can kill it.
			 */
			if ((handle->request == NULL) &&
			    (handle->status == THREAD_RUNNING) &&
			    (handle->request_count > thread_pool.max_requests_per_thread)) {
				handle->status = THREAD_CANCELLED;
				sem_post(&thread_pool.semaphore);
			}
		}
	}

	/*
	 *	Otherwise everything's kosher.  There are not too few,
	 *	or too many spare threads.  Exit happily.
	 */
	return;
}
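/*
 *	For example: with total_threads = 12 and active_threads = 3,
 *	spare is 9.  With max_spare_servers = 10 nothing is deleted;
 *	with max_spare_servers = 5, one idle THREAD_RUNNING thread is
 *	marked THREAD_CANCELLED per call, and its handle is reaped on a
 *	later pass once the thread has set THREAD_EXITED.
 */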
/*
 *	Thread wrapper for fork().
 */
pid_t rad_fork(void)
{
	pid_t child_pid;

	if (!pool_initialized) return fork();

	reap_children();	/* be nice to non-wait thingies */

	if (fr_hash_table_num_elements(thread_pool.waiters) >= 1024) {
		return -1;
	}

	/*
	 *	Fork & save the PID for later reaping.
	 */
	child_pid = fork();
	if (child_pid > 0) {
		int rcode;
		thread_fork_t *tf;

		tf = rad_malloc(sizeof(*tf));
		memset(tf, 0, sizeof(*tf));

		tf->pid = child_pid;

		pthread_mutex_lock(&thread_pool.wait_mutex);
		rcode = fr_hash_table_insert(thread_pool.waiters, tf);
		pthread_mutex_unlock(&thread_pool.wait_mutex);

		if (!rcode) {
			radlog(L_ERR, "Failed to store PID, creating what will be a zombie process %d",
			       (int) child_pid);
		}
	}

	/*
	 *	Return whatever we were told.
	 */
	return child_pid;
}
/*
 *	Wait 10 seconds at most for a child to exit, then give up.
 */
pid_t rad_waitpid(pid_t pid, int *status)
{
	int i;
	thread_fork_t mytf, *tf;

	if (!pool_initialized) return waitpid(pid, status, 0);

	if (pid <= 0) return -1;

	mytf.pid = pid;

	pthread_mutex_lock(&thread_pool.wait_mutex);
	tf = fr_hash_table_finddata(thread_pool.waiters, &mytf);
	pthread_mutex_unlock(&thread_pool.wait_mutex);

	if (!tf) return -1;

	for (i = 0; i < 100; i++) {
		reap_children();

		if (tf->exited) {
			*status = tf->status;

			pthread_mutex_lock(&thread_pool.wait_mutex);
			fr_hash_table_delete(thread_pool.waiters, &mytf);
			pthread_mutex_unlock(&thread_pool.wait_mutex);
			return pid;
		}

		usleep(100000);	/* sleep for 1/10 of a second */
	}

	/*
	 *	10 seconds have passed, give up on the child.
	 */
	pthread_mutex_lock(&thread_pool.wait_mutex);
	fr_hash_table_delete(thread_pool.waiters, &mytf);
	pthread_mutex_unlock(&thread_pool.wait_mutex);

	return 0;
}
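/*
 *	The polling loop above runs at most 100 iterations with a 100 ms
 *	sleep each, which gives the 10 second bound described in the
 *	function comment.
 */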
void thread_pool_lock(void)
{
	pthread_mutex_lock(&thread_pool.queue_mutex);
}

void thread_pool_unlock(void)
{
	pthread_mutex_unlock(&thread_pool.queue_mutex);
}

#endif /* HAVE_PTHREAD_H */