2 * threads.c request threading support
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
20 * Copyright 2000,2006 The FreeRADIUS server project
21 * Copyright 2000 Alan DeKok <aland@ox.org>
24 #include <freeradius-devel/ident.h>
27 #include <freeradius-devel/autoconf.h>
33 * Other OS's have sem_init, OS X doesn't.
36 #include <semaphore.h>
38 #include <mach/task.h>
39 #include <mach/semaphore.h>
42 #define sem_t semaphore_t
44 #define sem_init(s,p,c) semaphore_create(mach_task_self(),s,SYNC_POLICY_FIFO,c)
46 #define sem_wait(s) semaphore_wait(*s)
48 #define sem_post(s) semaphore_signal(*s)
53 #ifdef HAVE_SYS_WAIT_H
57 #include <freeradius-devel/radiusd.h>
58 #include <freeradius-devel/rad_assert.h>
59 #include <freeradius-devel/modules.h>
63 #ifdef HAVE_OPENSSL_CRYPTO_H
64 #include <openssl/crypto.h>
66 #ifdef HAVE_OPENSSL_ERR_H
67 #include <openssl/err.h>
70 #define SEMAPHORE_LOCKED (0)
71 #define SEMAPHORE_UNLOCKED (1)
73 #define THREAD_RUNNING (1)
74 #define THREAD_CANCELLED (2)
75 #define THREAD_EXITED (3)
80 * Ordered this way because we prefer proxy, then ongoing, then
83 #define FIFO_START (1)
84 #define FIFO_PROXY (0)
87 * A data structure which contains the information about
90 * pthread_id pthread id
91 * thread_num server thread number, 1...number of threads
92 * semaphore used to block the thread until a request comes in
93 * status is the thread running or exited?
94 * request_count the number of requests that this thread has handled
95 * timestamp when the thread started executing.
/*
 * Node in the doubly-linked list of worker threads owned by the pool.
 * NOTE(review): interior members (thread_num, semaphore, status,
 * timestamp — described in the comment above) are elided in this
 * extraction; only the list links, pthread id and request counter
 * are visible here.
 */
97 typedef struct THREAD_HANDLE {
98 struct THREAD_HANDLE *prev;
99 struct THREAD_HANDLE *next;
100 pthread_t pthread_id;
103 unsigned int request_count;
109 * For the request queue.
/*
 * One entry in the request queue: the request plus the handler
 * function the worker thread should run for it.
 */
111 typedef struct request_queue_t {
113 RAD_REQUEST_FUNP fun;
/*
 * Bookkeeping record for a child created via rad_fork(); stored in
 * thread_pool.waiters, keyed by pid (see pid_hash/pid_cmp below).
 */
116 typedef struct thread_fork_t {
124 * A data structure to manage the thread pool. There's no real
125 * need for a data structure, but it makes things conceptually
/*
 * Global state for the thread pool: list of workers, sizing limits,
 * the fork-waiter table, and the request queue with its semaphore
 * and mutex.
 */
128 typedef struct THREAD_POOL {
133 int active_threads; /* protected by queue_mutex */
137 int min_spare_threads;
138 int max_spare_threads;
139 unsigned int max_requests_per_thread; /* 0 = no per-thread limit */
140 unsigned long request_count; /* total requests ever queued */
141 time_t time_last_spawned; /* rate-limits spare-thread cleanup */
145 pthread_mutex_t wait_mutex; /* protects 'waiters' hash */
146 lrad_hash_table_t *waiters; /* pids from rad_fork() awaiting reaping */
149 * All threads wait on this semaphore, for requests
150 * to enter the queue.
155 * To ensure only one thread at a time touches the queue.
157 pthread_mutex_t queue_mutex;
162 lrad_fifo_t *fifo[NUM_FIFOS]; /* one fifo per priority class */
/* The single, file-scope pool instance and its init-once flag. */
165 static THREAD_POOL thread_pool;
166 static int pool_initialized = FALSE;
170 * A mapping of configuration file names to internal integers
/*
 * Maps "thread" config-section names onto thread_pool fields, with
 * string defaults; parsed by cf_section_parse() in thread_pool_init().
 */
172 static const CONF_PARSER thread_config[] = {
173 { "start_servers", PW_TYPE_INTEGER, 0, &thread_pool.start_threads, "5" },
174 { "max_servers", PW_TYPE_INTEGER, 0, &thread_pool.max_threads, "32" },
175 { "min_spare_servers", PW_TYPE_INTEGER, 0, &thread_pool.min_spare_threads, "3" },
176 { "max_spare_servers", PW_TYPE_INTEGER, 0, &thread_pool.max_spare_threads, "10" },
177 { "max_requests_per_server", PW_TYPE_INTEGER, 0, &thread_pool.max_requests_per_thread, "0" },
178 { "cleanup_delay", PW_TYPE_INTEGER, 0, &thread_pool.cleanup_delay, "5" },
179 { "max_queue_size", PW_TYPE_INTEGER, 0, &thread_pool.max_queue_size, "65536" },
180 { NULL, -1, 0, NULL, NULL } /* end-of-table sentinel */
184 #ifdef HAVE_OPENSSL_CRYPTO_H
187 * If we're linking against OpenSSL, then it is the
188 * duty of the application, if it is multithreaded,
189 * to provide OpenSSL with appropriate thread id
190 * and mutex locking functions
192 * Note: this only implements static callbacks.
193 * OpenSSL does not use dynamic locking callbacks
194 * right now, but may in the future, so we will have
195 * to add them at some point.
198 static pthread_mutex_t *ssl_mutexes = NULL;
/*
 * OpenSSL thread-id callback: identifies the calling thread by
 * casting its pthread_t. NOTE(review): pthread_t is not guaranteed
 * to be an integral type on every platform — confirm portability.
 */
200 static unsigned long ssl_id_function(void)
202 return (unsigned long) pthread_self();
/*
 * OpenSSL static locking callback: locks or unlocks mutex 'n'
 * depending on whether CRYPTO_LOCK is set in 'mode'.
 */
205 static void ssl_locking_function(int mode, int n, const char *file, int line)
207 file = file; /* -Wunused */
208 line = line; /* -Wunused */
210 if (mode & CRYPTO_LOCK) {
211 pthread_mutex_lock(&(ssl_mutexes[n]));
213 pthread_mutex_unlock(&(ssl_mutexes[n]));
/*
 * Allocate one mutex per OpenSSL lock and register the id/locking
 * callbacks. Returns non-zero on success (caller checks the result
 * in thread_pool_init); logs and fails if allocation fails.
 */
217 static int setup_ssl_mutexes(void)
221 ssl_mutexes = rad_malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t));
223 radlog(L_ERR, "Error allocating memory for SSL mutexes!");
227 for (i = 0; i < CRYPTO_num_locks(); i++) {
228 pthread_mutex_init(&(ssl_mutexes[i]), NULL);
231 CRYPTO_set_id_callback(ssl_id_function);
232 CRYPTO_set_locking_callback(ssl_locking_function);
240 * We don't want to catch SIGCHLD for a host of reasons.
242 * - exec_wait means that someone, somewhere, somewhen, will
243 * call waitpid(), and catch the child.
245 * - SIGCHLD is delivered to a random thread, not the one that
248 * - if another thread catches the child, we have to coordinate
249 * with the thread doing the waiting.
251 * - if we don't waitpid() for non-wait children, they'll be zombies,
252 * and will hang around forever.
/*
 * Non-blocking reap of children created via rad_fork(): under
 * wait_mutex, waitpid(WNOHANG) each exited child and record its
 * status in the 'waiters' hash so rad_waitpid() can find it.
 */
255 static void reap_children(void)
259 thread_fork_t mytf, *tf;
262 pthread_mutex_lock(&thread_pool.wait_mutex);
265 pid = waitpid(0, &status, WNOHANG);
269 tf = lrad_hash_table_finddata(thread_pool.waiters, &mytf);
274 } while (lrad_hash_table_num_elements(thread_pool.waiters) > 0);
276 pthread_mutex_unlock(&thread_pool.wait_mutex);
280 * Add a request to the list of waiting requests.
281 * This function gets called ONLY from the main handler thread...
283 * This function should never fail.
/*
 * Queue a request (and its handler) for a worker thread, choosing a
 * fifo by priority (proxy replies first), then post the semaphore to
 * wake one worker. Called only from the main handler thread.
 * Discards the request (marking it finished) if the queue is full or
 * the fifo push fails.
 */
285 static int request_enqueue(REQUEST *request, RAD_REQUEST_FUNP fun)
287 int fifo = FIFO_START;
288 request_queue_t *entry;
290 pthread_mutex_lock(&thread_pool.queue_mutex);
292 thread_pool.request_count++;
295 * FIXME: Handle proxy replies separately?
297 if (thread_pool.num_queued >= thread_pool.max_queue_size) {
298 pthread_mutex_unlock(&thread_pool.queue_mutex);
301 * Mark the request as done.
303 radlog(L_ERR|L_CONS, "!!! ERROR !!! The server is blocked: discarding new request %d", request->number);
304 request->finished = TRUE;
309 * Requests get handled in priority. First, we handle
310 * replies from a home server, to finish ongoing requests.
312 * Then, we handle requests with State, to finish
313 * multi-packet transactions.
315 * Finally, we handle new requests.
317 if (request->proxy_reply) {
323 entry = rad_malloc(sizeof(*entry));
324 entry->request = request;
327 if (!lrad_fifo_push(thread_pool.fifo[fifo], entry)) {
328 pthread_mutex_unlock(&thread_pool.queue_mutex);
329 radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
330 request->finished = TRUE;
334 thread_pool.num_queued++;
336 pthread_mutex_unlock(&thread_pool.queue_mutex);
339 * There's one more request in the queue.
341 * Note that we're not touching the queue any more, so
342 * the semaphore post is outside of the mutex. This also
343 * means that when the thread wakes up and tries to lock
344 * the mutex, it will be unlocked, and there won't be
347 sem_post(&thread_pool.semaphore);
353 * Remove a request from the queue.
/*
 * Pop the next request for a worker thread. Scans the fifos starting
 * from the remembered fifo_state (round-robin with wrap-around),
 * bumps active_threads under queue_mutex, and hands back the request
 * and handler through the out-parameters. Requests flagged
 * STOP_NOW are marked finished instead of being processed.
 */
355 static int request_dequeue(REQUEST **request, RAD_REQUEST_FUNP *fun)
358 request_queue_t *entry;
362 pthread_mutex_lock(&thread_pool.queue_mutex);
364 fifo_state = thread_pool.fifo_state;
369 * Pop an entry from the current queue, and go to
372 entry = lrad_fifo_pop(thread_pool.fifo[fifo_state]);
374 if (fifo_state >= NUM_FIFOS) fifo_state = 0;
375 } while ((fifo_state != thread_pool.fifo_state) && !entry);
378 pthread_mutex_unlock(&thread_pool.queue_mutex);
384 rad_assert(thread_pool.num_queued > 0);
385 thread_pool.num_queued--;
386 *request = entry->request;
390 rad_assert(*request != NULL);
391 rad_assert((*request)->magic == REQUEST_MAGIC);
392 rad_assert(*fun != NULL);
395 * If the request has sat in the queue for too long,
398 * The main clean-up code won't delete the request from
399 * the request list, until it's marked "finished"
401 if ((*request)->options & RAD_REQUEST_OPTION_STOP_NOW) {
402 (*request)->finished = 1;
407 * The thread is currently processing a request.
409 thread_pool.active_threads++;
410 thread_pool.fifo_state = fifo_state;
412 pthread_mutex_unlock(&thread_pool.queue_mutex);
414 rad_assert((*request)->child_pid == NO_SUCH_CHILD_PID);
421 * The main thread handler for requests.
423 * Wait on the semaphore until we have it, and process the request.
/*
 * Worker-thread main loop: block on the pool semaphore, dequeue a
 * request, run rad_respond() on it, then update accounting under
 * queue_mutex. Exits when status is set to THREAD_CANCELLED (by
 * thread_pool_clean) and marks itself THREAD_EXITED as its very
 * last act so the main thread can reap the handle.
 */
425 static void *request_handler_thread(void *arg)
427 RAD_REQUEST_FUNP fun;
428 THREAD_HANDLE *self = (THREAD_HANDLE *) arg;
429 #ifdef HAVE_PTHREAD_SIGMASK
433 * Block SIGHUP handling for the child threads.
435 * This ensures that only the main server thread will
436 * process HUP signals.
438 * If we don't have sigprocmask, then it shouldn't be
439 * a problem, either, as the sig_hup handler should check
440 * for this condition.
443 sigaddset(&set, SIGHUP);
444 sigaddset(&set, SIGINT);
445 sigaddset(&set, SIGQUIT);
446 sigaddset(&set, SIGTERM);
447 pthread_sigmask(SIG_BLOCK, &set, NULL);
451 * Loop forever, until told to exit.
457 * Wait to be signalled.
459 DEBUG2("Thread %d waiting to be assigned a request",
462 if (sem_wait(&thread_pool.semaphore) != 0) {
464 * Interrupted system call. Go back to
465 * waiting, but DON'T print out any more
468 if (errno == EINTR) {
469 DEBUG2("Re-wait %d", self->thread_num);
472 radlog(L_ERR, "Thread %d failed waiting for semaphore: %s: Exiting\n",
473 self->thread_num, strerror(errno));
477 DEBUG2("Thread %d got semaphore", self->thread_num);
480 * Try to grab a request from the queue.
482 * It may be empty, in which case we fail
485 if (!request_dequeue(&self->request, &fun)) continue;
487 self->request->child_pid = self->pthread_id;
488 self->request_count++;
490 DEBUG2("Thread %d handling request %d, (%d handled so far)",
491 self->thread_num, self->request->number,
492 self->request_count);
495 * Respond, and reset request->child_pid
497 finished = rad_respond(self->request, fun);
500 * Update the active threads.
502 pthread_mutex_lock(&thread_pool.queue_mutex);
505 * We haven't replied to the client, but we HAVE
506 * sent a proxied packet, and we have NOT
507 * received a proxy response. In that case, send
508 * the proxied packet now. Doing this in the mutex
509 * avoids race conditions.
511 * FIXME: this work should really depend on a
512 * "state", and "next handler", rather than
513 * horrid hacks like this.
515 if (!self->request->reply->data &&
516 self->request->proxy && self->request->proxy->data
517 && !self->request->proxy_reply)
518 self->request->proxy_listener->send(self->request->proxy_listener,
521 self->request->child_pid = NO_SUCH_CHILD_PID;
522 self->request->finished = finished;
523 self->request = NULL;
525 rad_assert(thread_pool.active_threads > 0);
526 thread_pool.active_threads--;
527 pthread_mutex_unlock(&thread_pool.queue_mutex);
528 } while (self->status != THREAD_CANCELLED);
530 DEBUG2("Thread %d exiting...", self->thread_num);
532 #ifdef HAVE_OPENSSL_ERR_H
534 * If we linked with OpenSSL, the application
535 * must remove the thread's error queue before
536 * exiting to prevent memory leaks.
542 * Do this as the LAST thing before exiting.
544 self->status = THREAD_EXITED;
550 * Take a THREAD_HANDLE, delete it from the thread pool and
551 * free its resources.
553 * This function is called ONLY from the main server thread,
554 * ONLY after the thread has exited.
/*
 * Unlink an exited thread's handle from the pool list, decrement the
 * total count, and free the handle. Called ONLY from the main server
 * thread, ONLY after the worker has set THREAD_EXITED.
 */
556 static void delete_thread(THREAD_HANDLE *handle)
561 rad_assert(handle->request == NULL);
563 DEBUG2("Deleting thread %d", handle->thread_num);
567 rad_assert(thread_pool.total_threads > 0);
568 thread_pool.total_threads--;
571 * Remove the handle from the list.
574 rad_assert(thread_pool.head == handle);
575 thread_pool.head = next;
581 rad_assert(thread_pool.tail == handle);
582 thread_pool.tail = prev;
588 * Free the handle, now that it's no longer referencable.
595 * Spawn a new thread, and place it in the thread pool.
597 * The thread is started initially in the blocked state, waiting
/*
 * Create one new detached worker thread and append its handle to the
 * tail of the pool list. Returns the new handle, or NULL when the
 * pool is already at max_threads. The thread starts blocked on the
 * pool semaphore in request_handler_thread().
 */
600 static THREAD_HANDLE *spawn_thread(time_t now)
603 THREAD_HANDLE *handle;
607 * Ensure that we don't spawn too many threads.
609 if (thread_pool.total_threads >= thread_pool.max_threads) {
610 DEBUG2("Thread spawn failed. Maximum number of threads (%d) already running.", thread_pool.max_threads);
615 * Allocate a new thread handle.
617 handle = (THREAD_HANDLE *) rad_malloc(sizeof(THREAD_HANDLE));
618 memset(handle, 0, sizeof(THREAD_HANDLE));
621 handle->pthread_id = NO_SUCH_CHILD_PID;
622 handle->thread_num = thread_pool.max_thread_num++;
623 handle->request_count = 0;
624 handle->status = THREAD_RUNNING;
625 handle->timestamp = time(NULL);
628 * Initialize the thread's attributes to detached.
630 * We could call pthread_detach() later, but if the thread
631 * exits between the create & detach calls, it will need to
632 * be joined, which will never happen.
634 pthread_attr_init(&attr);
635 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
638 * Create the thread detached, so that it cleans up it's
639 * own memory when it exits.
641 * Note that the function returns non-zero on error, NOT
642 * -1. The return code is the error, and errno isn't set.
644 rcode = pthread_create(&handle->pthread_id, &attr,
645 request_handler_thread, handle);
647 radlog(L_ERR|L_CONS, "FATAL: Thread create failed: %s",
651 pthread_attr_destroy(&attr);
654 * One more thread to go into the list.
656 thread_pool.total_threads++;
657 DEBUG2("Thread spawned new child %d. Total threads in pool: %d",
658 handle->thread_num, thread_pool.total_threads);
661 * Add the thread handle to the tail of the thread pool list.
663 if (thread_pool.tail) {
664 thread_pool.tail->next = handle;
665 handle->prev = thread_pool.tail;
666 thread_pool.tail = handle;
668 rad_assert(thread_pool.head == NULL);
669 thread_pool.head = thread_pool.tail = handle;
673 * Update the time we last spawned a thread.
675 thread_pool.time_last_spawned = now;
678 * And return the new handle to the caller.
684 * Temporary function to prevent server from executing a SIGHUP
685 * until all threads are finished handling requests. This returns
686 * the number of active threads to 'radiusd.c'.
/*
 * Return an unlocked snapshot of active_threads for radiusd.c.
 * Deliberately racy: see the comment below for why taking the
 * mutex would add no real guarantee.
 */
688 int total_active_threads(void)
691 * We don't acquire the mutex, so this is just an estimate.
692 * We can't return with the lock held, so there's no point
693 * in getting the guaranteed correct value; by the time
694 * the caller sees it, it can be wrong again.
696 return thread_pool.active_threads;
/*
 * Hash callback for the 'waiters' table: hash the child's pid.
 */
700 static uint32_t pid_hash(const void *data)
702 const thread_fork_t *tf = data;
704 return lrad_hash(&tf->pid, sizeof(tf->pid));
/*
 * Comparison callback for the 'waiters' table: order entries by pid.
 */
707 static int pid_cmp(const void *one, const void *two)
709 const thread_fork_t *a = one;
710 const thread_fork_t *b = two;
712 return (a->pid - b->pid);
716 * Allocate the thread pool, and seed it with an initial number
719 * FIXME: What to do on a SIGHUP???
/*
 * Initialize the thread pool: zero the pool state (first call only),
 * create the wait mutex and waiter hash, parse the "thread" config
 * section, set up the semaphore, queue mutex, priority fifos and
 * (if built with OpenSSL) the SSL mutexes, then spawn start_threads
 * workers. A SIGHUP re-entry keeps previous values and spawns
 * nothing new. Returns 0 on success / no-op; fatal errors are
 * logged (elided exit paths not visible in this extraction).
 */
721 int thread_pool_init(int spawn_flag)
724 CONF_SECTION *pool_cf;
727 DEBUG("Initializing the thread pool...");
731 * We're not spawning new threads, don't do
734 if (!spawn_flag) return 0;
737 * After a SIGHUP, we don't over-write the previous values.
739 if (!pool_initialized) {
741 * Initialize the thread pool to some reasonable values.
743 memset(&thread_pool, 0, sizeof(THREAD_POOL));
744 thread_pool.head = NULL;
745 thread_pool.tail = NULL;
746 thread_pool.total_threads = 0;
747 thread_pool.max_thread_num = 1;
748 thread_pool.cleanup_delay = 5;
749 thread_pool.spawn_flag = spawn_flag;
751 if ((pthread_mutex_init(&thread_pool.wait_mutex,NULL) != 0)) {
752 radlog(L_ERR, "FATAL: Failed to initialize wait mutex: %s",
758 * Create the hash table of child PID's
760 thread_pool.waiters = lrad_hash_table_create(pid_hash,
763 if (!thread_pool.waiters) {
764 radlog(L_ERR, "FATAL: Failed to set up wait hash");
769 pool_cf = cf_section_find("thread");
770 if (pool_cf != NULL) {
772 * FIXME: Check for errors?
774 cf_section_parse(pool_cf, NULL, thread_config);
778 * The pool has already been initialized. Don't spawn
779 * new threads, and don't forget about forked children,
781 if (pool_initialized) {
786 * Initialize the queue of requests.
788 memset(&thread_pool.semaphore, 0, sizeof(thread_pool.semaphore));
789 rcode = sem_init(&thread_pool.semaphore, 0, SEMAPHORE_LOCKED);
791 radlog(L_ERR|L_CONS, "FATAL: Failed to initialize semaphore: %s",
796 rcode = pthread_mutex_init(&thread_pool.queue_mutex,NULL);
798 radlog(L_ERR, "FATAL: Failed to initialize queue mutex: %s",
804 * Allocate multiple fifos.
806 for (i = 0; i < NUM_FIFOS; i++) {
807 thread_pool.fifo[i] = lrad_fifo_create(65536, NULL);
808 if (!thread_pool.fifo[i]) {
809 radlog(L_ERR, "FATAL: Failed to set up request fifo");
814 #ifdef HAVE_OPENSSL_CRYPTO_H
816 * If we're linking with OpenSSL too, then we need
817 * to set up the mutexes and enable the thread callbacks.
819 if (!setup_ssl_mutexes()) {
820 radlog(L_ERR, "FATAL: Failed to set up SSL mutexes");
827 * Create a number of waiting threads.
829 * If we fail while creating them, do something intelligent.
831 for (i = 0; i < thread_pool.start_threads; i++) {
832 if (spawn_thread(now) == NULL) {
837 DEBUG2("Thread pool initialized");
838 pool_initialized = TRUE;
844 * Assign a new request to a free thread.
846 * If there isn't a free thread, then try to create a new one,
847 * up to the configured limits.
/*
 * Hand a new request to the pool. With spawning disabled, run it
 * synchronously via rad_respond(). Otherwise enqueue it and, if all
 * existing threads look busy (unlocked estimate), try to spawn one
 * more worker up to max_threads.
 */
849 int thread_pool_addrequest(REQUEST *request, RAD_REQUEST_FUNP fun)
852 * We've been told not to spawn threads, so don't.
854 if (!thread_pool.spawn_flag) {
855 request->finished = rad_respond(request, fun);
858 * Requests that care about child process exit
859 * codes have already either called
860 * rad_waitpid(), or they've given up.
867 * Add the new request to the queue.
869 if (!request_enqueue(request, fun)) return 0;
872 * If the thread pool is busy handling requests, then
873 * try to spawn another one. We don't acquire the mutex
874 * before reading active_threads, so our thread count is
875 * just an estimate. It's fine to go ahead and spawn an
876 * extra thread in that case.
877 * NOTE: the log message may be in error since active_threads
878 * is an estimate, but it's only in error about the thread
879 * count, not about the fact that we can't create a new one.
881 if (thread_pool.active_threads == thread_pool.total_threads) {
882 if (spawn_thread(request->timestamp) == NULL) {
884 "The maximum number of threads (%d) are active, cannot spawn new thread to handle request",
885 thread_pool.max_threads);
894 * Check the min_spare_threads and max_spare_threads.
896 * If there are too many or too few threads waiting, then we
897 * either create some more, or delete some.
/*
 * Periodic pool maintenance, run from the main thread: reap handles
 * marked THREAD_EXITED, spawn threads if spares fell below
 * min_spare_threads, cancel ONE idle thread per call if spares
 * exceed max_spare_threads (rate-limited by cleanup_delay and by
 * at-most-once per second), and cancel threads that exceeded
 * max_requests_per_thread. Cancelled threads are woken with an
 * extra sem_post() so they notice THREAD_CANCELLED.
 */
899 int thread_pool_clean(time_t now)
903 THREAD_HANDLE *handle, *next;
905 static time_t last_cleaned = 0;
908 * Loop over the thread pool deleting exited threads.
910 for (handle = thread_pool.head; handle; handle = next) {
914 * Maybe we've asked the thread to exit, and it
917 if (handle->status == THREAD_EXITED) {
918 delete_thread(handle);
923 * We don't need a mutex lock here, as we're reading
924 * active_threads, and not modifying it. We want a close
925 * approximation of the number of active threads, and this
928 active_threads = thread_pool.active_threads;
929 spare = thread_pool.total_threads - active_threads;
931 static int old_total = -1;
932 static int old_active = -1;
934 if ((old_total != thread_pool.total_threads) ||
935 (old_active != active_threads)) {
936 DEBUG2("Threads: total/active/spare threads = %d/%d/%d",
937 thread_pool.total_threads, active_threads, spare);
938 old_total = thread_pool.total_threads;
939 old_active = active_threads;
944 * If there are too few spare threads, create some more.
946 if (spare < thread_pool.min_spare_threads) {
947 total = thread_pool.min_spare_threads - spare;
949 DEBUG2("Threads: Spawning %d spares", total);
951 * Create a number of spare threads.
953 for (i = 0; i < total; i++) {
954 handle = spawn_thread(now);
955 if (handle == NULL) {
961 * And exit, as there can't be too many spare threads.
967 * Only delete spare threads if we haven't already done
970 if (now == last_cleaned) {
976 * Only delete the spare threads if sufficient time has
977 * passed since we last created one. This helps to minimize
978 * the amount of create/delete cycles.
980 if ((now - thread_pool.time_last_spawned) < thread_pool.cleanup_delay) {
985 * If there are too many spare threads, delete one.
987 * Note that we only delete ONE at a time, instead of
988 * wiping out many. This allows the excess servers to
989 * be slowly reaped, just in case the load spike comes again.
991 if (spare > thread_pool.max_spare_threads) {
993 spare -= thread_pool.max_spare_threads;
995 DEBUG2("Threads: deleting 1 spare out of %d spares", spare);
998 * Walk through the thread pool, deleting the
999 * first idle thread we come across.
1001 for (handle = thread_pool.head; (handle != NULL) && (spare > 0) ; handle = next) {
1002 next = handle->next;
1005 * If the thread is not handling a
1006 * request, but still live, then tell it
1009 * It will eventually wake up, and realize
1010 * it's been told to commit suicide.
1012 if ((handle->request == NULL) &&
1013 (handle->status == THREAD_RUNNING)) {
1014 handle->status = THREAD_CANCELLED;
1016 * Post an extra semaphore, as a
1017 * signal to wake up, and exit.
1019 sem_post(&thread_pool.semaphore);
1027 * If the thread has handled too many requests, then make it
1030 if (thread_pool.max_requests_per_thread > 0) {
1031 for (handle = thread_pool.head; handle; handle = next) {
1032 next = handle->next;
1035 * Not handling a request, but otherwise
1036 * live, we can kill it.
1038 if ((handle->request == NULL) &&
1039 (handle->status == THREAD_RUNNING) &&
1040 (handle->request_count > thread_pool.max_requests_per_thread)) {
1041 handle->status = THREAD_CANCELLED;
1042 sem_post(&thread_pool.semaphore);
1048 * Otherwise everything's kosher. There are not too few,
1049 * or too many spare threads. Exit happily.
1056 * Thread wrapper for fork().
/*
 * Thread-safe fork() wrapper: falls through to plain fork() when the
 * pool isn't initialized; otherwise reaps any finished children
 * first, then records the new child's pid in the 'waiters' hash
 * (under wait_mutex) so rad_waitpid() can collect its exit status.
 * The 1024-entry check bounds the waiter table.
 */
1058 pid_t rad_fork(void)
1062 if (!pool_initialized) return fork();
1064 reap_children(); /* be nice to non-wait thingies */
1066 if (lrad_hash_table_num_elements(thread_pool.waiters) >= 1024) {
1071 * Fork & save the PID for later reaping.
1074 if (child_pid > 0) {
1078 tf = rad_malloc(sizeof(*tf));
1079 memset(tf, 0, sizeof(*tf));
1081 tf->pid = child_pid;
1083 pthread_mutex_lock(&thread_pool.wait_mutex);
1084 rcode = lrad_hash_table_insert(thread_pool.waiters, tf);
1085 pthread_mutex_unlock(&thread_pool.wait_mutex);
1088 radlog(L_ERR, "Failed to store PID, creating what will be a zombie process %d",
1094 * Return whatever we were told.
1101 * Wait 10 seconds at most for a child to exit, then give up.
/*
 * Thread-safe waitpid() companion to rad_fork(): polls the 'waiters'
 * hash for the child's recorded status, sleeping 100ms between
 * checks (100 iterations = 10 seconds max), then gives up and
 * removes the entry. Falls through to plain waitpid() when the pool
 * isn't initialized.
 */
1103 pid_t rad_waitpid(pid_t pid, int *status)
1106 thread_fork_t mytf, *tf;
1108 if (!pool_initialized) return waitpid(pid, status, 0);
1110 if (pid <= 0) return -1;
1114 pthread_mutex_lock(&thread_pool.wait_mutex);
1115 tf = lrad_hash_table_finddata(thread_pool.waiters, &mytf);
1116 pthread_mutex_unlock(&thread_pool.wait_mutex);
1120 for (i = 0; i < 100; i++) {
1124 *status = tf->status;
1126 pthread_mutex_lock(&thread_pool.wait_mutex);
1127 lrad_hash_table_delete(thread_pool.waiters, &mytf);
1128 pthread_mutex_unlock(&thread_pool.wait_mutex);
1131 usleep(100000); /* sleep for 1/10 of a second */
1135 * 10 seconds have passed, give up on the child.
1137 pthread_mutex_lock(&thread_pool.wait_mutex);
1138 lrad_hash_table_delete(thread_pool.waiters, &mytf);
1139 pthread_mutex_unlock(&thread_pool.wait_mutex);
1144 #else /* HAVE_PTHREAD_H */
1146 * "thread" code when we don't have threads.
/*
 * Threadless fallbacks (built when HAVE_PTHREAD_H is not defined):
 * thread_pool_init() is a no-op, and thread_pool_addrequest()
 * handles the request synchronously via rad_respond().
 */
1148 int thread_pool_init(int spawn_flag)
1154 * call "radrespond".
1156 int thread_pool_addrequest(REQUEST *request, RAD_REQUEST_FUNP fun)
1158 rad_respond(request, fun);
1162 #endif /* HAVE_PTHREAD_H */