/*
 * threads.c	request threading support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * Copyright 2000,2006  The FreeRADIUS server project
 * Copyright 2000  Alan DeKok <aland@ox.org>
 */
24 #include <freeradius-devel/ident.h>
27 #include <freeradius-devel/radiusd.h>
28 #include <freeradius-devel/rad_assert.h>
31 * Other OS's have sem_init, OS X doesn't.
33 #ifdef HAVE_SEMAPHORE_H
34 #include <semaphore.h>
38 #include <mach/task.h>
39 #include <mach/semaphore.h>
42 #define sem_t semaphore_t
44 #define sem_init(s,p,c) semaphore_create(mach_task_self(),s,SYNC_POLICY_FIFO,c)
46 #define sem_wait(s) semaphore_wait(*s)
48 #define sem_post(s) semaphore_signal(*s)
51 #ifdef HAVE_SYS_WAIT_H
57 #ifdef HAVE_OPENSSL_CRYPTO_H
58 #include <openssl/crypto.h>
60 #ifdef HAVE_OPENSSL_ERR_H
61 #include <openssl/err.h>
64 #define SEMAPHORE_LOCKED (0)
65 #define SEMAPHORE_UNLOCKED (1)
67 #define THREAD_RUNNING (1)
68 #define THREAD_CANCELLED (2)
69 #define THREAD_EXITED (3)
71 #define NUM_FIFOS RAD_LISTEN_MAX
75 * A data structure which contains the information about
78 * pthread_id pthread id
79 * thread_num server thread number, 1...number of threads
80 * semaphore used to block the thread until a request comes in
81 * status is the thread running or exited?
82 * request_count the number of requests that this thread has handled
83 * timestamp when the thread started executing.
85 typedef struct THREAD_HANDLE {
86 struct THREAD_HANDLE *prev;
87 struct THREAD_HANDLE *next;
91 unsigned int request_count;
97 * For the request queue.
99 typedef struct request_queue_t {
101 RAD_REQUEST_FUNP fun;
104 typedef struct thread_fork_t {
112 * A data structure to manage the thread pool. There's no real
113 * need for a data structure, but it makes things conceptually
116 typedef struct THREAD_POOL {
121 int active_threads; /* protected by queue_mutex */
125 int min_spare_threads;
126 int max_spare_threads;
127 unsigned int max_requests_per_thread;
128 unsigned long request_count;
129 time_t time_last_spawned;
133 pthread_mutex_t wait_mutex;
134 lrad_hash_table_t *waiters;
137 * All threads wait on this semaphore, for requests
138 * to enter the queue.
143 * To ensure only one thread at a time touches the queue.
145 pthread_mutex_t queue_mutex;
150 lrad_fifo_t *fifo[NUM_FIFOS];
153 static THREAD_POOL thread_pool;
154 static int pool_initialized = FALSE;
158 * A mapping of configuration file names to internal integers
160 static const CONF_PARSER thread_config[] = {
161 { "start_servers", PW_TYPE_INTEGER, 0, &thread_pool.start_threads, "5" },
162 { "max_servers", PW_TYPE_INTEGER, 0, &thread_pool.max_threads, "32" },
163 { "min_spare_servers", PW_TYPE_INTEGER, 0, &thread_pool.min_spare_threads, "3" },
164 { "max_spare_servers", PW_TYPE_INTEGER, 0, &thread_pool.max_spare_threads, "10" },
165 { "max_requests_per_server", PW_TYPE_INTEGER, 0, &thread_pool.max_requests_per_thread, "0" },
166 { "cleanup_delay", PW_TYPE_INTEGER, 0, &thread_pool.cleanup_delay, "5" },
167 { "max_queue_size", PW_TYPE_INTEGER, 0, &thread_pool.max_queue_size, "65536" },
168 { NULL, -1, 0, NULL, NULL }
172 #ifdef HAVE_OPENSSL_CRYPTO_H
175 * If we're linking against OpenSSL, then it is the
176 * duty of the application, if it is multithreaded,
177 * to provide OpenSSL with appropriate thread id
178 * and mutex locking functions
180 * Note: this only implements static callbacks.
181 * OpenSSL does not use dynamic locking callbacks
182 * right now, but may in the futiure, so we will have
183 * to add them at some point.
186 static pthread_mutex_t *ssl_mutexes = NULL;
/*
 *	Thread-id callback for OpenSSL: identify the calling
 *	thread by its pthread id.
 */
static unsigned long ssl_id_function(void)
{
	return (unsigned long) pthread_self();
}
193 static void ssl_locking_function(int mode, int n, const char *file, int line)
195 file = file; /* -Wunused */
196 line = line; /* -Wunused */
198 if (mode & CRYPTO_LOCK) {
199 pthread_mutex_lock(&(ssl_mutexes[n]));
201 pthread_mutex_unlock(&(ssl_mutexes[n]));
205 static int setup_ssl_mutexes(void)
209 ssl_mutexes = rad_malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t));
211 radlog(L_ERR, "Error allocating memory for SSL mutexes!");
215 for (i = 0; i < CRYPTO_num_locks(); i++) {
216 pthread_mutex_init(&(ssl_mutexes[i]), NULL);
219 CRYPTO_set_id_callback(ssl_id_function);
220 CRYPTO_set_locking_callback(ssl_locking_function);
228 * We don't want to catch SIGCHLD for a host of reasons.
230 * - exec_wait means that someone, somewhere, somewhen, will
231 * call waitpid(), and catch the child.
233 * - SIGCHLD is delivered to a random thread, not the one that
236 * - if another thread catches the child, we have to coordinate
237 * with the thread doing the waiting.
239 * - if we don't waitpid() for non-wait children, they'll be zombies,
240 * and will hang around forever.
243 static void reap_children(void)
247 thread_fork_t mytf, *tf;
250 pthread_mutex_lock(&thread_pool.wait_mutex);
253 pid = waitpid(0, &status, WNOHANG);
257 tf = lrad_hash_table_finddata(thread_pool.waiters, &mytf);
262 } while (lrad_hash_table_num_elements(thread_pool.waiters) > 0);
264 pthread_mutex_unlock(&thread_pool.wait_mutex);
268 * Add a request to the list of waiting requests.
269 * This function gets called ONLY from the main handler thread...
271 * This function should never fail.
273 static int request_enqueue(REQUEST *request, RAD_REQUEST_FUNP fun)
275 request_queue_t *entry;
277 pthread_mutex_lock(&thread_pool.queue_mutex);
279 thread_pool.request_count++;
281 if (thread_pool.num_queued >= thread_pool.max_queue_size) {
282 pthread_mutex_unlock(&thread_pool.queue_mutex);
285 * Mark the request as done.
287 radlog(L_ERR|L_CONS, "!!! ERROR !!! The server is blocked: discarding new request %d", request->number);
288 request->child_state = REQUEST_DONE;
292 entry = rad_malloc(sizeof(*entry));
293 entry->request = request;
297 * Push the request onto the appropriate fifo for that
299 if (!lrad_fifo_push(thread_pool.fifo[request->priority],
301 pthread_mutex_unlock(&thread_pool.queue_mutex);
302 radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
303 request->child_state = REQUEST_DONE;
308 * We've added an entry that didn't come from the detail
309 * file. Note that the child thread should signal the
310 * main worker thread again when the queue becomes empty.
312 if (request->listener->type != RAD_LISTEN_DETAIL) {
313 thread_pool.can_read_detail = FALSE;
316 thread_pool.num_queued++;
318 pthread_mutex_unlock(&thread_pool.queue_mutex);
321 * There's one more request in the queue.
323 * Note that we're not touching the queue any more, so
324 * the semaphore post is outside of the mutex. This also
325 * means that when the thread wakes up and tries to lock
326 * the mutex, it will be unlocked, and there won't be
329 sem_post(&thread_pool.semaphore);
335 * Remove a request from the queue.
337 static int request_dequeue(REQUEST **request, RAD_REQUEST_FUNP *fun)
339 RAD_LISTEN_TYPE i, start;
340 request_queue_t *entry;
344 pthread_mutex_lock(&thread_pool.queue_mutex);
347 * Clear old requests from all queues.
349 * We only do one pass over the queue, in order to
350 * amortize the work across the child threads. Since we
351 * do N checks for one request de-queued, the old
352 * requests will be quickly cleared.
354 for (i = 0; i < RAD_LISTEN_MAX; i++) {
355 entry = lrad_fifo_peek(thread_pool.fifo[i]);
357 (entry->request->master_state != REQUEST_STOP_PROCESSING)) {
361 * This entry was marked to be stopped. Acknowledge it.
363 entry = lrad_fifo_pop(thread_pool.fifo[i]);
364 rad_assert(entry != NULL);
365 entry->request->child_state = REQUEST_DONE;
371 * Pop results from the top of the queue
373 for (i = start; i < RAD_LISTEN_MAX; i++) {
374 entry = lrad_fifo_pop(thread_pool.fifo[i]);
382 pthread_mutex_unlock(&thread_pool.queue_mutex);
388 rad_assert(thread_pool.num_queued > 0);
389 thread_pool.num_queued--;
390 *request = entry->request;
394 rad_assert(*request != NULL);
395 rad_assert((*request)->magic == REQUEST_MAGIC);
396 rad_assert(*fun != NULL);
399 * If the request has sat in the queue for too long,
402 * The main clean-up code can't delete the request from
403 * the queue, and therefore won't clean it up until we
404 * have acknowledged it as "done".
406 if ((*request)->master_state == REQUEST_STOP_PROCESSING) {
407 (*request)->child_state = REQUEST_DONE;
412 * The thread is currently processing a request.
414 thread_pool.active_threads++;
416 pthread_mutex_unlock(&thread_pool.queue_mutex);
423 * The main thread handler for requests.
425 * Wait on the semaphore until we have it, and process the request.
427 static void *request_handler_thread(void *arg)
429 RAD_REQUEST_FUNP fun;
430 THREAD_HANDLE *self = (THREAD_HANDLE *) arg;
433 * Loop forever, until told to exit.
439 * Wait to be signalled.
441 DEBUG2("Thread %d waiting to be assigned a request",
444 if (sem_wait(&thread_pool.semaphore) != 0) {
446 * Interrupted system call. Go back to
447 * waiting, but DON'T print out any more
450 if (errno == EINTR) {
451 DEBUG2("Re-wait %d", self->thread_num);
454 radlog(L_ERR, "Thread %d failed waiting for semaphore: %s: Exiting\n",
455 self->thread_num, strerror(errno));
459 DEBUG2("Thread %d got semaphore", self->thread_num);
462 * Try to grab a request from the queue.
464 * It may be empty, in which case we fail
467 if (!request_dequeue(&self->request, &fun)) continue;
469 self->request->child_pid = self->pthread_id;
470 self->request_count++;
472 DEBUG2("Thread %d handling request %d, (%d handled so far)",
473 self->thread_num, self->request->number,
474 self->request_count);
476 radius_handle_request(self->request, fun);
479 * Update the active threads.
481 pthread_mutex_lock(&thread_pool.queue_mutex);
482 rad_assert(thread_pool.active_threads > 0);
483 thread_pool.active_threads--;
486 * If we're not currently allowed to read the
487 * detail file, AND there are no requests queued,
488 * THEN signal the main worker thread that
489 * there's at least one waiting thread (us) who
490 * can accept a packet from the detail file.
492 can_read_detail = FALSE;
493 if (!thread_pool.can_read_detail &&
494 (thread_pool.num_queued == 0)) {
495 can_read_detail = TRUE;
498 pthread_mutex_unlock(&thread_pool.queue_mutex);
501 * Do this out of the lock to be nice to everyone.
503 if (can_read_detail) {
504 radius_signal_self(RADIUS_SIGNAL_SELF_DETAIL);
507 } while (self->status != THREAD_CANCELLED);
509 DEBUG2("Thread %d exiting...", self->thread_num);
511 #ifdef HAVE_OPENSSL_ERR_H
513 * If we linked with OpenSSL, the application
514 * must remove the thread's error queue before
515 * exiting to prevent memory leaks.
521 * Do this as the LAST thing before exiting.
523 self->status = THREAD_EXITED;
529 * Take a THREAD_HANDLE, delete it from the thread pool and
530 * free its resources.
532 * This function is called ONLY from the main server thread,
533 * ONLY after the thread has exited.
535 static void delete_thread(THREAD_HANDLE *handle)
540 rad_assert(handle->request == NULL);
542 DEBUG2("Deleting thread %d", handle->thread_num);
546 rad_assert(thread_pool.total_threads > 0);
547 thread_pool.total_threads--;
550 * Remove the handle from the list.
553 rad_assert(thread_pool.head == handle);
554 thread_pool.head = next;
560 rad_assert(thread_pool.tail == handle);
561 thread_pool.tail = prev;
567 * Free the handle, now that it's no longer referencable.
574 * Spawn a new thread, and place it in the thread pool.
576 * The thread is started initially in the blocked state, waiting
579 static THREAD_HANDLE *spawn_thread(time_t now)
582 THREAD_HANDLE *handle;
586 * Ensure that we don't spawn too many threads.
588 if (thread_pool.total_threads >= thread_pool.max_threads) {
589 DEBUG2("Thread spawn failed. Maximum number of threads (%d) already running.", thread_pool.max_threads);
594 * Allocate a new thread handle.
596 handle = (THREAD_HANDLE *) rad_malloc(sizeof(THREAD_HANDLE));
597 memset(handle, 0, sizeof(THREAD_HANDLE));
600 handle->pthread_id = NO_SUCH_CHILD_PID;
601 handle->thread_num = thread_pool.max_thread_num++;
602 handle->request_count = 0;
603 handle->status = THREAD_RUNNING;
604 handle->timestamp = time(NULL);
607 * Initialize the thread's attributes to detached.
609 * We could call pthread_detach() later, but if the thread
610 * exits between the create & detach calls, it will need to
611 * be joined, which will never happen.
613 pthread_attr_init(&attr);
614 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
617 * Create the thread detached, so that it cleans up it's
618 * own memory when it exits.
620 * Note that the function returns non-zero on error, NOT
621 * -1. The return code is the error, and errno isn't set.
623 rcode = pthread_create(&handle->pthread_id, &attr,
624 request_handler_thread, handle);
626 radlog(L_ERR|L_CONS, "FATAL: Thread create failed: %s",
630 pthread_attr_destroy(&attr);
633 * One more thread to go into the list.
635 thread_pool.total_threads++;
636 DEBUG2("Thread spawned new child %d. Total threads in pool: %d",
637 handle->thread_num, thread_pool.total_threads);
640 * Add the thread handle to the tail of the thread pool list.
642 if (thread_pool.tail) {
643 thread_pool.tail->next = handle;
644 handle->prev = thread_pool.tail;
645 thread_pool.tail = handle;
647 rad_assert(thread_pool.head == NULL);
648 thread_pool.head = thread_pool.tail = handle;
652 * Update the time we last spawned a thread.
654 thread_pool.time_last_spawned = now;
657 * And return the new handle to the caller.
663 * Temporary function to prevent server from executing a SIGHUP
664 * until all threads are finished handling requests. This returns
665 * the number of active threads to 'radiusd.c'.
667 int total_active_threads(void)
670 * We don't acquire the mutex, so this is just an estimate.
671 * We can't return with the lock held, so there's no point
672 * in getting the guaranteed correct value; by the time
673 * the caller sees it, it can be wrong again.
675 return thread_pool.active_threads;
679 static uint32_t pid_hash(const void *data)
681 const thread_fork_t *tf = data;
683 return lrad_hash(&tf->pid, sizeof(tf->pid));
686 static int pid_cmp(const void *one, const void *two)
688 const thread_fork_t *a = one;
689 const thread_fork_t *b = two;
691 return (a->pid - b->pid);
695 * Allocate the thread pool, and seed it with an initial number
698 * FIXME: What to do on a SIGHUP???
700 int thread_pool_init(int spawn_flag)
703 CONF_SECTION *pool_cf;
706 DEBUG("Initializing the thread pool...");
710 * We're not spawning new threads, don't do
713 if (!spawn_flag) return 0;
716 * After a SIGHUP, we don't over-write the previous values.
718 if (!pool_initialized) {
720 * Initialize the thread pool to some reasonable values.
722 memset(&thread_pool, 0, sizeof(THREAD_POOL));
723 thread_pool.head = NULL;
724 thread_pool.tail = NULL;
725 thread_pool.total_threads = 0;
726 thread_pool.max_thread_num = 1;
727 thread_pool.cleanup_delay = 5;
728 thread_pool.spawn_flag = spawn_flag;
730 if ((pthread_mutex_init(&thread_pool.wait_mutex,NULL) != 0)) {
731 radlog(L_ERR, "FATAL: Failed to initialize wait mutex: %s",
737 * Create the hash table of child PID's
739 thread_pool.waiters = lrad_hash_table_create(pid_hash,
742 if (!thread_pool.waiters) {
743 radlog(L_ERR, "FATAL: Failed to set up wait hash");
748 pool_cf = cf_section_find("thread");
750 radlog(L_ERR, "FATAL: Attempting to start in multi-threaded mode with no thread configuration in radiusd.conf");
754 if (cf_section_parse(pool_cf, NULL, thread_config) < 0) {
759 * Catch corner cases.
761 if (thread_pool.min_spare_threads < 1)
762 thread_pool.min_spare_threads = 1;
763 if (thread_pool.max_spare_threads < 1)
764 thread_pool.max_spare_threads = 1;
765 if (thread_pool.max_spare_threads < thread_pool.min_spare_threads)
766 thread_pool.max_spare_threads = thread_pool.min_spare_threads;
769 * The pool has already been initialized. Don't spawn
770 * new threads, and don't forget about forked children,
772 if (pool_initialized) {
777 * Initialize the queue of requests.
779 memset(&thread_pool.semaphore, 0, sizeof(thread_pool.semaphore));
780 rcode = sem_init(&thread_pool.semaphore, 0, SEMAPHORE_LOCKED);
782 radlog(L_ERR|L_CONS, "FATAL: Failed to initialize semaphore: %s",
787 rcode = pthread_mutex_init(&thread_pool.queue_mutex,NULL);
789 radlog(L_ERR, "FATAL: Failed to initialize queue mutex: %s",
795 * Allocate multiple fifos.
797 for (i = 0; i < RAD_LISTEN_MAX; i++) {
798 thread_pool.fifo[i] = lrad_fifo_create(65536, NULL);
799 if (!thread_pool.fifo[i]) {
800 radlog(L_ERR, "FATAL: Failed to set up request fifo");
805 #ifdef HAVE_OPENSSL_CRYPTO_H
807 * If we're linking with OpenSSL too, then we need
808 * to set up the mutexes and enable the thread callbacks.
810 if (!setup_ssl_mutexes()) {
811 radlog(L_ERR, "FATAL: Failed to set up SSL mutexes");
818 * Create a number of waiting threads.
820 * If we fail while creating them, do something intelligent.
822 for (i = 0; i < thread_pool.start_threads; i++) {
823 if (spawn_thread(now) == NULL) {
828 DEBUG2("Thread pool initialized");
829 pool_initialized = TRUE;
835 * Assign a new request to a free thread.
837 * If there isn't a free thread, then try to create a new one,
838 * up to the configured limits.
840 int thread_pool_addrequest(REQUEST *request, RAD_REQUEST_FUNP fun)
843 * We've been told not to spawn threads, so don't.
845 if (!thread_pool.spawn_flag) {
846 radius_handle_request(request, fun);
849 * Requests that care about child process exit
850 * codes have already either called
851 * rad_waitpid(), or they've given up.
858 * Add the new request to the queue.
860 if (!request_enqueue(request, fun)) return 0;
863 * If the thread pool is busy handling requests, then
864 * try to spawn another one. We don't acquire the mutex
865 * before reading active_threads, so our thread count is
866 * just an estimate. It's fine to go ahead and spawn an
867 * extra thread in that case.
868 * NOTE: the log message may be in error since active_threads
869 * is an estimate, but it's only in error about the thread
870 * count, not about the fact that we can't create a new one.
872 if (thread_pool.active_threads == thread_pool.total_threads) {
873 if (spawn_thread(request->timestamp) == NULL) {
875 "The maximum number of threads (%d) are active, cannot spawn new thread to handle request",
876 thread_pool.max_threads);
885 * Check the min_spare_threads and max_spare_threads.
887 * If there are too many or too few threads waiting, then we
888 * either create some more, or delete some.
890 int thread_pool_clean(time_t now)
894 THREAD_HANDLE *handle, *next;
896 static time_t last_cleaned = 0;
899 * Loop over the thread pool deleting exited threads.
901 for (handle = thread_pool.head; handle; handle = next) {
905 * Maybe we've asked the thread to exit, and it
908 if (handle->status == THREAD_EXITED) {
909 delete_thread(handle);
914 * We don't need a mutex lock here, as we're reading
915 * active_threads, and not modifying it. We want a close
916 * approximation of the number of active threads, and this
919 active_threads = thread_pool.active_threads;
920 spare = thread_pool.total_threads - active_threads;
922 static int old_total = -1;
923 static int old_active = -1;
925 if ((old_total != thread_pool.total_threads) ||
926 (old_active != active_threads)) {
927 DEBUG2("Threads: total/active/spare threads = %d/%d/%d",
928 thread_pool.total_threads, active_threads, spare);
929 old_total = thread_pool.total_threads;
930 old_active = active_threads;
935 * If there are too few spare threads, create some more.
937 if (spare < thread_pool.min_spare_threads) {
938 total = thread_pool.min_spare_threads - spare;
940 DEBUG2("Threads: Spawning %d spares", total);
942 * Create a number of spare threads.
944 for (i = 0; i < total; i++) {
945 handle = spawn_thread(now);
946 if (handle == NULL) {
952 * And exit, as there can't be too many spare threads.
958 * Only delete spare threads if we haven't already done
961 if (now == last_cleaned) {
967 * Only delete the spare threads if sufficient time has
968 * passed since we last created one. This helps to minimize
969 * the amount of create/delete cycles.
971 if ((now - thread_pool.time_last_spawned) < thread_pool.cleanup_delay) {
976 * If there are too many spare threads, delete one.
978 * Note that we only delete ONE at a time, instead of
979 * wiping out many. This allows the excess servers to
980 * be slowly reaped, just in case the load spike comes again.
982 if (spare > thread_pool.max_spare_threads) {
984 spare -= thread_pool.max_spare_threads;
986 DEBUG2("Threads: deleting 1 spare out of %d spares", spare);
989 * Walk through the thread pool, deleting the
990 * first idle thread we come across.
992 for (handle = thread_pool.head; (handle != NULL) && (spare > 0) ; handle = next) {
996 * If the thread is not handling a
997 * request, but still live, then tell it
1000 * It will eventually wake up, and realize
1001 * it's been told to commit suicide.
1003 if ((handle->request == NULL) &&
1004 (handle->status == THREAD_RUNNING)) {
1005 handle->status = THREAD_CANCELLED;
1007 * Post an extra semaphore, as a
1008 * signal to wake up, and exit.
1010 sem_post(&thread_pool.semaphore);
1018 * If the thread has handled too many requests, then make it
1021 if (thread_pool.max_requests_per_thread > 0) {
1022 for (handle = thread_pool.head; handle; handle = next) {
1023 next = handle->next;
1026 * Not handling a request, but otherwise
1027 * live, we can kill it.
1029 if ((handle->request == NULL) &&
1030 (handle->status == THREAD_RUNNING) &&
1031 (handle->request_count > thread_pool.max_requests_per_thread)) {
1032 handle->status = THREAD_CANCELLED;
1033 sem_post(&thread_pool.semaphore);
1039 * Otherwise everything's kosher. There are not too few,
1040 * or too many spare threads. Exit happily.
1047 * Thread wrapper for fork().
1049 pid_t rad_fork(void)
1053 if (!pool_initialized) return fork();
1055 reap_children(); /* be nice to non-wait thingies */
1057 if (lrad_hash_table_num_elements(thread_pool.waiters) >= 1024) {
1062 * Fork & save the PID for later reaping.
1065 if (child_pid > 0) {
1069 tf = rad_malloc(sizeof(*tf));
1070 memset(tf, 0, sizeof(*tf));
1072 tf->pid = child_pid;
1074 pthread_mutex_lock(&thread_pool.wait_mutex);
1075 rcode = lrad_hash_table_insert(thread_pool.waiters, tf);
1076 pthread_mutex_unlock(&thread_pool.wait_mutex);
1079 radlog(L_ERR, "Failed to store PID, creating what will be a zombie process %d",
1085 * Return whatever we were told.
1092 * Wait 10 seconds at most for a child to exit, then give up.
1094 pid_t rad_waitpid(pid_t pid, int *status)
1097 thread_fork_t mytf, *tf;
1099 if (!pool_initialized) return waitpid(pid, status, 0);
1101 if (pid <= 0) return -1;
1105 pthread_mutex_lock(&thread_pool.wait_mutex);
1106 tf = lrad_hash_table_finddata(thread_pool.waiters, &mytf);
1107 pthread_mutex_unlock(&thread_pool.wait_mutex);
1111 for (i = 0; i < 100; i++) {
1115 *status = tf->status;
1117 pthread_mutex_lock(&thread_pool.wait_mutex);
1118 lrad_hash_table_delete(thread_pool.waiters, &mytf);
1119 pthread_mutex_unlock(&thread_pool.wait_mutex);
1122 usleep(100000); /* sleep for 1/10 of a second */
1126 * 10 seconds have passed, give up on the child.
1128 pthread_mutex_lock(&thread_pool.wait_mutex);
1129 lrad_hash_table_delete(thread_pool.waiters, &mytf);
1130 pthread_mutex_unlock(&thread_pool.wait_mutex);
1135 #else /* HAVE_PTHREAD_H */
1137 * "thread" code when we don't have threads.
1139 int thread_pool_init(int spawn_flag)
1145 * call "radrespond".
1147 int thread_pool_addrequest(REQUEST *request, RAD_REQUEST_FUNP fun)
1149 radius_handle_request(request, fun);
1153 #endif /* HAVE_PTHREAD_H */