/*
 * threads.c	request threading support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Copyright 2000  The FreeRADIUS server project
 * Copyright 2000  Alan DeKok <aland@ox.org>
 */
24 #include "libradius.h"
30 #include <semaphore.h>
33 #ifdef HAVE_SYS_WAIT_H
38 #include "rad_assert.h"
41 static const char rcsid[] =
44 #define SEMAPHORE_LOCKED (0)
45 #define SEMAPHORE_UNLOCKED (1)
47 #define THREAD_RUNNING (1)
48 #define THREAD_CANCELLED (2)
49 #define THREAD_EXITED (3)
52 * A data structure which contains the information about
55 * pthread_id pthread id
56 * thread_num server thread number, 1...number of threads
57 * semaphore used to block the thread until a request comes in
58 * status is the thread running or exited?
59 * request_count the number of requests that this thread has handled
60 * timestamp when the thread started executing.
62 typedef struct THREAD_HANDLE {
63 struct THREAD_HANDLE *prev;
64 struct THREAD_HANDLE *next;
68 unsigned int request_count;
74 * For the request queue.
76 typedef struct request_queue_t {
83 * A data structure to manage the thread pool. There's no real
84 * need for a data structure, but it makes things conceptually
87 typedef struct THREAD_POOL {
95 int min_spare_threads;
96 int max_spare_threads;
97 unsigned int max_requests_per_thread;
98 unsigned long request_count;
99 time_t time_last_spawned;
103 * All threads wait on this semaphore, for requests
104 * to enter the queue.
109 * To ensure only one thread at a time touches the queue.
111 pthread_mutex_t mutex;
114 int queue_head; /* first filled entry */
115 int queue_tail; /* first empty entry */
117 request_queue_t *queue;
120 static THREAD_POOL thread_pool;
121 static int pool_initialized = FALSE;
/*
 *	Data structure to keep track of which child forked which
 *	request.  If we cared, we'd keep a list of "free" and "active"
 *	entries.
 *
 *	FIXME: Have a time out, so we clean up entries which haven't
 *	been picked up by their forker.
 */
typedef struct rad_fork_t {
	pthread_t	thread_id;	/* thread that did the fork */
	pid_t		child_pid;	/* PID of the forked child */
	sem_t		child_done;	/* posted once the exit status is saved */
	int		status;		/* exit status of the child */
	time_t		time_forked;	/* when the fork happened */
} rad_fork_t;

/*
 *	This MUST be a power of 2 for the PID_2_ARRAY masking to work!
 */
#define NUM_FORKERS (8192)
static rad_fork_t forkers[NUM_FORKERS];

/*
 *	This mutex ensures that only one thread is doing certain
 *	kinds of magic to the previous array.
 */
static pthread_mutex_t fork_mutex;
153 * A mapping of configuration file names to internal integers
155 static const CONF_PARSER thread_config[] = {
156 { "start_servers", PW_TYPE_INTEGER, 0, &thread_pool.start_threads, "5" },
157 { "max_servers", PW_TYPE_INTEGER, 0, &thread_pool.max_threads, "32" },
158 { "min_spare_servers", PW_TYPE_INTEGER, 0, &thread_pool.min_spare_threads, "3" },
159 { "max_spare_servers", PW_TYPE_INTEGER, 0, &thread_pool.max_spare_threads, "10" },
160 { "max_requests_per_server", PW_TYPE_INTEGER, 0, &thread_pool.max_requests_per_thread, "0" },
161 { "cleanup_delay", PW_TYPE_INTEGER, 0, &thread_pool.cleanup_delay, "5" },
162 { NULL, -1, 0, NULL, NULL }
167 * Add a request to the list of waiting requests.
168 * This function gets called ONLY from the main handler thread...
170 * This function should never fail.
172 static void request_enqueue(REQUEST *request, RAD_REQUEST_FUNP fun)
176 pthread_mutex_lock(&thread_pool.mutex);
178 thread_pool.request_count++;
181 * If the queue is empty, re-set the indices to zero,
182 * for no particular reason...
184 if ((thread_pool.queue_head == thread_pool.queue_tail) &&
185 (thread_pool.queue_head != 0)) {
186 thread_pool.queue_head = thread_pool.queue_tail = 0;
190 * If the queue is full, die.
192 * The math is to take into account the fact that it's a
195 num_entries = ((thread_pool.queue_tail + thread_pool.queue_size) -
196 thread_pool.queue_head) % thread_pool.queue_size;
197 if (num_entries == (thread_pool.queue_size - 1)) {
198 request_queue_t *new_queue;
201 * If the queue becomes larger than 65536,
202 * there's a serious problem.
204 if (thread_pool.queue_size >= 65536) {
205 pthread_mutex_unlock(&thread_pool.mutex);
208 * Mark the request as done.
210 radlog(L_ERR|L_CONS, "!!! ERROR !!! The server is blocked: discarding new request %d", request->number);
211 request->finished = TRUE;
216 * Malloc a new queue, doubled in size, copy the
217 * data from the current queue over to it, zero
218 * out the second half of the queue, free the old
219 * one, and replace thread_pool.queue with the
222 new_queue = rad_malloc(sizeof(*new_queue) * thread_pool.queue_size * 2);
223 memcpy(new_queue, thread_pool.queue,
224 sizeof(*new_queue) * thread_pool.queue_size);
225 memset(new_queue + sizeof(*new_queue) * thread_pool.queue_size,
226 0, sizeof(*new_queue) * thread_pool.queue_size);
228 free(thread_pool.queue);
229 thread_pool.queue = new_queue;
230 thread_pool.queue_size *= 2;
234 * Add the data to the queue tail, increment the tail,
235 * and signal the semaphore that there's another request
238 thread_pool.queue[thread_pool.queue_tail].request = request;
239 thread_pool.queue[thread_pool.queue_tail].fun = fun;
240 thread_pool.queue_tail++;
241 thread_pool.queue_tail &= (thread_pool.queue_size - 1);
243 pthread_mutex_unlock(&thread_pool.mutex);
246 * There's one more request in the queue.
248 * Note that we're not touching the queue any more, so
249 * the semaphore post is outside of the mutex. This also
250 * means that when the thread wakes up and tries to lock
251 * the mutex, it will be unlocked, and there won't be
255 sem_post(&thread_pool.semaphore);
261 * Remove a request from the queue.
263 static void request_dequeue(REQUEST **request, RAD_REQUEST_FUNP *fun)
265 pthread_mutex_lock(&thread_pool.mutex);
268 * Head & tail are the same. There's nothing in
271 if (thread_pool.queue_head == thread_pool.queue_tail) {
272 pthread_mutex_unlock(&thread_pool.mutex);
278 *request = thread_pool.queue[thread_pool.queue_head].request;
279 *fun = thread_pool.queue[thread_pool.queue_head].fun;
281 rad_assert(*request != NULL);
282 rad_assert((*request)->magic == REQUEST_MAGIC);
283 rad_assert(*fun != NULL);
285 thread_pool.queue_head++;
286 thread_pool.queue_head &= (thread_pool.queue_size - 1);
289 * FIXME: Check the request timestamp. If it's more than
290 * "clean_delay" seconds old, then discard the request,
291 * log an error, and try to de-queue another request.
293 * The main clean-up code won't delete the request from
294 * the request list, because it's not marked "finished"
298 * The thread is currently processing a request.
300 thread_pool.active_threads++;
302 pthread_mutex_unlock(&thread_pool.mutex);
305 * If the request is currently being processed, then that
306 * MAY be OK, if it's a proxy reply. In that case,
307 * sending the packet may result in a reply being
308 * received before that thread clears the child_pid.
310 * In that case, we busy-wait for the request to be free.
312 * We COULD push it onto the queue and try to grab
313 * another request, but what if this is the only request?
314 * What if there are multiple such packets with race
315 * conditions? We don't want to thrash the queue...
317 * This busy-wait is less than optimal, but it's simple,
318 * fail-safe, and it works.
320 if ((*request)->child_pid != NO_SUCH_CHILD_PID) {
323 #ifdef HAVE_PTHREAD_SIGMASK
324 sigset_t set, old_set;
327 * Block a large number of signals which could
328 * cause the select to return EINTR
331 sigaddset(&set, SIGPIPE);
332 sigaddset(&set, SIGCONT);
333 sigaddset(&set, SIGSTOP);
334 sigaddset(&set, SIGCHLD);
335 pthread_sigmask(SIG_BLOCK, &set, &old_set);
338 rad_assert((*request)->proxy_reply != NULL);
343 * Sleep for 100 milliseconds. If the other thread
344 * doesn't get serviced in this time, to clear
345 * the "child_pid" entry, then the server is too
348 for (count = 0; count < 10; count++) {
350 tv.tv_usec = 10000; /* sleep for 10 milliseconds */
353 * Portable sleep that's thread-safe.
355 * Don't worry about interrupts, as they're
358 select(0, NULL, NULL, NULL, &tv);
359 if ((*request)->child_pid == NO_SUCH_CHILD_PID) {
365 #ifdef HAVE_PTHREAD_SIGMASK
367 * Restore the original thread signal mask.
369 pthread_sigmask(SIG_SETMASK, &old_set, NULL);
373 radlog(L_ERR, "FATAL! Server is too busy to process requests");
383 * The main thread handler for requests.
385 * Wait on the semaphore until we have it, and process the request.
387 static void *request_handler_thread(void *arg)
389 RAD_REQUEST_FUNP fun;
390 THREAD_HANDLE *self = (THREAD_HANDLE *) arg;
391 #ifdef HAVE_PTHREAD_SIGMASK
395 * Block SIGHUP handling for the child threads.
397 * This ensures that only the main server thread will
398 * process HUP signals.
400 * If we don't have sigprocmask, then it shouldn't be
401 * a problem, either, as the sig_hup handler should check
402 * for this condition.
405 sigaddset(&set, SIGHUP);
406 sigaddset(&set, SIGINT);
407 sigaddset(&set, SIGQUIT);
408 sigaddset(&set, SIGTERM);
409 pthread_sigmask(SIG_BLOCK, &set, NULL);
413 * Loop forever, until told to exit.
417 * Wait to be signalled.
419 DEBUG2("Thread %d waiting to be assigned a request",
422 if (sem_wait(&thread_pool.semaphore) != 0) {
424 * Interrupted system call. Go back to
425 * waiting, but DON'T print out any more
428 if (errno == EINTR) {
429 DEBUG2("Re-wait %d", self->thread_num);
432 radlog(L_ERR, "Thread %d failed waiting for semaphore: %s: Exiting\n",
433 self->thread_num, strerror(errno));
437 DEBUG2("Thread %d got semaphore", self->thread_num);
440 * Try to grab a request from the queue.
442 * It may be empty, in which case we fail
445 request_dequeue(&self->request, &fun);
446 if (!self->request) continue;
448 self->request->child_pid = self->pthread_id;
449 self->request_count++;
451 DEBUG2("Thread %d handling request %d, (%d handled so far)",
452 self->thread_num, self->request->number,
453 self->request_count);
456 * Respond, and reset request->child_pid
458 rad_respond(self->request, fun);
459 self->request = NULL;
462 * Update the active threads.
464 pthread_mutex_lock(&thread_pool.mutex);
465 rad_assert(thread_pool.active_threads > 0);
466 thread_pool.active_threads--;
467 pthread_mutex_unlock(&thread_pool.mutex);
468 } while (self->status != THREAD_CANCELLED);
470 DEBUG2("Thread %d exiting...", self->thread_num);
473 * Do this as the LAST thing before exiting.
475 self->status = THREAD_EXITED;
481 * Take a THREAD_HANDLE, and delete it from the thread pool.
483 * This function is called ONLY from the main server thread.
485 static void delete_thread(THREAD_HANDLE *handle)
490 rad_assert(handle->request == NULL);
494 rad_assert(thread_pool.total_threads > 0);
495 thread_pool.total_threads--;
498 * Remove the handle from the list.
501 rad_assert(thread_pool.head == handle);
502 thread_pool.head = next;
508 rad_assert(thread_pool.tail == handle);
509 thread_pool.tail = prev;
514 DEBUG2("Deleting thread %d", handle->thread_num);
517 * This thread has exited. Delete any additional
518 * resources associated with it.
522 * Free the memory, now that we're sure the thread
530 * Spawn a new thread, and place it in the thread pool.
532 * The thread is started initially in the blocked state, waiting
535 static THREAD_HANDLE *spawn_thread(time_t now)
538 THREAD_HANDLE *handle;
542 * Ensure that we don't spawn too many threads.
544 if (thread_pool.total_threads >= thread_pool.max_threads) {
545 DEBUG2("Thread spawn failed. Maximum number of threads (%d) already running.", thread_pool.max_threads);
550 * Allocate a new thread handle.
552 handle = (THREAD_HANDLE *) rad_malloc(sizeof(THREAD_HANDLE));
553 memset(handle, 0, sizeof(THREAD_HANDLE));
556 handle->pthread_id = NO_SUCH_CHILD_PID;
557 handle->thread_num = thread_pool.max_thread_num++;
558 handle->request_count = 0;
559 handle->status = THREAD_RUNNING;
560 handle->timestamp = time(NULL);
563 * Initialize the thread's attributes to detached.
565 * We could call pthread_detach() later, but if the thread
566 * exits between the create & detach calls, it will need to
567 * be joined, which will never happen.
569 pthread_attr_init(&attr);
570 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
573 * Create the thread detached, so that it cleans up it's
574 * own memory when it exits.
576 * Note that the function returns non-zero on error, NOT
577 * -1. The return code is the error, and errno isn't set.
579 rcode = pthread_create(&handle->pthread_id, &attr,
580 request_handler_thread, handle);
582 radlog(L_ERR|L_CONS, "FATAL: Thread create failed: %s",
586 pthread_attr_destroy(&attr);
589 * One more thread to go into the list.
591 thread_pool.total_threads++;
592 DEBUG2("Thread spawned new child %d. Total threads in pool: %d",
593 handle->thread_num, thread_pool.total_threads);
596 * Add the thread handle to the tail of the thread pool list.
598 if (thread_pool.tail) {
599 thread_pool.tail->next = handle;
600 handle->prev = thread_pool.tail;
601 thread_pool.tail = handle;
603 rad_assert(thread_pool.head == NULL);
604 thread_pool.head = thread_pool.tail = handle;
608 * Update the time we last spawned a thread.
610 thread_pool.time_last_spawned = now;
613 * And return the new handle to the caller.
619 * Temporary function to prevent server from executing a SIGHUP
620 * until all threads are finished handling requests. This returns
621 * the number of active threads to 'radiusd.c'.
623 int total_active_threads(void)
626 THREAD_HANDLE *handle;
628 for (handle = thread_pool.head; handle != NULL; handle = handle->next){
629 if (handle->request != NULL) {
637 * Allocate the thread pool, and seed it with an initial number
640 * FIXME: What to do on a SIGHUP???
642 int thread_pool_init(void)
645 CONF_SECTION *pool_cf;
648 DEBUG("Initializing the thread pool...");
652 * After a SIGHUP, we don't over-write the previous values.
654 if (!pool_initialized) {
656 * Initialize the thread pool to some reasonable values.
658 memset(&thread_pool, 0, sizeof(THREAD_POOL));
659 thread_pool.head = NULL;
660 thread_pool.tail = NULL;
661 thread_pool.total_threads = 0;
662 thread_pool.max_thread_num = 1;
663 thread_pool.cleanup_delay = 5;
666 pool_cf = cf_section_find("thread");
667 if (pool_cf != NULL) {
668 cf_section_parse(pool_cf, NULL, thread_config);
672 * Limit the maximum number of threads to the maximum
673 * number of forks we can do.
675 * FIXME: Make this code better...
677 if (thread_pool.max_threads >= NUM_FORKERS) {
678 thread_pool.max_threads = NUM_FORKERS;
683 * The pool has already been initialized. Don't spawn
684 * new threads, and don't forget about forked children,
686 if (pool_initialized) {
691 * Initialize the queue of requests.
693 rcode = sem_init(&thread_pool.semaphore, 0, SEMAPHORE_LOCKED);
695 radlog(L_ERR|L_CONS, "FATAL: Failed to initialize semaphore: %s",
700 rcode = pthread_mutex_init(&thread_pool.mutex,NULL);
702 radlog(L_ERR, "FATAL: Failed to initialize mutex: %s",
708 * Queue head & tail are set to zero by the memset,
711 * Allocate an initial queue, always as a power of 2.
713 thread_pool.queue_size = 256;
714 thread_pool.queue = rad_malloc(sizeof(*thread_pool.queue) *
715 thread_pool.queue_size);
716 memset(thread_pool.queue, 0, (sizeof(*thread_pool.queue) *
717 thread_pool.queue_size));
720 * Create a number of waiting threads.
722 * If we fail while creating them, do something intelligent.
724 for (i = 0; i < thread_pool.start_threads; i++) {
725 if (spawn_thread(now) == NULL) {
730 DEBUG2("Thread pool initialized");
731 pool_initialized = TRUE;
737 * Assign a new request to a free thread.
739 * If there isn't a free thread, then try to create a new one,
740 * up to the configured limits.
742 int thread_pool_addrequest(REQUEST *request, RAD_REQUEST_FUNP fun)
745 * If the thread pool is busy handling requests, then
746 * try to spawn another one.
748 if (thread_pool.active_threads == thread_pool.total_threads) {
749 if (spawn_thread(request->timestamp) == NULL) {
751 "The maximum number of threads (%d) are active, cannot spawn new thread to handle request",
752 thread_pool.max_threads);
758 * Add the new request to the queue.
760 request_enqueue(request, fun);
766 * Check the min_spare_threads and max_spare_threads.
768 * If there are too many or too few threads waiting, then we
769 * either create some more, or delete some.
771 int thread_pool_clean(time_t now)
775 THREAD_HANDLE *handle, *next;
777 static time_t last_cleaned = 0;
780 * Loop over the thread pool deleting exited threads.
782 for (handle = thread_pool.head; handle; handle = next) {
786 * Maybe we've asked the thread to exit, and it
789 if (handle->status == THREAD_EXITED) {
790 delete_thread(handle);
795 * We don't need a mutex lock here, as we're reading
796 * the location, and not modifying it. We want a close
797 * approximation of the number of active threads, and this
800 active_threads = thread_pool.active_threads;
801 spare = thread_pool.total_threads - active_threads;
803 static int old_total = -1;
804 static int old_active = -1;
806 if ((old_total != thread_pool.total_threads) ||
807 (old_active != active_threads)) {
808 DEBUG2("Threads: total/active/spare threads = %d/%d/%d",
809 thread_pool.total_threads, active_threads, spare);
810 old_total = thread_pool.total_threads;
811 old_active = active_threads;
816 * If there are too few spare threads, create some more.
818 if (spare < thread_pool.min_spare_threads) {
819 total = thread_pool.min_spare_threads - spare;
821 DEBUG2("Threads: Spawning %d spares", total);
823 * Create a number of spare threads.
825 for (i = 0; i < total; i++) {
826 handle = spawn_thread(now);
827 if (handle == NULL) {
833 * And exit, as there can't be too many spare threads.
839 * Only delete spare threads if we haven't already done
842 if (now == last_cleaned) {
848 * Only delete the spare threads if sufficient time has
849 * passed since we last created one. This helps to minimize
850 * the amount of create/delete cycles.
852 if ((now - thread_pool.time_last_spawned) < thread_pool.cleanup_delay) {
857 * If there are too many spare threads, delete one.
859 * Note that we only delete ONE at a time, instead of
860 * wiping out many. This allows the excess servers to
861 * be slowly reaped, just in case the load spike comes again.
863 if (spare > thread_pool.max_spare_threads) {
865 spare -= thread_pool.max_spare_threads;
867 DEBUG2("Threads: deleting 1 spare out of %d spares", spare);
870 * Walk through the thread pool, deleting the
871 * first idle thread we come across.
873 for (handle = thread_pool.head; (handle != NULL) && (spare > 0) ; handle = next) {
877 * If the thread is not handling a
878 * request, but still live, then tell it
881 * It will eventually wake up, and realize
882 * it's been told to commit suicide.
884 if ((handle->request == NULL) &&
885 (handle->status == THREAD_RUNNING)) {
886 handle->status = THREAD_CANCELLED;
888 * Post an extra semaphore, as a
889 * signal to wake up, and exit.
891 sem_post(&thread_pool.semaphore);
899 * If the thread has handled too many requests, then make it
902 if (thread_pool.max_requests_per_thread > 0) {
903 for (handle = thread_pool.head; handle; handle = next) {
907 * Not handling a request, but otherwise
908 * live, we can kill it.
910 if ((handle->request == NULL) &&
911 (handle->status == THREAD_RUNNING) &&
912 (handle->request_count > thread_pool.max_requests_per_thread)) {
913 handle->status = THREAD_CANCELLED;
914 sem_post(&thread_pool.semaphore);
920 * Otherwise everything's kosher. There are not too few,
921 * or too many spare threads. Exit happily.
926 static int exec_initialized = FALSE;
929 * Initialize the stuff for keeping track of child processes.
931 void rad_exec_init(void)
936 * Initialize the mutex used to remember calls to fork.
938 pthread_mutex_init(&fork_mutex, NULL);
941 * Initialize the data structure where we remember the
942 * mappings of thread ID && child PID to exit status.
944 for (i = 0; i < NUM_FORKERS; i++) {
945 forkers[i].thread_id = NO_SUCH_CHILD_PID;
946 forkers[i].child_pid = -1;
947 forkers[i].status = 0;
950 exec_initialized = TRUE;
954 * We use the PID number as a base for the array index, so that
955 * we can quickly turn the PID into a free array entry, instead
956 * of rooting blindly through the entire array.
958 #define PID_2_ARRAY(pid) (((int) pid ) & (NUM_FORKERS - 1))
961 * Thread wrapper for fork().
963 pid_t rad_fork(int exec_wait)
969 * The thread is NOT interested in waiting for the exit
970 * status of the child process, so we don't bother
971 * updating our kludgy array.
973 * Or, there no NO threads, so we can just do the fork
976 if (!exec_wait || !exec_initialized) {
981 * Block SIGCLHD until such time as we've saved the PID.
983 * Note that we block SIGCHLD for ALL threads associated
984 * with this process! This is to prevent race conditions!
987 sigaddset(&set, SIGCHLD);
988 sigprocmask(SIG_BLOCK, &set, NULL);
996 * We managed to fork. Let's see if we have a free
999 if (child_pid > 0) { /* parent */
1002 time_t now = time(NULL);
1005 * We store the information in the array
1006 * indexed by PID. This means that we have
1007 * on average an O(1) lookup to find the element,
1008 * instead of rooting through the entire array.
1010 i = PID_2_ARRAY(child_pid);
1014 * We may have multiple threads trying to find an
1015 * empty position, so we lock the array until
1016 * we've found an entry.
1018 pthread_mutex_lock(&fork_mutex);
1020 if (forkers[i].thread_id == NO_SUCH_CHILD_PID) {
1026 * Clean up any stale forked sessions.
1028 * This sometimes happens, for crazy reasons.
1030 if ((now - forkers[i].time_forked) > 30) {
1031 forkers[i].thread_id = NO_SUCH_CHILD_PID;
1034 * Grab the child's exit condition,
1037 waitpid(forkers[i].child_pid,
1040 sem_destroy(&forkers[i].child_done);
1046 * Increment it, within the array.
1049 i &= (NUM_FORKERS - 1);
1050 } while (i != PID_2_ARRAY(child_pid));
1053 * Arg. We did a fork, and there was nowhere to
1057 sigprocmask(SIG_UNBLOCK, &set, NULL);
1058 pthread_mutex_unlock(&fork_mutex);
1063 * In the parent, set the status, and create the
1066 forkers[found].status = -1;
1067 forkers[found].child_pid = child_pid;
1068 forkers[found].thread_id = pthread_self();
1069 forkers[found].time_forked = now;
1070 sem_init(&forkers[found].child_done, 0, SEMAPHORE_LOCKED);
1071 pthread_mutex_unlock(&fork_mutex);
1075 * Unblock SIGCHLD, now that there's no chance of bad entries
1078 sigprocmask(SIG_UNBLOCK, &set, NULL);
1081 * Return whatever we were told.
1087 * Thread wrapper for waitpid(), so threads can wait for
1088 * the PID they forked.
1090 pid_t rad_waitpid(pid_t pid, int *status, int options)
1094 pthread_t self = pthread_self();
1097 * We're only allowed to wait for a SPECIFIC pid.
1104 * Find the PID to wait for, starting at an index within
1105 * the array. This makes the lookups O(1) on average,
1106 * instead of O(n), when the array is filling up.
1109 i = PID_2_ARRAY(pid);
1112 * We were the ones who forked this specific
1115 if ((forkers[i].thread_id == self) &&
1116 (forkers[i].child_pid == pid)) {
1122 i &= (NUM_FORKERS - 1);
1123 } while (i != PID_2_ARRAY(pid));
1126 * No thread ID found: we're trying to wait for a child
1127 * we've never forked!
1134 * Wait for the signal that the child's status has been
1137 if (options == WNOHANG) {
1138 rcode = sem_trywait(&forkers[found].child_done);
1140 return 0; /* no child available */
1142 } else { /* wait forever */
1144 rcode = sem_wait(&forkers[found].child_done);
1145 if ((rcode != 0) && (errno == EINTR)) {
1151 * We've got the semaphore. Now destroy it.
1153 * FIXME: Maybe we want to set up the semaphores in advance,
1154 * to prevent the creation && deletion of lots of them,
1155 * if creating and deleting them is expensive.
1157 sem_destroy(&forkers[found].child_done);
1160 * Save the status BEFORE we re-set the thread ID.
1162 *status = forkers[found].status;
1165 * This next line taints the other array entries,
1166 * due to other threads re-using the data structure.
1168 forkers[found].thread_id = NO_SUCH_CHILD_PID;
1174 * Called by the main signal handler, to save the status of the child
1176 int rad_savepid(pid_t pid, int status)
1181 * Find the PID to wait for, starting at an index within
1182 * the array. This makes the lookups O(1) on average,
1183 * instead of O(n), when the array is filling up.
1185 i = PID_2_ARRAY(pid);
1188 * Do NOT lock the array, as nothing else sets the
1189 * status and posts the semaphore.
1193 * Any thread can get the sigchild...
1195 if ((forkers[i].thread_id != NO_SUCH_CHILD_PID) &&
1196 (forkers[i].child_pid == pid)) {
1198 * Save the status, THEN post the
1201 forkers[i].status = status;
1202 sem_post(&forkers[i].child_done);
1205 * FIXME: If the child is more than 60
1206 * seconds out of date, then delete it.
1208 * That is, we've forked, and the forker
1209 * is waiting nearly forever
1215 i &= (NUM_FORKERS - 1);
1216 } while (i != PID_2_ARRAY(pid));