1 /*****************************************************************************
5 * Description: Implements a credential caching layer to ease the loading
6 * on the authentication mechanisms.
8 * Copyright (C) 2003 Jeremy Rumpf
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
21 * THIS SOFTWARE IS PROVIDED ``AS IS''. ANY EXPRESS OR IMPLIED WARRANTIES,
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL JEREMY RUMPF OR ANY CONTRIBUTER TO THIS SOFTWARE BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE
33 * jrumpf@heavyload.net
35 *****************************************************************************/
37 /****************************************
39 *****************************************/
40 #include "saslauthd.h"
43 #include <sys/types.h>
57 #include "md5global.h"
58 #include "saslauthd_md5.h"
60 /****************************************
62 *****************************************/
63 static struct mm_ctl mm;
64 static struct lock_ctl lock;
65 static struct bucket *table = NULL;
66 static struct stats *table_stats = NULL;
67 static unsigned int table_size = 0;
68 static unsigned int table_timeout = 0;
70 /****************************************
71 * flags global from saslauthd-main.c
72 * run_path global from saslauthd-main.c
73 * tx_rec() function from utils.c
74 * logger() function from utils.c
75 *****************************************/
77 /*************************************************************
78 * The initialization function. This function will setup
79 * the hash table's memory region, initialize the table, etc.
80 **************************************************************/
81 int cache_init(void) {
86 if (!(flags & CACHE_ENABLED))
89 memset(cache_magic, 0, sizeof(cache_magic));
90 strlcpy(cache_magic, CACHE_CACHE_MAGIC, sizeof(cache_magic));
92 /**************************************************************
93 * Compute the size of the hash table. This and a stats
94 * struct will make up the memory region.
95 **************************************************************/
98 table_size = CACHE_DEFAULT_TABLE_SIZE;
100 bytes = (table_size * CACHE_MAX_BUCKETS_PER * sizeof(struct bucket)) \
101 + sizeof(struct stats) + 256;
104 if ((base = cache_alloc_mm(bytes)) == NULL)
107 if (table_timeout == 0)
108 table_timeout = CACHE_DEFAULT_TIMEOUT;
110 if (flags & VERBOSE) {
111 logger(L_DEBUG, L_FUNC, "bucket size: %d bytes",
112 sizeof(struct bucket));
113 logger(L_DEBUG, L_FUNC, "stats size : %d bytes",
114 sizeof(struct stats));
115 logger(L_DEBUG, L_FUNC, "timeout : %d seconds",
117 logger(L_DEBUG, L_FUNC, "cache table: %d total bytes",
119 logger(L_DEBUG, L_FUNC, "cache table: %d slots",
121 logger(L_DEBUG, L_FUNC, "cache table: %d buckets",
122 table_size * CACHE_MAX_BUCKETS_PER);
125 /**************************************************************
126 * At the top of the region is the magic and stats struct. The
127 * slots follow. Due to locking, the counters in the stats
128 * struct will not be entirely accurate.
129 **************************************************************/
131 memset(base, 0, bytes);
133 memcpy(base, cache_magic, 64);
134 table_stats = (void *)((char *)base + 64);
135 table_stats->table_size = table_size;
136 table_stats->max_buckets_per = CACHE_MAX_BUCKETS_PER;
137 table_stats->sizeof_bucket = sizeof(struct bucket);
138 table_stats->timeout = table_timeout;
139 table_stats->bytes = bytes;
141 table = (void *)((char *)table_stats + 128);
143 /**************************************************************
144 * Last, initialize the hash table locking.
145 **************************************************************/
147 if (cache_init_lock() != 0)
153 /*************************************************************
154 * Here we'll take some credentials and run them through
155 * the hash table. If we have a valid hit then all is good
156 * return CACHE_OK. If we don't get a hit, write the entry to
157 * the result pointer and expect a later call to
158 * cache_commit() to flush the bucket into the table.
159 **************************************************************/
160 int cache_lookup(const char *user, const char *realm, const char *service, const char *password, struct cache_result *result) {
163 int realm_length = 0;
164 int service_length = 0;
166 unsigned char pwd_digest[16];
169 time_t epoch_timeout;
170 struct bucket *ref_bucket;
171 struct bucket *low_bucket;
172 struct bucket *high_bucket;
173 struct bucket *read_bucket = NULL;
174 char userrealmserv[CACHE_MAX_CREDS_LENGTH];
175 static char *debug = "[login=%s] [service=%s] [realm=%s]: %s";

	/* Cache disabled: skip lookup entirely. (NOTE(review): the return
	 * statement after this test is elided in this view.) */
178 if (!(flags & CACHE_ENABLED))

	/* Default to "nothing to write back" until we know otherwise. */
181 memset((void *)result, 0, sizeof(struct cache_result));
182 result->status = CACHE_NO_FLUSH;

184 /**************************************************************
185 * Initial length checks
186 **************************************************************/
	/* +1 each for the NUL terminators; the three strings are packed
	 * together into bucket.creds, so their combined size must fit. */
188 user_length = strlen(user) + 1;
189 realm_length = strlen(realm) + 1;
190 service_length = strlen(service) + 1;

192 if ((user_length + realm_length + service_length) > CACHE_MAX_CREDS_LENGTH) {
193 return CACHE_TOO_BIG;

196 /**************************************************************
197 * Any ideas on how not to call time() for every lookup?
198 **************************************************************/
	/* Entries created before epoch_timeout are considered expired.
	 * NOTE(review): `epoch` is read here but its assignment (a time()
	 * call per the comment above) is elided in this view. */
201 epoch_timeout = epoch - table_timeout;

203 /**************************************************************
204 * Get the offset into the hash table and the md5 sum of
206 **************************************************************/
	/* Slot is chosen by hashing the concatenated user+realm+service. */
208 strlcpy(userrealmserv, user, sizeof(userrealmserv));
209 strlcat(userrealmserv, realm, sizeof(userrealmserv));
210 strlcat(userrealmserv, service, sizeof(userrealmserv));

212 hash_offset = cache_pjwhash(userrealmserv);

	/* Only an MD5 digest of the password is kept, never the password
	 * itself. */
214 _saslauthd_MD5Init(&md5_context);
215 _saslauthd_MD5Update(&md5_context, password, strlen(password));
216 _saslauthd_MD5Final(pwd_digest, &md5_context);

218 /**************************************************************
219 * Loop through the bucket chain to try and find a hit.
221 * low_bucket = bucket at the start of the slot.
223 * high_bucket = last bucket in the slot.
225 * read_bucket = Contains the matched bucket if found.
228 * Also, lock the slot first to avoid contention in the
231 **************************************************************/
233 table_stats->attempts++;

	/* A failed lock is treated as a miss rather than an error so that
	 * authentication can still proceed uncached. */
235 if (cache_get_rlock(hash_offset) != 0) {
236 table_stats->misses++;
237 table_stats->lock_failures++;

	/* Fixed-size slot: CACHE_MAX_BUCKETS_PER consecutive buckets. */
241 low_bucket = table + (CACHE_MAX_BUCKETS_PER * hash_offset);
242 high_bucket = low_bucket + CACHE_MAX_BUCKETS_PER;

	/* Match on all three credential strings stored in bucket.creds at
	 * their recorded offsets. */
244 for (ref_bucket = low_bucket; ref_bucket < high_bucket; ref_bucket++) {
245 if (strcmp(user, ref_bucket->creds + ref_bucket->user_offt) == 0 && \
246 strcmp (realm, ref_bucket->creds + ref_bucket->realm_offt) == 0 && \
247 strcmp(service, ref_bucket->creds + ref_bucket->service_offt) == 0) {
248 read_bucket = ref_bucket;

253 /**************************************************************
254 * If we have our fish, check the password. If it's good,
255 * release the slot (row) lock and return CACHE_OK. Else,
256 * we'll write the entry to the result pointer. If we have a
257 * read_bucket, then tell cache_commit() to not rescan the
258 * chain (CACHE_FLUSH). Else, have cache_commit() determine the
259 * best bucket to place the new entry (CACHE_FLUSH_WITH_RESCAN).
260 **************************************************************/
	/* Hit only counts if the entry is newer than the timeout window. */
262 if (read_bucket != NULL && read_bucket->created > epoch_timeout) {

264 if (memcmp(pwd_digest, read_bucket->pwd_digest, 16) == 0) {
267 logger(L_DEBUG, L_FUNC, debug, user, realm, service, "found with valid passwd");
269 cache_un_lock(hash_offset);

	/* Same creds but different password digest: refresh in place. */
275 logger(L_DEBUG, L_FUNC, debug, user, realm, service, "found with invalid passwd, update pending");
277 result->status = CACHE_FLUSH;

	/* No (fresh) match at all: cache_commit() must pick a victim. */
282 logger(L_DEBUG, L_FUNC, debug, user, realm, service, "not found, update pending");
284 result->status = CACHE_FLUSH_WITH_RESCAN;

	/* Stage the would-be bucket in the caller's result struct; it is
	 * written to the table only if cache_commit() is called later. */
287 result->hash_offset = hash_offset;
288 result->read_bucket = read_bucket;

289 result->bucket.user_offt = 0;
291 result->bucket.realm_offt = user_length;
292 result->bucket.service_offt = user_length + realm_length;

	/* Safe: combined length was checked against CACHE_MAX_CREDS_LENGTH
	 * above. */
294 strcpy(result->bucket.creds + result->bucket.user_offt, user);
295 strcpy(result->bucket.creds + result->bucket.realm_offt, realm);
296 strcpy(result->bucket.creds + result->bucket.service_offt, service);

298 memcpy(result->bucket.pwd_digest, pwd_digest, 16);
299 result->bucket.created = epoch;

301 cache_un_lock(hash_offset);
302 table_stats->misses++;
307 /*************************************************************
308 * If it was later determined that the previous failed lookup
309 is ok, flush the result->bucket out to its permanent home
311 **************************************************************/
312 void cache_commit(struct cache_result *result) {
313 struct bucket *write_bucket;
314 struct bucket *ref_bucket;
315 struct bucket *low_bucket;
316 struct bucket *high_bucket;

	/* Nothing staged (cache disabled, or lookup was a clean hit). */
318 if (!(flags & CACHE_ENABLED))

321 if (result->status == CACHE_NO_FLUSH)

	/* Commit is best-effort: on lock failure we just drop the entry. */
324 if (cache_get_wlock(result->hash_offset) != 0) {
325 table_stats->lock_failures++;

	/* CACHE_FLUSH: cache_lookup() already pinpointed the bucket to
	 * overwrite (same creds, stale password digest). */
329 if (result->status == CACHE_FLUSH) {
330 write_bucket = result->read_bucket;

332 /*********************************************************
333 * CACHE_FLUSH_WITH_RESCAN is the default action to take.
334 * Simply traverse the slot looking for the oldest bucket
335 * and mark it for writing.
336 **********************************************************/
337 low_bucket = table + (CACHE_MAX_BUCKETS_PER * result->hash_offset);
338 high_bucket = low_bucket + CACHE_MAX_BUCKETS_PER;
339 write_bucket = low_bucket;

	/* Evict the bucket with the smallest created timestamp (oldest;
	 * zeroed/never-used buckets are the first choice). */
341 for (ref_bucket = low_bucket; ref_bucket < high_bucket; ref_bucket++) {
342 if (ref_bucket->created < write_bucket->created)
343 write_bucket = ref_bucket;

	/* Flush the staged entry into the shared table in one copy. */
347 memcpy((void *)write_bucket, (void *)&(result->bucket), sizeof(struct bucket));

350 logger(L_DEBUG, L_FUNC, "lookup committed");

352 cache_un_lock(result->hash_offset);
357 /*************************************************************
358 * Hashing function. Algorithm is an adaptation of Peter
359 * Weinberger's (PJW) generic hashing algorithm, which
360 * is based on Allen Holub's version.
361 **************************************************************/
362 int cache_pjwhash(char *datum ) {
	/* Width-derived constants so the algorithm adapts to the size of
	 * int on this platform (e.g. 32-bit int: shift 4, top 4 bits). */
363 const int BITS_IN_int = ( sizeof(int) * CHAR_BIT );
364 const int THREE_QUARTERS = ((int) ((BITS_IN_int * 3) / 4));
365 const int ONE_EIGHTH = ((int) (BITS_IN_int / 8));
366 const int HIGH_BITS = ( ~((unsigned int)(~0) >> ONE_EIGHTH ));

368 unsigned int hash_value, i;

	/* Classic PJW: shift in each byte, then fold any bits that reached
	 * the top eighth back down so the hash stays well mixed. */
370 for (hash_value = 0; *datum; ++datum) {
371 hash_value = (hash_value << ONE_EIGHTH) + *datum;
372 if ((i = hash_value & HIGH_BITS) != 0)
373 hash_value = (hash_value ^ (i >> THREE_QUARTERS)) & ~HIGH_BITS;

	/* Reduce to a slot index; table_size is prime (see
	 * cache_set_table_size), which helps distribution. */
376 return (hash_value % table_size);
379 /*************************************************************
380 * Allow someone to set the hash table size (in kilobytes).
381 * Since the hash table has to be prime, this won't be exact.
382 **************************************************************/
383 void cache_set_table_size(const char *size) {
384 unsigned int kilobytes;
386 unsigned int calc_bytes = 0;
387 unsigned int calc_table_size = 1;

	/* Parse the user-supplied size; strtol with no end-pointer check,
	 * so trailing junk is silently ignored. */
389 kilobytes = strtol(size, (char **)NULL, 10);

391 if (kilobytes <= 0) {
392 logger(L_ERR, L_FUNC,
393 "cache size must be positive and non zero");

397 bytes = kilobytes * 1024;

	/* First approximation: how many slots fit in the requested bytes.
	 * (NOTE(review): the assignment target on this continuation line
	 * is elided in this view.) */
400 bytes / (sizeof(struct bucket) * CACHE_MAX_BUCKETS_PER);

	/* Grow to the next prime until the resulting table is at least as
	 * large as the requested size; hence "won't be exact". */
403 calc_table_size = cache_get_next_prime(calc_table_size);
404 calc_bytes = calc_table_size *
405 sizeof(struct bucket) * CACHE_MAX_BUCKETS_PER;
406 } while (calc_bytes < bytes);

408 table_size = calc_table_size;
414 /*************************************************************
415 * Allow someone to set the table timeout (in seconds)
416 **************************************************************/
417 void cache_set_timeout(const char *time) {
	/* Parse seconds; cache_init() substitutes CACHE_DEFAULT_TIMEOUT
	 * if this is never called (table_timeout left at 0). */
418 table_timeout = strtol(time, (char **)NULL, 10);

420 if (table_timeout <= 0) {
421 logger(L_ERR, L_FUNC, "cache timeout must be positive");
429 /*************************************************************
430 * Find the next closest prime relative to the number given.
431 * This is a variation of an implementation of the
432 * Sieve of Erastothenes by Frank Pilhofer,
433 * http://www.fpx.de/fp/Software/Sieve.html.
434 **************************************************************/
435 unsigned int cache_get_next_prime(unsigned int number) {
	/* Bit-array helpers: one bit per odd number, 16 numbers (8 odd
	 * candidates) per byte. */
437 #define TEST(f,x) (*(f+((x)>>4))&(1<<(((x)&15L)>>1)))
438 #define SET(f,x) *(f+((x)>>4))|=1<<(((x)&15)>>1)

440 unsigned char *feld = NULL;
441 unsigned int teste = 1;

	/* Sieve window: candidates up to number+10000 (max is first set to
	 * number+20000, then reduced by 10000 inside the malloc call). */
446 max = number + 20000;

448 feld = malloc(alloc=(((max-=10000)>>4)+1));
451 logger(L_ERR, L_FUNC, "could not allocate memory");

455 memset(feld, 0, alloc);

	/* Walk odd candidates; an unset bit means prime. The first prime
	 * found that exceeds `number` is the answer. */
457 while ((teste += 2) < max) {
458 if (!TEST(feld, teste)) {
459 if (teste > number) {

	/* Mark odd multiples of this prime as composite. */
464 for (mom=3*teste; mom<max; mom+=teste<<1) SET (feld, mom);

468 /******************************************************
469 * A prime wasn't found in the maximum search range.
470 * Just return the original number.
471 ******************************************************/
478 /*************************************************************
479 * Open the file that we'll mmap in as the shared memory
480 * segment. If something fails, return NULL.
481 **************************************************************/
482 void *cache_alloc_mm(unsigned int bytes) {
486 char null_buff[1024];

	/* sizeof(CACHE_MMAP_FILE) already counts the literal's NUL; the
	 * extra +1 leaves one spare byte. */
491 mm_file_len = strlen(run_path) + sizeof(CACHE_MMAP_FILE) + 1;
493 (char *)malloc(mm_file_len))) {
494 logger(L_ERR, L_FUNC, "could not allocate memory");

	/* Build "<run_path><CACHE_MMAP_FILE>" as the backing-file path. */
498 strlcpy(mm.file, run_path, mm_file_len);
499 strlcat(mm.file, CACHE_MMAP_FILE, mm_file_len);

	/* O_TRUNC: any previous cache contents are discarded on start. */
502 open(mm.file, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR)) < 0) {
504 logger(L_ERR, L_FUNC, "could not open mmap file: %s", mm.file);
505 logger(L_ERR, L_FUNC, "open: %s", strerror(rc));

	/* Pre-extend the file with zeros, 1 KiB at a time, so the mapping
	 * is fully backed (avoids SIGBUS on first access). */
509 memset(null_buff, 0, sizeof(null_buff));
511 chunk_count = (bytes / sizeof(null_buff)) + 1;

513 while (chunk_count > 0) {
514 if (tx_rec(file_fd, null_buff, sizeof(null_buff))
515 != (ssize_t)sizeof(null_buff)) {
517 logger(L_ERR, L_FUNC,
518 "failed while writing to mmap file: %s",

	/* MAP_SHARED so all saslauthd processes see the same table.
	 * NOTE(review): failure is compared against (void *)-1 rather
	 * than the standard MAP_FAILED macro — same value on POSIX. */
527 if ((mm.base = mmap(NULL, bytes, PROT_READ|PROT_WRITE,
528 MAP_SHARED, file_fd, 0))== (void *)-1) {
530 logger(L_ERR, L_FUNC, "could not mmap shared memory segment");
531 logger(L_ERR, L_FUNC, "mmap: %s", strerror(rc));

538 if (flags & VERBOSE) {
539 logger(L_DEBUG, L_FUNC,
540 "mmaped shared memory segment on file: %s", mm.file);
547 /*************************************************************
548 * When we die we may need to perform some cleanup on the
549 * mmaped region. We assume we're the last process out here.
550 * Otherwise, deleting the file may cause SIGBUS signals to
551 * be generated for other processes.
552 **************************************************************/
553 void cache_cleanup_mm(void) {
	/* Unmap only if cache_alloc_mm() succeeded earlier; the backing
	 * file removal (unlink) is elided in this view of the file. */
554 if (mm.base != NULL) {
555 munmap(mm.base, mm.bytes);

558 if (flags & VERBOSE) {
559 logger(L_DEBUG, L_FUNC,
560 "cache mmap file removed: %s", mm.file);
567 /*****************************************************************
568 * The following is relative to the fcntl() locking method. Probably
569 * used when the Sys IV SHM Implementation is in effect.
570 ****************************************************************/
571 #ifdef CACHE_USE_FCNTL
573 /*************************************************************
574 * Setup the locking stuff required to implement the fcntl()
575 * style record locking of the hash table. Return 0 if
576 * everything is peachy, otherwise -1.
578 **************************************************************/
579 int cache_init_lock(void) {
581 size_t flock_file_len;

	/* Build "<run_path><CACHE_FLOCK_FILE>"; sizeof() of the literal
	 * already includes its NUL, +1 leaves a spare byte. */
583 flock_file_len = strlen(run_path) + sizeof(CACHE_FLOCK_FILE) + 1;
584 if ((lock.flock_file = (char *)malloc(flock_file_len)) == NULL) {
585 logger(L_ERR, L_FUNC, "could not allocate memory");

589 strlcpy(lock.flock_file, run_path, flock_file_len);
590 strlcat(lock.flock_file, CACHE_FLOCK_FILE, flock_file_len);

	/* One shared fd: fcntl() record locks on byte offsets of this file
	 * stand in for per-slot locks on the hash table. */
592 if ((lock.flock_fd = open(lock.flock_file, O_RDWR|O_CREAT|O_TRUNC, S_IWUSR|S_IRUSR)) == -1) {
594 logger(L_ERR, L_FUNC, "could not open flock file: %s", lock.flock_file);
595 logger(L_ERR, L_FUNC, "open: %s", strerror(rc));

600 logger(L_DEBUG, L_FUNC, "flock file opened at %s", lock.flock_file);
606 /*************************************************************
607 * When the processes die we'll need to cleanup/delete
608 * the flock_file. More for correctness than anything.
610 **************************************************************/
611 void cache_cleanup_lock(void) {
	/* Remove the lock file for tidiness; fcntl locks die with the
	 * process anyway ("more for correctness than anything"). */
614 if (lock.flock_file != NULL) {
615 unlink(lock.flock_file);

618 logger(L_DEBUG, L_FUNC, "flock file removed: %s", lock.flock_file);
626 /*************************************************************
627 * Attempt to get a write lock on a slot. Return 0 if
628 * everything went ok, return -1 if something bad happened.
629 * This function is expected to block.
631 **************************************************************/
632 int cache_get_wlock(unsigned int slot) {
633 struct flock lock_st;

	/* Lock one byte at offset == slot; each slot maps to a distinct
	 * byte of the flock file. (l_len setup is elided in this view.) */
636 lock_st.l_type = F_WRLCK;
637 lock_st.l_start = slot;
638 lock_st.l_whence = SEEK_SET;

645 logger(L_DEBUG, L_FUNC, "attempting a write lock on slot: %d", slot);

	/* F_SETLKW blocks; retry if interrupted by a signal. */
647 rc = fcntl(lock.flock_fd, F_SETLKW, &lock_st);
648 } while (rc != 0 && errno == EINTR);

	/* NOTE(review): strerror() is passed `rc` here, not errno —
	 * presumably rc holds the saved errno in the elided lines; verify
	 * against the full source. */
652 logger(L_ERR, L_FUNC, "could not acquire a write lock on slot: %d\n", slot);
653 logger(L_ERR, L_FUNC, "fcntl: %s", strerror(rc));
661 /*************************************************************
662 * Attempt to get a read lock on a slot. Return 0 if
663 * everything went ok, return -1 if something bad happened.
664 * This function is expected to block.
666 **************************************************************/
667 int cache_get_rlock(unsigned int slot) {
669 struct flock lock_st;

	/* Shared (read) lock on the one byte representing this slot;
	 * multiple readers may hold it concurrently. */
673 lock_st.l_type = F_RDLCK;
674 lock_st.l_start = slot;
675 lock_st.l_whence = SEEK_SET;

682 logger(L_DEBUG, L_FUNC, "attempting a read lock on slot: %d", slot);

	/* F_SETLKW blocks; retry if interrupted by a signal. */
684 rc = fcntl(lock.flock_fd, F_SETLKW, &lock_st);
685 } while (rc != 0 && errno == EINTR);

689 logger(L_ERR, L_FUNC, "could not acquire a read lock on slot: %d\n", slot);
690 logger(L_ERR, L_FUNC, "fcntl: %s", strerror(rc));
698 /*************************************************************
699 * Releases a previously acquired lock on a slot.
701 **************************************************************/
702 int cache_un_lock(unsigned int slot) {
704 struct flock lock_st;

	/* F_UNLCK releases whichever lock (read or write) this process
	 * holds on the slot's byte. */
708 lock_st.l_type = F_UNLCK;
709 lock_st.l_start = slot;
710 lock_st.l_whence = SEEK_SET;

717 logger(L_DEBUG, L_FUNC, "attempting to release lock on slot: %d", slot);

	/* Retry on signal interruption, matching the acquire paths. */
719 rc = fcntl(lock.flock_fd, F_SETLKW, &lock_st);
720 } while (rc != 0 && errno == EINTR);

724 logger(L_ERR, L_FUNC, "could not release lock on slot: %d\n", slot);
725 logger(L_ERR, L_FUNC, "fcntl: %s", strerror(rc));
733 #endif /* CACHE_USE_FCNTL */
735 /**********************************************************************
736 * The following is relative to the POSIX threads rwlock method of locking
737 * slots in the hash table. Used when the Doors IPC is in effect, thus
738 * -lpthreads is evident.
739 ***********************************************************************/
741 #ifdef CACHE_USE_PTHREAD_RWLOCK
743 /*************************************************************
744 * Initialize a pthread_rwlock_t for every slot (row) in the
745 * hash table. Return 0 if everything went ok, -1 if we bomb.
747 **************************************************************/
748 int cache_init_lock(void) {
750 pthread_rwlock_t *rwlock;

	/* One rwlock per slot (row) of the hash table. */
753 (pthread_rwlock_t *)malloc(sizeof(pthread_rwlock_t) * table_size))) {
754 logger(L_ERR, L_FUNC, "could not allocate memory");

	/* Default attributes (NULL) are sufficient; failure of any single
	 * init aborts the whole setup. */
758 for (x = 0; x < table_size; x++) {
759 rwlock = lock.rwlock + x;

761 if (pthread_rwlock_init(rwlock, NULL) != 0) {
762 logger(L_ERR, L_FUNC, "failed to initialize lock %d", x);

768 logger(L_DEBUG, L_FUNC, "%d rwlocks initialized", table_size);
774 /*************************************************************
775 * Destroy all of the rwlocks, free the buffer.
777 **************************************************************/
778 void cache_cleanup_lock(void) {
780 pthread_rwlock_t *rwlock;

	/* Guard against cleanup before (or after failed) init. */
782 if(!lock.rwlock) return;

	/* Destroy every per-slot rwlock; the buffer free is elided in
	 * this view of the file. */
784 for(x=0; x<table_size; x++) {
785 rwlock = lock.rwlock + x;
786 pthread_rwlock_destroy(rwlock);
795 /*************************************************************
796 * Attempt to get a write lock on a slot. Return 0 if
797 * everything went ok, return -1 if something bad happened.
798 * This function is expected to block the current thread.
800 **************************************************************/
801 int cache_get_wlock(unsigned int slot) {
807 logger(L_DEBUG, L_FUNC, "attempting a write lock on slot: %d", slot);

	/* Blocks the calling thread until the per-slot rwlock is held
	 * exclusively; returns 0 on success. */
809 rc = pthread_rwlock_wrlock(lock.rwlock + slot);

812 logger(L_ERR, L_FUNC, "could not acquire a write lock on slot: %d\n", slot);
820 /*************************************************************
821 * Attempt to get a read lock on a slot. Return 0 if
822 * everything went ok, return -1 if something bad happened.
823 * This function is expected to block the current thread.
825 **************************************************************/
826 int cache_get_rlock(unsigned int slot) {
832 logger(L_DEBUG, L_FUNC, "attempting a read lock on slot: %d", slot);

	/* Shared acquisition: multiple reader threads may hold the slot's
	 * rwlock simultaneously; blocks while a writer holds it. */
834 rc = pthread_rwlock_rdlock(lock.rwlock + slot);

837 logger(L_ERR, L_FUNC, "could not acquire a read lock on slot: %d\n", slot);
845 /*************************************************************
846 * Releases a previously acquired lock on a slot.
848 **************************************************************/
849 int cache_un_lock(unsigned int slot) {
855 logger(L_DEBUG, L_FUNC, "attempting to release lock on slot: %d", slot);

	/* Releases either a read or a write hold on the slot's rwlock. */
857 rc = pthread_rwlock_unlock(lock.rwlock + slot);

860 logger(L_ERR, L_FUNC, "could not release lock on slot: %d\n", slot);
868 #endif /* CACHE_USE_PTHREAD_RWLOCK */
869 /***************************************************************************************/
870 /***************************************************************************************/