2 * Copyright (c) 2016, JANET(UK)
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of JANET(UK) nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 * OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <trust_router/tr_dh.h>
38 #include <tid_internal.h>
39 #include <tr_filter.h>
46 #include <trp_route.h>
47 #include <trp_internal.h>
48 #include <tr_config.h>
/* Structure to hold data for the tid response callback.
 * NOTE(review): member list is elided in this excerpt -- later code in this
 * file reads/writes ->thread_id and ->resp (a TID_RESP *), so at minimum
 * those fields exist.  Confirm against the full source. */
typedef struct tr_resp_cookie {
/* Hold a tids instance and a config manager for the event callbacks.
 * NOTE(review): members elided here; later code reads ->tids, ->cfg_mgr and
 * ->trps from this struct. */
struct tr_tids_event_cookie {
/* Callback invoked by tidc_fwd_request() when a response arrives from the
 * next-hop TID server.  Logs the outcome and stores a private duplicate of
 * the response in the caller-supplied cookie (allocated against the cookie
 * context) so the forwarding thread can hand it back to the main thread.
 * NOTE(review): parts of the signature and body are elided in this excerpt. */
static void tr_tidc_resp_handler(TIDC_INSTANCE *tidc,
  TR_RESP_COOKIE *cookie=talloc_get_type_abort(resp_cookie, TR_RESP_COOKIE);

  tr_debug("tr_tidc_resp_handler: Response received! Realm = %s, Community = %s, result = %s.",
           (TID_SUCCESS==resp->result)?"success":"error");

  if (resp->error_path!=NULL)
    tr_debug("tr_tids_resp_handler: error_path is set.");

  /* Duplicate into our own context: the resp argument belongs to the TID
   * client library and does not outlive this callback. */
  cookie->resp=tid_resp_dup(cookie, resp);
/* Data handed to each AAA request forwarding thread.
 * Ownership: the thread owns the cookie until it queues a response on mq
 * (see tr_tids_req_fwd_thread).  aaa_hostname and dh_params are released by
 * the talloc destructor tr_tids_fwd_cookie_destructor().
 * NOTE(review): some members used elsewhere in this file (e.g. thread_id,
 * dh_params) are elided in this excerpt. */
struct tr_tids_fwd_cookie {
  pthread_mutex_t mutex; /* lock on the mq (separate from the locking within the mq, see below) */
  TR_MQ *mq; /* messages from thread to main process; set to NULL to disable response */
  TR_NAME *aaa_hostname;
  TID_REQ *fwd_req; /* the req to duplicate */
/* talloc destructor for struct tr_tids_fwd_cookie: releases the members
 * that are not talloc-managed (the TR_NAME and the DH parameters).
 * NOTE(review): the return statement and closing brace are elided in this
 * excerpt; talloc destructors conventionally return 0 to allow the free. */
static int tr_tids_fwd_cookie_destructor(void *obj)
  struct tr_tids_fwd_cookie *c=talloc_get_type_abort(obj, struct tr_tids_fwd_cookie);
  if (c->aaa_hostname!=NULL)
    tr_free_name(c->aaa_hostname);
  if (c->dh_params!=NULL)
    tr_destroy_dh_params(c->dh_params);
/* Block until we get the lock, returns 0 on success.
 * The mutex is used to protect changes to the mq pointer in
 * a thread's cookie. The master thread sets this to null to indicate
 * that it has abandoned the thread and the message queue is no longer
 * valid. This is unrelated to the locking in the message queue
 * implementation itself. */
static int tr_tids_fwd_get_mutex(struct tr_tids_fwd_cookie *cookie)
  /* Propagate pthread_mutex_lock()'s result directly (0 on success). */
  return (pthread_mutex_lock(&(cookie->mutex)));
/* Counterpart to tr_tids_fwd_get_mutex(); propagates the
 * pthread_mutex_unlock() result directly (0 on success). */
static int tr_tids_fwd_release_mutex(struct tr_tids_fwd_cookie *cookie)
  return (pthread_mutex_unlock(&(cookie->mutex)));
/* Values for messages sent from forwarding threads to the main thread via
 * the message queue; the receiver distinguishes them with strcmp(). */
#define TR_TID_MQMSG_SUCCESS "tid success"
#define TR_TID_MQMSG_FAILURE "tid failure"
129 /* Thread main for sending and receiving a request to a single AAA server */
130 static void *tr_tids_req_fwd_thread(void *arg)
132 TALLOC_CTX *tmp_ctx=talloc_new(NULL);
133 struct tr_tids_fwd_cookie *args=talloc_get_type_abort(arg, struct tr_tids_fwd_cookie);
134 TIDC_INSTANCE *tidc=tidc_create();
136 TR_RESP_COOKIE *cookie=NULL;
140 talloc_steal(tmp_ctx, args); /* take responsibility for the cookie */
143 talloc_steal(tmp_ctx, tidc);
145 /* create the cookie we will use for our response */
146 cookie=talloc(tmp_ctx, TR_RESP_COOKIE);
148 tr_notice("tr_tids_req_fwd_thread: unable to allocate response cookie.");
152 cookie->thread_id=args->thread_id;
153 tr_debug("tr_tids_req_fwd_thread: thread %d started.", cookie->thread_id);
155 /* Create a TID client instance */
157 tr_crit("tr_tids_req_fwd_thread: Unable to allocate TIDC instance.");
158 /*tids_send_err_response(tids, orig_req, "Memory allocation failure");*/
159 /* TODO: encode reason for failure */
164 /* Set-up TID connection */
165 if (-1==(args->fwd_req->conn = tidc_open_connection(tidc,
166 args->aaa_hostname->buf,
167 TID_PORT, /* TODO: make this configurable */
168 &(args->fwd_req->gssctx)))) {
169 tr_notice("tr_tids_req_fwd_thread: Error in tidc_open_connection.");
170 /* tids_send_err_response(tids, orig_req, "Can't open connection to next hop TIDS"); */
171 /* TODO: encode reason for failure */
175 tr_debug("tr_tids_req_fwd_thread: thread %d opened TID connection to %s.",
177 args->aaa_hostname->buf);
179 /* Send a TID request. */
180 if (0 > (rc = tidc_fwd_request(tidc, args->fwd_req, tr_tidc_resp_handler, (void *)cookie))) {
181 tr_notice("Error from tidc_fwd_request, rc = %d.", rc);
185 /* cookie->resp should now contain our copy of the response */
187 tr_debug("tr_tids_req_fwd_thread: thread %d received response.");
190 /* Notify parent thread of the response, if it's still listening. */
191 if (0!=tr_tids_fwd_get_mutex(args)) {
192 tr_notice("tr_tids_req_fwd_thread: thread %d unable to acquire mutex.", cookie->thread_id);
193 } else if (NULL!=args->mq) {
194 /* mq is still valid, so we can queue our response */
195 tr_debug("tr_tids_req_fwd_thread: thread %d using valid msg queue.", cookie->thread_id);
197 msg=tr_mq_msg_new(tmp_ctx, TR_TID_MQMSG_SUCCESS, TR_MQ_PRIO_NORMAL);
199 msg=tr_mq_msg_new(tmp_ctx, TR_TID_MQMSG_FAILURE, TR_MQ_PRIO_NORMAL);
202 tr_notice("tr_tids_req_fwd_thread: thread %d unable to allocate response msg.", cookie->thread_id);
204 tr_mq_msg_set_payload(msg, (void *)cookie, NULL);
206 talloc_steal(msg, cookie); /* attach this to the msg so we can forget about it */
207 tr_mq_add(args->mq, msg);
208 talloc_steal(NULL, args); /* take out of our tmp_ctx; master thread now responsible for freeing */
209 tr_debug("tr_tids_req_fwd_thread: thread %d queued response message.", cookie->thread_id);
210 if (0!=tr_tids_fwd_release_mutex(args))
211 tr_notice("tr_tids_req_fwd_thread: Error releasing mutex.");
214 talloc_free(tmp_ctx);
/* Merges r2 into r1 if they are compatible.
 * Compatibility requires both results to be TID_SUCCESS and agreement on
 * rp_realm, realm and comm (the error-return statements for the failing
 * cases are elided in this excerpt).  On success, r2's server block is
 * duplicated with r1 as the talloc parent and appended to r1's list. */
static TID_RC tr_tids_merge_resps(TID_RESP *r1, TID_RESP *r2)
  /* ensure these are compatible replies */
  if ((r1->result!=TID_SUCCESS) || (r2->result!=TID_SUCCESS))

  if ((0!=tr_name_cmp(r1->rp_realm, r2->rp_realm)) ||
      (0!=tr_name_cmp(r1->realm, r2->realm)) ||
      (0!=tr_name_cmp(r1->comm, r2->comm)))

  tid_srvr_blk_add(r1->servers, tid_srvr_blk_dup(r1, r2->servers));
235 * Process a TID request
237 * Return value of -1 means to send a TID_ERROR response. Fill in resp->err_msg or it will
238 * be returned as a generic error.
246 static int tr_tids_req_handler(TIDS_INSTANCE *tids,
251 TALLOC_CTX *tmp_ctx=talloc_new(NULL);
252 TR_AAA_SERVER *aaa_servers=NULL, *this_aaa=NULL;
255 TR_AAA_SERVER_ITER *aaa_iter=NULL;
256 pthread_t aaa_thread[TR_TID_MAX_AAA_SERVERS];
257 struct tr_tids_fwd_cookie *aaa_cookie[TR_TID_MAX_AAA_SERVERS]={NULL};
258 TID_RESP *aaa_resp[TR_TID_MAX_AAA_SERVERS]={NULL};
259 TR_RP_CLIENT *rp_client=NULL;
260 TR_RP_CLIENT_ITER *rpc_iter=NULL;
262 TID_REQ *fwd_req = NULL;
263 TR_COMM *cfg_comm = NULL;
264 TR_COMM *cfg_apc = NULL;
265 TR_FILTER_ACTION oaction = TR_FILTER_ACTION_REJECT;
266 time_t expiration_interval=0;
267 struct tr_tids_event_cookie *cookie=talloc_get_type_abort(cookie_in, struct tr_tids_event_cookie);
268 TR_CFG_MGR *cfg_mgr=cookie->cfg_mgr;
269 TRPS_INSTANCE *trps=cookie->trps;
270 TRP_ROUTE *route=NULL;
273 unsigned int n_responses=0;
274 unsigned int n_failed=0;
275 struct timespec ts_abort={0};
276 unsigned int resp_frac_numer=cfg_mgr->active->internal->tid_resp_numer;
277 unsigned int resp_frac_denom=cfg_mgr->active->internal->tid_resp_denom;
278 TR_RESP_COOKIE *payload=NULL;
279 TR_FILTER_TARGET *target=NULL;
283 if ((!tids) || (!orig_req) || (!resp)) {
284 tr_debug("tr_tids_req_handler: Bad parameters");
289 tr_debug("tr_tids_req_handler: Request received (conn = %d)! Realm = %s, Comm = %s", orig_req->conn,
290 orig_req->realm->buf, orig_req->comm->buf);
292 /* Duplicate the request, so we can modify and forward it */
293 if (NULL == (fwd_req=tid_dup_req(orig_req))) {
294 tr_debug("tr_tids_req_handler: Unable to duplicate request.");
295 retval=-1; /* response will be a generic internal error */
298 talloc_steal(tmp_ctx, fwd_req);
300 if (NULL == (cfg_comm=tr_comm_table_find_comm(cfg_mgr->active->ctable, orig_req->comm))) {
301 tr_notice("tr_tids_req_hander: Request for unknown comm: %s.", orig_req->comm->buf);
302 tid_resp_set_err_msg(resp, tr_new_name("Unknown community"));
307 /* We now need to apply the filters associated with the RP client handing us the request.
308 * It is possible (or even likely) that more than one client is associated with the GSS
309 * name we got from the authentication. We will apply all of them in an arbitrary order.
310 * For this to result in well-defined behavior, either only accept or only reject filter
311 * lines should be used, or a unique GSS name must be given for each RP realm. */
313 if (!tids->gss_name) {
314 tr_notice("tr_tids_req_handler: No GSS name for incoming request.");
315 tid_resp_set_err_msg(resp, tr_new_name("No GSS name for request"));
320 /* Keep original constraints, may add more from the filter. These will be added to orig_req as
321 * well. Need to verify that this is acceptable behavior, but it's what we've always done. */
322 fwd_req->cons=orig_req->cons;
324 target=tr_filter_target_tid_req(tmp_ctx, orig_req);
326 tr_crit("tid_req_handler: Unable to allocate filter target, cannot apply filter!");
327 tid_resp_set_err_msg(resp, tr_new_name("Incoming TID request filter error"));
332 rpc_iter=tr_rp_client_iter_new(tmp_ctx);
333 if (rpc_iter==NULL) {
334 tr_err("tid_req_handler: Unable to allocate RP client iterator.");
338 for (rp_client=tr_rp_client_iter_first(rpc_iter, cfg_mgr->active->rp_clients);
340 rp_client=tr_rp_client_iter_next(rpc_iter)) {
342 if (!tr_gss_names_matches(rp_client->gss_names, tids->gss_name))
343 continue; /* skip any that don't match the GSS name */
345 if (TR_FILTER_MATCH == tr_filter_apply(target,
346 tr_filter_set_get(rp_client->filters,
347 TR_FILTER_TYPE_TID_INBOUND),
350 break; /* Stop looking, oaction is set */
353 /* We get here whether or not a filter matched. If tr_filter_apply() doesn't match, it returns
354 * a default action of reject, so we don't have to check why we exited the loop. */
355 if (oaction != TR_FILTER_ACTION_ACCEPT) {
356 tr_notice("tr_tids_req_handler: Incoming TID request rejected by filter for GSS name", orig_req->rp_realm->buf);
357 tid_resp_set_err_msg(resp, tr_new_name("Incoming TID request filter error"));
362 /* Check that the rp_realm is a member of the community in the request */
363 if (NULL == tr_comm_find_rp(cfg_mgr->active->ctable, cfg_comm, orig_req->rp_realm)) {
364 tr_notice("tr_tids_req_handler: RP Realm (%s) not member of community (%s).", orig_req->rp_realm->buf, orig_req->comm->buf);
365 tid_resp_set_err_msg(resp, tr_new_name("RP COI membership error"));
370 /* Map the comm in the request from a COI to an APC, if needed */
371 if (TR_COMM_COI == cfg_comm->type) {
372 if (orig_req->orig_coi!=NULL) {
373 tr_notice("tr_tids_req_handler: community %s is COI but COI to APC mapping already occurred. Dropping request.",
374 orig_req->comm->buf);
375 tid_resp_set_err_msg(resp, tr_new_name("Second COI to APC mapping would result, permitted only once."));
380 tr_debug("tr_tids_req_handler: Community was a COI, switching.");
381 /* TBD -- In theory there can be more than one? How would that work? */
382 if ((!cfg_comm->apcs) || (!cfg_comm->apcs->id)) {
383 tr_notice("No valid APC for COI %s.", orig_req->comm->buf);
384 tid_resp_set_err_msg(resp, tr_new_name("No valid APC for community"));
388 apc = tr_dup_name(cfg_comm->apcs->id);
390 /* Check that the APC is configured */
391 if (NULL == (cfg_apc = tr_comm_table_find_comm(cfg_mgr->active->ctable, apc))) {
392 tr_notice("tr_tids_req_hander: Request for unknown comm: %s.", apc->buf);
393 tid_resp_set_err_msg(resp, tr_new_name("Unknown APC"));
399 fwd_req->orig_coi = orig_req->comm;
401 /* Check that rp_realm is a member of this APC */
402 if (NULL == (tr_comm_find_rp(cfg_mgr->active->ctable, cfg_apc, orig_req->rp_realm))) {
403 tr_notice("tr_tids_req_hander: RP Realm (%s) not member of community (%s).", orig_req->rp_realm->buf, orig_req->comm->buf);
404 tid_resp_set_err_msg(resp, tr_new_name("RP APC membership error"));
410 /* Look up the route for this community/realm. */
411 tr_debug("tr_tids_req_handler: looking up route.");
412 route=trps_get_selected_route(trps, orig_req->comm, orig_req->realm);
414 /* No route. Use default AAA servers if we have them. */
415 tr_debug("tr_tids_req_handler: No route for realm %s, defaulting.", orig_req->realm->buf);
416 if (NULL == (aaa_servers = tr_default_server_lookup(cfg_mgr->active->default_servers,
418 tr_notice("tr_tids_req_handler: No default AAA servers, discarded.");
419 tid_resp_set_err_msg(resp, tr_new_name("No path to AAA Server(s) for realm"));
425 /* Found a route. Determine the AAA servers or next hop address. */
426 tr_debug("tr_tids_req_handler: found route.");
427 if (trp_route_is_local(route)) {
428 tr_debug("tr_tids_req_handler: route is local.");
429 aaa_servers = tr_idp_aaa_server_lookup(cfg_mgr->active->ctable->idp_realms,
434 tr_debug("tr_tids_req_handler: route not local.");
435 aaa_servers = tr_aaa_server_new(tmp_ctx, trp_route_get_next_hop(route));
439 /* Since we aren't defaulting, check idp coi and apc membership */
440 if (NULL == (tr_comm_find_idp(cfg_mgr->active->ctable, cfg_comm, fwd_req->realm))) {
441 tr_notice("tr_tids_req_handler: IDP Realm (%s) not member of community (%s).", orig_req->realm->buf, orig_req->comm->buf);
442 tid_resp_set_err_msg(resp, tr_new_name("IDP community membership error"));
446 if ( cfg_apc && (NULL == (tr_comm_find_idp(cfg_mgr->active->ctable, cfg_apc, fwd_req->realm)))) {
447 tr_notice("tr_tids_req_handler: IDP Realm (%s) not member of APC (%s).", orig_req->realm->buf, orig_req->comm->buf);
448 tid_resp_set_err_msg(resp, tr_new_name("IDP APC membership error"));
454 /* Make sure we came through with a AAA server. If not, we can't handle the request. */
455 if (NULL == aaa_servers) {
456 tr_notice("tr_tids_req_handler: no route or AAA server for realm (%s) in community (%s).",
457 orig_req->realm->buf, orig_req->comm->buf);
458 tid_resp_set_err_msg(resp, tr_new_name("Missing trust route error"));
463 /* send a TID request to the AAA server(s), and get the answer(s) */
464 tr_debug("tr_tids_req_handler: sending TID request(s).");
466 expiration_interval = cfg_apc->expiration_interval;
467 else expiration_interval = cfg_comm->expiration_interval;
468 if (fwd_req->expiration_interval)
469 fwd_req->expiration_interval = (expiration_interval < fwd_req->expiration_interval) ? expiration_interval : fwd_req->expiration_interval;
470 else fwd_req->expiration_interval = expiration_interval;
472 /* Set up message queue for replies from req forwarding threads */
473 mq=tr_mq_new(tmp_ctx);
475 tr_notice("tr_tids_req_handler: unable to allocate message queue.");
479 tr_debug("tr_tids_req_handler: message queue allocated.");
482 aaa_iter=tr_aaa_server_iter_new(tmp_ctx);
483 if (aaa_iter==NULL) {
484 tr_notice("tr_tids_req_handler: unable to allocate AAA server iterator.");
488 for (n_aaa=0, this_aaa=tr_aaa_server_iter_first(aaa_iter, aaa_servers);
490 n_aaa++, this_aaa=tr_aaa_server_iter_next(aaa_iter)) {
491 tr_debug("tr_tids_req_handler: Preparing to start thread %d.", n_aaa);
493 aaa_cookie[n_aaa]=talloc(tmp_ctx, struct tr_tids_fwd_cookie);
494 if (aaa_cookie[n_aaa]==NULL) {
495 tr_notice("tr_tids_req_handler: unable to allocate cookie for AAA thread %d.", n_aaa);
499 talloc_set_destructor((void *)(aaa_cookie[n_aaa]), tr_tids_fwd_cookie_destructor);
500 /* fill in the cookie. To ensure the thread has valid data even if we exit first and
501 * abandon it, duplicate anything pointed to (except the mq). */
502 aaa_cookie[n_aaa]->thread_id=n_aaa;
503 if (0!=pthread_mutex_init(&(aaa_cookie[n_aaa]->mutex), NULL)) {
504 tr_notice("tr_tids_req_handler: unable to init mutex for AAA thread %d.", n_aaa);
508 aaa_cookie[n_aaa]->mq=mq;
509 aaa_cookie[n_aaa]->aaa_hostname=tr_dup_name(this_aaa->hostname);
510 aaa_cookie[n_aaa]->dh_params=tr_dh_dup(orig_req->tidc_dh);
511 aaa_cookie[n_aaa]->fwd_req=tid_dup_req(fwd_req);
512 talloc_steal(aaa_cookie[n_aaa], aaa_cookie[n_aaa]->fwd_req);
513 tr_debug("tr_tids_req_handler: cookie %d initialized.", n_aaa);
515 /* Take the cookie out of tmp_ctx before starting thread. If thread starts, it becomes
516 * responsible for freeing it until it queues a response. If we did not do this, the possibility
517 * exists that this function exits, freeing the cookie, before the thread takes the cookie
518 * out of our tmp_ctx. This would cause a segfault or talloc error in the thread. */
519 talloc_steal(NULL, aaa_cookie[n_aaa]);
520 if (0!=pthread_create(&(aaa_thread[n_aaa]), NULL, tr_tids_req_fwd_thread, aaa_cookie[n_aaa])) {
521 talloc_steal(tmp_ctx, aaa_cookie[n_aaa]); /* thread start failed; steal this back */
522 tr_notice("tr_tids_req_handler: unable to start AAA thread %d.", n_aaa);
526 tr_debug("tr_tids_req_handler: thread %d started.", n_aaa);
529 /* determine expiration time */
530 if (0!=tr_mq_pop_timeout(cfg_mgr->active->internal->tid_req_timeout, &ts_abort)) {
531 tr_notice("tr_tids_req_handler: unable to read clock for timeout.");
536 /* wait for responses */
537 tr_debug("tr_tids_req_handler: waiting for response(s).");
540 while (((n_responses+n_failed)<n_aaa) &&
541 (NULL!=(msg=tr_mq_pop(mq, &ts_abort)))) {
542 /* process message */
543 if (0==strcmp(tr_mq_msg_get_message(msg), TR_TID_MQMSG_SUCCESS)) {
544 payload=talloc_get_type_abort(tr_mq_msg_get_payload(msg), TR_RESP_COOKIE);
545 talloc_steal(tmp_ctx, payload); /* put this back in our context */
546 aaa_resp[payload->thread_id]=payload->resp; /* save pointers to these */
548 if (payload->resp->result==TID_SUCCESS) {
549 tr_tids_merge_resps(resp, payload->resp);
553 tr_notice("tr_tids_req_handler: TID error received from AAA server %d: %.*s",
555 payload->resp->err_msg->len,
556 payload->resp->err_msg->buf);
558 } else if (0==strcmp(tr_mq_msg_get_message(msg), TR_TID_MQMSG_FAILURE)) {
561 payload=talloc_get_type(tr_mq_msg_get_payload(msg), TR_RESP_COOKIE);
563 talloc_steal(tmp_ctx, payload); /* put this back in our context */
565 /* this means the thread was unable to allocate a response cookie, and we thus cannot determine which thread it was. This is bad and should never happen in a working system.. Give up. */
566 tr_notice("tr_tids_req_handler: TID request thread sent invalid reply. Aborting!");
570 tr_notice("tr_tids_req_handler: TID request for AAA server %d failed.",
573 /* unexpected message */
574 tr_err("tr_tids_req_handler: Unexpected message received. Aborting!");
579 /* Set the cookie pointer to NULL so we know we've dealt with this one. The
580 * cookie itself is in our tmp_ctx, which we'll free before exiting. Let it hang
581 * around in case we are still using pointers to elements of the cookie. */
582 aaa_cookie[payload->thread_id]=NULL;
586 /* check whether we've received enough responses to exit */
587 if ((idp_shared && (n_responses>0)) ||
588 (resp_frac_denom*n_responses>=resp_frac_numer*n_aaa))
592 tr_debug("tr_tids_req_handler: done waiting for responses. %d responses, %d failures.",
593 n_responses, n_failed);
594 /* Inform any remaining threads that we will no longer handle their responses. */
595 for (ii=0; ii<n_aaa; ii++) {
596 if (aaa_cookie[ii]!=NULL) {
597 if (0!=tr_tids_fwd_get_mutex(aaa_cookie[ii]))
598 tr_notice("tr_tids_req_handler: unable to get mutex for AAA thread %d.", ii);
600 aaa_cookie[ii]->mq=NULL; /* threads will not try to respond through a null mq */
602 if (0!=tr_tids_fwd_release_mutex(aaa_cookie[ii]))
603 tr_notice("tr_tids_req_handler: unable to release mutex for AAA thread %d.", ii);
607 /* Now all threads have either replied (and aaa_cookie[ii] is null) or have been told not to
608 * reply (by setting their mq pointer to null). However, some may have responded by placing
609 * a message on the mq after we last checked but before we set their mq pointer to null. These
610 * will not know that we gave up on them, so we must free their cookies for them. We can just
611 * go through any remaining messages on the mq to identify these threads. By putting them in
612 * our context instead of freeing them directly, we ensure we don't accidentally invalidate
613 * any of our own pointers into the structure before this function exits. */
614 while (NULL!=(msg=tr_mq_pop(mq, NULL))) {
615 payload=(TR_RESP_COOKIE *)tr_mq_msg_get_payload(msg);
616 if (aaa_cookie[payload->thread_id]!=NULL)
617 talloc_steal(tmp_ctx, aaa_cookie[payload->thread_id]);
622 if (n_responses==0) {
623 /* No requests succeeded, so this will be an error */
626 /* If we got any error responses, send an arbitrarily chosen one. */
627 for (ii=0; ii<n_aaa; ii++) {
628 if (aaa_resp[ii] != NULL) {
629 tid_resp_cpy(resp, aaa_resp[ii]);
633 /* No error responses at all, so generate our own error. */
634 tid_resp_set_err_msg(resp, tr_new_name("Unable to contact AAA server(s)."));
642 talloc_free(tmp_ctx);
/* GSS authentication callback: accept an incoming client only if at least
 * one RP client in the active configuration uses the presented GSS name.
 * On acceptance, a duplicate of the name is stored in tids->gss_name for
 * later filter matching in tr_tids_req_handler.
 * NOTE(review): return statements and closing braces are elided in this
 * excerpt; the accept/reject return values cannot be confirmed from here. */
static int tr_tids_gss_handler(gss_name_t client_name, TR_NAME *gss_name,
  struct tr_tids_event_cookie *cookie=talloc_get_type_abort(data, struct tr_tids_event_cookie);
  TIDS_INSTANCE *tids = cookie->tids;
  TR_CFG_MGR *cfg_mgr = cookie->cfg_mgr;

  if ((!client_name) || (!gss_name) || (!tids) || (!cfg_mgr)) {
    tr_debug("tr_tidc_gss_handler: Bad parameters.");

  /* Ensure at least one client exists using this GSS name */
  if (NULL == tr_rp_client_lookup(cfg_mgr->active->rp_clients, gss_name)) {
    tr_debug("tr_tids_gss_handler: Unknown GSS name %.*s", gss_name->len, gss_name->buf);

  /* Store the GSS name */
  tids->gss_name = tr_dup_name(gss_name);
  tr_debug("Client's GSS Name: %.*s", gss_name->len, gss_name->buf);
672 /***** TIDS event handling *****/
674 /* called when a connection to the TIDS port is received */
/* libevent callback: called when a connection to the TIDS port is received;
 * hands the listener fd to tids_accept(). */
static void tr_tids_event_cb(int listener, short event, void *arg)
  TIDS_INSTANCE *tids = talloc_get_type_abort(arg, TIDS_INSTANCE);

  if (0==(event & EV_READ))
    tr_debug("tr_tids_event_cb: unexpected event on TIDS socket (event=0x%X)", event);
  /* NOTE(review): a line between the debug call and tids_accept() is elided
   * in this excerpt (possibly an 'else'); whether tids_accept runs on a
   * non-EV_READ event cannot be confirmed from here. */
  tids_accept(tids, listener);
685 /* called when it's time to sweep for completed TID child processes */
686 static void tr_tids_sweep_cb(int listener, short event, void *arg)
688 TIDS_INSTANCE *tids = talloc_get_type_abort(arg, TIDS_INSTANCE);
690 if (0==(event & EV_TIMEOUT))
691 tr_debug("tr_tids_event_cb: unexpected event on TID process sweep timer (event=0x%X)", event);
693 tids_sweep_procs(tids);
/* Configure the tids instance and set up its event handlers.
 * Returns 0 on success, nonzero on failure. Fills in
 * *tids_event (which should be allocated by caller).
 * NOTE(review): several lines are elided in this excerpt (declaration of ii,
 * goto-cleanup error exits, the remaining cookie field assignments, and the
 * trailing arguments to tids_get_listener() and event_new()). */
int tr_tids_event_init(struct event_base *base, TIDS_INSTANCE *tids, TR_CFG_MGR *cfg_mgr, TRPS_INSTANCE *trps,
                       struct tr_socket_event *tids_ev, struct event **sweep_ev)
  TALLOC_CTX *tmp_ctx=talloc_new(NULL);
  struct tr_tids_event_cookie *cookie=NULL;
  struct timeval sweep_interval;

  if (tids_ev == NULL) {
    tr_debug("tr_tids_event_init: Null tids_ev.");

  if (sweep_ev == NULL) {
    tr_debug("tr_tids_event_init: Null sweep_ev.");

  /* Create the cookie for callbacks. We'll put it in the tids context, so it will
   * be cleaned up when tids is freed by talloc_free. */
  cookie=talloc(tmp_ctx, struct tr_tids_event_cookie);
  if (cookie == NULL) {
    tr_debug("tr_tids_event_init: Unable to allocate cookie.");

  cookie->cfg_mgr=cfg_mgr;
  /* Move the cookie under the tids context so its lifetime matches tids. */
  talloc_steal(tids, cookie);

  /* get a tids listener */
  tids_ev->n_sock_fd = (int)tids_get_listener(tids,
                                              cfg_mgr->active->internal->hostname,
                                              cfg_mgr->active->internal->tids_port,
  /* Zero listener sockets means the open failed. */
  if (tids_ev->n_sock_fd==0) {
    tr_crit("Error opening TID server socket.");

  /* Set up listener events */
  for (ii=0; ii<tids_ev->n_sock_fd; ii++) {
    tids_ev->ev[ii]=event_new(base,
                              tids_ev->sock_fd[ii],
    event_add(tids_ev->ev[ii], NULL);

  /* Set up a periodic check for completed TID handler processes */
  *sweep_ev = event_new(base, -1, EV_TIMEOUT|EV_PERSIST, tr_tids_sweep_cb, tids);
  sweep_interval.tv_sec = 10;
  sweep_interval.tv_usec = 0;
  event_add(*sweep_ev, &sweep_interval);

  talloc_free(tmp_ctx);