#include <talloc.h>
#include <errno.h>
#include <unistd.h>
+#include <sys/time.h>
#include <gsscon.h>
#include <tr_rp.h>
#include <trust_router/tr_name.h>
#include <trp_internal.h>
+#include <trp_ptable.h>
#include <trp_rtable.h>
#include <tr_debug.h>
trps->cookie=NULL;
trps->conn=NULL;
trps->trpc=NULL;
+ trps->update_interval=(struct timeval){0,0};
+ trps->sweep_interval=(struct timeval){0,0};
+
trps->mq=tr_mq_new(trps);
if (trps->mq==NULL) {
/* failed to allocate mq */
talloc_free(trps);
- trps=NULL;
- } else {
- trps->rtable=trp_rtable_new();
- if (trps->rtable==NULL) {
- /* failed to allocate rtable */
- talloc_free(trps);
- trps=NULL;
- } else
- talloc_set_destructor((void *)trps, trps_destructor);
+ return NULL;
+ }
+
+ trps->ptable=trp_ptable_new(trps);
+ if (trps->ptable==NULL) {
+ /* failed to allocate ptable */
+ talloc_free(trps);
+ return NULL;
+ }
+
+ trps->rtable=trp_rtable_new();
+ if (trps->rtable==NULL) {
+ /* failed to allocate rtable */
+ talloc_free(trps);
+ return NULL;
}
+
+ talloc_set_destructor((void *)trps, trps_destructor);
}
return trps;
}
tr_mq_append(trps->mq, msg);
}
-/* stand-in for a function that finds the connection for a particular peer */
-#if 0
-static TRP_CONNECTION *trps_find_connection(TRPS_INSTANCE *trps)
+unsigned int trps_get_connect_interval(TRPS_INSTANCE *trps)
+{
+ return trps->connect_interval.tv_sec;
+}
+
+void trps_set_connect_interval(TRPS_INSTANCE *trps, unsigned int interval)
+{
+ trps->connect_interval.tv_sec=interval;
+ trps->connect_interval.tv_usec=0;
+}
+
+unsigned int trps_get_update_interval(TRPS_INSTANCE *trps)
+{
+ return trps->update_interval.tv_sec;
+}
+
+void trps_set_update_interval(TRPS_INSTANCE *trps, unsigned int interval)
+{
+ trps->update_interval.tv_sec=interval;
+ trps->update_interval.tv_usec=0;
+}
+
+unsigned int trps_get_sweep_interval(TRPS_INSTANCE *trps)
{
- return trps->conn;
+ return trps->sweep_interval.tv_sec;
+}
+
+void trps_set_sweep_interval(TRPS_INSTANCE *trps, unsigned int interval)
+{
+ trps->sweep_interval.tv_sec=interval;
+ trps->sweep_interval.tv_usec=0;
+}
+
+TRPC_INSTANCE *trps_find_trpc(TRPS_INSTANCE *trps, TRP_PEER *peer)
+{
+ TRPC_INSTANCE *cur=NULL;
+ TR_NAME *name=NULL;
+ TR_NAME *peer_gssname=trp_peer_get_gssname(peer);
+
+ for (cur=trps->trpc; cur!=NULL; cur=trpc_get_next(cur)) {
+ name=trpc_get_gssname(cur);
+ if ((name!=NULL) && (0==tr_name_cmp(peer_gssname, name))) {
+ break;
+ }
+ }
+ tr_free_name(peer_gssname);
+ return cur;
}
-#endif
void trps_add_connection(TRPS_INSTANCE *trps, TRP_CONNECTION *new)
{
trps->trpc=trpc_remove(trps->trpc, remove);
}
-TRP_RC trps_send_msg (TRPS_INSTANCE *trps, void *peer, const char *msg)
+TRP_RC trps_send_msg(TRPS_INSTANCE *trps, TRP_PEER *peer, const char *msg)
{
TALLOC_CTX *tmp_ctx=talloc_new(NULL);
TR_MQ_MSG *mq_msg=NULL;
char *msg_dup=NULL;
TRP_RC rc=TRP_ERROR;
-
- /* Currently ignore peer and just send to an open connection.
- * In reality, need to identify the correct peer and send via that
- * one. */
- if (trps->trpc != NULL) {
- if (trpc_get_status(trps->trpc)!=TRP_CONNECTION_UP)
- tr_debug("trps_send_msg: skipping message sent while TRPC connection not up.");
- else {
- mq_msg=tr_mq_msg_new(tmp_ctx, "trpc_send");
- msg_dup=talloc_strdup(mq_msg, msg); /* get local copy in mq_msg context */
- tr_mq_msg_set_payload(mq_msg, msg_dup, NULL); /* no need for a free() func */
- trpc_mq_append(trps->trpc, mq_msg);
- rc=TRP_SUCCESS;
- }
+ TRPC_INSTANCE *trpc=NULL;
+
+ /* get the connection for this peer */
+ trpc=trps_find_trpc(trps, peer);
+ if ((trpc==NULL) || (trpc_get_status(trps->trpc)!=TRP_CONNECTION_UP)) {
+ /* We could just let these sit on the queue in the hopes that a connection
+ * is eventually established. However, we'd then have to ensure the queue
+ * didn't keep growing, etc. */
+ tr_warning("trps_send_msg: skipping message queued while TRPC connection not up.");
+ } else {
+ mq_msg=tr_mq_msg_new(tmp_ctx, "trpc_send");
+ msg_dup=talloc_strdup(mq_msg, msg); /* get local copy in mq_msg context */
+ tr_mq_msg_set_payload(mq_msg, msg_dup, NULL); /* no need for a free() func */
+ trpc_mq_append(trpc, mq_msg);
+ rc=TRP_SUCCESS;
}
talloc_free(tmp_ctx);
return rc;
/* A route is considered retracted when its metric is infinite. */
static int trps_route_retracted(TRPS_INSTANCE *trps, TRP_RENTRY *entry)
{
  int is_inf=trp_metric_is_infinite(trp_rentry_get_metric(entry));
  return is_inf;
}
static TRP_RC trps_read_message(TRPS_INSTANCE *trps, TRP_CONNECTION *conn, TR_MSG **msg)
}
/* check for valid metric */
- if ((trp_inforec_get_metric(rec)==TRP_METRIC_INVALID)
- || (trp_inforec_get_metric(rec)>TRP_METRIC_INFINITY)) {
- tr_debug("trps_validate_inforec: invalid metric.");
+ if (trp_metric_is_invalid(trp_inforec_get_metric(rec))) {
+ tr_debug("trps_validate_inforec: invalid metric (%u).", trp_inforec_get_metric(rec));
return TRP_ERROR;
}
TR_NAME *next_hop=NULL;
/* we check these in the validation stage, but just in case... */
- if ((rec_metric==TRP_METRIC_INVALID) || (rec_metric>TRP_METRIC_INFINITY))
+ if (trp_metric_is_invalid(rec_metric))
return 0;
/* retractions (aka infinite metrics) are always feasible */
- if (rec_metric==TRP_METRIC_INFINITY)
+ if (trp_metric_is_infinite(rec_metric))
return 1;
/* updates from our current next hop are always feasible*/
* time unset on a new route entry. */
tr_debug("trps_accept_update: accepting route update.");
trp_rentry_set_metric(entry, trp_inforec_get_metric(rec));
+ trp_rentry_set_interval(entry, trp_inforec_get_interval(rec));
if (!trps_route_retracted(trps, entry)) {
tr_debug("trps_accept_update: route not retracted, setting expiry timer.");
trp_rentry_set_expiry(entry, trps_compute_expiry(trps,
- trp_inforec_get_interval(rec),
+ trp_rentry_get_interval(entry),
trp_rentry_get_expiry(entry)));
}
return TRP_SUCCESS;
} else {
/* No existing route table entry. Ignore it unless it is feasible and not a retraction. */
tr_debug("trps_handle_update: no route entry exists yet.");
- if (feas && (trp_inforec_get_metric(rec) != TRP_METRIC_INFINITY))
+ if (feas && trp_metric_is_finite(trp_inforec_get_metric(rec)))
trps_accept_update(trps, rec);
}
}
return TRP_SUCCESS;
}
+/* choose the best route to comm/realm, optionally excluding routes to a particular peer */
+static TRP_RENTRY *trps_find_best_route(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME *realm, TR_NAME *exclude_peer)
+{
+ TRP_RENTRY **entry=NULL;
+ TRP_RENTRY *best=NULL;
+ size_t n_entry=0;
+ unsigned int kk=0;
+ unsigned int kk_min=0;
+ unsigned int min_metric=TRP_METRIC_INFINITY;
+
+ entry=trp_rtable_get_realm_entries(trps->rtable, comm, realm, &n_entry);
+ for (kk=0; kk<n_entry; kk++) {
+ if (trp_rentry_get_metric(entry[kk]) < min_metric) {
+ if ((exclude_peer==NULL) || (0!=tr_name_cmp(trp_rentry_get_peer(entry[kk]),
+ exclude_peer))) {
+ kk_min=kk;
+ min_metric=trp_rentry_get_metric(entry[kk]);
+ }
+ }
+ }
+ if (trp_metric_is_finite(min_metric));
+ best=entry[kk_min];
+
+ talloc_free(entry);
+ return best;
+}
+
+/* TODO: think this through more carefully. At least ought to add hysteresis
+ * to avoid flapping between routers or routes. */
+static TRP_RC trps_update_active_routes(TRPS_INSTANCE *trps)
+{
+ size_t n_apc=0, ii=0;
+ TR_NAME **apc=trp_rtable_get_apcs(trps->rtable, &n_apc);
+ size_t n_realm=0, jj=0;
+ TR_NAME **realm=NULL;
+ TRP_RENTRY *best_route=NULL, *cur_route=NULL;
+ unsigned int best_metric=0, cur_metric=0;
+
+ for (ii=0; ii<n_apc; ii++) {
+ realm=trp_rtable_get_apc_realms(trps->rtable, apc[ii], &n_realm);
+ for (jj=0; jj<n_realm; jj++) {
+ best_route=trps_find_best_route(trps, apc[ii], realm[jj], NULL);
+ if (best_route==NULL)
+ best_metric=TRP_METRIC_INFINITY;
+ else
+ best_metric=trp_rentry_get_metric(best_route);
+
+ cur_route=trps_get_selected_route(trps, apc[ii], realm[jj]);
+ if (cur_route!=NULL) {
+ cur_metric=trp_rentry_get_metric(cur_route);
+ if ((best_metric < cur_metric) && (trp_metric_is_finite(best_metric))) {
+ trp_rentry_set_selected(cur_route, 0);
+ trp_rentry_set_selected(best_route, 1);
+ } else if (!trp_metric_is_finite(cur_metric)) /* rejects infinite or invalid metrics */
+ trp_rentry_set_selected(cur_route, 0);
+ } else if (trp_metric_is_finite(best_metric))
+ trp_rentry_set_selected(best_route, 1);
+ }
+ if (realm!=NULL)
+ talloc_free(realm);
+ realm=NULL; n_realm=0;
+ }
+ if (apc!=NULL)
+ talloc_free(apc);
+ apc=NULL; n_apc=0;
+
+ return TRP_SUCCESS;
+}
+
TRP_RC trps_handle_tr_msg(TRPS_INSTANCE *trps, TR_MSG *tr_msg)
{
+ TRP_RC rc=TRP_ERROR;
+
switch (tr_msg_get_msg_type(tr_msg)) {
case TRP_UPDATE:
- return trps_handle_update(trps, tr_msg_get_trp_upd(tr_msg));
+ rc=trps_handle_update(trps, tr_msg_get_trp_upd(tr_msg));
+ if (rc==TRP_SUCCESS) {
+ rc=trps_update_active_routes(trps);
+ }
+ return rc;
case TRP_REQUEST:
return TRP_UNSUPPORTED;
return TRP_ERROR;
}
}
+
/* Returns nonzero (true) if curtime >= expiry, i.e. the expiry time has been
 * reached or passed. */
static int trps_expired(struct timespec *expiry, struct timespec *curtime)
{
  /* Bug fix: compare nanoseconds with >= so that an exactly-equal time counts
   * as expired, matching the documented "curtime >= expiry" contract (the
   * original used > and missed the boundary case). */
  return ((curtime->tv_sec > expiry->tv_sec)
         || ((curtime->tv_sec == expiry->tv_sec)
            &&(curtime->tv_nsec >= expiry->tv_nsec)));
}
+
+/* Sweep for expired routes. For each expired route, if its metric is infinite, the route is flushed.
+ * If its metric is finite, the metric is set to infinite and the route's expiration time is updated. */
+TRP_RC trps_sweep_routes(TRPS_INSTANCE *trps)
+{
+ struct timespec sweep_time={0,0};
+ TRP_RENTRY **entry=NULL;
+ size_t n_entry=0;
+ size_t ii=0;
+
+ /* use a single time for the entire sweep */
+ if (0!=clock_gettime(CLOCK_REALTIME, &sweep_time)) {
+ tr_err("trps_sweep_routes: could not read realtime clock.");
+ sweep_time.tv_sec=0;
+ sweep_time.tv_nsec=0;
+ return TRP_ERROR;
+ }
+
+ entry=trp_rtable_get_entries(trps->rtable, &n_entry); /* must talloc_free *entry */
+
+ /* loop over the entries */
+ for (ii=0; ii<n_entry; ii++) {
+ if (trps_expired(trp_rentry_get_expiry(entry[ii]), &sweep_time)) {
+ tr_debug("trps_sweep_routes: route expired.");
+ if (!trp_metric_is_finite(trp_rentry_get_metric(entry[ii]))) {
+ /* flush route */
+ tr_debug("trps_sweep_routes: metric was infinity, flushing route.");
+ trp_rtable_remove(trps->rtable, entry[ii]); /* entry[ii] is no longer valid */
+ entry[ii]=NULL;
+ } else {
+ /* set metric to infinity and reset timer */
+ tr_debug("trps_sweep_routes: setting metric to infinity and resetting expiry.");
+ trp_rentry_set_metric(entry[ii], TRP_METRIC_INFINITY);
+ trp_rentry_set_expiry(entry[ii], trps_compute_expiry(trps,
+ trp_rentry_get_interval(entry[ii]),
+ trp_rentry_get_expiry(entry[ii])));
+ }
+ }
+ }
+
+ talloc_free(entry);
+ return TRP_SUCCESS;
+}
+
+/* select the correct route to comm/realm to be announced to peer */
+static TRP_RENTRY *trps_select_realm_update(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME *realm, TR_NAME *peer_gssname)
+{
+ TRP_RENTRY *route;
+
+ /* Take the currently selected route unless it is through the peer we're sending the update to.
+ * I.e., enforce the split horizon rule. */
+ route=trp_rtable_get_selected_entry(trps->rtable, comm, realm);
+ if (0==tr_name_cmp(peer_gssname, trp_rentry_get_peer(route))) {
+ /* the selected entry goes through the peer we're reporting to, choose an alternate */
+ route=trps_find_best_route(trps, comm, realm, peer_gssname);
+ if (!trp_metric_is_finite(trp_rentry_get_metric(route)))
+ route=NULL; /* don't advertise a retracted route */
+ }
+ return route;
+}
+
+/* returns an array of pointers to updates (*not* an array of updates). Returns number of entries
+ * via n_update parameter. (The allocated space will generally be larger than required, see note in
+ * the code.) */
+static TRP_RENTRY **trps_select_updates_for_peer(TALLOC_CTX *memctx, TRPS_INSTANCE *trps, TR_NAME *peer_gssname, size_t *n_update)
+{
+ size_t n_apc=0;
+ TR_NAME **apc=trp_rtable_get_apcs(trps->rtable, &n_apc);
+ TR_NAME **realm=NULL;
+ size_t n_realm=0;
+ size_t ii=0, jj=0;
+ TRP_RENTRY *best=NULL;
+ TRP_RENTRY **result=NULL;
+ size_t n_used=0;
+
+ /* Need to allocate space for the results. For simplicity, we just allocate a block
+ * with space for every route table entry to be returned. This is guaranteed to be large
+ * enough. If the routing table gets very large, this may be wasteful, but that seems
+ * unlikely to be significant in the near future. */
+ result=talloc_array(memctx, TRP_RENTRY *, trp_rtable_size(trps->rtable));
+ if (result==NULL) {
+ talloc_free(apc);
+ *n_update=0;
+ return NULL;
+ }
+
+ for (ii=0; ii<n_apc; ii++) {
+ realm=trp_rtable_get_apc_realms(trps->rtable, apc[ii], &n_realm);
+ for (jj=0; jj<n_realm; jj++) {
+ best=trps_select_realm_update(trps, apc[ii], realm[jj], peer_gssname);
+ if (best!=NULL)
+ result[n_used++]=best;
+ }
+ if (realm!=NULL)
+ talloc_free(realm);
+ realm=NULL;
+ n_realm=0;
+ }
+ if (apc!=NULL)
+ talloc_free(apc);
+
+ *n_update=n_used;
+ return result;
+}
+
+/* convert an rentry into a new trp update info record */
+static TRP_INFOREC *trps_rentry_to_inforec(TALLOC_CTX *mem_ctx, TRPS_INSTANCE *trps, TRP_RENTRY *entry)
+{
+ TRP_INFOREC *rec=trp_inforec_new(mem_ctx, TRP_INFOREC_TYPE_ROUTE);
+ unsigned int linkcost=0;
+
+ if (rec!=NULL) {
+ linkcost=trp_peer_get_linkcost(trps_get_peer(trps,
+ trp_rentry_get_next_hop(entry)));
+
+ /* Note that we leave the next hop empty since the recipient fills that in.
+ * This is where we add the link cost (currently always 1) to the next peer. */
+ if ((trp_inforec_set_comm(rec, trp_rentry_dup_apc(entry)) != TRP_SUCCESS)
+ ||(trp_inforec_set_realm(rec, trp_rentry_dup_realm(entry)) != TRP_SUCCESS)
+ ||(trp_inforec_set_trust_router(rec, trp_rentry_dup_trust_router(entry)) != TRP_SUCCESS)
+ ||(trp_inforec_set_metric(rec, trp_rentry_get_metric(entry)+linkcost) != TRP_SUCCESS)
+ ||(trp_inforec_set_interval(rec, trps_get_update_interval(trps)) != TRP_SUCCESS)) {
+ tr_err("trps_rentry_to_inforec: error creating route update.");
+ talloc_free(rec);
+ rec=NULL;
+ }
+ }
+ return rec;
+}
+
+TRP_RC trps_scheduled_update(TRPS_INSTANCE *trps)
+{
+ TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+ TRP_PTABLE_ITER *iter=trp_ptable_iter_new(tmp_ctx);
+ TRP_PEER *peer=NULL;
+ TR_MSG msg; /* not a pointer! */
+ TRP_UPD *upd=NULL;
+ TRP_RENTRY **update_list=NULL;
+ TRP_INFOREC *rec=NULL;
+ size_t n_updates=0, ii=0;
+ char *encoded=NULL;
+ TRP_RC rc=TRP_ERROR;
+ TR_NAME *peer_gssname=NULL;
+
+ if (iter==NULL) {
+ tr_err("trps_scheduled_update: failed to allocate peer table iterator.");
+ talloc_free(tmp_ctx);
+ return TRP_NOMEM;
+ }
+
+ for (peer=trp_ptable_iter_first(iter, trps->ptable);
+ peer!=NULL;
+ peer=trp_ptable_iter_next(iter))
+ {
+ peer_gssname=trp_peer_get_gssname(peer);
+ tr_debug("trps_scheduled_update: preparing scheduled route update for %.*s",
+ peer_gssname->len, peer_gssname->buf);
+ /* do not fill in peer, recipient does that */
+ update_list=trps_select_updates_for_peer(tmp_ctx, trps, peer_gssname, &n_updates);
+ tr_free_name(peer_gssname); peer_gssname=NULL;
+ if ((n_updates>0) && (update_list!=NULL)) {
+ tr_debug("trps_scheduled_update: sending %u update records.", (unsigned int)n_updates);
+ upd=trp_upd_new(tmp_ctx);
+
+ for (ii=0; ii<n_updates; ii++) {
+ rec=trps_rentry_to_inforec(tmp_ctx, trps, update_list[ii]);
+ if (rec==NULL) {
+ tr_err("trps_scheduled_update: could not create all update records.");
+ rc=TRP_ERROR;
+ goto cleanup;
+ }
+ trp_upd_add_inforec(upd, rec);
+ }
+ talloc_free(update_list);
+ update_list=NULL;
+
+ /* now encode the update message */
+ tr_msg_set_trp_upd(&msg, upd);
+ encoded=tr_msg_encode(&msg);
+ if (encoded==NULL) {
+ tr_err("trps_scheduled_update: error encoding update.");
+ rc=TRP_ERROR;
+ goto cleanup;
+ }
+
+ tr_debug("trps_scheduled_update: adding message to queue.");
+ if (trps_send_msg(trps, peer, encoded) != TRP_SUCCESS)
+ tr_err("trps_scheduled_update: error queueing update.");
+ else
+ tr_debug("trps_scheduled_update: update queued successfully.");
+
+ encoded=NULL;
+ tr_msg_free_encoded(encoded);
+ trp_upd_free(upd);
+ upd=NULL;
+ }
+ }
+
+cleanup:
+ trp_ptable_iter_free(iter);
+ talloc_free(tmp_ctx);
+ return rc;
+}
+
+TRP_RC trps_add_peer(TRPS_INSTANCE *trps, TRP_PEER *peer)
+{
+ return trp_ptable_add(trps->ptable, peer);
+}
+
+TRP_PEER *trps_get_peer(TRPS_INSTANCE *trps, TR_NAME *gssname)
+{
+ return trp_ptable_find(trps->ptable, gssname);
+}