#include <talloc.h>

#include <errno.h>
#include <unistd.h>
#include <sys/time.h>

#include <gsscon.h>
#include <tr_rp.h>
trps->cookie=NULL;
trps->conn=NULL;
trps->trpc=NULL;
+ trps->update_interval=(struct timeval){0,0};
+ trps->sweep_interval=(struct timeval){0,0};
trps->mq=tr_mq_new(trps);
if (trps->mq==NULL) {
tr_mq_append(trps->mq, msg);
}
-#if 0
-static TRP_CONNECTION *trps_find_conn(TRPS_INSTANCE *trps, TR_NAME *peer_gssname)
+unsigned int trps_get_connect_interval(TRPS_INSTANCE *trps)
{
- TRP_CONNECTION *cur=NULL;
- for (cur=trps->conn; cur!=NULL; cur=trp_connection_get_next(cur)) {
- if (0==tr_name_cmp(peer_gssname, trp_connection_get_gssname(cur)))
+ return trps->connect_interval.tv_sec;
+}
+
+void trps_set_connect_interval(TRPS_INSTANCE *trps, unsigned int interval)
+{
+ trps->connect_interval.tv_sec=interval;
+ trps->connect_interval.tv_usec=0;
+}
+
+unsigned int trps_get_update_interval(TRPS_INSTANCE *trps)
+{
+ return trps->update_interval.tv_sec;
+}
+
+void trps_set_update_interval(TRPS_INSTANCE *trps, unsigned int interval)
+{
+ trps->update_interval.tv_sec=interval;
+ trps->update_interval.tv_usec=0;
+}
+
+unsigned int trps_get_sweep_interval(TRPS_INSTANCE *trps)
+{
+ return trps->sweep_interval.tv_sec;
+}
+
+void trps_set_sweep_interval(TRPS_INSTANCE *trps, unsigned int interval)
+{
+ trps->sweep_interval.tv_sec=interval;
+ trps->sweep_interval.tv_usec=0;
+}
+
+TRPC_INSTANCE *trps_find_trpc(TRPS_INSTANCE *trps, TRP_PEER *peer)
+{
+ TRPC_INSTANCE *cur=NULL;
+ TR_NAME *name=NULL;
+ TR_NAME *peer_gssname=trp_peer_get_gssname(peer);
+
+ for (cur=trps->trpc; cur!=NULL; cur=trpc_get_next(cur)) {
+ name=trpc_get_gssname(cur);
+ if ((name!=NULL) && (0==tr_name_cmp(peer_gssname, name))) {
break;
+ }
}
+ tr_free_name(peer_gssname);
return cur;
}
-#endif
/* Register a connection with this TRPS instance.
 * NOTE(review): this chunk is elided -- the single statement visible below
 * (a trpc_remove() call referencing an undeclared 'remove') belongs to a
 * different removal helper, not to trps_add_connection. Confirm the real
 * body against the full source before relying on this listing. */
void trps_add_connection(TRPS_INSTANCE *trps, TRP_CONNECTION *new)
{
  trps->trpc=trpc_remove(trps->trpc, remove);
}
-TRP_RC trps_send_msg (TRPS_INSTANCE *trps, void *peer, const char *msg)
+TRP_RC trps_send_msg(TRPS_INSTANCE *trps, TRP_PEER *peer, const char *msg)
{
TALLOC_CTX *tmp_ctx=talloc_new(NULL);
TR_MQ_MSG *mq_msg=NULL;
char *msg_dup=NULL;
TRP_RC rc=TRP_ERROR;
-
- /* Currently ignore peer and just send to an open connection.
- * In reality, need to identify the correct peer and send via that
- * one. */
- if (trps->trpc != NULL) {
- if (trpc_get_status(trps->trpc)!=TRP_CONNECTION_UP)
- tr_debug("trps_send_msg: skipping message sent while TRPC connection not up.");
- else {
- mq_msg=tr_mq_msg_new(tmp_ctx, "trpc_send");
- msg_dup=talloc_strdup(mq_msg, msg); /* get local copy in mq_msg context */
- tr_mq_msg_set_payload(mq_msg, msg_dup, NULL); /* no need for a free() func */
- trpc_mq_append(trps->trpc, mq_msg);
- rc=TRP_SUCCESS;
- }
+ TRPC_INSTANCE *trpc=NULL;
+
+ /* get the connection for this peer */
+ trpc=trps_find_trpc(trps, peer);
+ if ((trpc==NULL) || (trpc_get_status(trps->trpc)!=TRP_CONNECTION_UP)) {
+ /* We could just let these sit on the queue in the hopes that a connection
+ * is eventually established. However, we'd then have to ensure the queue
+ * didn't keep growing, etc. */
+ tr_warning("trps_send_msg: skipping message queued while TRPC connection not up.");
+ } else {
+ mq_msg=tr_mq_msg_new(tmp_ctx, "trpc_send");
+ msg_dup=talloc_strdup(mq_msg, msg); /* get local copy in mq_msg context */
+ tr_mq_msg_set_payload(mq_msg, msg_dup, NULL); /* no need for a free() func */
+ trpc_mq_append(trpc, mq_msg);
+ rc=TRP_SUCCESS;
}
talloc_free(tmp_ctx);
return rc;
}
/* check for valid metric */
/* NOTE(review): elided diff-hunk fragment from the middle of an inforec
 * validation routine; '-' lines are the pre-patch code. The patch corrects
 * an inverted test (is_valid -> is_invalid, the old code rejected *valid*
 * metrics) and adds the offending value to the debug message. */
-  if (trp_metric_is_valid(trp_inforec_get_metric(rec))) {
-    tr_debug("trps_validate_inforec: invalid metric.");
+  if (trp_metric_is_invalid(trp_inforec_get_metric(rec))) {
+    tr_debug("trps_validate_inforec: invalid metric (%u).", trp_inforec_get_metric(rec));
    return TRP_ERROR;
  }
}
/* choose the best route to comm/realm, optionally excluding routes to a particular peer */
-static TRP_RENTRY *trps_find_best_route(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME *realm, TRP_PEER *exclude_peer)
+static TRP_RENTRY *trps_find_best_route(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME *realm, TR_NAME *exclude_peer)
{
TRP_RENTRY **entry=NULL;
TRP_RENTRY *best=NULL;
for (kk=0; kk<n_entry; kk++) {
if (trp_rentry_get_metric(entry[kk]) < min_metric) {
if ((exclude_peer==NULL) || (0!=tr_name_cmp(trp_rentry_get_peer(entry[kk]),
- trp_peer_get_gssname(exclude_peer)))) {
+ exclude_peer))) {
kk_min=kk;
min_metric=trp_rentry_get_metric(entry[kk]);
}
for (ii=0; ii<n_apc; ii++) {
realm=trp_rtable_get_apc_realms(trps->rtable, apc[ii], &n_realm);
for (jj=0; jj<n_realm; jj++) {
- best_route=trps_find_best_route(trps, apc[ii], realm[jj]);
+ best_route=trps_find_best_route(trps, apc[ii], realm[jj], NULL);
if (best_route==NULL)
- min_metric=TRP_METRIC_INFINITY;
+ best_metric=TRP_METRIC_INFINITY;
else
- min_metric=trp_rentry_get_metric(best_route);
+ best_metric=trp_rentry_get_metric(best_route);
- cur_route=trps_get_selected_route(trps, apc[ii], realm[jj], NULL);
+ cur_route=trps_get_selected_route(trps, apc[ii], realm[jj]);
if (cur_route!=NULL) {
cur_metric=trp_rentry_get_metric(cur_route);
- if ((min_metric < cur_metric) && (trp_metric_is_finite(min_metric))) {
+ if ((best_metric < cur_metric) && (trp_metric_is_finite(best_metric))) {
trp_rentry_set_selected(cur_route, 0);
trp_rentry_set_selected(best_route, 1);
} else if (!trp_metric_is_finite(cur_metric)) /* rejects infinite or invalid metrics */
trp_rentry_set_selected(cur_route, 0);
- } else if (trp_metric_is_finite(min_metric))
+ } else if (trp_metric_is_finite(best_metric))
trp_rentry_set_selected(best_route, 1);
}
if (realm!=NULL)
}
/* select the correct route to comm/realm to be announced to peer */
/* NOTE(review): diff-hunk fragment; the function tail (presumably
 * 'return route;' and the closing brace) is elided from this chunk.
 * Two unguarded dereferences to confirm against the full source:
 * (1) trp_rtable_get_selected_entry() could return NULL, making
 *     trp_rentry_get_peer(route) a NULL dereference;
 * (2) trps_find_best_route() may return NULL, then
 *     trp_rentry_get_metric(route) dereferences NULL. */
-static TRP_RENTRY trps_select_realm_update(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME realm, TRP_PEER *peer)
+static TRP_RENTRY *trps_select_realm_update(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME *realm, TR_NAME *peer_gssname)
{
  TRP_RENTRY *route;
  /* Take the currently selected route unless it is through the peer we're sending the update to.
   * I.e., enforce the split horizon rule. */
  route=trp_rtable_get_selected_entry(trps->rtable, comm, realm);
-  if (0==tr_name_cmp(trp_peer_get_gssname(peer), trp_rentry_get_peer(route))) {
+  if (0==tr_name_cmp(peer_gssname, trp_rentry_get_peer(route))) {
    /* the selected entry goes through the peer we're reporting to, choose an alternate */
-    route=trps_find_best_route(trps, comm, realm, peer);
+    route=trps_find_best_route(trps, comm, realm, peer_gssname);
    if (!trp_metric_is_finite(trp_rentry_get_metric(route)))
      route=NULL; /* don't advertise a retracted route */
  }
/* returns an array of pointers to updates (*not* an array of updates). Returns number of entries
 * via n_update parameter. (The allocated space will generally be larger than required, see note in
 * the code.) */
/* NOTE(review): heavily elided diff-hunk fragment -- several declarations
 * (result, realm, n_realm, ii, jj, n_used, best) and part of a comment are
 * missing from this chunk. The patch fixes the element type of the
 * talloc_array (TRP_RENTRY * instead of TRP_RENTRY), zeroes *n_update on
 * allocation failure, and repairs a stray '{' after the
 * trp_rtable_get_apc_realms() call. 'apc' appears to leak on the success
 * path (freed only on allocation failure) -- confirm against full source. */
-TRP_RENTRY **trps_select_updates_for_peer(TALLOC_CTX *memctx, TRPS_INSTANCE *trps, TRP_PEER *peer, size_t *n_update)
+static TRP_RENTRY **trps_select_updates_for_peer(TALLOC_CTX *memctx, TRPS_INSTANCE *trps, TR_NAME *peer_gssname, size_t *n_update)
{
  size_t n_apc=0;
  TR_NAME **apc=trp_rtable_get_apcs(trps->rtable, &n_apc);
 * with space for every route table entry to be returned. This is guaranteed to be large
 * enough. If the routing table gets very large, this may be wasteful, but that seems
 * unlikely to be significant in the near future. */
-  result=talloc_array(memctx, TRP_RENTRY, trp_rtable_size(trps->rtable));
+  result=talloc_array(memctx, TRP_RENTRY *, trp_rtable_size(trps->rtable));
  if (result==NULL) {
    talloc_free(apc);
+    *n_update=0;
    return NULL;
  }
  for (ii=0; ii<n_apc; ii++) {
-    realm=trp_rtable_get_apc_realms(trps->rtable, apc[ii], &n_realm) {
-    for (jj=0; jj<n_realm; jj++) {
-      best=trps_select_realm_update(trps, apc[ii], realm[jj], peer);
-      if (best!=NULL)
-        result[n_used++]=best;
-    }
+    realm=trp_rtable_get_apc_realms(trps->rtable, apc[ii], &n_realm);
+    for (jj=0; jj<n_realm; jj++) {
+      best=trps_select_realm_update(trps, apc[ii], realm[jj], peer_gssname);
+      if (best!=NULL)
+        result[n_used++]=best;
    }
  if (realm!=NULL)
    talloc_free(realm);
  return result;
}
+/* convert an rentry into a new trp update info record */
+static TRP_INFOREC *trps_rentry_to_inforec(TALLOC_CTX *mem_ctx, TRPS_INSTANCE *trps, TRP_RENTRY *entry)
+{
+ TRP_INFOREC *rec=trp_inforec_new(mem_ctx, TRP_INFOREC_TYPE_ROUTE);
+ unsigned int linkcost=0;
+
+ if (rec!=NULL) {
+ linkcost=trp_peer_get_linkcost(trps_get_peer(trps,
+ trp_rentry_get_next_hop(entry)));
+
+ /* Note that we leave the next hop empty since the recipient fills that in.
+ * This is where we add the link cost (currently always 1) to the next peer. */
+ if ((trp_inforec_set_comm(rec, trp_rentry_dup_apc(entry)) != TRP_SUCCESS)
+ ||(trp_inforec_set_realm(rec, trp_rentry_dup_realm(entry)) != TRP_SUCCESS)
+ ||(trp_inforec_set_trust_router(rec, trp_rentry_dup_trust_router(entry)) != TRP_SUCCESS)
+ ||(trp_inforec_set_metric(rec, trp_rentry_get_metric(entry)+linkcost) != TRP_SUCCESS)
+ ||(trp_inforec_set_interval(rec, trps_get_update_interval(trps)) != TRP_SUCCESS)) {
+ tr_err("trps_rentry_to_inforec: error creating route update.");
+ talloc_free(rec);
+ rec=NULL;
+ }
+ }
+ return rec;
+}
+
+TRP_RC trps_scheduled_update(TRPS_INSTANCE *trps)
+{
+ TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+ TRP_PTABLE_ITER *iter=trp_ptable_iter_new(tmp_ctx);
+ TRP_PEER *peer=NULL;
+ TR_MSG msg; /* not a pointer! */
+ TRP_UPD *upd=NULL;
+ TRP_RENTRY **update_list=NULL;
+ TRP_INFOREC *rec=NULL;
+ size_t n_updates=0, ii=0;
+ char *encoded=NULL;
+ TRP_RC rc=TRP_ERROR;
+ TR_NAME *peer_gssname=NULL;
+
+ if (iter==NULL) {
+ tr_err("trps_scheduled_update: failed to allocate peer table iterator.");
+ talloc_free(tmp_ctx);
+ return TRP_NOMEM;
+ }
+
+ for (peer=trp_ptable_iter_first(iter, trps->ptable);
+ peer!=NULL;
+ peer=trp_ptable_iter_next(iter))
+ {
+ peer_gssname=trp_peer_get_gssname(peer);
+ tr_debug("trps_scheduled_update: preparing scheduled route update for %.*s",
+ peer_gssname->len, peer_gssname->buf);
+ /* do not fill in peer, recipient does that */
+ update_list=trps_select_updates_for_peer(tmp_ctx, trps, peer_gssname, &n_updates);
+ tr_free_name(peer_gssname); peer_gssname=NULL;
+ if ((n_updates>0) && (update_list!=NULL)) {
+ tr_debug("trps_scheduled_update: sending %u update records.", (unsigned int)n_updates);
+ upd=trp_upd_new(tmp_ctx);
+
+ for (ii=0; ii<n_updates; ii++) {
+ rec=trps_rentry_to_inforec(tmp_ctx, trps, update_list[ii]);
+ if (rec==NULL) {
+ tr_err("trps_scheduled_update: could not create all update records.");
+ rc=TRP_ERROR;
+ goto cleanup;
+ }
+ trp_upd_add_inforec(upd, rec);
+ }
+ talloc_free(update_list);
+ update_list=NULL;
+
+ /* now encode the update message */
+ tr_msg_set_trp_upd(&msg, upd);
+ encoded=tr_msg_encode(&msg);
+ if (encoded==NULL) {
+ tr_err("trps_scheduled_update: error encoding update.");
+ rc=TRP_ERROR;
+ goto cleanup;
+ }
+
+ tr_debug("trps_scheduled_update: adding message to queue.");
+ if (trps_send_msg(trps, peer, encoded) != TRP_SUCCESS)
+ tr_err("trps_scheduled_update: error queueing update.");
+ else
+ tr_debug("trps_scheduled_update: update queued successfully.");
+
+ encoded=NULL;
+ tr_msg_free_encoded(encoded);
+ trp_upd_free(upd);
+ upd=NULL;
+ }
+ }
+
+cleanup:
+ trp_ptable_iter_free(iter);
+ talloc_free(tmp_ctx);
+ return rc;
+}
+
/* Add peer to this instance's peer table; returns trp_ptable_add()'s result
 * unchanged. Ownership of peer after this call follows trp_ptable_add's
 * convention -- NOTE(review): confirm whether the table takes ownership
 * before freeing peer at the caller. */
TRP_RC trps_add_peer(TRPS_INSTANCE *trps, TRP_PEER *peer)
{
  return trp_ptable_add(trps->ptable, peer);
}
+
+TRP_PEER *trps_get_peer(TRPS_INSTANCE *trps, TR_NAME *gssname)
+{
+ return trp_ptable_find(trps->ptable, gssname);
+}