+ return rc;
+}
+
+/**
+ * Apply applicable TRP_INBOUND filters to an inforec. Rejects everything if peer has no filters.
+ *
+ * The sending peer is identified by the GSS name attached to the update;
+ * an inforec from an unknown peer is always rejected.
+ *
+ * @param trps Active TRPS instance
+ * @param upd TRP_UPD that contains the inforec to filter
+ * @param rec Inforec to filter
+ * @return 1 if accepted by the filter, 0 otherwise
+ */
+static int trps_filter_inbound_inforec(TRPS_INSTANCE *trps, TRP_UPD *upd, TRP_INFOREC *rec)
+{
+ TRP_PEER *peer=NULL;
+ TR_NAME *peer_name=NULL;
+ TR_FILTER_ACTION action=TR_FILTER_ACTION_REJECT; /* reject unless the filter explicitly accepts */
+ TR_FILTER_TARGET *target=NULL;
+ int retval=0;
+
+ /* Look up the peer. For inbound messages, the peer is identified by its GSS name */
+ /* NOTE(review): assumes upd carries a non-null peer name -- presumably guaranteed
+  * by earlier validation of the update; confirm before relying on it. */
+ peer_name=trp_upd_get_peer(upd);
+ peer=trps_get_peer_by_gssname(trps, peer_name);
+ if (peer==NULL) {
+ tr_err("trps_filter_inbound_inforec: received inforec from unknown peer (%.*s), rejecting.",
+ peer_name->len,
+ peer_name->buf);
+ return 0;
+ }
+
+ /* tr_filter_apply() and tr_filter_set_get() handle null filter sets/filters by rejecting */
+ target= tr_filter_target_trp_inforec(NULL, upd, rec);
+ if (target==NULL) {
+ /* TODO: signal that filtering failed. Until then, just filter everything and give an error message. */
+ tr_crit("trps_filter_inbound_inforec: Unable to allocate filter target, cannot apply filter!");
+ }
+ if ((target==NULL)
+ || (TR_FILTER_NO_MATCH==tr_filter_apply(target,
+ tr_filter_set_get(peer->filters, TR_FILTER_TYPE_TRP_INBOUND),
+ NULL,
+ &action))
+ || (action!=TR_FILTER_ACTION_ACCEPT)) {
+ /* either the filter did not match or it matched a reject rule or allocating the target failed */
+ retval=0;
+ } else
+ retval=1; /* filter matched an accept rule */
+ if (target!=NULL)
+ tr_filter_target_free(target);
+
+ return retval;
+}
+
+
+/**
+ * Handle an incoming TRP update message.
+ *
+ * The update is validated first; if the update or any of its inforecs fails
+ * validation, the entire update is discarded. Surviving inforecs are then
+ * filtered and dispatched one at a time, so rejection or failure of one
+ * record does not stop processing of the rest.
+ *
+ * @param trps Active TRPS instance
+ * @param upd Update message to handle
+ * @return TRP_SUCCESS if the update was processed, TRP_ERROR if it was invalid
+ */
+static TRP_RC trps_handle_update(TRPS_INSTANCE *trps, TRP_UPD *upd)
+{
+ TRP_INFOREC *rec=NULL;
+
+ if (trps_validate_update(trps, upd) != TRP_SUCCESS) {
+ tr_notice("trps_handle_update: received invalid TRP update.");
+ return TRP_ERROR;
+ }
+
+ /* First pass: validate every inforec before acting on any of them. */
+ for (rec=trp_upd_get_inforec(upd); rec!=NULL; rec=trp_inforec_get_next(rec)) {
+ /* validate/sanity check the record update */
+ if (trps_validate_inforec(trps, rec) != TRP_SUCCESS) {
+ tr_notice("trps_handle_update: invalid inforec in TRP update, discarding entire update.");
+ return TRP_ERROR;
+ }
+ }
+
+ /* Second pass: filter and dispatch each inforec by type. */
+ for (rec=trp_upd_get_inforec(upd); rec!=NULL; rec=trp_inforec_get_next(rec)) {
+ if (!trps_filter_inbound_inforec(trps, upd, rec)) {
+ tr_debug("trps_handle_update: inforec rejected by filter.");
+ continue; /* just go on to the next record */
+ }
+
+ switch (trp_inforec_get_type(rec)) {
+ case TRP_INFOREC_TYPE_ROUTE:
+ tr_debug("trps_handle_update: handling route inforec.");
+ if (TRP_SUCCESS!=trps_handle_inforec_route(trps, upd, rec))
+ tr_notice("trps_handle_update: error handling route inforec.");
+ break;
+ case TRP_INFOREC_TYPE_COMMUNITY:
+ tr_debug("trps_handle_update: handling community inforec.");
+ if (TRP_SUCCESS!=trps_handle_inforec_comm(trps, upd, rec))
+ tr_notice("trps_handle_update: error handling community inforec.");
+
+ break;
+ default:
+ tr_notice("trps_handle_update: unsupported inforec in TRP update.");
+ break;
+ }
+ }
+ return TRP_SUCCESS;
+}
+
+/* Sanity-check an incoming TRP request: it must exist and carry a
+ * community, a realm, and origin peer information. */
+static TRP_RC trps_validate_request(TRPS_INSTANCE *trps, TRP_REQ *req)
+{
+ if (req==NULL) {
+ tr_notice("trps_validate_request: null TRP request.");
+ return TRP_BADARG;
+ }
+
+ /* Log only the first missing field, then report a generic error. */
+ if (trp_req_get_comm(req)==NULL)
+ tr_notice("trps_validate_request: received TRP request with null community.");
+ else if (trp_req_get_realm(req)==NULL)
+ tr_notice("trps_validate_request: received TRP request with null realm.");
+ else if (trp_req_get_peer(req)==NULL)
+ tr_notice("trps_validate_request: received TRP request without origin peer information.");
+ else
+ return TRP_SUCCESS;
+
+ return TRP_ERROR;
+}
+
+/* Choose the best (lowest-metric) route to comm/realm. If exclude_peer_label
+ * is non-null, non-local routes learned from the peer with that label are
+ * skipped (split-horizon support); local routes are never excluded.
+ * Returns NULL unless a route with a finite metric was found. */
+static TRP_ROUTE *trps_find_best_route(TRPS_INSTANCE *trps,
+ TR_NAME *comm,
+ TR_NAME *realm,
+ TR_NAME *exclude_peer_label)
+{
+ TRP_ROUTE **entry=NULL;
+ TRP_ROUTE *best=NULL;
+ TRP_PEER *route_peer = NULL;
+ size_t n_entry=0;
+ unsigned int kk=0;
+ unsigned int kk_min=0; /* index of the best entry seen so far */
+ unsigned int min_metric=TRP_METRIC_INFINITY; /* anything finite beats this */
+
+ entry=trp_rtable_get_realm_entries(trps->rtable, comm, realm, &n_entry);
+ for (kk=0; kk<n_entry; kk++) {
+ if (trp_route_get_metric(entry[kk]) < min_metric) {
+ if (exclude_peer_label != NULL) {
+ if (!trp_route_is_local(entry[kk])) {
+ /* route is not local, check the peer label */
+ route_peer = trp_ptable_find_gss_name(trps->ptable,
+ trp_route_get_peer(entry[kk]));
+ if (route_peer == NULL) {
+ tr_err("trps_find_best_route: unknown peer GSS name (%.*s) for route %d to %.*s/%.*s",
+ trp_route_get_peer(entry[kk])->len, trp_route_get_peer(entry[kk])->buf,
+ kk,
+ realm->len, realm->buf,
+ comm->len, comm->buf);
+ continue; /* unknown peer, skip the route */
+ }
+ if (0 == tr_name_cmp(exclude_peer_label, trp_peer_get_label(route_peer))) {
+ /* we're excluding this peer - skip the route */
+ continue;
+ }
+ }
+ }
+ /* if we get here, we're not excluding the route */
+ kk_min = kk;
+ min_metric = trp_route_get_metric(entry[kk]);
+ }
+ }
+
+ /* only hand back a route if its metric is finite */
+ if (trp_metric_is_finite(min_metric))
+ best=entry[kk_min];
+
+ talloc_free(entry);
+ return best;
+}
+
+/* Re-select the active route for every comm/realm in the routing table.
+ * A new route replaces the current selection only if it has a finite metric
+ * strictly better than the current one; a selected route whose metric has
+ * become infinite (or invalid) is deselected.
+ * TODO: think this through more carefully. At least ought to add hysteresis
+ * to avoid flapping between routers or routes. */
+TRP_RC trps_update_active_routes(TRPS_INSTANCE *trps)
+{
+ size_t n_comm=0, ii=0;
+ TR_NAME **comm=trp_rtable_get_comms(trps->rtable, &n_comm);
+ size_t n_realm=0, jj=0;
+ TR_NAME **realm=NULL;
+ TRP_ROUTE *best_route=NULL, *cur_route=NULL;
+ unsigned int best_metric=0, cur_metric=0;
+
+ for (ii=0; ii<n_comm; ii++) {
+ realm=trp_rtable_get_comm_realms(trps->rtable, comm[ii], &n_realm);
+ for (jj=0; jj<n_realm; jj++) {
+ /* no exclusion: consider every route for this comm/realm */
+ best_route=trps_find_best_route(trps, comm[ii], realm[jj], NULL);
+ if (best_route==NULL)
+ best_metric=TRP_METRIC_INFINITY;
+ else
+ best_metric=trp_route_get_metric(best_route);
+
+ cur_route=trps_get_selected_route(trps, comm[ii], realm[jj]);
+ if (cur_route!=NULL) {
+ cur_metric=trp_route_get_metric(cur_route);
+ if ((best_metric < cur_metric) && (trp_metric_is_finite(best_metric))) {
+ /* The new route has a lower metric than the previous, and is finite. Accept. */
+ trp_route_set_selected(cur_route, 0);
+ trp_route_set_selected(best_route, 1);
+ } else if (!trp_metric_is_finite(cur_metric)) /* rejects infinite or invalid metrics */
+ trp_route_set_selected(cur_route, 0);
+ } else if (trp_metric_is_finite(best_metric)) {
+ /* nothing currently selected; take the best finite route */
+ trp_route_set_selected(best_route, 1);
+ }
+ }
+ /* realm array is allocated per community; free before the next iteration */
+ if (realm!=NULL)
+ talloc_free(realm);
+ realm=NULL; n_realm=0;
+ }
+ if (comm!=NULL)
+ talloc_free(comm);
+ comm=NULL; n_comm=0;
+
+ return TRP_SUCCESS;
+}
+
+/* Nonzero when curtime has reached or passed expiry. */
+static int trps_expired(struct timespec *expiry, struct timespec *curtime)
+{
+ int cmp=tr_cmp_timespec(curtime, expiry);
+ return (cmp>=0) ? 1 : 0;
+}
+
+/* Sweep for expired routes. For each expired route, if its metric is infinite, the route is flushed.
+ * If its metric is finite, the metric is set to infinite and the route's expiration time is updated.
+ * Local routes never expire. Returns TRP_SUCCESS, or TRP_ERROR if the clock
+ * could not be read. */
+TRP_RC trps_sweep_routes(TRPS_INSTANCE *trps)
+{
+ struct timespec sweep_time={0,0};
+ TRP_ROUTE **entry=NULL;
+ size_t n_entry=0;
+ size_t ii=0;
+
+ /* use a single time for the entire sweep */
+ if (0!=clock_gettime(TRP_CLOCK, &sweep_time)) {
+ tr_err("trps_sweep_routes: could not read realtime clock.");
+ sweep_time.tv_sec=0;
+ sweep_time.tv_nsec=0;
+ return TRP_ERROR;
+ }
+
+ entry= trp_rtable_get_entries(NULL, trps->rtable, &n_entry); /* must talloc_free *entry */
+
+ /* loop over the entries */
+ for (ii=0; ii<n_entry; ii++) {
+ if (!trp_route_is_local(entry[ii]) && trps_expired(trp_route_get_expiry(entry[ii]), &sweep_time)) {
+ tr_debug("trps_sweep_routes: route expired.");
+ if (!trp_metric_is_finite(trp_route_get_metric(entry[ii]))) {
+ /* flush route */
+ tr_debug("trps_sweep_routes: metric was infinity, flushing route.");
+ trp_rtable_remove(trps->rtable, entry[ii]); /* entry[ii] is no longer valid */
+ entry[ii]=NULL; /* defensive: prevent accidental reuse of the stale pointer */
+ } else {
+ /* set metric to infinity and reset timer */
+ tr_debug("trps_sweep_routes: setting metric to infinity and resetting expiry.");
+ trp_route_set_metric(entry[ii], TRP_METRIC_INFINITY);
+ trp_route_set_expiry(entry[ii], trps_compute_expiry(trps,
+ trp_route_get_interval(entry[ii]),
+ trp_route_get_expiry(entry[ii])));
+ }
+ }
+ }
+
+ /* frees only the entries array, not the routes it points to */
+ talloc_free(entry);
+ return TRP_SUCCESS;
+}
+
+
+/* Sweep for expired communities/realms/memberships.
+ *
+ * A membership that has already expired at least once is flushed from the
+ * community table; one expiring for the first time is marked expired and
+ * given a fresh expiry interval. Local entries (those with no origin) are
+ * never expired. Afterward, unreferenced realms etc. are swept away.
+ *
+ * Returns TRP_SUCCESS on success, TRP_NOMEM or TRP_ERROR on failure. */
+TRP_RC trps_sweep_ctable(TRPS_INSTANCE *trps)
+{
+ TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+ struct timespec sweep_time={0,0};
+ struct timespec tmp = {0};
+ TR_COMM_MEMB *memb=NULL;
+ TR_COMM_ITER *iter=NULL;
+ TRP_RC rc=TRP_ERROR;
+
+ /* use a single time for the entire sweep */
+ if (0!=clock_gettime(TRP_CLOCK, &sweep_time)) {
+ tr_err("trps_sweep_ctable: could not read realtime clock.");
+ sweep_time.tv_sec=0;
+ sweep_time.tv_nsec=0;
+ goto cleanup;
+ }
+
+ /* iterate all memberships */
+ iter=tr_comm_iter_new(tmp_ctx);
+ if (iter==NULL) {
+ tr_err("trps_sweep_ctable: unable to allocate iterator.");
+ rc=TRP_NOMEM;
+ goto cleanup;
+ }
+ for (memb=tr_comm_memb_iter_all_first(iter, trps->ctable);
+ memb!=NULL;
+ memb=tr_comm_memb_iter_all_next(iter)) {
+ if (tr_comm_memb_get_origin(memb)==NULL)
+ continue; /* do not expire local entries */
+
+ if (tr_comm_memb_is_expired(memb, &sweep_time)) {
+ if (tr_comm_memb_get_times_expired(memb)>0) {
+ /* Already expired once; flush. */
+ tr_debug("trps_sweep_ctable: flushing expired community membership (%.*s in %.*s, origin %.*s, expired %s).",
+ tr_comm_memb_get_realm_id(memb)->len, tr_comm_memb_get_realm_id(memb)->buf,
+ tr_comm_get_id(tr_comm_memb_get_comm(memb))->len, tr_comm_get_id(tr_comm_memb_get_comm(memb))->buf,
+ tr_comm_memb_get_origin(memb)->len, tr_comm_memb_get_origin(memb)->buf,
+ timespec_to_str(tr_comm_memb_get_expiry_realtime(memb, &tmp)));
+ /* NOTE(review): freeing memb mid-iteration relies on the iterator not
+  * touching it again -- confirm tr_comm_memb_iter_all_next tolerates this. */
+ tr_comm_table_remove_memb(trps->ctable, memb);
+ tr_comm_memb_free(memb);
+ } else {
+ /* This is the first expiration. Note this and reset the expiry time. */
+ tr_comm_memb_expire(memb);
+ trps_compute_expiry(trps, tr_comm_memb_get_interval(memb), tr_comm_memb_get_expiry(memb));
+ tr_debug("trps_sweep_ctable: community membership expired at %s, resetting expiry to %s (%.*s in %.*s, origin %.*s).",
+ timespec_to_str(tr_clock_convert(TRP_CLOCK, &sweep_time, CLOCK_REALTIME, &tmp)),
+ timespec_to_str(tr_comm_memb_get_expiry_realtime(memb, &tmp)),
+ tr_comm_memb_get_realm_id(memb)->len, tr_comm_memb_get_realm_id(memb)->buf,
+ tr_comm_get_id(tr_comm_memb_get_comm(memb))->len, tr_comm_get_id(tr_comm_memb_get_comm(memb))->buf,
+ tr_comm_memb_get_origin(memb)->len, tr_comm_memb_get_origin(memb)->buf);
+ }
+ }
+ }
+
+ /* get rid of any unreferenced realms, etc */
+ tr_comm_table_sweep(trps->ctable);
+
+ /* Fix: previously fell through to cleanup with rc still TRP_ERROR,
+  * so a successful sweep reported failure to the caller. */
+ rc=TRP_SUCCESS;
+
+cleanup:
+ talloc_free(tmp_ctx);
+ return rc;
+}
+
+/* Add two metrics: invalid inputs propagate, infinities dominate, and a
+ * finite sum that overflows the finite range saturates at infinity. */
+static unsigned int trps_metric_add(unsigned int m1, unsigned int m2)
+{
+ unsigned int sum=m1+m2;
+
+ if (trp_metric_is_invalid(m1) || trp_metric_is_invalid(m2))
+ return TRP_METRIC_INVALID;
+
+ if (trp_metric_is_infinite(m1) || trp_metric_is_infinite(m2))
+ return TRP_METRIC_INFINITY;
+
+ return trp_metric_is_finite(sum) ? sum : TRP_METRIC_INFINITY;
+}
+
+/* Convert an rentry into a new trp update info record. The next hop is left
+ * empty (the recipient fills it in); the advertised metric is the route's
+ * metric plus our link cost to the peer the route was learned from (zero for
+ * local routes). Returns NULL on allocation or field-setting failure. */
+static TRP_INFOREC *trps_route_to_inforec(TALLOC_CTX *mem_ctx, TRPS_INSTANCE *trps, TRP_ROUTE *route)
+{
+ TRP_INFOREC *rec=trp_inforec_new(mem_ctx, TRP_INFOREC_TYPE_ROUTE);
+ unsigned int linkcost=0;
+
+ if (rec!=NULL) {
+ if (trp_route_is_local(route))
+ linkcost=0;
+ else {
+ /* NOTE(review): trps_get_peer_by_gssname() can return NULL (unknown peer,
+  * or the empty peer name used for forced retractions); presumably
+  * trp_peer_get_linkcost() tolerates a NULL argument -- confirm. */
+ linkcost=trp_peer_get_linkcost(trps_get_peer_by_gssname(trps,
+ trp_route_get_peer(route)));
+ }
+
+ /* Note that we leave the next hop empty since the recipient fills that in.
+ * This is where we add the link cost (currently always 1) to the next peer. */
+ if ((trp_inforec_set_trust_router(rec, trp_route_dup_trust_router(route)) != TRP_SUCCESS)
+ ||(trp_inforec_set_metric(rec,
+ trps_metric_add(trp_route_get_metric(route),
+ linkcost)) != TRP_SUCCESS)
+ ||(trp_inforec_set_interval(rec, trps_get_update_interval(trps)) != TRP_SUCCESS)) {
+ tr_err("trps_route_to_inforec: error creating route update.");
+ talloc_free(rec);
+ rec=NULL;
+ }
+ }
+ return rec;
+}
+
+/* Build a TRP update message (realm, comm, and a single route inforec) from
+ * a route table entry. On success the update is stolen to mem_ctx; on any
+ * failure NULL is returned and partial results are freed with tmp_ctx. */
+static TRP_UPD *trps_route_to_upd(TALLOC_CTX *mem_ctx, TRPS_INSTANCE *trps, TRP_ROUTE *route)
+{
+ TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+ TRP_UPD *upd=trp_upd_new(tmp_ctx);
+ TRP_INFOREC *rec=NULL;
+
+ if (upd==NULL) {
+ tr_err("trps_route_to_upd: could not create update message.");
+ goto cleanup;
+ }
+ trp_upd_set_realm(upd, trp_route_dup_realm(route));
+ if (trp_upd_get_realm(upd)==NULL) {
+ tr_err("trps_route_to_upd: could not copy realm.");
+ upd=NULL; /* it's still in tmp_ctx, so it will be freed */
+ goto cleanup;
+ }
+ trp_upd_set_comm(upd, trp_route_dup_comm(route));
+ if (trp_upd_get_comm(upd)==NULL) {
+ tr_err("trps_route_to_upd: could not copy comm.");
+ upd=NULL; /* it's still in tmp_ctx, so it will be freed */
+ goto cleanup;
+ }
+ rec=trps_route_to_inforec(tmp_ctx, trps, route);
+ if (rec==NULL) {
+ tr_err("trps_route_to_upd: could not create route info record for realm %.*s in comm %.*s.",
+ trp_route_get_realm(route)->len, trp_route_get_realm(route)->buf,
+ trp_route_get_comm(route)->len, trp_route_get_comm(route)->buf);
+ upd=NULL; /* it's still in tmp_ctx, so it will be freed */
+ goto cleanup;
+ }
+ trp_upd_add_inforec(upd, rec);
+
+ /* success */
+ talloc_steal(mem_ctx, upd);
+
+cleanup:
+ talloc_free(tmp_ctx);
+ return upd;
+}
+
+/* Select the correct route to comm/realm to be announced to peer, enforcing
+ * the split horizon rule: if the currently selected route was learned from
+ * the very peer we are updating, advertise the best alternate route instead.
+ * Returns NULL when there is nothing suitable to advertise. */
+static TRP_ROUTE *trps_select_realm_update(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME *realm, TR_NAME *peer_label)
+{
+ TRP_ROUTE *route = NULL;
+ TRP_PEER *route_peer = NULL;
+ TR_NAME *route_peer_label = NULL;
+
+ /* Take the currently selected route unless it is through the peer we're sending the update to.
+ * I.e., enforce the split horizon rule. Start by looking up the currently selected route. */
+ route=trp_rtable_get_selected_entry(trps->rtable, comm, realm);
+ if (route==NULL) {
+ /* No selected route, this should only happen if the only route has been retracted,
+ * in which case we do not want to advertise it. */
+ return NULL;
+ }
+
+ /* Check whether it's local. */
+ if (trp_route_is_local(route)) {
+ /* It is always ok to announce a local route */
+ tr_debug("trps_select_realm_update: selected route for %.*s/%.*s is local",
+ realm->len, realm->buf,
+ comm->len, comm->buf);
+ } else {
+ /* It's not local. Get the route's peer and check whether it's the same place we
+ * got the selected route from. Peer should always correspond to an entry in our
+ * peer table. */
+ tr_debug("trps_select_realm_update: selected route for %.*s/%.*s is not local",
+ realm->len, realm->buf,
+ comm->len, comm->buf);
+ route_peer = trp_ptable_find_gss_name(trps->ptable, trp_route_get_peer(route));
+ if (route_peer == NULL) {
+ tr_err("trps_select_realm_update: unknown peer GSS name (%.*s) for selected route for %.*s/%.*s",
+ trp_route_get_peer(route)->len, trp_route_get_peer(route)->buf,
+ realm->len, realm->buf,
+ comm->len, comm->buf);
+ return NULL;
+ }
+ route_peer_label = trp_peer_get_label(route_peer);
+ if (route_peer_label == NULL) {
+ tr_err("trps_select_realm_update: error retrieving peer label for selected route for %.*s/%.*s",
+ realm->len, realm->buf,
+ comm->len, comm->buf);
+ return NULL;
+ }
+
+ /* see if these match */
+ tr_debug("trps_select_realm_update: %.*s vs %.*s",
+ peer_label->len, peer_label->buf,
+ route_peer_label->len, route_peer_label->buf);
+
+ if (0==tr_name_cmp(peer_label, route_peer_label)) {
+ /* the selected entry goes through the peer we're reporting to, choose an alternate */
+ tr_debug("trps_select_realm_update: matched, finding alternate route");
+ route=trps_find_best_route(trps, comm, realm, peer_label);
+ if ((route==NULL) || (!trp_metric_is_finite(trp_route_get_metric(route)))) {
+ tr_debug("trps_select_realm_update: no route to %.*s/%.*s suitable to advertise to %.*s",
+ realm->len, realm->buf,
+ comm->len, comm->buf,
+ peer_label->len, peer_label->buf);
+ return NULL; /* don't advertise a nonexistent or retracted route */
+ }
+ }
+ }
+ return route;
+}
+
+/* Add TRP_UPD msgs to the updates GPtrArray. Caller needs to arrange for these to be freed.
+ * One update message is created per comm/realm with an advertisable route.
+ * When triggered is nonzero, only routes flagged as triggered are included.
+ * Update messages are allocated in mem_ctx. */
+static TRP_RC trps_select_route_updates_for_peer(TALLOC_CTX *mem_ctx,
+ GPtrArray *updates,
+ TRPS_INSTANCE *trps,
+ TR_NAME *peer_label,
+ int triggered)
+{
+ size_t n_comm=0;
+ TR_NAME **comm=trp_rtable_get_comms(trps->rtable, &n_comm);
+ TR_NAME **realm=NULL;
+ size_t n_realm=0;
+ size_t ii=0, jj=0;
+ TRP_ROUTE *best=NULL;
+ TRP_UPD *upd=NULL;
+
+ if (updates==NULL)
+ return TRP_BADARG;
+
+ for (ii=0; ii<n_comm; ii++) {
+ realm=trp_rtable_get_comm_realms(trps->rtable, comm[ii], &n_realm);
+ for (jj=0; jj<n_realm; jj++) {
+ best=trps_select_realm_update(trps, comm[ii], realm[jj], peer_label);
+ /* If we found a route, add it to the list. If triggered!=0, then only
+ * add triggered routes. */
+ if ((best!=NULL) && ((!triggered) || trp_route_is_triggered(best))) {
+ upd=trps_route_to_upd(mem_ctx, trps, best);
+ if (upd==NULL) {
+ tr_err("trps_select_route_updates_for_peer: unable to create update message.");
+ continue; /* skip this realm but keep going */
+ }
+ g_ptr_array_add(updates, upd);
+ }
+ }
+
+ /* realm array is allocated per community; release it each iteration */
+ if (realm!=NULL)
+ talloc_free(realm);
+ realm=NULL;
+ n_realm=0;
+ }
+
+ if (comm!=NULL)
+ talloc_free(comm);
+
+ return TRP_SUCCESS;
+}
+
+/* Build a community inforec describing a single community membership.
+ * Optional community attributes (APCs, owner realm/contact, provenance) are
+ * copied only when present on the community/membership. On success the
+ * inforec is stolen to mem_ctx; on any failure NULL is returned and partial
+ * results are freed along with the temporary context. */
+static TRP_INFOREC *trps_memb_to_inforec(TALLOC_CTX *mem_ctx, TRPS_INSTANCE *trps, TR_COMM_MEMB *memb)
+{
+ TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+ TRP_INFOREC *rec=NULL;
+ TR_COMM *comm=NULL;
+
+ if (memb==NULL)
+ goto cleanup;
+
+ comm=tr_comm_memb_get_comm(memb);
+ rec=trp_inforec_new(tmp_ctx, TRP_INFOREC_TYPE_COMMUNITY);
+ if (rec==NULL)
+ goto cleanup;
+
+ if (TRP_SUCCESS!=trp_inforec_set_comm_type(rec, tr_comm_get_type(comm))) {
+ rec=NULL; /* still in tmp_ctx, freed below */
+ goto cleanup;
+ }
+
+ if (TRP_SUCCESS!=trp_inforec_set_role(rec, tr_comm_memb_get_role(memb))) {
+ rec=NULL;
+ goto cleanup;
+ }
+
+ /* each optional attribute: fail if it exists but could not be copied */
+ if ((NULL!=tr_comm_get_apcs(comm)) &&
+ ( (TRP_SUCCESS!=trp_inforec_set_apcs(rec,
+ tr_apc_dup(rec, tr_comm_get_apcs(comm)))) ||
+ (NULL==trp_inforec_get_apcs(rec)))) {
+ rec=NULL;
+ goto cleanup;
+ }
+
+ if ((NULL!=tr_comm_get_owner_realm(comm)) &&
+ ( (TRP_SUCCESS!=trp_inforec_set_owner_realm(rec, tr_dup_name(tr_comm_get_owner_realm(comm)))) ||
+ (NULL==trp_inforec_get_owner_realm(rec)))) {
+ rec=NULL;
+ goto cleanup;
+ }
+
+ if ((NULL!=tr_comm_get_owner_contact(comm)) &&
+ ( (TRP_SUCCESS!=trp_inforec_set_owner_contact(rec, tr_dup_name(tr_comm_get_owner_contact(comm)))) ||
+ (NULL==trp_inforec_get_owner_contact(rec)))) {
+ rec=NULL;
+ goto cleanup;
+ }
+
+ if ((NULL!=tr_comm_memb_get_provenance(memb)) &&
+ (TRP_SUCCESS!=trp_inforec_set_provenance(rec, tr_comm_memb_get_provenance(memb)))) {
+ rec=NULL;
+ goto cleanup;
+ }
+
+ if (TRP_SUCCESS!=trp_inforec_set_interval(rec, trps_get_update_interval(trps))) {
+ rec=NULL;
+ goto cleanup;
+ }
+
+ /* success! */
+ talloc_steal(mem_ctx, rec);
+
+cleanup:
+ talloc_free(tmp_ctx);
+ return rec;
+}
+
+/* Construct an update with all the inforecs for comm/realm/role to be sent
+ * to peer. The update's peer field is left empty. Returns NULL if there are
+ * no memberships to report or on allocation failure.
+ * NOTE(review): the peer_label parameter is not referenced in this body --
+ * confirm whether it is reserved for future per-peer filtering. */
+static TRP_UPD *trps_comm_update(TALLOC_CTX *mem_ctx,
+ TRPS_INSTANCE *trps,
+ TR_NAME *peer_label,
+ TR_COMM *comm,
+ TR_REALM *realm)
+{
+ TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+ TRP_UPD *upd=trp_upd_new(tmp_ctx);
+ TRP_INFOREC *rec=NULL;
+ TR_COMM_ITER *iter=NULL;
+ TR_COMM_MEMB *memb=NULL;
+
+ if (upd==NULL)
+ goto cleanup;
+
+ trp_upd_set_comm(upd, tr_comm_dup_id(comm));
+ trp_upd_set_realm(upd, tr_realm_dup_id(realm));
+ /* leave peer empty */
+
+ iter=tr_comm_iter_new(tmp_ctx);
+ if (iter==NULL) {
+ tr_err("trps_comm_update: unable to allocate iterator.");
+ upd=NULL; /* still in tmp_ctx, freed below */
+ goto cleanup;
+ }
+
+ /* now add inforecs */
+ /* find the membership record matching the realm's role in this community */
+ switch (realm->role) {
+ case TR_ROLE_IDP:
+ memb=tr_comm_table_find_idp_memb(trps->ctable,
+ tr_realm_get_id(realm),
+ tr_comm_get_id(comm));
+ break;
+ case TR_ROLE_RP:
+ memb=tr_comm_table_find_rp_memb(trps->ctable,
+ tr_realm_get_id(realm),
+ tr_comm_get_id(comm));
+ break;
+ default:
+ break;
+ }
+ if (memb!=NULL) {
+ /* one inforec per membership record in the iteration */
+ for (memb=tr_comm_memb_iter_first(iter, memb);
+ memb!=NULL;
+ memb=tr_comm_memb_iter_next(iter)) {
+ rec=trps_memb_to_inforec(tmp_ctx, trps, memb);
+ if (rec==NULL) {
+ tr_err("trps_comm_update: unable to allocate inforec.");
+ upd=NULL;
+ goto cleanup;
+ }
+ trp_upd_add_inforec(upd, rec);
+ }
+ }
+
+ if (trp_upd_get_inforec(upd)==NULL)
+ upd=NULL; /* no inforecs, no reason to send the update */
+ else
+ talloc_steal(mem_ctx, upd); /* success! */
+
+cleanup:
+ talloc_free(tmp_ctx);
+ return upd;
+}
+
+/* Find all community updates to send to a peer and add these as TR_UPD records
+ * to the updates GPtrArray. Caller needs to arrange for these to be freed.
+ * Triggered updates currently carry no community information.
+ * Returns TRP_SUCCESS even when no updates were selected. */
+static TRP_RC trps_select_comm_updates_for_peer(TALLOC_CTX *mem_ctx,
+ GPtrArray *updates,
+ TRPS_INSTANCE *trps,
+ TR_NAME *peer_label,
+ int triggered)
+{
+ TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+ TR_COMM_ITER *comm_iter=NULL;
+ TR_COMM *comm=NULL;
+ TR_COMM_ITER *realm_iter=NULL;
+ TR_REALM *realm=NULL;
+ TRP_UPD *upd=NULL;
+ TRP_RC rc=TRP_ERROR;
+
+ /* currently do not send any communities on triggered updates */
+ if (triggered) {
+ rc=TRP_SUCCESS;
+ goto cleanup;
+ }
+
+ comm_iter=tr_comm_iter_new(tmp_ctx);
+ realm_iter=tr_comm_iter_new(tmp_ctx);
+ if ((comm_iter==NULL) || (realm_iter==NULL)) {
+ tr_err("trps_select_comm_updates_for_peer: unable to allocate iterator.");
+ rc=TRP_NOMEM;
+ goto cleanup;
+ }
+
+ /* do every community */
+ for (comm=tr_comm_table_iter_first(comm_iter, trps->ctable);
+ comm!=NULL;
+ comm=tr_comm_table_iter_next(comm_iter)) {
+ /* do every realm in this community */
+ tr_debug("trps_select_comm_updates_for_peer: looking through community %.*s",
+ tr_comm_get_id(comm)->len,
+ tr_comm_get_id(comm)->buf);
+ for (realm=tr_realm_iter_first(realm_iter, trps->ctable, tr_comm_get_id(comm));
+ realm!=NULL;
+ realm=tr_realm_iter_next(realm_iter)) {
+ /* get the update for this comm/realm */
+ tr_debug("trps_select_comm_updates_for_peer: adding realm %.*s",
+ tr_realm_get_id(realm)->len,
+ tr_realm_get_id(realm)->buf);
+ upd=trps_comm_update(mem_ctx, trps, peer_label, comm, realm);
+ if (upd!=NULL)
+ g_ptr_array_add(updates, upd);
+ }
+ }
+
+ /* Fix: previously fell through to cleanup with rc still TRP_ERROR,
+  * so a successful selection reported failure to the caller. */
+ rc=TRP_SUCCESS;
+
+cleanup:
+ talloc_free(tmp_ctx);
+ return rc;
+}
+
+/**
+ * Filter the inforecs in a single update, removing any record that is not
+ * explicitly accepted (no match, a reject match, or a failed target
+ * allocation all cause removal).
+ *
+ * @param filt The filter to apply
+ * @param upd The update to filter
+ */
+static void trps_filter_one_outbound_update(TR_FILTER *filt, TRP_UPD *upd)
+{
+ TRP_INFOREC *this=NULL, *next=NULL;
+ TR_FILTER_ACTION action=TR_FILTER_ACTION_REJECT;
+ TR_FILTER_TARGET *target=NULL;
+
+ /* "next" is saved up front because "this" may be removed below */
+ for(this=trp_upd_get_inforec(upd); this!=NULL; this=next) {
+ next=this->next;
+ target= tr_filter_target_trp_inforec(NULL, upd, this);
+ if (target==NULL) {
+ /* TODO: signal that filtering failed. Until then, just filter everything and give an error message. */
+ tr_crit("trps_filter_one_outbound_update: Unable to allocate filter target, cannot apply filter!");
+ }
+ if ((target==NULL)
+ || (TR_FILTER_NO_MATCH==tr_filter_apply(target, filt, NULL, &action))
+ || (action!=TR_FILTER_ACTION_ACCEPT)) {
+ /* Either no filter matched or one matched and rejected this record.
+ * Also filter out record if we were unable to allocate a target. */
+ trp_upd_remove_inforec(upd, this); /* "this" is now invalid */
+ }
+ if (target!=NULL)
+ tr_filter_target_free(target);
+ }
+}
+
+/**
+ * Apply the peer's outbound filter to every update in the array and drop
+ * any update left with no inforecs. May shuffle the update list.
+ *
+ * @param filters The filter set for the relevant TRP peer
+ * @param updates GPtrArray of updates to filter
+ */
+static void trps_filter_outbound_updates(TR_FILTER_SET *filters, GPtrArray *updates)
+{
+ TRP_UPD *upd=NULL;
+ guint remaining=updates->len;
+
+ /* Visit elements from the end toward the start so that removing one
+ * never disturbs the elements we have yet to examine. */
+ while (remaining>0) {
+ remaining--;
+ upd=g_ptr_array_index(updates, remaining);
+ trps_filter_one_outbound_update(tr_filter_set_get(filters, TR_FILTER_TYPE_TRP_OUTBOUND), upd);
+ /* drop the update entirely if the filter removed every record */
+ if (trp_upd_num_inforecs(upd)==0)
+ g_ptr_array_remove_index_fast(updates, remaining); /* does not preserve order */
+ }
+}
+
+/* GPtrArray element-free helper for trps_update_one_peer: releases the
+ * TRP_UPD stored in an array slot. */
+static void trps_trp_upd_destroy(gpointer data)
+{
+ TRP_UPD *upd=(TRP_UPD *)data;
+
+ trp_upd_free(upd);
+}
+
+/**
+ * Send route and community updates to a single peer.
+ *
+ * If comm and realm are both NULL, updates for all realms are selected; if
+ * both are non-NULL, only that community/realm is updated and a retraction
+ * (infinite metric) is sent when no suitable route exists. Specifying only
+ * one of comm/realm is an error. Selected updates are passed through the
+ * peer's outbound filters before being encoded and queued.
+ *
+ * @param trps Active TRPS instance
+ * @param peer Peer to update
+ * @param update_type TRP_UPDATE_TRIGGERED, _SCHEDULED, or _REQUESTED
+ * @param realm Realm to update, or NULL together with comm for all realms
+ * @param comm Community to update, or NULL together with realm for all
+ * @return TRP_SUCCESS on success, an error code otherwise
+ */
+static TRP_RC trps_update_one_peer(TRPS_INSTANCE *trps,
+ TRP_PEER *peer,
+ TRP_UPDATE_TYPE update_type,
+ TR_NAME *realm,
+ TR_NAME *comm)
+{
+ TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+ TR_MSG msg; /* not a pointer! */
+ TRP_UPD *upd=NULL;
+ TRP_ROUTE *route=NULL;
+ size_t ii=0;
+ char *encoded=NULL;
+ TRP_RC rc=TRP_ERROR;
+ TR_NAME *peer_label=trp_peer_get_label(peer);
+ GPtrArray *updates=g_ptr_array_new_with_free_func(trps_trp_upd_destroy);
+
+ if (updates==NULL) {
+ tr_err("trps_update_one_peer: unable to allocate updates array.");
+ rc=TRP_NOMEM;
+ goto cleanup;
+ }
+
+ /* log (and validate) the requested update type */
+ switch (update_type) {
+ case TRP_UPDATE_TRIGGERED:
+ tr_debug("trps_update_one_peer: preparing triggered update for %.*s",
+ peer_label->len, peer_label->buf);
+ break;
+ case TRP_UPDATE_SCHEDULED:
+ tr_debug("trps_update_one_peer: preparing scheduled update for %.*s",
+ peer_label->len, peer_label->buf);
+ break;
+ case TRP_UPDATE_REQUESTED:
+ tr_debug("trps_update_one_peer: preparing requested update for %.*s",
+ peer_label->len, peer_label->buf);
+ break;
+ default:
+ tr_err("trps_update_one_peer: invalid update type requested.");
+ rc=TRP_BADARG;
+ goto cleanup;
+ }
+
+ /* First, gather route updates. */
+ tr_debug("trps_update_one_peer: selecting route updates for %.*s.", peer_label->len, peer_label->buf);
+ if ((comm==NULL) && (realm==NULL)) {
+ /* do all realms */
+ rc=trps_select_route_updates_for_peer(tmp_ctx,
+ updates,
+ trps,
+ peer_label,
+ update_type==TRP_UPDATE_TRIGGERED);
+ } else if ((comm!=NULL) && (realm!=NULL)) {
+ /* a single community/realm was requested */
+ route=trps_select_realm_update(trps, comm, realm, peer_label);
+ if (route==NULL) {
+ /* we have no actual update to send back, MUST send a retraction */
+ tr_debug("trps_update_one_peer: community/realm without route requested, sending mandatory retraction.");
+ /* synthetic retraction route lives in tmp_ctx and is freed with it */
+ route=trp_route_new(tmp_ctx);
+ trp_route_set_comm(route, tr_dup_name(comm));
+ trp_route_set_realm(route, tr_dup_name(realm));
+ trp_route_set_peer(route, tr_new_name(""));
+ trp_route_set_metric(route, TRP_METRIC_INFINITY);
+ trp_route_set_trust_router(route, tr_new_name(""));
+ trp_route_set_next_hop(route, tr_new_name(""));
+ }
+ upd=trps_route_to_upd(tmp_ctx, trps, route);
+ if (upd==NULL) {
+ tr_err("trps_update_one_peer: unable to allocate route update.");
+ rc=TRP_NOMEM;
+ goto cleanup;
+ }
+ g_ptr_array_add(updates, upd);
+ } else {
+ tr_err("trps_update_one_peer: error: only comm or realm was specified. Need both or neither.");
+ rc=TRP_ERROR;
+ goto cleanup;
+ }
+
+ /* Second, gather community updates */
+ tr_debug("trps_update_one_peer: selecting community updates for %.*s.", peer_label->len, peer_label->buf);
+ rc=trps_select_comm_updates_for_peer(tmp_ctx, updates, trps, peer_label, update_type==TRP_UPDATE_TRIGGERED);
+
+ /* see if we have anything to send */
+ if (updates->len<=0)
+ tr_debug("trps_update_one_peer: no updates for %.*s", peer_label->len, peer_label->buf);
+ else {
+ /* Apply outbound TRP filters for this peer */
+ trps_filter_outbound_updates(peer->filters, updates);
+
+ if (updates->len<=0)
+ tr_debug("trps_update_one_peer: no updates for %.*s after filtering.", peer_label->len, peer_label->buf);
+ else {
+ tr_debug("trps_update_one_peer: sending %d update messages.", updates->len);
+ for (ii=0; ii<updates->len; ii++) {
+ upd = (TRP_UPD *) g_ptr_array_index(updates, ii);
+ /* now encode the update message */
+ tr_msg_set_trp_upd(&msg, upd);
+ encoded = tr_msg_encode(NULL, &msg);
+ if (encoded == NULL) {
+ tr_err("trps_update_one_peer: error encoding update.");
+ rc = TRP_ERROR;
+ goto cleanup;
+ }
+
+ tr_debug("trps_update_one_peer: adding message to queue.");
+ /* send errors are logged but do not abort the remaining updates */
+ if (trps_send_msg(trps, peer, encoded) != TRP_SUCCESS)
+ tr_err("trps_update_one_peer: error queueing update.");
+ else
+ tr_debug("trps_update_one_peer: update queued successfully.");
+
+ tr_msg_free_encoded(encoded);
+ encoded = NULL;
+ }
+ }
+ }
+
+ rc=TRP_SUCCESS;
+
+cleanup:
+ if (updates!=NULL)
+ g_ptr_array_free(updates, TRUE); /* frees any TRP_UPD records */
+ talloc_free(tmp_ctx);
+ return rc;
+}
+
+/**
+ * Send all routes/communities to all connected peers.
+ *
+ * Peers without an active TRP connection are skipped. Triggered-route flags
+ * are cleared afterward so triggered updates are not re-sent.
+ *
+ * @param trps Active TRPS instance
+ * @param update_type Type of update being sent
+ * @return TRP_SUCCESS on success, an error code otherwise
+ */
+TRP_RC trps_update(TRPS_INSTANCE *trps, TRP_UPDATE_TYPE update_type)
+{
+ TALLOC_CTX *tmp_ctx=NULL;
+ TRP_PTABLE_ITER *iter=NULL;
+ TRP_PEER *peer=NULL;
+ TRP_RC rc=TRP_SUCCESS;
+
+ if (trps->ptable==NULL)
+ return TRP_SUCCESS; /* no peers, nothing to do */
+
+ /* Fix: allocate the talloc context only after the early exit above; the
+  * previous version allocated first and leaked tmp_ctx whenever there was
+  * no peer table. */
+ tmp_ctx=talloc_new(NULL);
+ iter=trp_ptable_iter_new(tmp_ctx);
+ if (iter==NULL) {
+ tr_err("trps_update: failed to allocate peer table iterator.");
+ talloc_free(tmp_ctx);
+ return TRP_NOMEM;
+ }
+
+ /* stop at the first peer that fails to update */
+ for (peer=trp_ptable_iter_first(iter, trps->ptable);
+ (peer!=NULL) && (rc==TRP_SUCCESS);
+ peer=trp_ptable_iter_next(iter))
+ {
+ if (!trps_peer_connected(trps, peer)) {
+ TR_NAME *peer_label=trp_peer_get_label(peer);
+ tr_debug("trps_update: no TRP connection to %.*s, skipping.",
+ peer_label->len, peer_label->buf);
+ continue;
+ }
+ rc=trps_update_one_peer(trps, peer, update_type, NULL, NULL);
+ }
+
+ tr_debug("trps_update: rc=%u after attempting update.", rc);
+ trp_ptable_iter_free(iter);
+ trp_rtable_clear_triggered(trps->rtable); /* don't re-send triggered updates */
+ talloc_free(tmp_ctx);
+ return rc;
+}
+
+/* Add a route to the route table. Currently always reports success because
+ * trp_rtable_add() does not return a status. */
+TRP_RC trps_add_route(TRPS_INSTANCE *trps, TRP_ROUTE *route)
+{
+ trp_rtable_add(trps->rtable, route); /* should return status */
+ return TRP_SUCCESS;
+}
+
+/* Add a peer to the peer table, creating the table on first use.
+ * Steals the peer object. Returns TRP_NOMEM if the table cannot be
+ * created, otherwise whatever trp_ptable_add() reports. */
+TRP_RC trps_add_peer(TRPS_INSTANCE *trps, TRP_PEER *peer)
+{
+ if (trps->ptable==NULL) {
+ /* lazily create the peer table, owned by the TRPS instance */
+ trps->ptable=trp_ptable_new(trps);
+ if (trps->ptable==NULL)
+ return TRP_NOMEM;
+ }
+ return trp_ptable_add(trps->ptable, peer);
+}
+
+/* Look up a peer by GSS name; NULL if there is no peer table or no match. */
+TRP_PEER *trps_get_peer_by_gssname(TRPS_INSTANCE *trps, TR_NAME *gssname)
+{
+ return (trps->ptable==NULL) ? NULL : trp_ptable_find_gss_name(trps->ptable, gssname);
+}
+
+/* Look up a peer by service name; NULL if there is no peer table or no match. */
+TRP_PEER *trps_get_peer_by_servicename(TRPS_INSTANCE *trps, TR_NAME *servicename)
+{
+ return (trps->ptable==NULL) ? NULL : trp_ptable_find_servicename(trps->ptable, servicename);
+}
+
+/* Nonzero iff we currently have an active TRP connection to the peer. */
+int trps_peer_connected(TRPS_INSTANCE *trps, TRP_PEER *peer)
+{
+ TRPC_INSTANCE *trpc=trps_find_trpc(trps, peer);
+
+ return (trpc!=NULL) && (trpc_get_status(trpc)==TRP_CONNECTION_UP);
+}
+
+
+/* Handle a TRP route request: send the requested route(s) back to the
+ * requesting peer as a TRP_UPDATE_REQUESTED update. A wildcard request
+ * (comm/realm left NULL) returns all routes.
+ * NOTE(review): trps_get_peer_by_gssname() can return NULL for an unknown
+ * peer, and trps_update_one_peer() appears to dereference its peer argument
+ * -- confirm that validated requests always originate from known peers. */
+static TRP_RC trps_handle_request(TRPS_INSTANCE *trps, TRP_REQ *req)
+{
+ TR_NAME *comm=NULL;
+ TR_NAME *realm=NULL;
+
+ tr_debug("trps_handle_request: handling TRP request.");
+
+ if (trps_validate_request(trps, req) != TRP_SUCCESS) {
+ tr_notice("trps_handle_request: received invalid TRP request.");
+ return TRP_ERROR;
+ }
+
+ if (!trp_req_is_wildcard(req)) {
+ comm=trp_req_get_comm(req);
+ realm=trp_req_get_realm(req);
+ tr_debug("trps_handle_request: route for %.*s/%.*s requested.",
+ comm->len, comm->buf, realm->len, realm->buf);
+ } else {
+ tr_debug("trps_handle_request: all routes requested.");
+ /* leave comm/realm NULL */
+ }
+ return trps_update_one_peer(trps,
+ trps_get_peer_by_gssname(trps, trp_req_get_peer(req)),
+ TRP_UPDATE_REQUESTED,
+ realm,
+ comm);
+}
+
+
+/* Dispatch a decoded TR message to the appropriate TRP handler. A
+ * successfully handled update also triggers route re-selection and sends
+ * any resulting triggered updates; other message types are ignored. */
+TRP_RC trps_handle_tr_msg(TRPS_INSTANCE *trps, TR_MSG *tr_msg)
+{
+ TRP_RC rc=TRP_ERROR;
+
+ switch (tr_msg_get_msg_type(tr_msg)) {
+ case TRP_UPDATE:
+ rc=trps_handle_update(trps, tr_msg_get_trp_upd(tr_msg));
+ if (rc==TRP_SUCCESS) {
+ rc=trps_update_active_routes(trps);
+ trps_update(trps, TRP_UPDATE_TRIGGERED); /* send any triggered routes */
+ }
+ break;
+
+ case TRP_REQUEST:
+ rc=trps_handle_request(trps, tr_msg_get_trp_req(tr_msg));
+ break;
+
+ default:
+ /* unknown error or one we don't care about (e.g., TID messages) */
+ rc=TRP_ERROR;
+ break;
+ }
+
+ return rc;
+}
+
+/* send wildcard route request to a peer */
+TRP_RC trps_wildcard_route_req(TRPS_INSTANCE *trps, TR_NAME *peer_servicename)
+{
+ TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+ TRP_PEER *peer=trps_get_peer_by_servicename(trps, peer_servicename);
+ TR_MSG msg; /* not a pointer */
+ TRP_REQ *req=trp_req_new(tmp_ctx);
+ char *encoded=NULL;
+ TRP_RC rc=TRP_ERROR;
+
+ if (peer==NULL) {
+ tr_err("trps_wildcard_route_req: unknown peer (%.*s).", peer_servicename->len, peer_servicename->buf);
+ rc=TRP_BADARG;
+ goto cleanup;
+ }
+ if ((req==NULL) || (trp_req_make_wildcard(req)!=TRP_SUCCESS)) {
+ tr_err("trps_wildcard_route_req: unable to create wildcard TRP request.");
+ rc=TRP_NOMEM;
+ goto cleanup;
+ }
+
+ tr_msg_set_trp_req(&msg, req);
+ encoded= tr_msg_encode(NULL, &msg);
+ if (encoded==NULL) {
+ tr_err("trps_wildcard_route_req: error encoding wildcard TRP request.");
+ rc=TRP_ERROR;
+ goto cleanup;
+ }
+
+ tr_debug("trps_wildcard_route_req: adding message to queue.");
+ if (trps_send_msg(trps, peer, encoded) != TRP_SUCCESS) {
+ tr_err("trps_wildcard_route_req: error queueing request.");
+ rc=TRP_ERROR;
+ } else {
+ tr_debug("trps_wildcard_route_req: request queued successfully.");
+ rc=TRP_SUCCESS;
+ }
+
+cleanup:
+ if (encoded!=NULL)
+ tr_msg_free_encoded(encoded);
+ if (req!=NULL)
+ trp_req_free(req);
+
+ talloc_free(tmp_ctx);
+ return rc;