#include <unistd.h>
#include <sys/time.h>
#include <glib.h>
+#include <string.h>
+#include <poll.h> // for nfds_t
#include <gsscon.h>
+#include <tr_comm.h>
#include <tr_apc.h>
#include <tr_rp.h>
-#include <trust_router/tr_name.h>
+#include <tr_name_internal.h>
+#include <trp_route.h>
#include <trp_internal.h>
-#include <tr_gss.h>
+#include <tr_gss_names.h>
+#include <trp_peer.h>
#include <trp_ptable.h>
#include <trp_rtable.h>
#include <tr_debug.h>
-
+#include <tr_util.h>
+#include <tr_socket.h>
static int trps_destructor(void *object)
{
TRPS_INSTANCE *trps=talloc(mem_ctx, TRPS_INSTANCE);
if (trps!=NULL) {
trps->hostname=NULL;
- trps->port=0;
+ trps->trps_port=0;
trps->cookie=NULL;
trps->conn=NULL;
trps->trpc=NULL;
TR_MQ_MSG *trps_mq_pop(TRPS_INSTANCE *trps)
{
-  return tr_mq_pop(trps->mq);
+  /* second argument added by the new tr_mq_pop() API; 0 presumably means
+   * "do not block / no timeout" -- TODO confirm against the tr_mq header */
+  return tr_mq_pop(trps->mq, 0);
}
void trps_mq_add(TRPS_INSTANCE *trps, TR_MQ_MSG *msg)
void trps_set_ctable(TRPS_INSTANCE *trps, TR_COMM_TABLE *comm)
{
-  if (trps->ctable!=NULL)
-    tr_comm_table_free(trps->ctable);
+  /* NOTE(review): the previous table is no longer freed here; presumably the
+   * ctable's lifetime is now managed by its talloc parent -- confirm no leak
+   * when a table is replaced. */
trps->ctable=comm;
}
trp_ptable_iter_free(iter);
}
+/* Get the label peers will know us by - needs to match trp_peer_get_label() output.
+ * There is no get, only dup, because we don't store the label except when requested. */
+TR_NAME *trps_dup_label(TRPS_INSTANCE *trps)
+{
+  TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+  TR_NAME *label=NULL;
+  /* label format is "hostname:port"; the scratch string lives in tmp_ctx */
+  char *s=talloc_asprintf(tmp_ctx, "%s:%u", trps->hostname, trps->trps_port);
+  if (s==NULL)
+    goto cleanup;
+  label=tr_new_name(s);
+
+cleanup:
+  talloc_free(tmp_ctx);
+  return label; /* NULL on allocation failure; caller frees with tr_free_name() */
+}
+
TRPC_INSTANCE *trps_find_trpc(TRPS_INSTANCE *trps, TRP_PEER *peer)
{
TRPC_INSTANCE *cur=NULL;
/* get the connection for this peer */
trpc=trps_find_trpc(trps, peer);
- /* instead, let's let that happen and then clear the queue when an attempt to
- * connect fails */
+ /* The peer connection (trpc) usually exists even if the connection is down.
+ * We will queue messages even if the connection is down. To prevent this from
+ * endlessly increasing the size of the queue, the trpc handler needs to clear
+ * its queue periodically, even if it is unable to send the messages
+ */
if (trpc==NULL) {
tr_warning("trps_send_msg: skipping message queued for missing TRP client entry.");
} else {
- mq_msg=tr_mq_msg_new(tmp_ctx, TR_MQMSG_TRPC_SEND, TR_MQ_PRIO_NORMAL);
+ mq_msg=tr_mq_msg_new(tmp_ctx, TR_MQMSG_TRPC_SEND);
msg_dup=talloc_strdup(mq_msg, msg); /* get local copy in mq_msg context */
tr_mq_msg_set_payload(mq_msg, msg_dup, NULL); /* no need for a free() func */
trpc_mq_add(trpc, mq_msg);
return rc;
}
-static int trps_listen (TRPS_INSTANCE *trps, int port)
-{
- int rc = 0;
- int conn = -1;
- int optval = 1;
-
- union {
- struct sockaddr_storage storage;
- struct sockaddr_in in4;
- } addr;
-
- struct sockaddr_in *saddr = (struct sockaddr_in *) &addr.in4;
-
- saddr->sin_port = htons (port);
- saddr->sin_family = AF_INET;
- saddr->sin_addr.s_addr = INADDR_ANY;
-
- if (0 > (conn = socket (AF_INET, SOCK_STREAM, 0)))
- return conn;
-
- setsockopt(conn, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
-
- if (0 > (rc = bind (conn, (struct sockaddr *) saddr, sizeof(struct sockaddr_in))))
- return rc;
-
- if (0 > (rc = listen(conn, 512)))
- return rc;
-
- tr_debug("trps_listen: TRP Server listening on port %d", port);
- return conn;
-}
-
/* get the currently selected route if available */
TRP_ROUTE *trps_get_route(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME *realm, TR_NAME *peer)
{
tr_debug("trps_read_message: message received, %u bytes.", (unsigned) buflen);
tr_debug("trps_read_message: %.*s", buflen, buf);
- *msg=tr_msg_decode(buf, buflen);
+ *msg= tr_msg_decode(NULL, buf, buflen);
free(buf);
if (*msg==NULL)
return TRP_NOPARSE;
switch (tr_msg_get_msg_type(*msg)) {
case TRP_UPDATE:
trp_upd_set_peer(tr_msg_get_trp_upd(*msg), tr_dup_name(conn_peer));
- trp_upd_set_next_hop(tr_msg_get_trp_upd(*msg), trp_peer_get_server(peer), 0); /* TODO: 0 should be the configured TID port */
+ /* update provenance if necessary */
+ trp_upd_add_to_provenance(tr_msg_get_trp_upd(*msg), trp_peer_get_label(peer));
break;
case TRP_REQUEST:
TRPS_MSG_FUNC msg_handler,
TRP_AUTH_FUNC auth_handler,
const char *hostname,
- unsigned int port,
- void *cookie)
+ int port,
+ void *cookie,
+ int *fd_out,
+ size_t max_fd)
{
- int listen = -1;
+ nfds_t n_fd=0;
+ nfds_t ii=0;
- if (0 > (listen = trps_listen(trps, port))) {
- char errbuf[256];
- if (0 == strerror_r(errno, errbuf, 256)) {
- tr_debug("trps_get_listener: Error opening port %d: %s.", port, errbuf);
- } else {
- tr_debug("trps_get_listener: Unknown error openining port %d.", port);
- }
- }
+ n_fd = tr_sock_listen_all(port, fd_out, max_fd);
- if (listen > 0) {
+ if (n_fd == 0)
+ tr_err("trps_get_listener: Error opening port %d.", port);
+ else {
/* opening port succeeded */
- tr_debug("trps_get_listener: Opened port %d.", port);
+ tr_info("trps_get_listener: Opened port %d.", port);
- /* make this socket non-blocking */
- if (0 != fcntl(listen, F_SETFL, O_NONBLOCK)) {
- tr_debug("trps_get_listener: Error setting O_NONBLOCK.");
- close(listen);
- listen=-1;
+ /* make the sockets non-blocking */
+ for (ii=0; ii<n_fd; ii++) {
+ if (0 != fcntl(fd_out[ii], F_SETFL, O_NONBLOCK)) {
+ tr_err("trps_get_listener: Error setting O_NONBLOCK.");
+ for (ii=0; ii<n_fd; ii++) {
+ close(fd_out[ii]);
+ fd_out[ii]=-1;
+ }
+ n_fd = 0;
+ break;
+ }
}
}
- if (listen > 0) {
+ if (n_fd > 0) {
/* store the caller's request handler & cookie */
trps->msg_handler = msg_handler;
trps->auth_handler = auth_handler;
trps->hostname = talloc_strdup(trps, hostname);
- trps->port = port;
+ trps->trps_port = port;
trps->cookie = cookie;
}
- return listen;
+ return (int) n_fd;
}
TRP_RC trps_authorize_connection(TRPS_INSTANCE *trps, TRP_CONNECTION *conn)
switch(trp_inforec_get_type(rec)) {
case TRP_INFOREC_TYPE_ROUTE:
if ((trp_inforec_get_trust_router(rec)==NULL)
- || (trp_inforec_get_next_hop(rec)==NULL)) {
+ || (trp_inforec_get_next_hop(rec)==NULL)) {
tr_debug("trps_validate_inforec: missing record info.");
return TRP_ERROR;
}
- /* check for valid metric */
+ /* check for valid ports */
+ if ((trp_inforec_get_trust_router_port(rec) <= 0)
+ || (trp_inforec_get_trust_router_port(rec) > 65535)) {
+ tr_debug("trps_validate_inforec: invalid trust router port (%d)",
+ trp_inforec_get_trust_router_port(rec));
+ return TRP_ERROR;
+ }
+
+ if ((trp_inforec_get_next_hop_port(rec) <= 0)
+ || (trp_inforec_get_next_hop_port(rec) > 65535)) {
+ tr_debug("trps_validate_inforec: invalid next hop port (%d)",
+ trp_inforec_get_next_hop_port(rec));
+ return TRP_ERROR;
+ }
+
+ /* check for valid metric */
if (trp_metric_is_invalid(trp_inforec_get_metric(rec))) {
tr_debug("trps_validate_inforec: invalid metric (%u).", trp_inforec_get_metric(rec));
return TRP_ERROR;
}
break;
+ case TRP_INFOREC_TYPE_COMMUNITY:
+ /* TODO: validate community updates */
+ break;
+
default:
tr_notice("trps_validate_inforec: unsupported record type.");
return TRP_UNSUPPORTED;
static struct timespec *trps_compute_expiry(TRPS_INSTANCE *trps, unsigned int interval, struct timespec *ts)
{
const unsigned int small_factor=3; /* how many intervals we wait before expiring */
- if (0!=clock_gettime(CLOCK_REALTIME, ts)) {
+ if (0!=clock_gettime(TRP_CLOCK, ts)) {
tr_err("trps_compute_expiry: could not read realtime clock.");
ts->tv_sec=0;
ts->tv_nsec=0;
}
+ tr_debug("trps_compute_expiry: tv_sec=%u, interval=%u, small_factor*interval=%u", ts->tv_sec, interval, small_factor*interval);
ts->tv_sec += small_factor*interval;
return ts;
}
+
+/* compare hostname/port of the trust router, return 0 if they match */
+static int trust_router_changed(TRP_ROUTE *route, TRP_INFOREC *rec)
+{
+  /* compare the cheap integer port first */
+  if (trp_route_get_trust_router_port(route) != trp_inforec_get_trust_router_port(rec))
+    return 1;
+
+  /* ports match; result is nonzero iff the trust router names differ */
+  return tr_name_cmp(trp_route_get_trust_router(route),
+                     trp_inforec_get_trust_router(rec));
+}
+
static TRP_RC trps_accept_update(TRPS_INSTANCE *trps, TRP_UPD *upd, TRP_INFOREC *rec)
{
TRP_ROUTE *entry=NULL;
trp_route_set_realm(entry, trp_upd_dup_realm(upd));
trp_route_set_peer(entry, trp_upd_dup_peer(upd));
trp_route_set_trust_router(entry, trp_inforec_dup_trust_router(rec));
+ trp_route_set_trust_router_port(entry, trp_inforec_get_trust_router_port(rec));
trp_route_set_next_hop(entry, trp_inforec_dup_next_hop(rec));
- /* TODO: pass next hop port (now defaults to TID_PORT) --jlr */
+ trp_route_set_next_hop_port(entry, trp_inforec_get_next_hop_port(rec));
if ((trp_route_get_comm(entry)==NULL)
||(trp_route_get_realm(entry)==NULL)
||(trp_route_get_peer(entry)==NULL)
trp_route_set_metric(entry, trp_inforec_get_metric(rec));
trp_route_set_interval(entry, trp_inforec_get_interval(rec));
- /* check whether the trust router has changed */
- if (0!=tr_name_cmp(trp_route_get_trust_router(entry),
- trp_inforec_get_trust_router(rec))) {
+ /* check whether the trust router has changed (either name or port) */
+ if (trust_router_changed(entry, rec)) {
/* The name changed. Set this route as triggered. */
tr_debug("trps_accept_update: trust router for route changed.");
trp_route_set_triggered(entry, 1);
trp_route_set_trust_router(entry, trp_inforec_dup_trust_router(rec)); /* frees old name */
+ trp_route_set_trust_router_port(entry, trp_inforec_get_trust_router_port(rec));
}
if (!trps_route_retracted(trps, entry)) {
tr_debug("trps_accept_update: route not retracted, setting expiry timer.");
return TRP_SUCCESS;
}
-static TRP_RC trps_handle_update(TRPS_INSTANCE *trps, TRP_UPD *upd)
+
+static TRP_RC trps_handle_inforec_route(TRPS_INSTANCE *trps, TRP_UPD *upd, TRP_INFOREC *rec)
{
- unsigned int feas=0;
- TRP_INFOREC *rec=NULL;
TRP_ROUTE *route=NULL;
+ TR_COMM *comm = NULL;
+ unsigned int feas=0;
- if (trps_validate_update(trps, upd) != TRP_SUCCESS) {
- tr_notice("trps_handle_update: received invalid TRP update.");
- return TRP_ERROR;
- }
-
- for (rec=trp_upd_get_inforec(upd); rec!=NULL; rec=trp_inforec_get_next(rec)) {
- /* validate/sanity check the record update */
- if (trps_validate_inforec(trps, rec) != TRP_SUCCESS) {
- tr_notice("trps_handle_update: invalid record in TRP update, discarding entire update.");
- return TRP_ERROR;
- }
- }
-
- for (rec=trp_upd_get_inforec(upd); rec!=NULL; rec=trp_inforec_get_next(rec)) {
- /* determine feasibility */
- feas=trps_check_feasibility(trps, trp_upd_get_realm(upd), trp_upd_get_comm(upd), rec);
- tr_debug("trps_handle_update: record feasibility=%d", feas);
-
+ /* determine feasibility */
+ feas=trps_check_feasibility(trps, trp_upd_get_realm(upd), trp_upd_get_comm(upd), rec);
+ tr_debug("trps_handle_update: record feasibility=%d", feas);
+
+ /* verify that the community is an APC */
+ comm = tr_comm_table_find_comm(trps->ctable, trp_upd_get_comm(upd));
+ if (comm == NULL) {
+ /* We don't know this community. Reject the route. */
+ tr_debug("trps_handle_updates: community %.*s unknown, ignoring route for %.*s",
+ trp_upd_get_comm(upd)->len, trp_upd_get_comm(upd)->buf,
+ trp_upd_get_realm(upd)->len, trp_upd_get_realm(upd)->buf);
+ } else if (tr_comm_get_type(comm) != TR_COMM_APC) {
+ /* The community in a route request *must* be an APC. This was not - ignore it. */
+ tr_debug("trps_handle_updates: community %.*s is not an APC, ignoring route for %.*s",
+ trp_upd_get_comm(upd)->len, trp_upd_get_comm(upd)->buf,
+ trp_upd_get_realm(upd)->len, trp_upd_get_realm(upd)->buf);
+ } else {
/* do we have an existing route? */
route=trps_get_route(trps,
trp_upd_get_comm(upd),
trps_accept_update(trps, upd, rec);
} else {
/* Update is infeasible. Ignore it unless the trust router has changed. */
- if (0!=tr_name_cmp(trp_route_get_trust_router(route),
- trp_inforec_get_trust_router(rec))) {
+ if (trust_router_changed(route, rec)) {
/* the trust router associated with the route has changed, treat update as a retraction */
trps_retract_route(trps, route);
}
trps_accept_update(trps, upd, rec);
}
}
+
+ return TRP_SUCCESS;
+}
+
+/* Test whether name appears in the JSON string array prov (a provenance list).
+ * Returns 1 if present, 0 if absent (or prov is NULL), -1 on allocation failure. */
+static int trps_name_in_provenance(TR_NAME *name, json_t *prov)
+{
+  size_t ii=0;
+  TR_NAME *this_name=NULL;
+  const char *s=NULL;
+
+  if (prov==NULL)
+    return 0; /* no provenance list, so it has no names in it */
+
+  /* now check to see if name is in the provenance */
+  for (ii=0; ii<json_array_size(prov); ii++) {
+    s=json_string_value(json_array_get(prov, ii));
+    if (s==NULL) {
+      tr_debug("trps_name_in_provenance: empty entry in provenance list.");
+      continue;
+    }
+
+    /* wrap the JSON string in a TR_NAME so we can use tr_name_cmp() */
+    this_name=tr_new_name(s);
+    if (this_name==NULL) {
+      tr_debug("trps_name_in_provenance: unable to allocate name.");
+      return -1;
+    }
+    if (0==tr_name_cmp(name, this_name)) {
+      tr_free_name(this_name);
+      return 1;
+    }
+    tr_free_name(this_name);
+  }
+  return 0;
+}
+
+/* Construct a TR_COMM named comm_id from a community inforec. Returns the new
+ * community (reparented to mem_ctx) or NULL on any allocation failure; a
+ * partially built community is released with tmp_ctx. */
+static TR_COMM *trps_create_new_comm(TALLOC_CTX *mem_ctx, TR_NAME *comm_id, TRP_INFOREC *rec)
+{
+  TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+  TR_COMM *comm=tr_comm_new(tmp_ctx);
+
+  if (comm==NULL) {
+    tr_debug("trps_create_new_comm: unable to allocate new community.");
+    goto cleanup;
+  }
+  /* fill in the community with info */
+  tr_comm_set_id(comm, tr_dup_name(comm_id));
+  if (tr_comm_get_id(comm)==NULL) {
+    tr_debug("trps_create_new_comm: unable to allocate community name.");
+    comm=NULL;
+    goto cleanup;
+  }
+  tr_comm_set_type(comm, trp_inforec_get_comm_type(rec));
+  /* each optional field is copied only if present in the inforec; each setter
+   * is verified through its getter to catch allocation failures */
+  if (trp_inforec_get_apcs(rec)!=NULL) {
+    tr_comm_set_apcs(comm, tr_apc_dup(tmp_ctx, trp_inforec_get_apcs(rec)));
+    if (tr_comm_get_apcs(comm)==NULL) {
+      tr_debug("trps_create_new_comm: unable to allocate APC list.");
+      comm=NULL;
+      goto cleanup;
+    }
+  }
+  if (trp_inforec_get_owner_realm(rec)!=NULL) {
+    tr_comm_set_owner_realm(comm, tr_dup_name(trp_inforec_get_owner_realm(rec)));
+    if (tr_comm_get_owner_realm(comm)==NULL) {
+      tr_debug("trps_create_new_comm: unable to allocate owner realm name.");
+      comm=NULL;
+      goto cleanup;
+    }
+  }
+  if (trp_inforec_get_owner_contact(rec)!=NULL) {
+    tr_comm_set_owner_contact(comm, tr_dup_name(trp_inforec_get_owner_contact(rec)));
+    if (tr_comm_get_owner_contact(comm)==NULL) {
+      tr_debug("trps_create_new_comm: unable to allocate owner contact.");
+      comm=NULL;
+      goto cleanup;
+    }
+  }
+  comm->expiration_interval=trp_inforec_get_exp_interval(rec);
+  /* success: move ownership to the caller's context before tmp_ctx is freed */
+  talloc_steal(mem_ctx, comm);
+
+cleanup:
+  talloc_free(tmp_ctx);
+  return comm;
+}
+
+/* Construct a TR_RP_REALM named realm_id. The comm and rec parameters are
+ * currently unused (kept for signature parity with trps_create_new_idp_realm).
+ * Returns the new realm (reparented to mem_ctx) or NULL on allocation failure. */
+static TR_RP_REALM *trps_create_new_rp_realm(TALLOC_CTX *mem_ctx, TR_NAME *comm, TR_NAME *realm_id, TRP_INFOREC *rec)
+{
+  TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+  TR_RP_REALM *rp=tr_rp_realm_new(tmp_ctx);
+
+  if (rp==NULL) {
+    tr_debug("trps_create_new_rp_realm: unable to allocate new realm.");
+    goto cleanup;
+  }
+  /* fill in the realm */
+  tr_rp_realm_set_id(rp, tr_dup_name(realm_id));
+  if (tr_rp_realm_get_id(rp)==NULL) {
+    tr_debug("trps_create_new_rp_realm: unable to allocate realm name.");
+    rp=NULL;
+    goto cleanup;
+  }
+  /* success: move ownership to the caller's context before tmp_ctx is freed */
+  talloc_steal(mem_ctx, rp);
+
+cleanup:
+  talloc_free(tmp_ctx);
+  return rp;
+}
+
+/* Construct a TR_IDP_REALM named realm_id for community comm_id based on a
+ * community inforec. The realm's APC list comes either from the community
+ * itself (if it is an APC) or from the inforec's APC list (if it is a CoI).
+ * Returns the new realm (reparented to mem_ctx) or NULL on failure. */
+static TR_IDP_REALM *trps_create_new_idp_realm(TALLOC_CTX *mem_ctx,
+                                               TR_NAME *comm_id,
+                                               TR_NAME *realm_id,
+                                               TRP_INFOREC *rec)
+{
+  TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+  TR_IDP_REALM *idp=tr_idp_realm_new(tmp_ctx);
+  TR_APC *realm_apcs = NULL;
+
+  if (idp==NULL) {
+    tr_debug("trps_create_new_idp_realm: unable to allocate new realm.");
+    goto cleanup;
+  }
+  /* fill in the realm */
+  tr_idp_realm_set_id(idp, tr_dup_name(realm_id));
+  if (tr_idp_realm_get_id(idp)==NULL) {
+    tr_debug("trps_create_new_idp_realm: unable to allocate realm name.");
+    idp=NULL;
+    goto cleanup;
+  }
+
+  /* Set the APCs. If the community is a CoI, copy its APCs. If it is an APC, then
+   * that community itself is the APC for the realm. */
+  if (trp_inforec_get_comm_type(rec) == TR_COMM_APC) {
+    /* the community is an APC for this realm */
+    realm_apcs = tr_apc_new(tmp_ctx);
+    if (realm_apcs == NULL) {
+      tr_debug("trps_create_new_idp_realm: unable to allocate new APC list.");
+      idp = NULL;
+      goto cleanup;
+    }
+
+    tr_apc_set_id(realm_apcs, tr_dup_name(comm_id));
+    if (tr_apc_get_id(realm_apcs) == NULL) {
+      tr_debug("trps_create_new_idp_realm: unable to allocate new APC name.");
+      idp = NULL;
+      goto cleanup;
+    }
+  } else {
+    /* the community is not an APC for this realm */
+    realm_apcs = trp_inforec_get_apcs(rec);
+    if (realm_apcs == NULL) {
+      tr_debug("trps_create_new_idp_realm: no APCs for realm %.*s/%.*s, cannot add.",
+               realm_id->len, realm_id->buf,
+               comm_id->len, comm_id->buf);
+      idp = NULL;
+      goto cleanup;
+    }
+
+    /* we have APCs, make our own copy */
+    realm_apcs = tr_apc_dup(tmp_ctx, realm_apcs);
+    if (realm_apcs == NULL) {
+      tr_debug("trps_create_new_idp_realm: unable to duplicate APC list.");
+      idp = NULL;
+      goto cleanup;
+    }
+  }
+
+  /* Whether the community is an APC or CoI, the APCs for the realm are in realm_apcs */
+  tr_idp_realm_set_apcs(idp, realm_apcs); /* takes realm_apcs out of tmp_ctx on success */
+  if (tr_idp_realm_get_apcs(idp) == NULL) {
+    tr_debug("trps_create_new_idp_realm: unable to set APC list for new realm.");
+    idp=NULL;
+    goto cleanup;
+  }
+
+  /* mark this realm as learned from a peer rather than locally configured */
+  idp->origin=TR_REALM_DISCOVERED;
+
+  /* success: move ownership to the caller's context before tmp_ctx is freed */
+  talloc_steal(mem_ctx, idp);
+
+cleanup:
+  talloc_free(tmp_ctx);
+  return idp;
+}
+
+/* Handle a community inforec from a TRP update: reject it if our own label is
+ * already in its provenance (loop prevention), otherwise find-or-create the
+ * community and the RP/IDP realm it names, and record the membership with a
+ * freshly computed expiry. Returns TRP_SUCCESS, or TRP_ERROR on any failure. */
+static TRP_RC trps_handle_inforec_comm(TRPS_INSTANCE *trps, TRP_UPD *upd, TRP_INFOREC *rec)
+{
+  TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+  TR_NAME *comm_id=trp_upd_get_comm(upd);
+  TR_NAME *realm_id=trp_upd_get_realm(upd);
+  TR_NAME *origin_id=NULL;
+  TR_NAME *our_peer_label=NULL;
+  TR_COMM *comm=NULL;
+  TR_RP_REALM *rp_realm=NULL;
+  TR_IDP_REALM *idp_realm=NULL;
+  struct timespec expiry={0,0};
+  TRP_RC rc=TRP_ERROR;
+
+  if ((comm_id==NULL) || (realm_id==NULL))
+    goto cleanup;
+
+  origin_id=trp_inforec_dup_origin(rec);
+  if (origin_id==NULL)
+    goto cleanup;
+
+  /* see whether we want to add this */
+  our_peer_label=trps_dup_label(trps);
+  if (our_peer_label==NULL) {
+    tr_debug("trps_handle_inforec_comm: unable to allocate peer label.");
+    goto cleanup;
+  }
+
+  if (trps_name_in_provenance(our_peer_label, trp_inforec_get_provenance(rec)))
+    tr_debug("trps_handle_inforec_comm: rejecting community inforec to avoid provenance loop.");
+  else {
+    /* no loop occurring, accept the update */
+    comm=tr_comm_table_find_comm(trps->ctable, comm_id);
+    if (comm==NULL) {
+      tr_debug("trps_handle_inforec_comm: unknown community %.*s in inforec, creating it.",
+               comm_id->len, comm_id->buf);
+      comm=trps_create_new_comm(tmp_ctx, comm_id, rec);
+      if (comm==NULL) {
+        tr_debug("trps_handle_inforec_comm: unable to create new community.");
+        goto cleanup;
+      }
+      /* NOTE(review): comm is still parented to tmp_ctx here; presumably
+       * tr_comm_table_add_comm() takes ownership -- confirm, else the table
+       * holds a dangling pointer after talloc_free(tmp_ctx) below. */
+      if (tr_comm_table_add_comm(trps->ctable, comm) != 0)
+      {
+        tr_debug("trps_handle_inforec_comm: unable to add community to community table.");
+        goto cleanup;
+      }
+    }
+    /* TODO: see if other comm data match the new inforec and update or complain */
+
+    trps_compute_expiry(trps, trp_inforec_get_interval(rec), &expiry);
+    if ((expiry.tv_sec==0)&&(expiry.tv_nsec==0))
+      goto cleanup;
+
+    /* the inforec's role decides which realm table the membership goes into */
+    switch (trp_inforec_get_role(rec)) {
+      case TR_ROLE_RP:
+        rp_realm=tr_rp_realm_lookup(trps->ctable->rp_realms, realm_id);
+        if (rp_realm==NULL) {
+          tr_debug("trps_handle_inforec_comm: unknown RP realm %.*s in inforec, creating it.",
+                   realm_id->len, realm_id->buf);
+          rp_realm= trps_create_new_rp_realm(tmp_ctx, tr_comm_get_id(comm), realm_id, rec);
+          if (rp_realm==NULL) {
+            tr_debug("trps_handle_inforec_comm: unable to create new RP realm.");
+            /* we may leave an unused community in the table, but it will only last until
+             * the next table sweep if it does not get any realms before that happens */
+            goto cleanup;
+          }
+          /* NOTE(review): same ownership question as for comm above -- the
+           * realm is parented to tmp_ctx; confirm the table add reparents it */
+          tr_comm_table_add_rp_realm(trps->ctable, rp_realm);
+        }
+        /* TODO: if realm existed, see if data match the new inforec and update or complain */
+        tr_comm_add_rp_realm(trps->ctable, comm, rp_realm, trp_inforec_get_interval(rec), trp_inforec_get_provenance(rec), &expiry);
+        tr_debug("trps_handle_inforec_comm: added RP realm %.*s to comm %.*s (origin %.*s).",
+                 realm_id->len, realm_id->buf,
+                 comm_id->len, comm_id->buf,
+                 origin_id->len, origin_id->buf);
+        break;
+      case TR_ROLE_IDP:
+        idp_realm=tr_idp_realm_lookup(trps->ctable->idp_realms, realm_id);
+        if (idp_realm==NULL) {
+          tr_debug("trps_handle_inforec_comm: unknown IDP realm %.*s in inforec, creating it.",
+                   realm_id->len, realm_id->buf);
+          idp_realm= trps_create_new_idp_realm(tmp_ctx, tr_comm_get_id(comm), realm_id, rec);
+          if (idp_realm==NULL) {
+            tr_debug("trps_handle_inforec_comm: unable to create new IDP realm.");
+            /* we may leave an unused community in the table, but it will only last until
+             * the next table sweep if it does not get any realms before that happens */
+            goto cleanup;
+          }
+          tr_comm_table_add_idp_realm(trps->ctable, idp_realm);
+        }
+        /* TODO: if realm existed, see if data match the new inforec and update or complain */
+        tr_comm_add_idp_realm(trps->ctable, comm, idp_realm, trp_inforec_get_interval(rec), trp_inforec_get_provenance(rec), &expiry);
+        tr_debug("trps_handle_inforec_comm: added IDP realm %.*s to comm %.*s (origin %.*s).",
+                 realm_id->len, realm_id->buf,
+                 comm_id->len, comm_id->buf,
+                 origin_id->len, origin_id->buf);
+        break;
+      default:
+        tr_debug("trps_handle_inforec_comm: unable to add realm.");
+        goto cleanup;
+    }
+  }
+
+  rc=TRP_SUCCESS;
+
+cleanup:
+  if (our_peer_label!=NULL)
+    tr_free_name(our_peer_label);
+  if (origin_id!=NULL)
+    tr_free_name(origin_id);
+  talloc_free(tmp_ctx);
+  return rc;
+}
+
+/**
+ * Apply applicable TRP_INBOUND filters to an inforec. Rejects everything if peer has no filters.
+ *
+ * @param trps Active TRPS instance
+ * @param upd TRP_UPD that contains the inforec to filter
+ * @param rec Inforec to filter
+ * @return 1 if accepted by the filter, 0 otherwise
+ */
+static int trps_filter_inbound_inforec(TRPS_INSTANCE *trps, TRP_UPD *upd, TRP_INFOREC *rec)
+{
+  TRP_PEER *peer=NULL;
+  TR_NAME *peer_name=NULL;
+  TR_FILTER_ACTION action=TR_FILTER_ACTION_REJECT;
+  TR_FILTER_TARGET *target=NULL;
+  int retval=0;
+
+  /* Look up the peer. For inbound messages, the peer is identified by its GSS name */
+  peer_name=trp_upd_get_peer(upd);
+  if (peer_name==NULL) {
+    /* without a peer name we can neither look up the peer nor log it safely */
+    tr_err("trps_filter_inbound_inforec: received inforec with no peer name, rejecting.");
+    return 0;
+  }
+  peer=trps_get_peer_by_gssname(trps, peer_name);
+  if (peer==NULL) {
+    tr_err("trps_filter_inbound_inforec: received inforec from unknown peer (%.*s), rejecting.",
+           peer_name->len,
+           peer_name->buf);
+    return 0;
+  }
+
+  /* tr_filter_apply() and tr_filter_set_get() handle null filter sets/filters by rejecting */
+  target= tr_filter_target_trp_inforec(NULL, upd, rec);
+  if (target==NULL) {
+    /* TODO: signal that filtering failed. Until then, just filter everything and give an error message. */
+    tr_crit("trps_filter_inbound_inforec: Unable to allocate filter target, cannot apply filter!");
+  }
+  if ((target==NULL)
+      || (TR_FILTER_NO_MATCH==tr_filter_apply(target,
+                                              tr_filter_set_get(peer->filters, TR_FILTER_TYPE_TRP_INBOUND),
+                                              NULL,
+                                              &action))
+      || (action!=TR_FILTER_ACTION_ACCEPT)) {
+    /* either the filter did not match or it matched a reject rule or allocating the target failed */
+    retval=0;
+  } else
+    retval=1;
+  if (target!=NULL)
+    tr_filter_target_free(target);
+
+  /* retval is 1 only if the filter matched an accept rule */
+  return retval;
+}
+
+
+/* Handle a received TRP update: validate the update and every inforec in it
+ * (discarding the whole update if any inforec is invalid), then apply the
+ * inbound filters and dispatch each surviving inforec by type. Returns
+ * TRP_SUCCESS once validation passes; per-inforec errors are only logged. */
+static TRP_RC trps_handle_update(TRPS_INSTANCE *trps, TRP_UPD *upd)
+{
+  TRP_INFOREC *rec=NULL;
+
+  if (trps_validate_update(trps, upd) != TRP_SUCCESS) {
+    tr_notice("trps_handle_update: received invalid TRP update.");
+    return TRP_ERROR;
+  }
+
+  /* first pass: validate every inforec before acting on any of them */
+  for (rec=trp_upd_get_inforec(upd); rec!=NULL; rec=trp_inforec_get_next(rec)) {
+    /* validate/sanity check the record update */
+    if (trps_validate_inforec(trps, rec) != TRP_SUCCESS) {
+      tr_notice("trps_handle_update: invalid inforec in TRP update, discarding entire update.");
+      return TRP_ERROR;
+    }
+  }
+
+  /* second pass: filter and dispatch each inforec */
+  for (rec=trp_upd_get_inforec(upd); rec!=NULL; rec=trp_inforec_get_next(rec)) {
+    if (!trps_filter_inbound_inforec(trps, upd, rec)) {
+      tr_debug("trps_handle_update: inforec rejected by filter.");
+      continue; /* just go on to the next record */
+    }
+
+    switch (trp_inforec_get_type(rec)) {
+      case TRP_INFOREC_TYPE_ROUTE:
+        tr_debug("trps_handle_update: handling route inforec.");
+        if (TRP_SUCCESS!=trps_handle_inforec_route(trps, upd, rec))
+          tr_notice("trps_handle_update: error handling route inforec.");
+        break;
+      case TRP_INFOREC_TYPE_COMMUNITY:
+        tr_debug("trps_handle_update: handling community inforec.");
+        if (TRP_SUCCESS!=trps_handle_inforec_comm(trps, upd, rec))
+          tr_notice("trps_handle_update: error handling community inforec.");
+
+        break;
+      default:
+        tr_notice("trps_handle_update: unsupported inforec in TRP update.");
+        break;
+    }
+  }
return TRP_SUCCESS;
}
/* choose the best route to comm/realm, optionally excluding routes to a particular peer */
static TRP_ROUTE *trps_find_best_route(TRPS_INSTANCE *trps,
- TR_NAME *comm,
- TR_NAME *realm,
- TR_NAME *exclude_peer)
+ TR_NAME *comm,
+ TR_NAME *realm,
+ TR_NAME *exclude_peer_label)
{
TRP_ROUTE **entry=NULL;
TRP_ROUTE *best=NULL;
+ TRP_PEER *route_peer = NULL;
size_t n_entry=0;
unsigned int kk=0;
unsigned int kk_min=0;
entry=trp_rtable_get_realm_entries(trps->rtable, comm, realm, &n_entry);
for (kk=0; kk<n_entry; kk++) {
if (trp_route_get_metric(entry[kk]) < min_metric) {
- if ((exclude_peer==NULL) || (0!=tr_name_cmp(trp_route_get_peer(entry[kk]),
- exclude_peer))) {
- kk_min=kk;
- min_metric=trp_route_get_metric(entry[kk]);
- }
+ if (exclude_peer_label != NULL) {
+ if (!trp_route_is_local(entry[kk])) {
+ /* route is not local, check the peer label */
+ route_peer = trp_ptable_find_gss_name(trps->ptable,
+ trp_route_get_peer(entry[kk]));
+ if (route_peer == NULL) {
+ tr_err("trps_find_best_route: unknown peer GSS name (%.*s) for route %d to %.*s/%.*s",
+ trp_route_get_peer(entry[kk])->len, trp_route_get_peer(entry[kk])->buf,
+ kk,
+ realm->len, realm->buf,
+ comm->len, comm->buf);
+ continue; /* unknown peer, skip the route */
+ }
+ if (0 == tr_name_cmp(exclude_peer_label, trp_peer_get_label(route_peer))) {
+ /* we're excluding this peer - skip the route */
+ continue;
+ }
+ }
+ }
+ /* if we get here, we're not excluding the route */
+ kk_min = kk;
+ min_metric = trp_route_get_metric(entry[kk]);
}
}
+
if (trp_metric_is_finite(min_metric))
best=entry[kk_min];
/* true if curtime >= expiry */
static int trps_expired(struct timespec *expiry, struct timespec *curtime)
{
-  return ((curtime->tv_sec > expiry->tv_sec)
-          || ((curtime->tv_sec == expiry->tv_sec)
-              &&(curtime->tv_nsec >= expiry->tv_nsec)));
+  /* delegate the lexicographic (tv_sec, tv_nsec) comparison to tr_cmp_timespec() */
+  return (tr_cmp_timespec(curtime, expiry) >= 0);
}
/* Sweep for expired routes. For each expired route, if its metric is infinite, the route is flushed.
size_t ii=0;
/* use a single time for the entire sweep */
- if (0!=clock_gettime(CLOCK_REALTIME, &sweep_time)) {
+ if (0!=clock_gettime(TRP_CLOCK, &sweep_time)) {
tr_err("trps_sweep_routes: could not read realtime clock.");
sweep_time.tv_sec=0;
sweep_time.tv_nsec=0;
return TRP_ERROR;
}
- entry=trp_rtable_get_entries(trps->rtable, &n_entry); /* must talloc_free *entry */
+ entry= trp_rtable_get_entries(NULL, trps->rtable, &n_entry); /* must talloc_free *entry */
/* loop over the entries */
for (ii=0; ii<n_entry; ii++) {
return TRP_SUCCESS;
}
+
+/* Sweep for expired communities/realms/memberships. A membership that has
+ * already expired once is removed outright; a first expiration is only noted
+ * and its expiry timer reset, giving the origin one more interval to refresh
+ * it. Returns TRP_SUCCESS on completion, TRP_NOMEM or TRP_ERROR on failure. */
+TRP_RC trps_sweep_ctable(TRPS_INSTANCE *trps)
+{
+  TALLOC_CTX *tmp_ctx=talloc_new(NULL);
+  struct timespec sweep_time={0,0};
+  struct timespec tmp = {0};
+  TR_COMM_MEMB *memb=NULL;
+  TR_COMM_ITER *iter=NULL;
+  TRP_RC rc=TRP_ERROR;
+
+  /* use a single time for the entire sweep */
+  if (0!=clock_gettime(TRP_CLOCK, &sweep_time)) {
+    tr_err("trps_sweep_ctable: could not read realtime clock.");
+    sweep_time.tv_sec=0;
+    sweep_time.tv_nsec=0;
+    goto cleanup;
+  }
+
+  /* iterate all memberships */
+  iter=tr_comm_iter_new(tmp_ctx);
+  if (iter==NULL) {
+    tr_err("trps_sweep_ctable: unable to allocate iterator.");
+    rc=TRP_NOMEM;
+    goto cleanup;
+  }
+  for (memb=tr_comm_memb_iter_all_first(iter, trps->ctable);
+       memb!=NULL;
+       memb=tr_comm_memb_iter_all_next(iter)) {
+    if (tr_comm_memb_get_origin(memb)==NULL)
+      continue; /* do not expire local entries */
+
+    if (tr_comm_memb_is_expired(memb, &sweep_time)) {
+      if (tr_comm_memb_get_times_expired(memb)>0) {
+        /* Already expired once; flush. */
+        tr_debug("trps_sweep_ctable: flushing expired community membership (%.*s in %.*s, origin %.*s, expired %s).",
+                 tr_comm_memb_get_realm_id(memb)->len, tr_comm_memb_get_realm_id(memb)->buf,
+                 tr_comm_get_id(tr_comm_memb_get_comm(memb))->len, tr_comm_get_id(tr_comm_memb_get_comm(memb))->buf,
+                 tr_comm_memb_get_origin(memb)->len, tr_comm_memb_get_origin(memb)->buf,
+                 timespec_to_str(tr_comm_memb_get_expiry_realtime(memb, &tmp)));
+        /* NOTE(review): memb is freed while the iterator is still active;
+         * assumes tr_comm_memb_iter_all_next() does not touch the removed
+         * entry -- confirm against the iterator implementation. */
+        tr_comm_table_remove_memb(trps->ctable, memb);
+        tr_comm_memb_free(memb);
+      } else {
+        /* This is the first expiration. Note this and reset the expiry time. */
+        tr_comm_memb_expire(memb);
+        trps_compute_expiry(trps, tr_comm_memb_get_interval(memb), tr_comm_memb_get_expiry(memb));
+        tr_debug("trps_sweep_ctable: community membership expired at %s, resetting expiry to %s (%.*s in %.*s, origin %.*s).",
+                 timespec_to_str(tr_clock_convert(TRP_CLOCK, &sweep_time, CLOCK_REALTIME, &tmp)),
+                 timespec_to_str(tr_comm_memb_get_expiry_realtime(memb, &tmp)),
+                 tr_comm_memb_get_realm_id(memb)->len, tr_comm_memb_get_realm_id(memb)->buf,
+                 tr_comm_get_id(tr_comm_memb_get_comm(memb))->len, tr_comm_get_id(tr_comm_memb_get_comm(memb))->buf,
+                 tr_comm_memb_get_origin(memb)->len, tr_comm_memb_get_origin(memb)->buf);
+      }
+    }
+  }
+
+  /* get rid of any unreferenced realms, etc */
+  tr_comm_table_sweep(trps->ctable);
+
+  /* Bug fix: rc was never set on the success path, so a successful sweep
+   * reported TRP_ERROR to the caller. */
+  rc=TRP_SUCCESS;
+
+cleanup:
+  talloc_free(tmp_ctx);
+  return rc;
+}
+
/* add metrics */
static unsigned int trps_metric_add(unsigned int m1, unsigned int m2)
{
trp_route_get_peer(route)));
}
- /* Note that we leave the next hop empty since the recipient fills that in.
- * This is where we add the link cost (currently always 1) to the next peer. */
- if ((trp_inforec_set_trust_router(rec, trp_route_dup_trust_router(route)) != TRP_SUCCESS)
- ||(trp_inforec_set_metric(rec,
- trps_metric_add(trp_route_get_metric(route),
- linkcost)) != TRP_SUCCESS)
- ||(trp_inforec_set_interval(rec, trps_get_update_interval(trps)) != TRP_SUCCESS)) {
+ /*
+ * This is where we add the link cost (currently always 1) to the next peer.
+ *
+ * Here, set next_hop to our TID address/port rather than passing along our own
+ * next_hop. That is the one *we* use to forward requests. We are advertising
+ * ourselves as a hop for our peers.
+ */
+ if ((TRP_SUCCESS != trp_inforec_set_trust_router(rec,
+ trp_route_dup_trust_router(route),
+ trp_route_get_trust_router_port(route)))
+ ||(TRP_SUCCESS != trp_inforec_set_next_hop(rec,
+ tr_new_name(trps->hostname),
+ trps->tids_port))
+ ||(TRP_SUCCESS != trp_inforec_set_metric(rec,
+ trps_metric_add(trp_route_get_metric(route),
+ linkcost)))
+ ||(TRP_SUCCESS != trp_inforec_set_interval(rec, trps_get_update_interval(trps)))) {
tr_err("trps_route_to_inforec: error creating route update.");
talloc_free(rec);
rec=NULL;
}
/* select the correct route to comm/realm to be announced to peer */
+/* Returns the route to advertise, or NULL if none should be advertised
+ * (no selected route, unknown peer for the selected route, or no feasible
+ * alternate when the selected route would violate split horizon). */
-static TRP_ROUTE *trps_select_realm_update(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME *realm, TR_NAME *peer_gssname)
+static TRP_ROUTE *trps_select_realm_update(TRPS_INSTANCE *trps, TR_NAME *comm, TR_NAME *realm, TR_NAME *peer_label)
{
-  TRP_ROUTE *route;
+  TRP_ROUTE *route = NULL;
+  TRP_PEER *route_peer = NULL;
+  TR_NAME *route_peer_label = NULL;
/* Take the currently selected route unless it is through the peer we're sending the update to.
-   * I.e., enforce the split horizon rule. */
+   * I.e., enforce the split horizon rule. Start by looking up the currently selected route. */
route=trp_rtable_get_selected_entry(trps->rtable, comm, realm);
if (route==NULL) {
/* No selected route, this should only happen if the only route has been retracted,
* in which case we do not want to advertise it. */
return NULL;
}
-  tr_debug("trps_select_realm_update: %s vs %s", peer_gssname->buf,
-           trp_route_get_peer(route)->buf);
-  if (0==tr_name_cmp(peer_gssname, trp_route_get_peer(route))) {
-    tr_debug("trps_select_realm_update: matched, finding alternate route");
-    /* the selected entry goes through the peer we're reporting to, choose an alternate */
-    route=trps_find_best_route(trps, comm, realm, peer_gssname);
-    if ((route==NULL) || (!trp_metric_is_finite(trp_route_get_metric(route))))
-      return NULL; /* don't advertise a nonexistent or retracted route */
+
+  /* Check whether it's local. */
+  if (trp_route_is_local(route)) {
+    /* It is always ok to announce a local route */
+    tr_debug("trps_select_realm_update: selected route for %.*s/%.*s is local",
+             realm->len, realm->buf,
+             comm->len, comm->buf);
+  } else {
+    /* It's not local. Get the route's peer and check whether it's the same place we
+     * got the selected route from. Peer should always correspond to an entry in our
+     * peer table. */
+    tr_debug("trps_select_realm_update: selected route for %.*s/%.*s is not local",
+             realm->len, realm->buf,
+             comm->len, comm->buf);
+    route_peer = trp_ptable_find_gss_name(trps->ptable, trp_route_get_peer(route));
+    if (route_peer == NULL) {
+      tr_err("trps_select_realm_update: unknown peer GSS name (%.*s) for selected route for %.*s/%.*s",
+             trp_route_get_peer(route)->len, trp_route_get_peer(route)->buf,
+             realm->len, realm->buf,
+             comm->len, comm->buf);
+      return NULL;
+    }
+    route_peer_label = trp_peer_get_label(route_peer);
+    if (route_peer_label == NULL) {
+      tr_err("trps_select_realm_update: error retrieving peer label for selected route for %.*s/%.*s",
+             realm->len, realm->buf,
+             comm->len, comm->buf);
+      return NULL;
+    }
+
+    /* see if these match */
+    tr_debug("trps_select_realm_update: %.*s vs %.*s",
+             peer_label->len, peer_label->buf,
+             route_peer_label->len, route_peer_label->buf);
+
+    if (0==tr_name_cmp(peer_label, route_peer_label)) {
+      /* the selected entry goes through the peer we're reporting to, choose an alternate */
+      tr_debug("trps_select_realm_update: matched, finding alternate route");
+      route=trps_find_best_route(trps, comm, realm, peer_label);
+      if ((route==NULL) || (!trp_metric_is_finite(trp_route_get_metric(route)))) {
+        tr_debug("trps_select_realm_update: no route to %.*s/%.*s suitable to advertise to %.*s",
+                 realm->len, realm->buf,
+                 comm->len, comm->buf,
+                 peer_label->len, peer_label->buf);
+        return NULL; /* don't advertise a nonexistent or retracted route */
+      }
+    }
}
+  /* route is either local, not via peer_label, or a feasible alternate */
return route;
}
static TRP_RC trps_select_route_updates_for_peer(TALLOC_CTX *mem_ctx,
GPtrArray *updates,
TRPS_INSTANCE *trps,
- TR_NAME *peer_gssname,
+ TR_NAME *peer_label,
int triggered)
{
size_t n_comm=0;
for (ii=0; ii<n_comm; ii++) {
realm=trp_rtable_get_comm_realms(trps->rtable, comm[ii], &n_realm);
for (jj=0; jj<n_realm; jj++) {
- best=trps_select_realm_update(trps, comm[ii], realm[jj], peer_gssname);
+ best=trps_select_realm_update(trps, comm[ii], realm[jj], peer_label);
/* If we found a route, add it to the list. If triggered!=0, then only
* add triggered routes. */
if ((best!=NULL) && ((!triggered) || trp_route_is_triggered(best))) {
goto cleanup;
}
- if ((TRP_SUCCESS!=trp_inforec_set_apcs(rec,
- tr_apc_dup(rec, tr_comm_get_apcs(comm)))) ||
- (NULL==trp_inforec_get_apcs(rec))) {
+ if ((NULL!=tr_comm_get_apcs(comm)) &&
+ ( (TRP_SUCCESS!=trp_inforec_set_apcs(rec,
+ tr_apc_dup(rec, tr_comm_get_apcs(comm)))) ||
+ (NULL==trp_inforec_get_apcs(rec)))) {
rec=NULL;
goto cleanup;
}
goto cleanup;
}
- if (TRP_SUCCESS!=trp_inforec_set_interval(rec, tr_comm_memb_get_interval(memb))) {
+ if (TRP_SUCCESS!=trp_inforec_set_interval(rec, trps_get_update_interval(trps))) {
rec=NULL;
goto cleanup;
}
}
/* construct an update with all the inforecs for comm/realm/role to be sent to peer */
-static TRP_UPD *trps_comm_update(TALLOC_CTX *mem_ctx, TRPS_INSTANCE *trps, TR_NAME *peer_gssname, TR_COMM *comm, TR_REALM *realm)
+static TRP_UPD *trps_comm_update(TALLOC_CTX *mem_ctx,
+ TRPS_INSTANCE *trps,
+ TR_NAME *peer_label,
+ TR_COMM *comm,
+ TR_REALM *realm)
{
TALLOC_CTX *tmp_ctx=talloc_new(NULL);
TRP_UPD *upd=trp_upd_new(tmp_ctx);
iter=tr_comm_iter_new(tmp_ctx);
if (iter==NULL) {
- tr_err("tr_comm_update: unable to allocate iterator.");
+ tr_err("trps_comm_update: unable to allocate iterator.");
upd=NULL;
goto cleanup;
}
memb=tr_comm_memb_iter_next(iter)) {
rec=trps_memb_to_inforec(tmp_ctx, trps, memb);
if (rec==NULL) {
- tr_err("tr_comm_update: unable to allocate inforec.");
+ tr_err("trps_comm_update: unable to allocate inforec.");
upd=NULL;
goto cleanup;
}
/* Find all community updates to send to a peer and add these as TR_UPD records
* to the updates GPtrArray. */
-static TRP_RC trps_select_comm_updates_for_peer(TALLOC_CTX *mem_ctx, GPtrArray *updates, TRPS_INSTANCE *trps, TR_NAME *peer_gssname)
+static TRP_RC trps_select_comm_updates_for_peer(TALLOC_CTX *mem_ctx,
+ GPtrArray *updates,
+ TRPS_INSTANCE *trps,
+ TR_NAME *peer_label,
+ int triggered)
{
TALLOC_CTX *tmp_ctx=talloc_new(NULL);
TR_COMM_ITER *comm_iter=NULL;
TR_REALM *realm=NULL;
TRP_UPD *upd=NULL;
TRP_RC rc=TRP_ERROR;
-
+
+ /* currently do not send any communities on triggered updates */
+ if (triggered) {
+ rc=TRP_SUCCESS;
+ goto cleanup;
+ }
+
comm_iter=tr_comm_iter_new(tmp_ctx);
realm_iter=tr_comm_iter_new(tmp_ctx);
if ((comm_iter==NULL) || (realm_iter==NULL)) {
comm!=NULL;
comm=tr_comm_table_iter_next(comm_iter)) {
/* do every realm in this community */
+ tr_debug("trps_select_comm_updates_for_peer: looking through community %.*s",
+ tr_comm_get_id(comm)->len,
+ tr_comm_get_id(comm)->buf);
for (realm=tr_realm_iter_first(realm_iter, trps->ctable, tr_comm_get_id(comm));
realm!=NULL;
realm=tr_realm_iter_next(realm_iter)) {
/* get the update for this comm/realm */
- upd=trps_comm_update(tmp_ctx, trps, peer_gssname, comm, realm);
- if (upd!=NULL) {
+ tr_debug("trps_select_comm_updates_for_peer: adding realm %.*s",
+ tr_realm_get_id(realm)->len,
+ tr_realm_get_id(realm)->buf);
+ upd=trps_comm_update(mem_ctx, trps, peer_label, comm, realm);
+ if (upd!=NULL)
g_ptr_array_add(updates, upd);
- }
}
}
- /* move anything needed into mem_ctx */
-
-
cleanup:
talloc_free(tmp_ctx);
return rc;
}
+/**
+ * Filter the inforecs in a single update
+ *
+ * Applies the filter to each inforec in the update. A record survives only
+ * if a filter line matches it and that line's action is ACCEPT. On no match,
+ * a non-accept action, or failure to allocate the filter target, the record
+ * is removed from the update - i.e., default deny.
+ *
+ * @param filt The filter to apply
+ * @param upd The update to filter
+ */
+static void trps_filter_one_outbound_update(TR_FILTER *filt, TRP_UPD *upd)
+{
+ TRP_INFOREC *this=NULL, *next=NULL;
+ TR_FILTER_ACTION action=TR_FILTER_ACTION_REJECT; /* default deny if tr_filter_apply() does not set it */
+ TR_FILTER_TARGET *target=NULL;
+
+ for(this=trp_upd_get_inforec(upd); this!=NULL; this=next) {
+ next=this->next; /* save the link now - "this" may be removed from the list below */
+ target= tr_filter_target_trp_inforec(NULL, upd, this);
+ if (target==NULL) {
+ /* TODO: signal that filtering failed. Until then, just filter everything and give an error message. */
+ tr_crit("trps_filter_one_outbound_update: Unable to allocate filter target, cannot apply filter!");
+ }
+ if ((target==NULL)
+ || (TR_FILTER_NO_MATCH==tr_filter_apply(target, filt, NULL, &action))
+ || (action!=TR_FILTER_ACTION_ACCEPT)) {
+ /* Either no filter matched or one matched and rejected this record.
+ * Also filter out record if we were unable to allocate a target. */
+ trp_upd_remove_inforec(upd, this); /* "this" is now invalid */
+ }
+ if (target!=NULL)
+ tr_filter_target_free(target);
+ }
+}
+
+/**
+ * Apply the peer's outbound TRP filter to a set of updates
+ *
+ * Filters the inforecs in each update; any update left with zero inforecs
+ * is dropped from the array entirely. May shuffle the update list
+ * (removal uses the fast, order-destroying GPtrArray removal).
+ *
+ * @param filters The filter set for the relevant TRP peer
+ * @param updates GPtrArray of updates to filter
+ */
+static void trps_filter_outbound_updates(TR_FILTER_SET *filters, GPtrArray *updates)
+{
+ TRP_UPD *upd=NULL;
+ guint ii=0;
+
+ /* Walk backward through the array so we can remove elements. Careful about loop
+ * termination - remember that ii is unsigned. */
+ for (ii=updates->len; ii>0; ii--) {
+ upd=g_ptr_array_index(updates, ii-1);
+ trps_filter_one_outbound_update(tr_filter_set_get(filters, TR_FILTER_TYPE_TRP_OUTBOUND), upd);
+ /* see if we removed all the records from this update */
+ if (trp_upd_num_inforecs(upd)==0)
+ g_ptr_array_remove_index_fast(updates, ii-1); /* does not preserve order at index ii or higher */
+ }
+}
/* helper for trps_update_one_peer. Frees the TRP_UPD pointed to by a GPtrArray element */
static void trps_trp_upd_destroy(gpointer data)
static TRP_RC trps_update_one_peer(TRPS_INSTANCE *trps,
TRP_PEER *peer,
TRP_UPDATE_TYPE update_type,
- TR_NAME *comm,
- TR_NAME *realm)
+ TR_NAME *realm,
+ TR_NAME *comm)
{
TALLOC_CTX *tmp_ctx=talloc_new(NULL);
TR_MSG msg; /* not a pointer! */
}
/* First, gather route updates. */
+ tr_debug("trps_update_one_peer: selecting route updates for %.*s.", peer_label->len, peer_label->buf);
if ((comm==NULL) && (realm==NULL)) {
/* do all realms */
rc=trps_select_route_updates_for_peer(tmp_ctx,
}
/* Second, gather community updates */
- rc=trps_select_comm_updates_for_peer(tmp_ctx, updates, trps, peer_label);
+ tr_debug("trps_update_one_peer: selecting community updates for %.*s.", peer_label->len, peer_label->buf);
+ rc=trps_select_comm_updates_for_peer(tmp_ctx, updates, trps, peer_label, update_type==TRP_UPDATE_TRIGGERED);
/* see if we have anything to send */
if (updates->len<=0)
tr_debug("trps_update_one_peer: no updates for %.*s", peer_label->len, peer_label->buf);
else {
- tr_debug("trps_update_one_peer: sending %d update messages.", updates->len);
- for (ii=0; NULL!=(upd=(TRP_UPD *)g_ptr_array_index(updates, ii)); ii++) {
- /* now encode the update message */
- tr_msg_set_trp_upd(&msg, upd);
- encoded=tr_msg_encode(&msg);
- if (encoded==NULL) {
- tr_err("trps_update_one_peer: error encoding update.");
- rc=TRP_ERROR;
- goto cleanup;
- }
+ /* Apply outbound TRP filters for this peer */
+ trps_filter_outbound_updates(peer->filters, updates);
- tr_debug("trps_update_one_peer: adding message to queue.");
- if (trps_send_msg(trps, peer, encoded) != TRP_SUCCESS)
- tr_err("trps_update_one_peer: error queueing update.");
- else
- tr_debug("trps_update_one_peer: update queued successfully.");
+ if (updates->len<=0)
+ tr_debug("trps_update_one_peer: no updates for %.*s after filtering.", peer_label->len, peer_label->buf);
+ else {
+ tr_debug("trps_update_one_peer: sending %d update messages.", updates->len);
+ for (ii=0; ii<updates->len; ii++) {
+ upd = (TRP_UPD *) g_ptr_array_index(updates, ii);
+ /* now encode the update message */
+ tr_msg_set_trp_upd(&msg, upd);
+ encoded = tr_msg_encode(NULL, &msg);
+ if (encoded == NULL) {
+ tr_err("trps_update_one_peer: error encoding update.");
+ rc = TRP_ERROR;
+ goto cleanup;
+ }
- tr_msg_free_encoded(encoded);
- encoded=NULL;
+ tr_debug("trps_update_one_peer: adding message to queue.");
+ if (trps_send_msg(trps, peer, encoded) != TRP_SUCCESS)
+ tr_err("trps_update_one_peer: error queueing update.");
+ else
+ tr_debug("trps_update_one_peer: update queued successfully.");
+
+ tr_msg_free_encoded(encoded);
+ encoded = NULL;
+ }
}
}
}
for (peer=trp_ptable_iter_first(iter, trps->ptable);
- peer!=NULL && rc==TRP_SUCCESS;
+ (peer!=NULL) && (rc==TRP_SUCCESS);
peer=trp_ptable_iter_next(iter))
{
if (!trps_peer_connected(trps, peer)) {
return trps_update_one_peer(trps,
trps_get_peer_by_gssname(trps, trp_req_get_peer(req)),
TRP_UPDATE_REQUESTED,
- comm,
- realm);
+ realm,
+ comm);
}
}
tr_msg_set_trp_req(&msg, req);
- encoded=tr_msg_encode(&msg);
+ encoded= tr_msg_encode(NULL, &msg);
if (encoded==NULL) {
tr_err("trps_wildcard_route_req: error encoding wildcard TRP request.");
rc=TRP_ERROR;