author:    David Howells <dhowells@redhat.com>	2016-07-01 09:51:50 +0300
committer: David Howells <dhowells@redhat.com>	2016-07-06 12:51:14 +0300
commit:    8496af50eb385c1cadff9ad396fd5359e96b6c27
tree:      ea5adee605917e9b7b1d3b809dc2458cbd07c271 /net/rxrpc/conn_service.c
parent:    995f1405610bd8446c5be37d2ffc031a7729e406
download:  linux-8496af50eb385c1cadff9ad396fd5359e96b6c27.tar.xz
rxrpc: Use RCU to access a peer's service connection tree
Move to using RCU access to a peer's service connection tree when routing
an incoming packet. This is done using a seqlock to trigger retrying of
the tree walk if a change happened.
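
The reader side of that retry loop is the standard read_seqbegin_or_lock()/need_seqretry()/done_seqretry() pattern. The sketch below is illustrative only and is not part of the patch: the foo types and the list walk stand in for the rxrpc connection tree, and the caller is assumed to hold rcu_read_lock(). The first pass is lockless; if a writer bumped the sequence count mid-walk, the loop goes round again, this time taking the read lock outright.

#include <linux/types.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

struct foo {
	u64 key;
	struct foo __rcu *next;
};

struct foo_list {
	seqlock_t lock;			/* writers take write_seqlock() */
	struct foo __rcu *head;
};

/* Illustrative reader: walk locklessly, retry under the read lock if a
 * writer changed the structure while we were looking at it.
 */
static struct foo *lookup_foo(struct foo_list *list, u64 key)
{
	struct foo *p, *found;
	int seq = 0;			/* even => first pass takes no lock */

	do {
		found = NULL;
		read_seqbegin_or_lock(&list->lock, &seq);
		for (p = rcu_dereference_raw(list->head); p;
		     p = rcu_dereference_raw(p->next)) {
			if (p->key == key) {
				found = p;
				break;
			}
		}
	} while (need_seqretry(&list->lock, seq));
	done_seqretry(&list->lock, seq);

	return found;
}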
Further, we no longer get a ref on the connection looked up in the
data_ready handler unless we queue the connection's work item - and then
only if the refcount > 0.
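
That conditional "get" is the usual atomic_inc_not_zero() idiom; roughly, and with illustrative names rather than the actual rxrpc helpers, it looks like the sketch below. The point is that the data_ready path never resurrects an object whose refcount has already reached zero, and it only holds a reference for as long as it actually has work queued.

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>

struct conn {
	atomic_t usage;			/* 0 means the object is being torn down */
	struct work_struct processor;
};

/* Take a reference only if at least one is still held elsewhere. */
static bool conn_get_maybe(struct conn *conn)
{
	return atomic_inc_not_zero(&conn->usage);
}

/* Softirq path: pin the connection only if we actually queue its work item. */
static void conn_maybe_queue(struct conn *conn)
{
	if (!conn_get_maybe(conn))
		return;				/* already dying; leave it alone */
	if (!queue_work(system_wq, &conn->processor))
		atomic_dec(&conn->usage);	/* already queued; that instance holds its own ref */
}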
Note that I'm avoiding the use of a hash table for service connections
because each service connection is addressed by a 62-bit number
(constructed from epoch and connection ID >> 2) that would allow the client
to engage in bucket stuffing, given knowledge of the hash algorithm.
Peers, however, are hashed as the network address is less controllable by
the client. The total number of peers will also be limited in a future
commit.
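
For illustration, such a key can be packed into a single u64 so that each rbtree comparison is one unsigned compare: the epoch occupies the top 32 bits and the connection ID, with its two low channel-selector bits masked off, sits below it, which is what makes the usable key 62 bits wide. The helper name and the literal mask below are assumptions for the sketch; the patch itself masks with RXRPC_CIDMASK and compares an equivalent epoch+cid value.

#include <linux/types.h>

/* Illustrative key packing: epoch in the high word, channel-masked cid in the
 * low word.  Comparing two of these orders connections the same way as
 * comparing (epoch, cid) pairs lexicographically.
 */
static inline u64 service_conn_key(u32 epoch, u32 cid)
{
	return ((u64)epoch << 32) | (cid & ~3u);	/* ~3u stands in for RXRPC_CIDMASK */
}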
Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'net/rxrpc/conn_service.c')
-rw-r--r--	net/rxrpc/conn_service.c	270
1 file changed, 161 insertions(+), 109 deletions(-)
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index c6db2e8400a2..7cbd612be0d7 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -13,18 +13,122 @@
 #include "ar-internal.h"
 
 /*
+ * Find a service connection under RCU conditions.
+ *
+ * We could use a hash table, but that is subject to bucket stuffing by an
+ * attacker as the client gets to pick the epoch and cid values and would know
+ * the hash function. So, instead, we use a hash table for the peer and from
+ * that an rbtree to find the service connection. Under ordinary circumstances
+ * it might be slower than a large hash table, but it is at least limited in
+ * depth.
+ */
+struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
+						     struct sk_buff *skb)
+{
+	struct rxrpc_connection *conn = NULL;
+	struct rxrpc_conn_proto k;
+	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	struct rb_node *p;
+	unsigned int seq = 0;
+
+	k.epoch	= sp->hdr.epoch;
+	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;
+
+	do {
+		/* Unfortunately, rbtree walking doesn't give reliable results
+		 * under just the RCU read lock, so we have to check for
+		 * changes.
+		 */
+		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);
+
+		p = rcu_dereference_raw(peer->service_conns.rb_node);
+		while (p) {
+			conn = rb_entry(p, struct rxrpc_connection, service_node);
+
+			if (conn->proto.index_key < k.index_key)
+				p = rcu_dereference_raw(p->rb_left);
+			else if (conn->proto.index_key > k.index_key)
+				p = rcu_dereference_raw(p->rb_right);
+			else
+				goto done;
+			conn = NULL;
+		}
+	} while (need_seqretry(&peer->service_conn_lock, seq));
+
+done:
+	done_seqretry(&peer->service_conn_lock, seq);
+	_leave(" = %d", conn ? conn->debug_id : -1);
+	return conn;
+}
+
+/*
+ * Insert a service connection into a peer's tree, thereby making it a target
+ * for incoming packets.
+ */
+static struct rxrpc_connection *
+rxrpc_publish_service_conn(struct rxrpc_peer *peer,
+			   struct rxrpc_connection *conn)
+{
+	struct rxrpc_connection *cursor = NULL;
+	struct rxrpc_conn_proto k = conn->proto;
+	struct rb_node **pp, *parent;
+
+	write_seqlock_bh(&peer->service_conn_lock);
+
+	pp = &peer->service_conns.rb_node;
+	parent = NULL;
+	while (*pp) {
+		parent = *pp;
+		cursor = rb_entry(parent,
+				  struct rxrpc_connection, service_node);
+
+		if (cursor->proto.index_key < k.index_key)
+			pp = &(*pp)->rb_left;
+		else if (cursor->proto.index_key > k.index_key)
+			pp = &(*pp)->rb_right;
+		else
+			goto found_extant_conn;
+	}
+
+	rb_link_node_rcu(&conn->service_node, parent, pp);
+	rb_insert_color(&conn->service_node, &peer->service_conns);
+conn_published:
+	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
+	write_sequnlock_bh(&peer->service_conn_lock);
+	_leave(" = %d [new]", conn->debug_id);
+	return conn;
+
+found_extant_conn:
+	if (atomic_read(&cursor->usage) == 0)
+		goto replace_old_connection;
+	write_sequnlock_bh(&peer->service_conn_lock);
+	/* We should not be able to get here. rxrpc_incoming_connection() is
+	 * called in a non-reentrant context, so there can't be a race to
+	 * insert a new connection.
+	 */
+	BUG();
+
+replace_old_connection:
+	/* The old connection is from an outdated epoch. */
+	_debug("replace conn");
+	rb_replace_node_rcu(&cursor->service_node,
+			    &conn->service_node,
+			    &peer->service_conns);
+	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
+	goto conn_published;
+}
+
+/*
  * get a record of an incoming connection
  */
 struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local,
 						   struct sockaddr_rxrpc *srx,
 						   struct sk_buff *skb)
 {
-	struct rxrpc_connection *conn, *candidate = NULL;
+	struct rxrpc_connection *conn;
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	struct rxrpc_peer *peer;
-	struct rb_node *p, **pp;
 	const char *new = "old";
-	u32 epoch, cid;
 
 	_enter("");
 
@@ -36,131 +140,79 @@ struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local,
 
 	ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED);
 
-	epoch = sp->hdr.epoch;
-	cid = sp->hdr.cid & RXRPC_CIDMASK;
-
-	/* search the connection list first */
-	read_lock_bh(&peer->conn_lock);
-
-	p = peer->service_conns.rb_node;
-	while (p) {
-		conn = rb_entry(p, struct rxrpc_connection, service_node);
-
-		_debug("maybe %x", conn->proto.cid);
-
-		if (epoch < conn->proto.epoch)
-			p = p->rb_left;
-		else if (epoch > conn->proto.epoch)
-			p = p->rb_right;
-		else if (cid < conn->proto.cid)
-			p = p->rb_left;
-		else if (cid > conn->proto.cid)
-			p = p->rb_right;
-		else
-			goto found_extant_connection;
+	rcu_read_lock();
+	peer = rxrpc_lookup_peer_rcu(local, srx);
+	if (peer) {
+		conn = rxrpc_find_service_conn_rcu(peer, skb);
+		if (conn) {
+			if (sp->hdr.securityIndex != conn->security_ix)
+				goto security_mismatch_rcu;
+			if (rxrpc_get_connection_maybe(conn))
+				goto found_extant_connection_rcu;
+
+			/* The conn has expired but we can't remove it without
+			 * the appropriate lock, so we attempt to replace it
+			 * when we have a new candidate.
+			 */
+		}
+
+		if (!rxrpc_get_peer_maybe(peer))
+			peer = NULL;
 	}
-	read_unlock_bh(&peer->conn_lock);
-
-	/* not yet present - create a candidate for a new record and then
-	 * redo the search */
-	candidate = rxrpc_alloc_connection(GFP_NOIO);
-	if (!candidate) {
-		rxrpc_put_peer(peer);
-		_leave(" = -ENOMEM");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	candidate->proto.epoch		= sp->hdr.epoch;
-	candidate->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
-	candidate->params.local		= local;
-	candidate->params.peer		= peer;
-	candidate->params.service_id	= sp->hdr.serviceId;
-	candidate->security_ix		= sp->hdr.securityIndex;
-	candidate->out_clientflag	= 0;
-	candidate->state		= RXRPC_CONN_SERVICE;
-	if (candidate->params.service_id)
-		candidate->state	= RXRPC_CONN_SERVICE_UNSECURED;
-
-	write_lock_bh(&peer->conn_lock);
-
-	pp = &peer->service_conns.rb_node;
-	p = NULL;
-	while (*pp) {
-		p = *pp;
-		conn = rb_entry(p, struct rxrpc_connection, service_node);
+	rcu_read_unlock();
 
-		if (epoch < conn->proto.epoch)
-			pp = &(*pp)->rb_left;
-		else if (epoch > conn->proto.epoch)
-			pp = &(*pp)->rb_right;
-		else if (cid < conn->proto.cid)
-			pp = &(*pp)->rb_left;
-		else if (cid > conn->proto.cid)
-			pp = &(*pp)->rb_right;
-		else
-			goto found_extant_second;
+	if (!peer) {
+		peer = rxrpc_lookup_peer(local, srx, GFP_NOIO);
+		if (IS_ERR(peer))
+			goto enomem;
 	}
 
-	/* we can now add the new candidate to the list */
-	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &candidate->flags);
-	rb_link_node(&candidate->service_node, p, pp);
-	rb_insert_color(&candidate->service_node, &peer->service_conns);
-attached:
-	conn = candidate;
-	candidate = NULL;
-	rxrpc_get_peer(peer);
-	rxrpc_get_local(local);
+	/* We don't have a matching record yet. */
+	conn = rxrpc_alloc_connection(GFP_NOIO);
+	if (!conn)
+		goto enomem_peer;
+
+	conn->proto.epoch	= sp->hdr.epoch;
+	conn->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
+	conn->params.local	= local;
+	conn->params.peer	= peer;
+	conn->params.service_id	= sp->hdr.serviceId;
+	conn->security_ix	= sp->hdr.securityIndex;
+	conn->out_clientflag	= 0;
+	conn->state		= RXRPC_CONN_SERVICE;
+	if (conn->params.service_id)
+		conn->state	= RXRPC_CONN_SERVICE_UNSECURED;
 
-	write_unlock_bh(&peer->conn_lock);
+	rxrpc_get_local(local);
 
 	write_lock(&rxrpc_connection_lock);
 	list_add_tail(&conn->link, &rxrpc_connections);
 	write_unlock(&rxrpc_connection_lock);
 
+	/* Make the connection a target for incoming packets. */
+	rxrpc_publish_service_conn(peer, conn);
+
 	new = "new";
 
 success:
 	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid);
-
-	rxrpc_put_peer(peer);
 	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
 	return conn;
 
-	/* we found the connection in the list immediately */
-found_extant_connection:
-	if (!rxrpc_get_connection_maybe(conn)) {
-		set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &candidate->flags);
-		rb_replace_node(&conn->service_node,
-				&candidate->service_node,
-				&peer->service_conns);
-		clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
-		goto attached;
-	}
-
-	if (sp->hdr.securityIndex != conn->security_ix) {
-		read_unlock_bh(&peer->conn_lock);
-		goto security_mismatch_put;
-	}
-	read_unlock_bh(&peer->conn_lock);
-	goto success;
-
-	/* we found the connection on the second time through the list */
-found_extant_second:
-	if (sp->hdr.securityIndex != conn->security_ix) {
-		write_unlock_bh(&peer->conn_lock);
-		goto security_mismatch;
-	}
-	rxrpc_get_connection(conn);
-	write_unlock_bh(&peer->conn_lock);
-	kfree(candidate);
+found_extant_connection_rcu:
+	rcu_read_unlock();
 	goto success;
 
-security_mismatch_put:
-	rxrpc_put_connection(conn);
-security_mismatch:
-	kfree(candidate);
+security_mismatch_rcu:
+	rcu_read_unlock();
 	_leave(" = -EKEYREJECTED");
 	return ERR_PTR(-EKEYREJECTED);
+
+enomem_peer:
+	rxrpc_put_peer(peer);
+enomem:
+	_leave(" = -ENOMEM");
+	return ERR_PTR(-ENOMEM);
 }
 
 /*
@@ -171,8 +223,8 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
 {
 	struct rxrpc_peer *peer = conn->params.peer;
 
-	write_lock_bh(&peer->conn_lock);
+	write_seqlock_bh(&peer->service_conn_lock);
 	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
 		rb_erase(&conn->service_node, &peer->service_conns);
-	write_unlock_bh(&peer->conn_lock);
+	write_sequnlock_bh(&peer->service_conn_lock);
 }