author    Vlad Yasevich <vladislav.yasevich@hp.com>  2007-09-17 03:03:28 +0400
committer David S. Miller <davem@davemloft.net>      2007-09-17 03:03:28 +0400
commit    559cf710b07c5e2cfa3fb8d8f4a1320fd84c53f9 (patch)
tree      deb74aea811a7d7c7e203f3743fd15372f8a6589 /net/sctp/endpointola.c
parent    293035479942400a7fe8e4f72465d4e4e466b91a (diff)
download  linux-559cf710b07c5e2cfa3fb8d8f4a1320fd84c53f9.tar.xz
[SCTP]: Convert bind_addr_list locking to RCU
Since the sctp_sockaddr_entry is now RCU enabled as part of the patch to synchronize sctp_localaddr_list, it makes sense to change all handling of these entries to RCU. This includes the sctp_bind_addrs structure and its list of bound addresses.

This list is currently protected by an external rw_lock, which looks like overkill. There are only two writers to the list: bind()/bindx() calls, and BH processing of ASCONF-ACK chunks. These are already serialized via the socket lock, so they will not step on each other. They are also relatively rare, so we should be good with RCU.

The readers are varied, and they are easily converted to RCU.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
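For readers new to this pattern, the write side the message relies on looks roughly like the sketch below. It is illustrative only: the demo_* names are hypothetical stand-ins, not from this patch, and kfree_rcu() is the modern idiom for the deferred free (kernels of this vintage used call_rcu() with a free callback). The key point from the message above is that writers take no list lock of their own, because the socket lock already serializes them:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical stand-in for sctp_sockaddr_entry. */
struct demo_addr_entry {
        struct list_head list;
        struct rcu_head rcu;
        int value;      /* payload; the real entry holds a union sctp_addr */
};

/* Write side.  The caller is assumed to hold the one lock that
 * serializes all writers (the socket lock in this patch), so no
 * rwlock is needed around the list itself.
 */
static void demo_addr_add(struct list_head *head,
                          struct demo_addr_entry *entry)
{
        /* list_add_tail_rcu() publishes the entry with the memory
         * barriers that concurrent RCU readers require.
         */
        list_add_tail_rcu(&entry->list, head);
}

static void demo_addr_del(struct demo_addr_entry *entry)
{
        list_del_rcu(&entry->list);
        /* Defer the actual free until every reader that might still
         * see the entry has left its RCU read-side critical section.
         */
        kfree_rcu(entry, rcu);
}

The read side, which the diff below converts function by function, is shown in a companion sketch after the diff.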
Diffstat (limited to 'net/sctp/endpointola.c')
-rw-r--r--  net/sctp/endpointola.c | 27
1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 1404a9e2e78f..8f485a0d14bd 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -92,7 +92,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 	/* Initialize the bind addr area */
 	sctp_bind_addr_init(&ep->base.bind_addr, 0);
-	rwlock_init(&ep->base.addr_lock);
 
 	/* Remember who we are attached to. */
 	ep->base.sk = sk;
@@ -225,21 +224,14 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
 					     const union sctp_addr *laddr)
 {
-	struct sctp_endpoint *retval;
+	struct sctp_endpoint *retval = NULL;
 
-	sctp_read_lock(&ep->base.addr_lock);
 	if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
 		if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
-					 sctp_sk(ep->base.sk))) {
+					 sctp_sk(ep->base.sk)))
 			retval = ep;
-			goto out;
-		}
 	}
 
-	retval = NULL;
-
-out:
-	sctp_read_unlock(&ep->base.addr_lock);
 	return retval;
 }
@@ -261,9 +253,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
 	list_for_each(pos, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
 		if (rport == asoc->peer.port) {
-			sctp_read_lock(&asoc->base.addr_lock);
 			*transport = sctp_assoc_lookup_paddr(asoc, paddr);
-			sctp_read_unlock(&asoc->base.addr_lock);
 
 			if (*transport)
 				return asoc;
@@ -295,20 +285,17 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
 int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
 				const union sctp_addr *paddr)
 {
-	struct list_head *pos;
 	struct sctp_sockaddr_entry *addr;
 	struct sctp_bind_addr *bp;
 
-	sctp_read_lock(&ep->base.addr_lock);
 	bp = &ep->base.bind_addr;
-	list_for_each(pos, &bp->address_list) {
-		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
-		if (sctp_has_association(&addr->a, paddr)) {
-			sctp_read_unlock(&ep->base.addr_lock);
+	/* This function is called with the socket lock held,
+	 * so the address_list can not change.
+	 */
+	list_for_each_entry(addr, &bp->address_list, list) {
+		if (sctp_has_association(&addr->a, paddr))
 			return 1;
-		}
 	}
 
-	sctp_read_unlock(&ep->base.addr_lock);
 	return 0;
 }
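A companion sketch for the read side. The last hunk above makes the key distinction explicit: sctp_endpoint_is_peeled_off() runs with the socket lock held, the single serializer of all writers, so it may keep the plain list walker; only readers running outside that lock need the RCU primitives. As before, the demo_* names are hypothetical stand-ins, not code from this patch:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct demo_addr_entry {
        struct list_head list;
        struct rcu_head rcu;
        int value;
};

/* Reader that holds the writers' serializing lock: the list cannot
 * change underneath it, so a plain walk suffices.  This mirrors the
 * comment added to sctp_endpoint_is_peeled_off() above.
 */
static bool demo_scan_locked(struct list_head *head, int value)
{
        struct demo_addr_entry *entry;

        list_for_each_entry(entry, head, list) {
                if (entry->value == value)
                        return true;
        }
        return false;
}

/* Lock-free reader: brackets the walk in an RCU read-side critical
 * section and uses the _rcu walker, which pairs with the writers'
 * list_add_tail_rcu()/list_del_rcu().
 */
static bool demo_scan_rcu(struct list_head *head, int value)
{
        struct demo_addr_entry *entry;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(entry, head, list) {
                if (entry->value == value) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}

The safety argument is the standard RCU one: list_del_rcu() leaves the removed entry's forward pointer intact, so a concurrent reader either still sees the entry or has already passed it, never a dangling pointer, and the deferred free waits out the grace period before the memory is reused.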