author    Herbert Xu <herbert@gondor.apana.org.au>  2015-03-24 01:53:17 +0300
committer David S. Miller <davem@davemloft.net>     2015-03-24 05:16:07 +0300
commit    ba7c95ea3870fe7b847466d39a049ab6f156aa2c (patch)
tree      8fed9deb6a4a0c1f52634ff0bd07fb6a0edb8dde /lib
parent    ce046c568cbfb4734583131086f88cfe993c01d0 (diff)
download  linux-ba7c95ea3870fe7b847466d39a049ab6f156aa2c.tar.xz
rhashtable: Fix sleeping inside RCU critical section in walk_stop
The commit 963ecbd41a1026d99ec7537c050867428c397b89 ("rhashtable: Fix
use-after-free in rhashtable_walk_stop") fixed a real bug but created
another one because we may end up sleeping inside an RCU critical
section.

This patch fixes it properly by replacing the mutex with a spin lock
that specifically protects the walker lists.

Reported-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
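The underlying rule: rhashtable_walk_stop() runs while the caller still holds rcu_read_lock(), and sleeping is not allowed inside an RCU read-side critical section; mutex_lock() may sleep, whereas a plain spin lock does not. A minimal sketch of that rule, not part of the patch (the lock names and the helper below are hypothetical stand-ins for ht->mutex and ht->lock):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

static DEFINE_SPINLOCK(walker_lock);	/* hypothetical, stands in for ht->lock */
static DEFINE_MUTEX(walker_mutex);	/* hypothetical, stands in for ht->mutex */

static void walker_list_update_sketch(void)
{
	rcu_read_lock();

	/* mutex_lock(&walker_mutex); */
	/* Illegal at this point: mutex_lock() may sleep, and sleeping inside
	 * an RCU read-side critical section is exactly the bug reported here.
	 */

	/* A spin lock never sleeps, so it is safe under rcu_read_lock() and
	 * is enough to protect the short walker-list update.
	 */
	spin_lock(&walker_lock);
	/* ... add or remove the walker from the per-table walker list ... */
	spin_unlock(&walker_lock);

	rcu_read_unlock();
}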
Diffstat (limited to 'lib')
-rw-r--r--  lib/rhashtable.c  |  7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 7686c1e9934a..e96ad1a52c90 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -256,8 +256,10 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
/* Publish the new table pointer. */
rcu_assign_pointer(ht->tbl, new_tbl);
+ spin_lock(&ht->lock);
list_for_each_entry(walker, &old_tbl->walkers, list)
walker->tbl = NULL;
+ spin_unlock(&ht->lock);
/* Wait for readers. All new readers will see the new
* table, and thus no references to the old table will
@@ -635,12 +637,12 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
ht = iter->ht;
- mutex_lock(&ht->mutex);
+ spin_lock(&ht->lock);
if (tbl->rehash < tbl->size)
list_add(&iter->walker->list, &tbl->walkers);
else
iter->walker->tbl = NULL;
- mutex_unlock(&ht->mutex);
+ spin_unlock(&ht->lock);
iter->p = NULL;
@@ -723,6 +725,7 @@ int rhashtable_init(struct rhashtable *ht,
memset(ht, 0, sizeof(*ht));
mutex_init(&ht->mutex);
+ spin_lock_init(&ht->lock);
memcpy(&ht->p, params, sizeof(*params));
if (params->min_size)
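
For context on why rhashtable_walk_stop() cannot sleep: rhashtable_walk_start() enters an RCU read-side critical section and rhashtable_walk_stop() is its matching exit, so the walker-list update changed above always executes with rcu_read_lock() held. Below is a hedged sketch of a typical walk, assuming a table "ht" initialized elsewhere; the helper names are those of this kernel era (later kernels renamed rhashtable_walk_init() to rhashtable_walk_enter()):

#include <linux/rhashtable.h>
#include <linux/err.h>

static int walk_table_sketch(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;
	int err;

	err = rhashtable_walk_init(ht, &iter);
	if (err)
		return err;

	err = rhashtable_walk_start(&iter);	/* takes rcu_read_lock() */
	if (err && err != -EAGAIN)		/* -EAGAIN only signals that a resize occurred */
		goto out;

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;		/* ERR_PTR(-EAGAIN) after a resize; keep walking */
		/* ... inspect obj ... */
	}

	/* Still inside the RCU read-side critical section here, which is why
	 * rhashtable_walk_stop() must not sleep and now uses the spin lock
	 * added by this patch.
	 */
	rhashtable_walk_stop(&iter);
out:
	rhashtable_walk_exit(&iter);
	return 0;
}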