author     Oleg Nesterov <oleg@redhat.com>                 2015-08-21 20:42:57 +0300
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2015-10-06 21:25:31 +0300
commit     001dac627ff37433d5528ffb0d897cd19c2b1e43 (patch)
tree       800865c4653a6a3b8b2601a812e2ea609d637d39 /include/linux
parent     95b19f684c61ffc9b039e02c5d1113c2d8cd7105 (diff)
download   linux-001dac627ff37433d5528ffb0d897cd19c2b1e43.tar.xz
locking/percpu-rwsem: Make use of the rcu_sync infrastructure
Currently down_write()/up_write() call synchronize_sched_expedited() twice, which is evil. Change this code to rely on the rcu-sync primitives instead. This avoids the _expedited "big hammer" and can be faster in the contended case, or even when a single thread does down_write()/up_write() in a loop.

Of course, a single down_write() will take more time, but on the other hand it will be much more friendly to the whole system.

To simplify review, this patch doesn't update the comments; that is fixed by the next change.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
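As a hedged illustration of what "rely on the rcu-sync primitives" means on the writer side, here is a minimal sketch, not the actual kernel/locking/percpu-rwsem.c change (which this include/linux-limited diff does not show). It assumes the rcu_sync_enter()/rcu_sync_exit() primitives declared in <linux/rcu_sync.h>; the sketch_* function names are illustrative.

/* Hedged sketch only -- illustrative names, simplified writer path. */
#include <linux/rcu_sync.h>
#include <linux/percpu-rwsem.h>

void sketch_percpu_down_write(struct percpu_rw_semaphore *sem)
{
	/*
	 * Switch readers to the slow path.  Unlike the old code, which
	 * unconditionally ran synchronize_sched_expedited() here,
	 * rcu_sync_enter() waits for a grace period only when the
	 * writer side was previously idle.
	 */
	rcu_sync_enter(&sem->rss);

	/* ... drain pre-existing fast-path readers, then ... */
	down_write(&sem->rw_sem);
}

void sketch_percpu_up_write(struct percpu_rw_semaphore *sem)
{
	up_write(&sem->rw_sem);

	/*
	 * Re-enable the reader fast path.  rcu_sync_exit() defers the
	 * grace period and can merge it with a following rcu_sync_enter(),
	 * so a down_write()/up_write() loop no longer pays for two
	 * expedited grace periods per iteration.
	 */
	rcu_sync_exit(&sem->rss);
}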
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/percpu-rwsem.h  |  3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 834c4e52cb2d..c2fa3ecb0dce 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -5,11 +5,12 @@
 #include <linux/rwsem.h>
 #include <linux/percpu.h>
 #include <linux/wait.h>
+#include <linux/rcu_sync.h>
 #include <linux/lockdep.h>
 
 struct percpu_rw_semaphore {
+	struct rcu_sync		rss;
 	unsigned int __percpu	*fast_read_ctr;
-	atomic_t		write_ctr;
 	struct rw_semaphore	rw_sem;
 	atomic_t		slow_read_ctr;
 	wait_queue_head_t	write_waitq;
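The write_ctr flag removed above is what the reader fast path used to test before bumping its per-CPU counter. A hedged sketch of what that check might look like after this change, using rcu_sync_is_idle() from <linux/rcu_sync.h> (the helper name is illustrative; the real code lives in kernel/locking/percpu-rwsem.c):

/* Hedged sketch of the reader fast path; illustrative helper name. */
static bool sketch_update_fast_ctr(struct percpu_rw_semaphore *sem,
				   unsigned int val)
{
	bool success;

	preempt_disable();
	/*
	 * rcu_sync_is_idle() replaces the old atomic_read(&sem->write_ctr)
	 * test: true means no writer is pending, and the preempt-disabled
	 * (sched-RCU) region is exactly what rcu_sync_enter() waits out.
	 */
	success = rcu_sync_is_idle(&sem->rss);
	if (likely(success))
		__this_cpu_add(*sem->fast_read_ctr, val);
	preempt_enable();

	return success;
}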