path: root/kernel/rcutorture.c
author	Christoph Lameter <cl@linux-foundation.org>	2009-10-03 14:48:23 +0400
committer	Tejun Heo <tj@kernel.org>	2009-10-03 14:48:23 +0400
commit	e800879d50c5a528d40191528557b1bdfbccbd42 (patch)
tree	5cf00a68df5c0b63dcb8c84f00b0ecb48910c0fa	/kernel/rcutorture.c
parent	4dac3e98840f11bb2d8d52fd375150c7c1912117 (diff)
download	linux-e800879d50c5a528d40191528557b1bdfbccbd42.tar.xz
this_cpu: Use this_cpu operations in RCU
RCU does not do dynamic allocations, but it increments per-cpu variables a lot. Those increments compile to a load into a register and a store back to memory. This patch switches them to this_cpu operations, which on x86 become inc/dec instructions that need no temporary register.

Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
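For context, the sketch below is not part of this patch; the counter name and the two wrapper functions are made up for illustration. It contrasts the two per-cpu increment forms: __get_cpu_var() yields an lvalue for this CPU's copy, so the ++ loads the value into a register, increments it, and stores it back, while __this_cpu_inc() lets x86 emit a single segment-prefixed increment directly on the per-cpu memory location.

/*
 * Illustrative sketch only -- not from kernel/rcutorture.c.
 * "example_count" stands in for a per-cpu counter such as
 * rcu_torture_count; the two helpers exist only to show the
 * form the patch removes and the form it adds.  In rcutorture
 * both increments run between preempt_disable() and
 * preempt_enable(), which the __ variants rely on.
 */
#include <linux/percpu.h>

static DEFINE_PER_CPU(long, example_count);

static void example_inc_old(void)
{
	/* Load this CPU's copy into a register, add 1, store it back. */
	++__get_cpu_var(example_count);
}

static void example_inc_new(void)
{
	/*
	 * On x86 this can compile to a single segment-prefixed
	 * increment on per-cpu memory, with no temporary register.
	 */
	__this_cpu_inc(per_cpu_var(example_count));
}

The array-indexed form used in the patch, __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]), works the same way: the whole expression names a single per-cpu memory location, so the increment still needs no register.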
Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r--	kernel/rcutorture.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 233768f21f97..178967b6434e 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -731,13 +731,13 @@ static void rcu_torture_timer(unsigned long unused)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	++__get_cpu_var(rcu_torture_count)[pipe_count];
+	__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
 	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
 	}
-	++__get_cpu_var(rcu_torture_batch)[completed];
+	__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
 	preempt_enable();
 	cur_ops->readunlock(idx);
 }
@@ -786,13 +786,13 @@ rcu_torture_reader(void *arg)
 			/* Should not happen, but... */
 			pipe_count = RCU_TORTURE_PIPE_LEN;
 		}
-		++__get_cpu_var(rcu_torture_count)[pipe_count];
+		__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
 		completed = cur_ops->completed() - completed;
 		if (completed > RCU_TORTURE_PIPE_LEN) {
 			/* Should not happen, but... */
 			completed = RCU_TORTURE_PIPE_LEN;
 		}
-		++__get_cpu_var(rcu_torture_batch)[completed];
+		__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
 		preempt_enable();
 		cur_ops->readunlock(idx);
 		schedule();