author    | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2013-01-11 04:21:07 +0400
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2013-01-29 10:19:54 +0400
commit    | 0e11c8e8a60f8591556d142c2e1e53eaf86ab528 (patch)
tree      | 98a824aba939ebd38178e60a3ba4eb97a29fb97c /kernel/rcutorture.c
parent    | 7e8b1e78ea028cbd32337e2aea574a8466c796bb (diff)
download  | linux-0e11c8e8a60f8591556d142c2e1e53eaf86ab528.tar.xz
rcu: Make rcutorture's shuffler task shuffle recently added tasks
A number of kthreads have been added to rcutorture, but the shuffler
task was not informed of them, and thus did not shuffle them. This
commit therefore adds the requisite shuffling and, while in the area,
fixes up some whitespace issues.
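Illustrative sketch (not the rcutorture implementation): the shuffler builds a cpumask containing every online CPU except the one chosen to sit idle, then re-binds each test kthread to that mask with set_cpus_allowed_ptr(). The helper shuffle_one_task() and the driver shuffle_example() below are hypothetical names; only the kthread pointers, shuffle_tmp_mask, rcu_idle_cpu, and the kernel APIs appear in the actual code. Any kthread left off such a list silently keeps its old affinity, which is exactly the omission this commit closes.

```c
/* Hypothetical sketch of the shuffle pattern, not kernel/rcutorture.c. */
#include <linux/cpumask.h>
#include <linux/sched.h>

static cpumask_var_t shuffle_tmp_mask;	/* assumed allocated via alloc_cpumask_var() */
static int rcu_idle_cpu = -1;		/* CPU currently being kept idle */

/* Hypothetical helper: shuffle one kthread if it was actually created. */
static void shuffle_one_task(struct task_struct *tp)
{
	if (tp)
		set_cpus_allowed_ptr(tp, shuffle_tmp_mask);
}

/* Hypothetical driver: every torture kthread must be listed here. */
static void shuffle_example(struct task_struct *stutter_task,
			    struct task_struct *fqs_task)
{
	/* Allow all CPUs except the one that should stay idle. */
	cpumask_setall(shuffle_tmp_mask);
	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	shuffle_one_task(stutter_task);
	shuffle_one_task(fqs_task);

	/* Rotate which CPU sits idle during the next interval. */
	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;
}
```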
However, the shuffling is intended to keep randomly selected CPUs
idle, which means that the RCU priority boosting kthreads need to
avoid waking up every jiffy. This commit also makes that fix.
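A rough sketch of that timing fix, assuming only that ULONG_CMP_LT() is RCU's wrap-safe unsigned-long comparison from <linux/rcupdate.h>; wait_for_boost_interval() is a hypothetical name, and the real change sits inside rcu_torture_boost() as the diff below shows.

```c
/* Hypothetical helper for illustration; the real code is in rcu_torture_boost(). */
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>	/* ULONG_CMP_LT() */
#include <linux/sched.h>

static void wait_for_boost_interval(unsigned long oldstarttime)
{
	while (ULONG_CMP_LT(jiffies, oldstarttime)) {
		/*
		 * Old behaviour: schedule_timeout_uninterruptible(1) woke the
		 * kthread every jiffy just to re-check the clock, keeping the
		 * supposedly idle CPU busy.  New behaviour: one sleep that
		 * covers the whole remaining interval.
		 */
		schedule_timeout_interruptible(oldstarttime - jiffies);

		if (kthread_should_stop())
			break;
	}
}
```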
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r-- | kernel/rcutorture.c | 24
1 file changed, 20 insertions, 4 deletions
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index a583f1ce713d..3ebc8bfb5525 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -846,7 +846,7 @@ static int rcu_torture_boost(void *arg)
 		/* Wait for the next test interval. */
 		oldstarttime = boost_starttime;
 		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
-			schedule_timeout_uninterruptible(1);
+			schedule_timeout_interruptible(oldstarttime - jiffies);
 			rcu_stutter_wait("rcu_torture_boost");
 			if (kthread_should_stop() ||
 			    fullstop != FULLSTOP_DONTSTOP)
@@ -1318,19 +1318,35 @@ static void rcu_torture_shuffle_tasks(void)
 				set_cpus_allowed_ptr(reader_tasks[i],
 						     shuffle_tmp_mask);
 	}
-
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
 						     shuffle_tmp_mask);
 	}
-
 	if (writer_task)
 		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
-
 	if (stats_task)
 		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
+	if (stutter_task)
+		set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
+	if (fqs_task)
+		set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
+	if (shutdown_task)
+		set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
+#ifdef CONFIG_HOTPLUG_CPU
+	if (onoff_task)
+		set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+	if (stall_task)
+		set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
+	if (barrier_cbs_tasks)
+		for (i = 0; i < n_barrier_cbs; i++)
+			if (barrier_cbs_tasks[i])
+				set_cpus_allowed_ptr(barrier_cbs_tasks[i],
+						     shuffle_tmp_mask);
+	if (barrier_task)
+		set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;