author    | Andrew Morton <akpm@linux-foundation.org>      | 2009-04-13 21:27:49 +0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-13 22:09:46 +0400
commit    | 01599fca6758d2cd133e78f87426fc851c9ea725 (patch)
tree      | 26a3f1d69c955de2c5388e5855dfe4ff3ff8687b
parent    | 8371f87c9994d9942af5984309835aeb948ba579 (diff)
download  | linux-01599fca6758d2cd133e78f87426fc851c9ea725.tar.xz
cpufreq: use smp_call_function_[single|many]() in acpi-cpufreq.c
Attempting to rid us of the problematic work_on_cpu(). Just use
smp_call_function_single() here.
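For context, a minimal sketch of the calling convention this moves to, using made-up names (msr_snapshot, read_aperf); it is not the driver code. smp_call_function_single() runs a void (*)(void *) callback on the target CPU with interrupts disabled (via IPI on a remote CPU), whereas work_on_cpu() runs a long (*)(void *) callback in process context:

#include <linux/smp.h>
#include <asm/msr.h>

/* Hypothetical example, not from acpi-cpufreq.c */
struct msr_snapshot {
        u32 lo, hi;
};

/* Runs on the target CPU with interrupts disabled; must not sleep. */
static void read_aperf(void *info)
{
        struct msr_snapshot *s = info;

        rdmsr(MSR_IA32_APERF, s->lo, s->hi);
}

static int read_aperf_on(int cpu, struct msr_snapshot *s)
{
        /* wait == 1: return only once read_aperf() has finished on @cpu */
        return smp_call_function_single(cpu, read_aperf, s, 1);
}

Because the callback cannot sleep, this only works for short, atomic operations; the MSR reads and writes done by acpi-cpufreq fit that constraint, which is what makes dropping work_on_cpu() safe here.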
This repairs a 10% sysbench(oltp)+mysql regression which Mike reported,
due to
commit 6b44003e5ca66a3fffeb5bc90f40ada2c4340896
Author: Andrew Morton <akpm@linux-foundation.org>
Date: Thu Apr 9 09:50:37 2009 -0600
work_on_cpu(): rewrite it to create a kernel thread on demand
It seems that the kernel calls these acpi-cpufreq functions quite
frequently.
Valdis Kletnieks also reports that this causes 70-90 forks per second on
his hardware.
Cc: Valdis.Kletnieks@vt.edu
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Zhao Yakui <yakui.zhao@intel.com>
Acked-by: Dave Jones <davej@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Mike Galbraith <efault@gmx.de>
Cc: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
[ Made it use smp_call_function_many() instead of looping over CPUs
with smp_call_function_single(), as sketched below - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
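A rough sketch of the smp_call_function_many() form mentioned in the bracketed note above, assuming a hypothetical do_write() callback (not the driver code): one call fans out a single batch of IPIs to every CPU in the mask, instead of one synchronous call per CPU.

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/* Hypothetical per-CPU callback */
static void do_write(void *info)
{
        /* per-CPU work would go here, e.g. a wrmsr driven by *info */
}

static void write_on_mask(const struct cpumask *mask, void *info)
{
        preempt_disable();      /* callers are expected to have preemption disabled */
        /*
         * wait == true: return only after every targeted CPU has run do_write().
         * Note that the calling CPU itself is skipped even if it is in @mask.
         */
        smp_call_function_many(mask, do_write, info, true);
        preempt_enable();
}

This is why the patched drv_write() below collapses the old for_each_cpu() loop into a single call.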
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 24
1 file changed, 9 insertions, 15 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 9d3af380c6bd..3e3cd3db7a0c 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -153,7 +153,8 @@ struct drv_cmd {
 	u32 val;
 };

-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 h;
@@ -170,10 +171,10 @@ static long do_drv_read(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }

-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
 	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
@@ -192,23 +193,18 @@ static long do_drv_write(void *_cmd)
 	default:
 		break;
 	}
-	return 0;
 }

 static void drv_read(struct drv_cmd *cmd)
 {
 	cmd->val = 0;

-	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }

 static void drv_write(struct drv_cmd *cmd)
 {
-	unsigned int i;
-
-	for_each_cpu(i, cmd->mask) {
-		work_on_cpu(i, do_drv_write, cmd);
-	}
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
 }

 static u32 get_cur_val(const struct cpumask *mask)
@@ -252,15 +248,13 @@ struct perf_pair {
 	} aperf, mperf;
 };

-
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
 	struct perf_pair *cur = _cur;

 	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
 	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
-
-	return 0;
 }

 /*
@@ -283,7 +277,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 	unsigned int perf_percent;
 	unsigned int retval;

-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &readin))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &cur, 1))
 		return 0;

 	cur.aperf.whole = readin.aperf.whole -