author    | Dave Jones <davej@redhat.com> | 2005-06-01 06:03:49 +0400
committer | Dave Jones <davej@redhat.com> | 2005-06-01 06:03:49 +0400
commit    | 9c7d269b9b05440dd0fe92d96f4e5d7e73dd7238 (patch)
tree      | 4e4268cc4f075187135312d5243e24d3a4fcd155 /drivers/cpufreq/cpufreq_ondemand.c
parent    | 790d76fa979f55bfc49a6901bb911778949b582d (diff)
download  | linux-9c7d269b9b05440dd0fe92d96f4e5d7e73dd7238.tar.xz
[CPUFREQ] ondemand,conservative governor idle_tick clean-up
[PATCH] [3/5] ondemand,conservative governor idle_tick clean-up
Clean-up of the ondemand and conservative governors: the idle-ticks
measurement for the local CPU is factored into the existing per-CPU loop.
Signed-off-by: Eric Piel <eric.piel@tremplin-utc.net>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r-- | drivers/cpufreq/cpufreq_ondemand.c | 26
1 file changed, 5 insertions(+), 21 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f239545ac1b8..0482bd49aba8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -296,7 +296,6 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int total_idle_ticks;
 	unsigned int freq_down_step;
 	unsigned int freq_down_sampling_rate;
 	static int down_skip[NR_CPUS];
@@ -325,20 +324,12 @@ static void dbs_check_cpu(int cpu)
 	 */

 	/* Check for frequency increase */
-	total_idle_ticks = get_cpu_idle_time(cpu);
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;

-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
 		total_idle_ticks = get_cpu_idle_time(j);
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_up;
@@ -376,18 +367,11 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;

-	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;

-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		/* Check for frequency decrease */
 		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
@@ -408,7 +392,7 @@
 	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
 		usecs_to_jiffies(freq_down_sampling_rate);

-	if (idle_ticks > down_idle_ticks ) {
+	if (idle_ticks > down_idle_ticks) {
 		/* if we are already at the lowest speed then break out early
 		 * or if we 'cannot' reduce the speed as the user might want
 		 * freq_step to be zero */
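
The effect of the patch is that the governor no longer samples the policy-owning CPU separately: idle_ticks starts at UINT_MAX and the loop over policy->cpus keeps the smallest idle-tick delta it sees, so the least-idle CPU in the policy drives the decision. A minimal userspace sketch of that min-over-CPUs pattern follows; the fixed CPU count, the prev_cpu_idle_up array and the fake get_cpu_idle_time() are simplified stand-ins for the kernel's per-CPU data and helpers, not the real interfaces.

/*
 * Simplified model of the measurement pattern introduced by this patch:
 * every CPU in the policy (including the local one) is handled by the same
 * loop, and the governor keeps the smallest idle delta seen so far.
 */
#include <limits.h>
#include <stdio.h>

#define NR_POLICY_CPUS 4	/* assumed policy size for the sketch */

/* Hypothetical per-CPU bookkeeping, mirroring prev_cpu_idle_up. */
static unsigned int prev_cpu_idle_up[NR_POLICY_CPUS];

/* Stand-in for the kernel's get_cpu_idle_time(): returns fake idle ticks. */
static unsigned int get_cpu_idle_time(int cpu)
{
	static const unsigned int fake_idle[NR_POLICY_CPUS] = { 120, 95, 110, 130 };
	return fake_idle[cpu];
}

/*
 * Return the smallest idle-tick delta across the policy's CPUs: the
 * "least idle CPU decides" rule the governor uses after this clean-up.
 */
static unsigned int measure_idle_ticks(void)
{
	unsigned int idle_ticks = UINT_MAX;	/* start high, keep the minimum */
	int j;

	for (j = 0; j < NR_POLICY_CPUS; j++) {
		unsigned int total_idle_ticks = get_cpu_idle_time(j);
		unsigned int tmp_idle_ticks = total_idle_ticks - prev_cpu_idle_up[j];

		prev_cpu_idle_up[j] = total_idle_ticks;
		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
	return idle_ticks;
}

int main(void)
{
	printf("idle ticks since last sample: %u\n", measure_idle_ticks());
	return 0;
}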