author     Steven Whitehouse <swhiteho@redhat.com>  2006-04-01 00:34:58 +0400
committer  Steven Whitehouse <swhiteho@redhat.com>  2006-04-01 00:34:58 +0400
commit     86579dd06deecfa6ac88d5e84e4d63c397cd6f6d (patch)
tree       b4475d3ccde53015ad84a06e4e55e64591171b75 /drivers/cpufreq
parent     7ea9ea832212c4a755650f7c7cc1ff0b63292a41 (diff)
parent     a0f067802576d4eb4c65d40b8ee7d6ea3c81dd61 (diff)
download   linux-86579dd06deecfa6ac88d5e84e4d63c397cd6f6d.tar.xz
Merge branch 'master'
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/cpufreq.c               | 166
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c  | 155
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c      |  97
-rw-r--r--   drivers/cpufreq/cpufreq_performance.c   |   2
-rw-r--r--   drivers/cpufreq/cpufreq_powersave.c     |   2
-rw-r--r--   drivers/cpufreq/cpufreq_stats.c         |   8
-rw-r--r--   drivers/cpufreq/cpufreq_userspace.c     |  12
-rw-r--r--   drivers/cpufreq/freq_table.c            |  12
8 files changed, 235 insertions(+), 219 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9582de1c9cad..9b6ae7dc8b8a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -5,7 +5,9 @@
  *  (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  *
  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
- *		Added handling for CPU hotplug
+ *	Added handling for CPU hotplug
+ *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
+ *	Fix handling for CPU hotplug -- affected CPUs
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -44,15 +46,14 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 static void handle_update(void *data);
 
 /**
- * Two notifier lists: the "policy" list is involved in the 
- * validation process for a new CPU frequency policy; the 
+ * Two notifier lists: the "policy" list is involved in the
+ * validation process for a new CPU frequency policy; the
  * "transition" list for kernel code that needs to handle
  * changes to devices when the CPU clock speed changes.
  * The mutex locks both lists.
  */
-static struct notifier_block *cpufreq_policy_notifier_list;
-static struct notifier_block *cpufreq_transition_notifier_list;
-static DECLARE_RWSEM (cpufreq_notifier_rwsem);
+static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
+static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list);
 
 static LIST_HEAD(cpufreq_governor_list);
@@ -151,7 +152,7 @@ void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt
 	va_list args;
 	unsigned int len;
 	unsigned long flags;
-	
+
 	WARN_ON(!prefix);
 	if (type & debug) {
 		spin_lock_irqsave(&disable_ratelimit_lock, flags);
@@ -198,7 +199,7 @@ static inline void cpufreq_debug_disable_ratelimit(void) { return; }
  *
  * This function alters the system "loops_per_jiffy" for the clock
  * speed change. Note that loops_per_jiffy cannot be updated on SMP
- * systems as each CPU might be scaled differently. So, use the arch 
+ * systems as each CPU might be scaled differently. So, use the arch
  * per-CPU loops_per_jiffy value wherever possible.
  */
 #ifndef CONFIG_SMP
@@ -233,7 +234,7 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) {
  *
  * This function calls the transition notifiers and the "adjust_jiffies"
  * function. It is called twice on all CPU frequency changes that have
- * external effects. 
+ * external effects.
  */
 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 {
@@ -245,13 +246,11 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 	dprintk("notification %u of frequency transition to %u kHz\n",
 		state, freqs->new);
 
-	down_read(&cpufreq_notifier_rwsem);
-
 	policy = cpufreq_cpu_data[freqs->cpu];
 	switch (state) {
 
 	case CPUFREQ_PRECHANGE:
-		/* detect if the driver reported a value as "old frequency" 
+		/* detect if the driver reported a value as "old frequency"
 		 * which is not equal to what the cpufreq core thinks is
 		 * "old frequency".
 		 */
@@ -264,20 +263,19 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 				freqs->old = policy->cur;
 			}
 		}
-		notifier_call_chain(&cpufreq_transition_notifier_list,
-					CPUFREQ_PRECHANGE, freqs);
+		blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+				CPUFREQ_PRECHANGE, freqs);
 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
 		break;
 
 	case CPUFREQ_POSTCHANGE:
 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
-		notifier_call_chain(&cpufreq_transition_notifier_list,
-					CPUFREQ_POSTCHANGE, freqs);
+		blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+				CPUFREQ_POSTCHANGE, freqs);
 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
 			policy->cur = freqs->new;
 		break;
 	}
-	up_read(&cpufreq_notifier_rwsem);
 }
 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
@@ -335,11 +333,11 @@ extern struct sysdev_class cpu_sysdev_class;
  * "unsigned int".
  */
-#define show_one(file_name, object)			\
-static ssize_t show_##file_name 			\
-(struct cpufreq_policy * policy, char *buf)		\
-{							\
-	return sprintf (buf, "%u\n", policy->object);	\
+#define show_one(file_name, object)					\
+static ssize_t show_##file_name						\
+(struct cpufreq_policy * policy, char *buf)				\
+{									\
+	return sprintf (buf, "%u\n", policy->object);			\
 }
 
 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
@@ -404,8 +402,8 @@ static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
 /**
  * store_scaling_governor - store policy for the specified CPU
  */
-static ssize_t store_scaling_governor (struct cpufreq_policy * policy, 
-				       const char *buf, size_t count) 
+static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
+				       const char *buf, size_t count)
 {
 	unsigned int ret = -EINVAL;
 	char	str_governor[16];
@@ -528,7 +526,7 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
 	return ret;
 }
 
-static ssize_t store(struct kobject * kobj, struct attribute * attr, 
+static ssize_t store(struct kobject * kobj, struct attribute * attr,
 		     const char * buf, size_t count)
 {
 	struct cpufreq_policy * policy = to_policy(kobj);
@@ -564,7 +562,7 @@ static struct kobj_type ktype_cpufreq = {
 /**
  * cpufreq_add_dev - add a CPU device
  *
- * Adds the cpufreq interface for a CPU device. 
+ * Adds the cpufreq interface for a CPU device.
  */
 static int cpufreq_add_dev (struct sys_device * sys_dev)
 {
@@ -573,8 +571,12 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	struct cpufreq_policy new_policy;
 	struct cpufreq_policy *policy;
 	struct freq_attr **drv_attr;
+	struct sys_device *cpu_sys_dev;
 	unsigned long flags;
 	unsigned int j;
+#ifdef CONFIG_SMP
+	struct cpufreq_policy *managed_policy;
+#endif
 
 	if (cpu_is_offline(cpu))
 		return 0;
@@ -587,8 +589,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	 * CPU because it is in the same boat. */
 	policy = cpufreq_cpu_get(cpu);
 	if (unlikely(policy)) {
-		dprintk("CPU already managed, adding link\n");
-		sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq");
+		cpufreq_cpu_put(policy);
 		cpufreq_debug_enable_ratelimit();
 		return 0;
 	}
@@ -623,6 +624,32 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 		goto err_out;
 	}
 
+#ifdef CONFIG_SMP
+	for_each_cpu_mask(j, policy->cpus) {
+		if (cpu == j)
+			continue;
+
+		/* check for existing affected CPUs. They may not be aware
+		 * of it due to CPU Hotplug.
+		 */
+		managed_policy = cpufreq_cpu_get(j);
+		if (unlikely(managed_policy)) {
+			spin_lock_irqsave(&cpufreq_driver_lock, flags);
+			managed_policy->cpus = policy->cpus;
+			cpufreq_cpu_data[cpu] = managed_policy;
+			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+			dprintk("CPU already managed, adding link\n");
+			sysfs_create_link(&sys_dev->kobj,
+					  &managed_policy->kobj, "cpufreq");
+
+			cpufreq_debug_enable_ratelimit();
+			mutex_unlock(&policy->lock);
+			ret = 0;
+			goto err_out_driver_exit; /* call driver->exit() */
+		}
+	}
+#endif
 	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
 
 	/* prepare interface data */
@@ -650,6 +677,21 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	for_each_cpu_mask(j, policy->cpus)
 		cpufreq_cpu_data[j] = policy;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	/* symlink affected CPUs */
+	for_each_cpu_mask(j, policy->cpus) {
+		if (j == cpu)
+			continue;
+		if (!cpu_online(j))
+			continue;
+
+		dprintk("CPU already managed, adding link\n");
+		cpufreq_cpu_get(cpu);
+		cpu_sys_dev = get_cpu_sysdev(j);
+		sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+				  "cpufreq");
+	}
+
 	policy->governor = NULL; /* to assure that the starting sequence is
 				  * run in cpufreq_set_policy */
 	mutex_unlock(&policy->lock);
@@ -724,10 +766,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 
 #ifdef CONFIG_SMP
 	/* if this isn't the CPU which is the parent of the kobj, we
-	 * only need to unlink, put and exit 
+	 * only need to unlink, put and exit
 	 */
 	if (unlikely(cpu != data->cpu)) {
 		dprintk("removing link\n");
+		cpu_clear(cpu, data->cpus);
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
 		cpufreq_cpu_put(data);
@@ -740,7 +783,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	if (!kobject_get(&data->kobj)) {
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		cpufreq_debug_enable_ratelimit();
- 		return -EFAULT;
+		return -EFAULT;
 	}
 
 #ifdef CONFIG_SMP
@@ -783,7 +826,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	kobject_put(&data->kobj);
 
 	/* we need to make sure that the underlying kobj is actually
-	 * not referenced anymore by anybody before we proceed with 
+	 * not referenced anymore by anybody before we proceed with
 	 * unloading.
 	 */
 	dprintk("waiting for dropping of refcount\n");
@@ -831,7 +874,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
 }
 
 
-/** 
+/**
  * cpufreq_quick_get - get the CPU frequency (in kHz) frpm policy->cur
  * @cpu: CPU number
  *
@@ -855,7 +898,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_quick_get);
 
 
-/** 
+/**
  * cpufreq_get - get the current CPU frequency (in kHz)
  * @cpu: CPU number
  *
@@ -960,7 +1003,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
 			freqs.old = cpu_policy->cur;
 			freqs.new = cur_freq;
 
-			notifier_call_chain(&cpufreq_transition_notifier_list,
+			blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
 					    CPUFREQ_SUSPENDCHANGE, &freqs);
 			adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
 
@@ -1041,7 +1084,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
 			freqs.old = cpu_policy->cur;
 			freqs.new = cur_freq;
 
-			notifier_call_chain(&cpufreq_transition_notifier_list,
+			blocking_notifier_call_chain(
+					&cpufreq_transition_notifier_list,
 					CPUFREQ_RESUMECHANGE, &freqs);
 			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
 
@@ -1072,30 +1116,30 @@ static struct sysdev_driver cpufreq_sysdev_driver = {
  *	@nb: notifier function to register
  *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
  *
- *	Add a driver to one of two lists: either a list of drivers that 
+ *	Add a driver to one of two lists: either a list of drivers that
  *	are notified about clock rate changes (once before and once after
  *	the transition), or a list of drivers that are notified about
  *	changes in cpufreq policy.
  *
  *	This function may sleep, and has the same return conditions as
- *	notifier_chain_register.
+ *	blocking_notifier_chain_register.
  */
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
 
-	down_write(&cpufreq_notifier_rwsem);
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
-		ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb);
+		ret = blocking_notifier_chain_register(
+				&cpufreq_transition_notifier_list, nb);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
-		ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb);
+		ret = blocking_notifier_chain_register(
+				&cpufreq_policy_notifier_list, nb);
 		break;
 	default:
 		ret = -EINVAL;
 	}
-	up_write(&cpufreq_notifier_rwsem);
 
 	return ret;
 }
@@ -1110,24 +1154,24 @@ EXPORT_SYMBOL(cpufreq_register_notifier);
  *	Remove a driver from the CPU frequency notifier list.
  *
  *	This function may sleep, and has the same return conditions as
- *	notifier_chain_unregister.
+ *	blocking_notifier_chain_unregister.
  */
 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
 
-	down_write(&cpufreq_notifier_rwsem);
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
-		ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb);
+		ret = blocking_notifier_chain_unregister(
+				&cpufreq_transition_notifier_list, nb);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
-		ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb);
+		ret = blocking_notifier_chain_unregister(
+				&cpufreq_policy_notifier_list, nb);
 		break;
 	default:
 		ret = -EINVAL;
 	}
-	up_write(&cpufreq_notifier_rwsem);
 
 	return ret;
 }
@@ -1225,7 +1269,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 		return -EINVAL;
 
 	mutex_lock(&cpufreq_governor_mutex);
-	
+
 	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 		if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
 			mutex_unlock(&cpufreq_governor_mutex);
@@ -1234,7 +1278,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 	}
 
 	list_add(&governor->governor_list, &cpufreq_governor_list);
- 	mutex_unlock(&cpufreq_governor_mutex);
+	mutex_unlock(&cpufreq_governor_mutex);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
@@ -1299,29 +1343,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
 	if (ret)
 		goto error_out;
 
-	down_read(&cpufreq_notifier_rwsem);
-
 	/* adjust if necessary - all reasons */
-	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST,
-			    policy);
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_ADJUST, policy);
 
 	/* adjust if necessary - hardware incompatibility*/
-	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE,
-			    policy);
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_INCOMPATIBLE, policy);
 
 	/* verify the cpu speed can be set within this limit,
 	   which might be different to the first one */
 	ret = cpufreq_driver->verify(policy);
-	if (ret) {
-		up_read(&cpufreq_notifier_rwsem);
+	if (ret)
 		goto error_out;
-	}
 
 	/* notification of the new policy */
-	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY,
-			    policy);
-
-	up_read(&cpufreq_notifier_rwsem);
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_NOTIFY, policy);
 
 	data->min = policy->min;
 	data->max = policy->max;
@@ -1497,9 +1535,9 @@ static struct notifier_block cpufreq_cpu_notifier =
  * @driver_data: A struct cpufreq_driver containing the values#
  * submitted by the CPU Frequency driver.
  *
- *   Registers a CPU Frequency driver to this core code. This code 
+ *   Registers a CPU Frequency driver to this core code. This code
  * returns zero on success, -EBUSY when another driver got here first
- * (and isn't unregistered in the meantime). 
+ * (and isn't unregistered in the meantime).
  *
  */
 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
@@ -1560,7 +1598,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
 /**
  * cpufreq_unregister_driver - unregister the current CPUFreq driver
  *
- *    Unregister the current CPUFreq driver. Only call this if you have 
+ *    Unregister the current CPUFreq driver. Only call this if you have
  * the right to do so, i.e. if you have succeeded in initialising before!
  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
  * currently not initialised.
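
The cpufreq.c changes above drop the hand-rolled notifier lists and their rwsem in favour of the kernel's blocking notifier chains, which carry their own locking. As a rough sketch of how client code sees this through the unchanged cpufreq wrappers (the module and handler names below are illustrative, not part of this commit):

	/* Hypothetical example module: listens for frequency transitions via
	 * cpufreq_register_notifier(), which this patch converts to use
	 * blocking notifier chains internally. */
	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <linux/cpufreq.h>

	static int sample_trans_handler(struct notifier_block *nb,
					unsigned long state, void *data)
	{
		struct cpufreq_freqs *freqs = data;

		/* Called once before (PRECHANGE) and once after (POSTCHANGE)
		 * each transition; may sleep, since the chain is blocking. */
		if (state == CPUFREQ_POSTCHANGE)
			printk(KERN_INFO "cpu%u: %u -> %u kHz\n",
			       freqs->cpu, freqs->old, freqs->new);
		return NOTIFY_OK;
	}

	static struct notifier_block sample_trans_nb = {
		.notifier_call = sample_trans_handler,
	};

	static int __init sample_init(void)
	{
		return cpufreq_register_notifier(&sample_trans_nb,
						 CPUFREQ_TRANSITION_NOTIFIER);
	}

	static void __exit sample_exit(void)
	{
		cpufreq_unregister_notifier(&sample_trans_nb,
					    CPUFREQ_TRANSITION_NOTIFIER);
	}

	module_init(sample_init);
	module_exit(sample_exit);
	MODULE_LICENSE("GPL");
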
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index ac38766b2583..037f6bf4543c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -35,12 +35,7 @@
  */
 
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
-#define MIN_FREQUENCY_UP_THRESHOLD		(0)
-#define MAX_FREQUENCY_UP_THRESHOLD		(100)
-
 #define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
-#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
-#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)
 
 /*
  * The polling frequency of this governor depends on the capability of
@@ -53,10 +48,14 @@
  * All times here are in uS.
  */
 static unsigned int				def_sampling_rate;
-#define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
+#define MIN_SAMPLING_RATE_RATIO			(2)
+/* for correct statistics, we need at least 10 ticks between each measure */
+#define MIN_STAT_SAMPLING_RATE			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(100000)
-#define DEF_SAMPLING_DOWN_FACTOR		(5)
+#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
+#define DEF_SAMPLING_DOWN_FACTOR		(1)
+#define MAX_SAMPLING_DOWN_FACTOR		(10)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000)
 
 static void do_dbs_timer(void *data);
@@ -66,6 +65,8 @@ struct cpu_dbs_info_s {
 	unsigned int		prev_cpu_idle_up;
 	unsigned int		prev_cpu_idle_down;
 	unsigned int		enable;
+	unsigned int		down_skip;
+	unsigned int		requested_freq;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -87,6 +88,8 @@ static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold		= DEF_FREQUENCY_UP_THRESHOLD,
 	.down_threshold		= DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor	= DEF_SAMPLING_DOWN_FACTOR,
+	.ignore_nice		= 0,
+	.freq_step		= 5,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -136,7 +139,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	unsigned int input;
 	int ret;
 	ret = sscanf (buf, "%u", &input);
-	if (ret != 1 )
+	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
 	mutex_lock(&dbs_mutex);
@@ -173,8 +176,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	ret = sscanf (buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
-	    input < MIN_FREQUENCY_UP_THRESHOLD ||
+	if (ret != 1 || input > 100 || input < 0 ||
 	    input <= dbs_tuners_ins.down_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
@@ -194,8 +196,7 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	ret = sscanf (buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
-	    input < MIN_FREQUENCY_DOWN_THRESHOLD ||
+	if (ret != 1 || input > 100 || input < 0 ||
 	    input >= dbs_tuners_ins.up_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
@@ -297,31 +298,17 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
+	unsigned int tmp_idle_ticks, total_idle_ticks;
 	unsigned int freq_step;
 	unsigned int freq_down_sampling_rate;
-	static int down_skip[NR_CPUS];
-	static int requested_freq[NR_CPUS];
-	static unsigned short init_flag = 0;
-	struct cpu_dbs_info_s *this_dbs_info;
-	struct cpu_dbs_info_s *dbs_info;
-
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy;
-	unsigned int j;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
 
 	policy = this_dbs_info->cur_policy;
 
-	if ( init_flag == 0 ) {
-		for_each_online_cpu(j) {
-			dbs_info = &per_cpu(cpu_dbs_info, j);
-			requested_freq[j] = dbs_info->cur_policy->cur;
-		}
-		init_flag = 1;
-	}
-
 	/*
 	 * The default safe range is 20% to 80%
 	 * Every sampling_rate, we check
@@ -337,39 +324,29 @@ static void dbs_check_cpu(int cpu)
 	 */
 
 	/* Check for frequency increase */
 	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks, total_idle_ticks;
-		struct cpu_dbs_info_s *j_dbs_info;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
-		total_idle_ticks = get_cpu_idle_time(j);
-		tmp_idle_ticks = total_idle_ticks -
-			j_dbs_info->prev_cpu_idle_up;
-		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
-		if (tmp_idle_ticks < idle_ticks)
-			idle_ticks = tmp_idle_ticks;
-	}
+	/* Check for frequency increase */
+	total_idle_ticks = get_cpu_idle_time(cpu);
+	tmp_idle_ticks = total_idle_ticks -
+		this_dbs_info->prev_cpu_idle_up;
+	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+	if (tmp_idle_ticks < idle_ticks)
+		idle_ticks = tmp_idle_ticks;
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
 	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-		usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
 	if (idle_ticks < up_idle_ticks) {
-		down_skip[cpu] = 0;
-		for_each_cpu_mask(j, policy->cpus) {
-			struct cpu_dbs_info_s *j_dbs_info;
+		this_dbs_info->down_skip = 0;
+		this_dbs_info->prev_cpu_idle_down =
+			this_dbs_info->prev_cpu_idle_up;
 
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
-			j_dbs_info->prev_cpu_idle_down =
-				j_dbs_info->prev_cpu_idle_up;
-		}
 		/* if we are already at full speed then break out early */
-		if (requested_freq[cpu] == policy->max)
+		if (this_dbs_info->requested_freq == policy->max)
 			return;
 
 		freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
@@ -378,49 +355,45 @@ static void dbs_check_cpu(int cpu)
 		if (unlikely(freq_step == 0))
 			freq_step = 5;
 
-		requested_freq[cpu] += freq_step;
-		if (requested_freq[cpu] > policy->max)
-			requested_freq[cpu] = policy->max;
+		this_dbs_info->requested_freq += freq_step;
+		if (this_dbs_info->requested_freq > policy->max)
+			this_dbs_info->requested_freq = policy->max;
 
-		__cpufreq_driver_target(policy, requested_freq[cpu],
+		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
 			CPUFREQ_RELATION_H);
 		return;
 	}
 
 	/* Check for frequency decrease */
-	down_skip[cpu]++;
-	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
+	this_dbs_info->down_skip++;
+	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks, total_idle_ticks;
-		struct cpu_dbs_info_s *j_dbs_info;
+	/* Check for frequency decrease */
+	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
+	tmp_idle_ticks = total_idle_ticks -
+		this_dbs_info->prev_cpu_idle_down;
+	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
-		tmp_idle_ticks = total_idle_ticks -
-			j_dbs_info->prev_cpu_idle_down;
-		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
-		if (tmp_idle_ticks < idle_ticks)
-			idle_ticks = tmp_idle_ticks;
-	}
+	if (tmp_idle_ticks < idle_ticks)
+		idle_ticks = tmp_idle_ticks;
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
-	down_skip[cpu] = 0;
+	this_dbs_info->down_skip = 0;
 
 	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
 		dbs_tuners_ins.sampling_down_factor;
 	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-		usecs_to_jiffies(freq_down_sampling_rate);
+			usecs_to_jiffies(freq_down_sampling_rate);
 
 	if (idle_ticks > down_idle_ticks) {
-		/* if we are already at the lowest speed then break out early
+		/*
+		 * if we are already at the lowest speed then break out early
 		 * or if we 'cannot' reduce the speed as the user might want
-		 * freq_step to be zero */
-		if (requested_freq[cpu] == policy->min
+		 * freq_step to be zero
+		 */
+		if (this_dbs_info->requested_freq == policy->min
 				|| dbs_tuners_ins.freq_step == 0)
 			return;
@@ -430,13 +403,12 @@ static void dbs_check_cpu(int cpu)
 		if (unlikely(freq_step == 0))
 			freq_step = 5;
 
-		requested_freq[cpu] -= freq_step;
-		if (requested_freq[cpu] < policy->min)
-			requested_freq[cpu] = policy->min;
+		this_dbs_info->requested_freq -= freq_step;
+		if (this_dbs_info->requested_freq < policy->min)
+			this_dbs_info->requested_freq = policy->min;
 
-		__cpufreq_driver_target(policy,
-			requested_freq[cpu],
-			CPUFREQ_RELATION_H);
+		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
+				CPUFREQ_RELATION_H);
 		return;
 	}
 }
@@ -493,11 +465,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
 			j_dbs_info->prev_cpu_idle_down
 				= j_dbs_info->prev_cpu_idle_up;
 		}
 		this_dbs_info->enable = 1;
+		this_dbs_info->down_skip = 0;
+		this_dbs_info->requested_freq = policy->cur;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable++;
 		/*
@@ -507,16 +481,17 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 1) {
 			unsigned int latency;
 			/* policy latency is in nS. Convert it to uS first */
+			latency = policy->cpuinfo.transition_latency / 1000;
+			if (latency == 0)
+				latency = 1;
 
-			latency = policy->cpuinfo.transition_latency;
-			if (latency < 1000)
-				latency = 1000;
-
-			def_sampling_rate = (latency / 1000) *
+			def_sampling_rate = 10 * latency *
 					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
+
+			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
+				def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
-			dbs_tuners_ins.ignore_nice = 0;
-			dbs_tuners_ins.freq_step = 5;
 
 			dbs_timer_init();
 		}
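
The conservative-governor rework above replaces function-static NR_CPUS-sized arrays (down_skip[], requested_freq[]) with fields in the existing per-CPU cpu_dbs_info structure, and makes each CPU sample only its own idle time. A minimal stand-alone sketch of that per-CPU pattern, with hypothetical names:

	#include <linux/percpu.h>

	/* Hypothetical per-CPU state, following the patch's pattern of keeping
	 * governor bookkeeping in one per-CPU struct instead of NR_CPUS arrays. */
	struct sample_state {
		unsigned int down_skip;
		unsigned int requested_freq;
	};
	static DEFINE_PER_CPU(struct sample_state, sample_state);

	static void sample_touch(int cpu)
	{
		/* per_cpu() resolves the CPU-local copy; no NR_CPUS-sized
		 * table and no cacheline shared between all CPUs. */
		struct sample_state *s = &per_cpu(sample_state, cpu);

		s->down_skip++;
	}
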
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 9ee9411f186f..956d121cb161 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -38,17 +38,17 @@
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
 
-/* 
- * The polling frequency of this governor depends on the capability of 
+/*
+ * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with 
- * transition latency <= 10mS, using appropriate sampling 
+ * latency of the processor. The governor will work on any processor with
+ * transition latency <= 10mS, using appropriate sampling
  * rate.
  * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
  * this governor will not work.
  * All times here are in uS.
  */
-static unsigned int 				def_sampling_rate;
+static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO			(2)
 /* for correct statistics, we need at least 10 ticks between each measure */
 #define MIN_STAT_SAMPLING_RATE			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
@@ -62,28 +62,29 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(void *data);
 
 struct cpu_dbs_info_s {
-	struct cpufreq_policy 	*cur_policy;
-	unsigned int 		prev_cpu_idle_up;
-	unsigned int 		prev_cpu_idle_down;
-	unsigned int 		enable;
+	struct cpufreq_policy *cur_policy;
+	unsigned int prev_cpu_idle_up;
+	unsigned int prev_cpu_idle_down;
+	unsigned int enable;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-static DEFINE_MUTEX (dbs_mutex); 
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
-	unsigned int 		sampling_rate;
-	unsigned int 		sampling_down_factor;
-	unsigned int 		up_threshold;
-	unsigned int 		ignore_nice;
+	unsigned int sampling_rate;
+	unsigned int sampling_down_factor;
+	unsigned int up_threshold;
+	unsigned int ignore_nice;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
-	.up_threshold 		= DEF_FREQUENCY_UP_THRESHOLD,
-	.sampling_down_factor 	= DEF_SAMPLING_DOWN_FACTOR,
+	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+	.ignore_nice = 0,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -106,8 +107,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
-#define define_one_ro(_name) 		\
-static struct freq_attr _name = 	\
+#define define_one_ro(_name)		\
+static struct freq_attr _name =		\
 __ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
@@ -125,7 +126,7 @@ show_one(sampling_down_factor, sampling_down_factor);
 show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
 
-static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, 
+static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -144,7 +145,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_sampling_rate(struct cpufreq_policy *unused, 
+static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -163,7 +164,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_up_threshold(struct cpufreq_policy *unused, 
+static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -171,7 +172,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	ret = sscanf (buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || 
+	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
@@ -190,14 +191,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	int ret;
 
 	unsigned int j;
-	
+
 	ret = sscanf (buf, "%u", &input);
 	if ( ret != 1 )
 		return -EINVAL;
 
 	if ( input > 1 )
 		input = 1;
-	
+
 	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
 		mutex_unlock(&dbs_mutex);
@@ -259,16 +260,16 @@ static void dbs_check_cpu(int cpu)
 		return;
 
 	policy = this_dbs_info->cur_policy;
 
-	/* 
+	/*
 	 * Every sampling_rate, we check, if current idle time is less
 	 * than 20% (default), then we try to increase frequency
 	 * Every sampling_rate*sampling_down_factor, we look for a the lowest
 	 * frequency which can sustain the load while keeping idle time over
 	 * 30%. If such a frequency exist, we try to decrease to this frequency.
 	 *
-	 * Any frequency increase takes it to the maximum frequency. 
-	 * Frequency reduction happens at minimum steps of 
-	 * 5% (default) of current frequency 
+	 * Any frequency increase takes it to the maximum frequency.
+	 * Frequency reduction happens at minimum steps of
+	 * 5% (default) of current frequency
 	 */
 
 	/* Check for frequency increase */
@@ -298,14 +299,14 @@ static void dbs_check_cpu(int cpu)
 			struct cpu_dbs_info_s *j_dbs_info;
 
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
-			j_dbs_info->prev_cpu_idle_down = 
+			j_dbs_info->prev_cpu_idle_down =
 					j_dbs_info->prev_cpu_idle_up;
 		}
 		/* if we are already at full speed then break out early */
 		if (policy->cur == policy->max)
 			return;
-		
-		__cpufreq_driver_target(policy, policy->max, 
+
+		__cpufreq_driver_target(policy, policy->max,
 			CPUFREQ_RELATION_H);
 		return;
 	}
@@ -347,23 +348,26 @@ static void dbs_check_cpu(int cpu)
 	 * policy. To be safe, we focus 10 points under the threshold.
 	 */
 	freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
-	freq_next = (freq_next * policy->cur) / 
+	freq_next = (freq_next * policy->cur) /
 			(dbs_tuners_ins.up_threshold - 10);
 
+	if (freq_next < policy->min)
+		freq_next = policy->min;
+
 	if (freq_next <= ((policy->cur * 95) / 100))
 		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
 }
 
 static void do_dbs_timer(void *data)
-{ 
+{
 	int i;
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
-	schedule_delayed_work(&dbs_work, 
+	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
-} 
+}
 
 static inline void dbs_timer_init(void)
 {
@@ -390,22 +394,25 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) || 
+		if ((!cpu_online(cpu)) ||
 		    (!policy->cur))
 			return -EINVAL;
 
 		if (policy->cpuinfo.transition_latency >
-				(TRANSITION_LATENCY_LIMIT * 1000))
+				(TRANSITION_LATENCY_LIMIT * 1000)) {
+			printk(KERN_WARNING "ondemand governor failed to load "
+			       "due to too long transition latency\n");
 			return -EINVAL;
+		}
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
-		 
+
 		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
-		 
+
 			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 			j_dbs_info->prev_cpu_idle_down
 				= j_dbs_info->prev_cpu_idle_up;
@@ -431,11 +438,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				def_sampling_rate = MIN_STAT_SAMPLING_RATE;
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
-			dbs_tuners_ins.ignore_nice = 0;
-
 			dbs_timer_init();
 		}
-		
+
 		mutex_unlock(&dbs_mutex);
 		break;
@@ -448,9 +453,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		 * Stop the timerschedule work, when this governor
 		 * is used for first time
 		 */
-		if (dbs_enable == 0) 
+		if (dbs_enable == 0)
 			dbs_timer_exit();
-		
+
 		mutex_unlock(&dbs_mutex);
 		break;
@@ -460,11 +465,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 				this_dbs_info->cur_policy,
-			       	policy->max, CPUFREQ_RELATION_H);
+				policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 				this_dbs_info->cur_policy,
-			       	policy->min, CPUFREQ_RELATION_L);
+				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		break;
 	}
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index 8d536b40deb8..de91e3371ef8 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -32,7 +32,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
 	}
 	return 0;
 }
- 
+
 struct cpufreq_governor cpufreq_gov_performance = {
 	.name		= "performance",
 	.governor	= cpufreq_governor_performance,
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index c85edda7feb0..0a2596044e65 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -31,7 +31,7 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
 	}
 	return 0;
 }
- 
+
 static struct cpufreq_governor cpufreq_gov_powersave = {
 	.name		= "powersave",
 	.governor	= cpufreq_governor_powersave,
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 0bddb8e694d9..9694b6ed3268 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -2,7 +2,7 @@
  *  drivers/cpufreq/cpufreq_stats.c
  *
  *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
- *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>. 
+ *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License version 2 as
@@ -90,7 +90,7 @@ show_time_in_state(struct cpufreq_policy *policy, char *buf)
 		return 0;
 	cpufreq_stats_update(stat->cpu);
 	for (i = 0; i < stat->state_num; i++) {
-		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i], 
+		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
 			(unsigned long long)cputime64_to_clock_t(stat->time_in_state[i]));
 	}
 	return len;
@@ -171,7 +171,7 @@ cpufreq_stats_free_table (unsigned int cpu)
 {
 	struct cpufreq_stats *stat = cpufreq_stats_table[cpu];
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	if (policy && policy->cpu == cpu) 
+	if (policy && policy->cpu == cpu)
 		sysfs_remove_group(&policy->kobj, &stats_attr_group);
 	if (stat) {
 		kfree(stat->time_in_state);
@@ -303,7 +303,7 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
 	return 0;
 }
 
-static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
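
Both sampling governors in this merge now derive their default sampling interval from the driver-reported transition latency and clamp it so that at least ten timer ticks pass between measurements (MIN_STAT_SAMPLING_RATE). A user-space C sketch of the same arithmetic, assuming HZ=250 for the worked numbers (all names are local to the example):

	#include <stdio.h>

	/* Mirrors the governor math: latency is reported in ns, rates in us. */
	#define MIN_SAMPLING_RATE_RATIO			2
	#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	1000

	static unsigned int def_sampling_rate(unsigned int latency_ns,
					      unsigned int jiffy_us)
	{
		unsigned int latency = latency_ns / 1000;	/* ns -> us */
		unsigned int min_stat = MIN_SAMPLING_RATE_RATIO * 10 * jiffy_us;
		unsigned int rate;

		if (latency == 0)
			latency = 1;
		rate = 10 * latency * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
		/* need >= 10 ticks between measurements for usable idle stats */
		if (rate < min_stat)
			rate = min_stat;
		return rate;
	}

	int main(void)
	{
		/* e.g. 10 us transition latency with HZ=250 (4000 us/jiffy):
		 * 10 * 10 * 1000 = 100000 us, above the 80000 us floor. */
		printf("%u us\n", def_sampling_rate(10000, 4000));
		return 0;
	}
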
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 92a0be22a2a9..071ee4f1bbf2 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -41,7 +41,7 @@ static DEFINE_MUTEX (userspace_mutex);
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
 
 /* keep track of frequency transitions */
-static int 
+static int
 userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	void *data)
 {
@@ -58,7 +58,7 @@ static struct notifier_block userspace_cpufreq_notifier_block = {
 };
 
 
-/** 
+/**
  * cpufreq_set - set the CPU frequency
  * @freq: target frequency in kHz
  * @cpu: CPU for which the frequency is to be set
@@ -103,8 +103,8 @@ static ssize_t show_speed (struct cpufreq_policy *policy, char *buf)
 	return sprintf (buf, "%u\n", cpu_cur_freq[policy->cpu]);
 }
 
-static ssize_t 
-store_speed (struct cpufreq_policy *policy, const char *buf, size_t count) 
+static ssize_t
+store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
 {
 	unsigned int freq = 0;
 	unsigned int ret;
@@ -118,7 +118,7 @@ store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
 	return count;
 }
 
-static struct freq_attr freq_attr_scaling_setspeed = 
+static struct freq_attr freq_attr_scaling_setspeed =
 {
 	.attr = { .name = "scaling_setspeed", .mode = 0644, .owner = THIS_MODULE },
 	.show = show_speed,
@@ -135,7 +135,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 			return -EINVAL;
 		BUG_ON(!policy->cur);
 		mutex_lock(&userspace_mutex);
-		cpu_is_managed[cpu] = 1;		
+		cpu_is_managed[cpu] = 1;
 		cpu_min_freq[cpu] = policy->min;
 		cpu_max_freq[cpu] = policy->max;
 		cpu_cur_freq[cpu] = policy->cur;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index ba460bdea60f..a4818ce88919 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -59,9 +59,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 	if (!cpu_online(policy->cpu))
 		return -EINVAL;
 
-	cpufreq_verify_within_limits(policy, 
-				     policy->cpuinfo.min_freq, 
-				     policy->cpuinfo.max_freq);
+	cpufreq_verify_within_limits(policy,
+			policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
 
 	for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
 		unsigned int freq = table[i].frequency;
@@ -76,9 +75,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 	if (!count)
 		policy->max = next_larger;
 
-	cpufreq_verify_within_limits(policy, 
-				     policy->cpuinfo.min_freq, 
-				     policy->cpuinfo.max_freq);
+	cpufreq_verify_within_limits(policy,
+			policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
 
 	dprintk("verification lead to (%u - %u kHz) for cpu %u\n",
 		policy->min, policy->max, policy->cpu);
@@ -199,7 +197,7 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
  * if you use these, you must assure that the frequency table is valid
  * all the time between get_attr and put_attr!
  */
-void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, 
+void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
 				      unsigned int cpu)
 {
 	dprintk("setting show_table for cpu %u to %p\n", cpu, table);
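
The freq_table.c hunks above only reflow cpufreq_frequency_table_verify(), whose job is to clamp policy->min/max to the hardware limits and widen policy->max when no table entry falls inside the requested range. A hypothetical driver fragment wiring a static table into that helper (the table values and names are invented for illustration):

	#include <linux/cpufreq.h>

	/* Hypothetical driver fragment: a static frequency table used with the
	 * (real) cpufreq_frequency_table_verify() helper touched by this merge. */
	static struct cpufreq_frequency_table sample_freqs[] = {
		{ .index = 0, .frequency = 600000 },	/* kHz */
		{ .index = 1, .frequency = 800000 },
		{ .index = 2, .frequency = 1000000 },
		{ .index = 3, .frequency = CPUFREQ_TABLE_END },
	};

	static int sample_verify(struct cpufreq_policy *policy)
	{
		/* Clamps policy->min/max to cpuinfo limits and widens
		 * policy->max if no table entry fits the requested range. */
		return cpufreq_frequency_table_verify(policy, sample_freqs);
	}

A driver would point its struct cpufreq_driver .verify hook at a function like sample_verify(), which is the pattern the core's __cpufreq_set_policy() relies on when it calls cpufreq_driver->verify() in the hunks above.
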