Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/Kconfig                  |  2
-rw-r--r--   drivers/cpufreq/cpufreq.c                | 27
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c   |  6
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c       | 28
4 files changed, 42 insertions(+), 21 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 60c9be99c6d9..2cc71b66231e 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -99,7 +99,7 @@ config CPU_FREQ_GOV_USERSPACE
          Enable this cpufreq governor when you either want to set the
          CPU frequency manually or when an userspace program shall
          be able to set the CPU dynamically, like on LART
-         <http://www.lart.tudelft.nl/>
+         <http://www.lartmaker.nl/>.

          For details, take a look at <file:Documentation/cpu-freq/>.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9b6ae7dc8b8a..29b2fa5534ae 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -319,7 +319,6 @@ out:
        }
        return -EINVAL;
}
-EXPORT_SYMBOL_GPL(cpufreq_parse_governor);

/* drivers/base/cpu.c */
@@ -346,6 +345,8 @@ show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

+static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy);
+
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
@@ -364,7 +365,10 @@ static ssize_t store_##file_name				\
        if (ret != 1)                                           \
                return -EINVAL;                                 \
                                                                \
-       ret = cpufreq_set_policy(&new_policy);                  \
+       mutex_lock(&policy->lock);                              \
+       ret = __cpufreq_set_policy(policy, &new_policy);        \
+       policy->user_policy.object = policy->object;            \
+       mutex_unlock(&policy->lock);                            \
                                                                \
        return ret ? ret : count;                               \
}
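The store_##file_name macro above generates the per-attribute sysfs write handlers (store_scaling_min_freq, store_scaling_max_freq). As a reading aid, here is a rough expansion for scaling_max_freq after this change; the cpufreq_get_policy()/sscanf() preamble is not part of the hunk and is reconstructed from the surrounding file, so treat those lines as an assumption rather than a quote:

/* Approximate expansion of store_one(scaling_max_freq, max): the new value
 * is applied under policy->lock through __cpufreq_set_policy(), and the
 * user-requested limit is remembered in user_policy.max. */
static ssize_t store_scaling_max_freq(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        unsigned int ret = -EINVAL;
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);     /* assumed preamble */
        if (ret)
                return -EINVAL;

        ret = sscanf(buf, "%u", &new_policy.max);               /* assumed preamble */
        if (ret != 1)
                return -EINVAL;

        mutex_lock(&policy->lock);
        ret = __cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.max = policy->max;
        mutex_unlock(&policy->lock);

        return ret ? ret : count;
}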
@@ -420,7 +424,15 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
        if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
                return -EINVAL;

-       ret = cpufreq_set_policy(&new_policy);
+       /* Do not use cpufreq_set_policy here or the user_policy.max
+          will be wrongly overridden */
+       mutex_lock(&policy->lock);
+       ret = __cpufreq_set_policy(policy, &new_policy);
+
+       policy->user_policy.policy = policy->policy;
+       policy->user_policy.governor = policy->governor;
+       mutex_unlock(&policy->lock);
+
        return ret ? ret : count;
}
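Why this handler now bypasses cpufreq_set_policy(): in this tree that helper not only applies the new policy but also refreshes user_policy.min/max from the result, which would silently discard the limits a user had written through scaling_min_freq/scaling_max_freq. A sketch of its rough shape, reconstructed from memory of the surrounding file rather than quoted from this patch:

/* Rough shape of cpufreq_set_policy() (not part of this hunk): note the
 * unconditional user_policy.min/max refresh that store_scaling_governor()
 * must avoid. */
int cpufreq_set_policy(struct cpufreq_policy *policy)
{
        int ret;
        struct cpufreq_policy *data = cpufreq_cpu_get(policy->cpu);

        if (!data)
                return -EINVAL;

        mutex_lock(&data->lock);
        ret = __cpufreq_set_policy(data, policy);
        data->user_policy.min = data->min;
        data->user_policy.max = data->max;       /* would override the user's max */
        data->user_policy.policy = data->policy;
        data->user_policy.governor = data->governor;
        mutex_unlock(&data->lock);

        cpufreq_cpu_put(data);
        return ret;
}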
@@ -685,7 +697,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
                if (!cpu_online(j))
                        continue;

-               dprintk("CPU already managed, adding link\n");
+               dprintk("CPU %u already managed, adding link\n", j);
                cpufreq_cpu_get(cpu);
                cpu_sys_dev = get_cpu_sysdev(j);
                sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
@@ -695,9 +707,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
        policy->governor = NULL; /* to assure that the starting sequence is
                                  * run in cpufreq_set_policy */
        mutex_unlock(&policy->lock);
-
+
        /* set default policy */
-
        ret = cpufreq_set_policy(&new_policy);
        if (ret) {
                dprintk("setting policy failed\n");
@@ -707,7 +718,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
        module_put(cpufreq_driver->owner);
        dprintk("initialization complete\n");
        cpufreq_debug_enable_ratelimit();
-
+
        return 0;
@@ -1486,7 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_update_policy);

-static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
+static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
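cpufreq_cpu_callback() is the driver's CPU hotplug notifier; dropping __cpuinit keeps it resident after boot so later hotplug events can still reach it. A minimal, illustrative sketch of how such a callback is registered with the notifier API of this era (the case handling below is a placeholder, not the actual body):

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/module.h>

static int example_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                /* bring the newly onlined CPU under management */
                printk(KERN_DEBUG "example: cpu %u online\n", cpu);
                break;
        case CPU_DEAD:
                /* tear down per-CPU state */
                printk(KERN_DEBUG "example: cpu %u dead\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
        .notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
        /* typically done once from the driver's init path */
        return register_cpu_notifier(&example_cpu_notifier);
}
module_init(example_init);
MODULE_LICENSE("GPL");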
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 037f6bf4543c..e07a35487bde 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -176,8 +176,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
        ret = sscanf (buf, "%u", &input);

        mutex_lock(&dbs_mutex);
-       if (ret != 1 || input > 100 || input < 0 ||
-                       input <= dbs_tuners_ins.down_threshold) {
+       if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
@@ -196,8 +195,7 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
        ret = sscanf (buf, "%u", &input);

        mutex_lock(&dbs_mutex);
-       if (ret != 1 || input > 100 || input < 0 ||
-                       input >= dbs_tuners_ins.up_threshold) {
+       if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
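Both store handlers read the value with sscanf(buf, "%u", &input) into an unsigned int, so the dropped input < 0 test could never be true. A trivial userspace illustration of why the check was dead code (gcc, for instance, warns that a comparison of an unsigned expression < 0 is always false):

#include <stdio.h>

int main(void)
{
        unsigned int input = 0;

        if (input < 0)                  /* always false for an unsigned type */
                printf("unreachable\n");

        printf("the removed check was dead code\n");
        return 0;
}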
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 956d121cb161..3e6ffcaa5af4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -74,6 +74,8 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
static DEFINE_MUTEX (dbs_mutex);
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);

+static struct workqueue_struct *dbs_workq;
+
struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
@@ -364,23 +366,29 @@ static void do_dbs_timer(void *data)
        mutex_lock(&dbs_mutex);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
-       schedule_delayed_work(&dbs_work,
-                       usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+       queue_delayed_work(dbs_workq, &dbs_work,
+                          usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        mutex_unlock(&dbs_mutex);
}

static inline void dbs_timer_init(void)
{
        INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-       schedule_delayed_work(&dbs_work,
-                       usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+       if (!dbs_workq)
+               dbs_workq = create_singlethread_workqueue("ondemand");
+       if (!dbs_workq) {
+               printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
+               return;
+       }
+       queue_delayed_work(dbs_workq, &dbs_work,
+                          usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        return;
}

static inline void dbs_timer_exit(void)
{
-       cancel_delayed_work(&dbs_work);
-       return;
+       if (dbs_workq)
+               cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
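The sampling work is now queued on a private single-threaded workqueue ("ondemand") instead of the shared events workqueue used by schedule_delayed_work(). A standalone sketch of the same self-rearming pattern with the pre-2.6.20 work API used in this file; names such as my_wq and my_sample_fn are hypothetical, not from this driver:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct workqueue_struct *my_wq;          /* mirrors dbs_workq */
static void my_sample_fn(void *data);
static DECLARE_WORK(my_work, my_sample_fn, NULL);

/* Self-rearming handler: runs on the private workqueue thread. */
static void my_sample_fn(void *data)
{
        /* ... periodic sampling would happen here ... */
        queue_delayed_work(my_wq, &my_work, usecs_to_jiffies(10000));
}

static int my_start(void)
{
        my_wq = create_singlethread_workqueue("my_sampler");
        if (!my_wq)
                return -ENOMEM;
        queue_delayed_work(my_wq, &my_work, usecs_to_jiffies(10000));
        return 0;
}

static void my_stop(void)
{
        /* Stop a self-rearming delayed work reliably, then tear down the
         * thread; mirrors dbs_timer_exit() plus cpufreq_gov_dbs_exit(). */
        cancel_rearming_delayed_workqueue(my_wq, &my_work);
        flush_workqueue(my_wq);
        destroy_workqueue(my_wq);
}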
@@ -489,8 +497,12 @@ static int __init cpufreq_gov_dbs_init(void)

static void __exit cpufreq_gov_dbs_exit(void)
{
-       /* Make sure that the scheduled work is indeed not running */
-       flush_scheduled_work();
+       /* Make sure that the scheduled work is indeed not running.
+          Assumes the timer has been cancelled first. */
+       if (dbs_workq) {
+               flush_workqueue(dbs_workq);
+               destroy_workqueue(dbs_workq);
+       }
        cpufreq_unregister_governor(&cpufreq_gov_dbs);
}