Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r--  drivers/cpufreq/cpufreq.c  72
1 file changed, 38 insertions(+), 34 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7cbf1d53804f..e000f3691661 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
* mode before doing so.
*
* Additional rules:
- * - All holders of the lock should check to make sure that the CPU they
- * are concerned with are online after they get the lock.
* - Governor routines that can be called in cpufreq hotplug path should not
* take this sem as top level hotplug notifier handler takes this.
* - Lock should not be held across
@@ -257,6 +255,7 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
struct cpufreq_policy *policy;
+ unsigned long flags;
BUG_ON(irqs_disabled());
@@ -267,7 +266,10 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
switch (state) {
case CPUFREQ_PRECHANGE:
@@ -808,22 +810,22 @@ static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
policy = cpufreq_cpu_get(sibling);
WARN_ON(!policy);
- per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
-
- lock_policy_rwsem_write(cpu);
-
__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ lock_policy_rwsem_write(sibling);
+
spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
cpumask_set_cpu(cpu, policy->cpus);
+ per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
per_cpu(cpufreq_cpu_data, cpu) = policy;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ unlock_policy_rwsem_write(sibling);
+
__cpufreq_governor(policy, CPUFREQ_GOV_START);
__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
- unlock_policy_rwsem_write(cpu);
-
ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
if (ret) {
cpufreq_cpu_put(policy);
@@ -870,11 +872,15 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
#ifdef CONFIG_HOTPLUG_CPU
/* Check if this cpu was hot-unplugged earlier and has siblings */
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_online_cpu(sibling) {
struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
- if (cp && cpumask_test_cpu(cpu, cp->related_cpus))
+ if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
return cpufreq_add_policy_cpu(cpu, sibling, dev);
+ }
}
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif
@@ -899,8 +905,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
/* Initially set CPU itself as the policy_cpu */
per_cpu(cpufreq_policy_cpu, cpu) = cpu;
- ret = (lock_policy_rwsem_write(cpu) < 0);
- WARN_ON(ret);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
@@ -911,7 +915,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
ret = cpufreq_driver->init(policy);
if (ret) {
pr_debug("initialization failed\n");
- goto err_unlock_policy;
+ goto err_set_policy_cpu;
}
/* related cpus should atleast have policy->cpus */
@@ -942,8 +946,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
if (ret)
goto err_out_unregister;
- unlock_policy_rwsem_write(cpu);
-
kobject_uevent(&policy->kobj, KOBJ_ADD);
module_put(cpufreq_driver->owner);
pr_debug("initialization complete\n");
@@ -959,8 +961,8 @@ err_out_unregister:
kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
-err_unlock_policy:
- unlock_policy_rwsem_write(cpu);
+err_set_policy_cpu:
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
free_cpumask_var(policy->cpus);
@@ -1008,12 +1010,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
data = per_cpu(cpufreq_cpu_data, cpu);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!data) {
pr_debug("%s: No cpu_data found\n", __func__);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(cpu);
return -EINVAL;
}
@@ -1025,9 +1029,10 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
CPUFREQ_NAME_LEN);
#endif
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ WARN_ON(lock_policy_rwsem_write(cpu));
cpus = cpumask_weight(data->cpus);
cpumask_clear_cpu(cpu, data->cpus);
+ unlock_policy_rwsem_write(cpu);
if (cpu != data->cpu) {
sysfs_remove_link(&dev->kobj, "cpufreq");
@@ -1038,31 +1043,37 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
ret = kobject_move(&data->kobj, &cpu_dev->kobj);
if (ret) {
pr_err("%s: Failed to move kobj: %d", __func__, ret);
+
+ WARN_ON(lock_policy_rwsem_write(cpu));
cpumask_set_cpu(cpu, data->cpus);
- ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
- "cpufreq");
+
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, cpu) = data;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
unlock_policy_rwsem_write(cpu);
+
+ ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+ "cpufreq");
return -EINVAL;
}
+ WARN_ON(lock_policy_rwsem_write(cpu));
update_policy_cpu(data, cpu_dev->id);
+ unlock_policy_rwsem_write(cpu);
pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
__func__, cpu_dev->id, cpu);
}
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
cpufreq_cpu_put(data);
- unlock_policy_rwsem_write(cpu);
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
- lock_policy_rwsem_write(cpu);
+ lock_policy_rwsem_read(cpu);
kobj = &data->kobj;
cmp = &data->kobj_unregister;
- unlock_policy_rwsem_write(cpu);
+ unlock_policy_rwsem_read(cpu);
kobject_put(kobj);
/* we need to make sure that the underlying kobj is actually
@@ -1073,10 +1084,8 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
wait_for_completion(cmp);
pr_debug("wait complete\n");
- lock_policy_rwsem_write(cpu);
if (cpufreq_driver->exit)
cpufreq_driver->exit(data);
- unlock_policy_rwsem_write(cpu);
free_cpumask_var(data->related_cpus);
free_cpumask_var(data->cpus);
@@ -1086,6 +1095,7 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
return 0;
}
@@ -1098,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu))
return 0;
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
retval = __cpufreq_remove_dev(dev, sif);
return retval;
}
@@ -1791,9 +1798,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
__cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED: