Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/Kconfig                    24
-rw-r--r--  drivers/cpufreq/Makefile                    1
-rw-r--r--  drivers/cpufreq/cpufreq.c                   8
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c    586
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c        180
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c            47
-rw-r--r--  drivers/firmware/pcdp.c                     1
-rw-r--r--  drivers/ide/pci/amd74xx.c                   3
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c   302
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_pci.c    144
-rw-r--r--  drivers/pci/hotplug/shpchprm_acpi.c         4
11 files changed, 934 insertions, 366 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 95882bb1950e..60c9be99c6d9 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -46,6 +46,10 @@ config CPU_FREQ_STAT_DETAILS
	  This will show a detailed CPU frequency translation table in the
	  sysfs file system
+# Note that it is not currently possible to set the other governors (such as ondemand)
+# as the default, since if they fail to initialise, cpufreq will be
+# left in an undefined state.
+
choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
@@ -115,4 +119,24 @@ config CPU_FREQ_GOV_ONDEMAND
If in doubt, say N.
+config CPU_FREQ_GOV_CONSERVATIVE
+ tristate "'conservative' cpufreq governor"
+ depends on CPU_FREQ
+ help
+	  'conservative' - this driver is rather similar to the 'ondemand'
+	  governor both in its source code and its purpose; the difference is
+	  its optimisation for better suitability in a battery-powered
+	  environment. The frequency is gracefully increased and decreased
+	  rather than jumping to 100% when speed is required.
+
+	  If you have a desktop machine then you should really be considering
+	  the 'ondemand' governor instead; however, if you are using a laptop,
+	  PDA or even an AMD64-based computer (due to the unacceptable
+	  step-by-step latency issues between the minimum and maximum frequency
+	  transitions in the CPU) you will probably want to use this governor.
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
+ If in doubt, say N.
+
endif # CPU_FREQ
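
For readers comparing the two policies, here is a hypothetical user-space sketch (plain C, not part of the patch) contrasting the scaling behaviour the help text describes: 'ondemand' jumps straight to the maximum frequency, while 'conservative' walks toward it in freq_step-sized increments. The frequency values and the 5% step are illustrative defaults only.

#include <stdio.h>

#define FREQ_MIN   600000	/* kHz, example values */
#define FREQ_MAX  2000000
#define FREQ_STEP_PCT   5	/* conservative's default freq_step */

/* 'ondemand' style: any increase goes straight to the maximum */
static unsigned int ondemand_up(unsigned int cur)
{
	(void)cur;
	return FREQ_MAX;
}

/* 'conservative' style: increase by freq_step percent of the maximum */
static unsigned int conservative_up(unsigned int cur)
{
	unsigned int step = (FREQ_STEP_PCT * FREQ_MAX) / 100;

	return (cur + step > FREQ_MAX) ? FREQ_MAX : cur + step;
}

int main(void)
{
	unsigned int f = FREQ_MIN;

	printf("ondemand:     %7u -> %7u kHz\n", f, ondemand_up(f));
	while (f < FREQ_MAX) {
		unsigned int next = conservative_up(f);

		printf("conservative: %7u -> %7u kHz\n", f, next);
		f = next;
	}
	return 0;
}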
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 67b16e5a41a7..71fc3b4173f1 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
+obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8e561313d094..03b5fb2ddcf4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -258,7 +258,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
(likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
(unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur)))
{
- printk(KERN_WARNING "Warning: CPU frequency is %u, "
+ dprintk(KERN_WARNING "Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur);
freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
}
@@ -814,7 +814,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
{
struct cpufreq_freqs freqs;
- printk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
+ dprintk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
"core thinks of %u, is %u kHz.\n", old_freq, new_freq);
freqs.cpu = cpu;
@@ -923,7 +923,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, u32 state)
struct cpufreq_freqs freqs;
if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
- printk(KERN_DEBUG "Warning: CPU frequency is %u, "
+ dprintk(KERN_DEBUG "Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.\n",
cur_freq, cpu_policy->cur);
@@ -1004,7 +1004,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
struct cpufreq_freqs freqs;
if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
- printk(KERN_WARNING "Warning: CPU frequency"
+			dprintk(KERN_WARNING "Warning: CPU frequency "
"is %u, cpufreq assumed %u kHz.\n",
cur_freq, cpu_policy->cur);
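
The four hunks above demote user-visible printk() warnings to dprintk(), cpufreq's debug-only logging macro (defined elsewhere in the cpufreq headers and gated on the cpufreq debug option, not shown in this diff). As a rough sketch of the pattern, assuming a generic DEBUG switch rather than the real CONFIG symbol:

#ifdef DEBUG
#define dprintk(fmt, args...)	printk(fmt, ## args)
#else
#define dprintk(fmt, args...)	do { } while (0)
#endif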
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
new file mode 100644
index 000000000000..e1df376e709e
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -0,0 +1,586 @@
+/*
+ * drivers/cpufreq/cpufreq_conservative.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/cpufreq.h>
+#include <linux/sysctl.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/kmod.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/kernel_stat.h>
+#include <linux/percpu.h>
+
+/*
+ * dbs is used in this file as a shorthand for demand-based switching.
+ * It helps to keep variable names shorter and simpler.
+ */
+
+#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define MIN_FREQUENCY_UP_THRESHOLD (0)
+#define MAX_FREQUENCY_UP_THRESHOLD (100)
+
+#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
+#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
+#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
+
+/*
+ * The polling frequency of this governor depends on the capability of
+ * the processor. The default polling frequency is 1000 times the
+ * transition latency of the processor. The governor will work on any
+ * processor with transition latency <= 10 ms, using an appropriate
+ * sampling rate.
+ * For CPUs with transition latency > 10 ms (mostly drivers with
+ * CPUFREQ_ETERNAL) this governor will not work.
+ * All times here are in microseconds (us).
+ */
+static unsigned int def_sampling_rate;
+#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
+#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
+#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (100000)
+#define DEF_SAMPLING_DOWN_FACTOR (5)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000)
+
+static void do_dbs_timer(void *data);
+
+struct cpu_dbs_info_s {
+ struct cpufreq_policy *cur_policy;
+ unsigned int prev_cpu_idle_up;
+ unsigned int prev_cpu_idle_down;
+ unsigned int enable;
+};
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+
+static unsigned int dbs_enable; /* number of CPUs using this policy */
+
+static DECLARE_MUTEX (dbs_sem);
+static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+
+struct dbs_tuners {
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int ignore_nice;
+ unsigned int freq_step;
+};
+
+static struct dbs_tuners dbs_tuners_ins = {
+ .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+ .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
+ .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+};
+
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+{
+ return kstat_cpu(cpu).cpustat.idle +
+ kstat_cpu(cpu).cpustat.iowait +
+ ( !dbs_tuners_ins.ignore_nice ?
+ kstat_cpu(cpu).cpustat.nice :
+ 0);
+}
+
+/************************** sysfs interface ************************/
+static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+}
+
+static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+}
+
+#define define_one_ro(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+define_one_ro(sampling_rate_max);
+define_one_ro(sampling_rate_min);
+
+/* cpufreq_conservative Governor Tunables */
+#define show_one(file_name, object) \
+static ssize_t show_##file_name \
+(struct cpufreq_policy *unused, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
+}
+show_one(sampling_rate, sampling_rate);
+show_one(sampling_down_factor, sampling_down_factor);
+show_one(up_threshold, up_threshold);
+show_one(down_threshold, down_threshold);
+show_one(ignore_nice, ignore_nice);
+show_one(freq_step, freq_step);
+
+static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+ if (ret != 1 )
+ return -EINVAL;
+
+ down(&dbs_sem);
+ dbs_tuners_ins.sampling_down_factor = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+
+ down(&dbs_sem);
+ if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
+ up(&dbs_sem);
+ return -EINVAL;
+ }
+
+ dbs_tuners_ins.sampling_rate = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_up_threshold(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+
+ down(&dbs_sem);
+ if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+ input < MIN_FREQUENCY_UP_THRESHOLD ||
+ input <= dbs_tuners_ins.down_threshold) {
+ up(&dbs_sem);
+ return -EINVAL;
+ }
+
+ dbs_tuners_ins.up_threshold = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+
+ down(&dbs_sem);
+ if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
+ input < MIN_FREQUENCY_DOWN_THRESHOLD ||
+ input >= dbs_tuners_ins.up_threshold) {
+ up(&dbs_sem);
+ return -EINVAL;
+ }
+
+ dbs_tuners_ins.down_threshold = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ unsigned int j;
+
+ ret = sscanf (buf, "%u", &input);
+ if ( ret != 1 )
+ return -EINVAL;
+
+ if ( input > 1 )
+ input = 1;
+
+ down(&dbs_sem);
+ if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+ up(&dbs_sem);
+ return count;
+ }
+ dbs_tuners_ins.ignore_nice = input;
+
+ /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+ j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+ }
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_freq_step(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf (buf, "%u", &input);
+
+ if ( ret != 1 )
+ return -EINVAL;
+
+ if ( input > 100 )
+ input = 100;
+
+ /* no need to test here if freq_step is zero as the user might actually
+ * want this, they would be crazy though :) */
+ down(&dbs_sem);
+ dbs_tuners_ins.freq_step = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+#define define_one_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+define_one_rw(sampling_rate);
+define_one_rw(sampling_down_factor);
+define_one_rw(up_threshold);
+define_one_rw(down_threshold);
+define_one_rw(ignore_nice);
+define_one_rw(freq_step);
+
+static struct attribute * dbs_attributes[] = {
+ &sampling_rate_max.attr,
+ &sampling_rate_min.attr,
+ &sampling_rate.attr,
+ &sampling_down_factor.attr,
+ &up_threshold.attr,
+ &down_threshold.attr,
+ &ignore_nice.attr,
+ &freq_step.attr,
+ NULL
+};
+
+static struct attribute_group dbs_attr_group = {
+ .attrs = dbs_attributes,
+ .name = "conservative",
+};
+
+/************************** sysfs end ************************/
+
+static void dbs_check_cpu(int cpu)
+{
+ unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
+ unsigned int freq_step;
+ unsigned int freq_down_sampling_rate;
+ static int down_skip[NR_CPUS];
+ static int requested_freq[NR_CPUS];
+ static unsigned short init_flag = 0;
+ struct cpu_dbs_info_s *this_dbs_info;
+ struct cpu_dbs_info_s *dbs_info;
+
+ struct cpufreq_policy *policy;
+ unsigned int j;
+
+ this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ if (!this_dbs_info->enable)
+ return;
+
+ policy = this_dbs_info->cur_policy;
+
+	if ( init_flag == 0 ) {
+		for ( /* NULL */; init_flag < NR_CPUS; init_flag++ ) {
+			dbs_info = &per_cpu(cpu_dbs_info, init_flag);
+			/* index by the loop counter, not the current cpu,
+			 * and skip CPUs whose policy is not yet set up */
+			if (dbs_info->cur_policy)
+				requested_freq[init_flag] =
+					dbs_info->cur_policy->cur;
+		}
+		init_flag = 1;
+	}
+
+ /*
+ * The default safe range is 20% to 80%
+ * Every sampling_rate, we check
+ * - If current idle time is less than 20%, then we try to
+ * increase frequency
+ * Every sampling_rate*sampling_down_factor, we check
+ * - If current idle time is more than 80%, then we try to
+ * decrease frequency
+ *
+	 * Frequency increases and decreases happen in minimum steps of
+	 * 5% (default) of max_frequency, rather than jumping straight
+	 * to the maximum as 'ondemand' does.
+ */
+
+ /* Check for frequency increase */
+
+ idle_ticks = UINT_MAX;
+ for_each_cpu_mask(j, policy->cpus) {
+ unsigned int tmp_idle_ticks, total_idle_ticks;
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ /* Check for frequency increase */
+ total_idle_ticks = get_cpu_idle_time(j);
+ tmp_idle_ticks = total_idle_ticks -
+ j_dbs_info->prev_cpu_idle_up;
+ j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+ if (tmp_idle_ticks < idle_ticks)
+ idle_ticks = tmp_idle_ticks;
+ }
+
+ /* Scale idle ticks by 100 and compare with up and down ticks */
+ idle_ticks *= 100;
+ up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+ if (idle_ticks < up_idle_ticks) {
+ down_skip[cpu] = 0;
+ for_each_cpu_mask(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->prev_cpu_idle_down =
+ j_dbs_info->prev_cpu_idle_up;
+ }
+ /* if we are already at full speed then break out early */
+ if (requested_freq[cpu] == policy->max)
+ return;
+
+ freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+
+ /* max freq cannot be less than 100. But who knows.... */
+ if (unlikely(freq_step == 0))
+ freq_step = 5;
+
+ requested_freq[cpu] += freq_step;
+ if (requested_freq[cpu] > policy->max)
+ requested_freq[cpu] = policy->max;
+
+ __cpufreq_driver_target(policy, requested_freq[cpu],
+ CPUFREQ_RELATION_H);
+ return;
+ }
+
+ /* Check for frequency decrease */
+ down_skip[cpu]++;
+ if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
+ return;
+
+ idle_ticks = UINT_MAX;
+ for_each_cpu_mask(j, policy->cpus) {
+ unsigned int tmp_idle_ticks, total_idle_ticks;
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
+ tmp_idle_ticks = total_idle_ticks -
+ j_dbs_info->prev_cpu_idle_down;
+ j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+
+ if (tmp_idle_ticks < idle_ticks)
+ idle_ticks = tmp_idle_ticks;
+ }
+
+ /* Scale idle ticks by 100 and compare with up and down ticks */
+ idle_ticks *= 100;
+ down_skip[cpu] = 0;
+
+ freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
+ dbs_tuners_ins.sampling_down_factor;
+ down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
+ usecs_to_jiffies(freq_down_sampling_rate);
+
+ if (idle_ticks > down_idle_ticks) {
+ /* if we are already at the lowest speed then break out early
+ * or if we 'cannot' reduce the speed as the user might want
+ * freq_step to be zero */
+ if (requested_freq[cpu] == policy->min
+ || dbs_tuners_ins.freq_step == 0)
+ return;
+
+ freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+
+ /* max freq cannot be less than 100. But who knows.... */
+ if (unlikely(freq_step == 0))
+ freq_step = 5;
+
+ requested_freq[cpu] -= freq_step;
+ if (requested_freq[cpu] < policy->min)
+ requested_freq[cpu] = policy->min;
+
+ __cpufreq_driver_target(policy,
+ requested_freq[cpu],
+ CPUFREQ_RELATION_H);
+ return;
+ }
+}
+
+static void do_dbs_timer(void *data)
+{
+ int i;
+ down(&dbs_sem);
+ for_each_online_cpu(i)
+ dbs_check_cpu(i);
+ schedule_delayed_work(&dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ up(&dbs_sem);
+}
+
+static inline void dbs_timer_init(void)
+{
+ INIT_WORK(&dbs_work, do_dbs_timer, NULL);
+ schedule_delayed_work(&dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ return;
+}
+
+static inline void dbs_timer_exit(void)
+{
+ cancel_delayed_work(&dbs_work);
+ return;
+}
+
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ unsigned int cpu = policy->cpu;
+ struct cpu_dbs_info_s *this_dbs_info;
+ unsigned int j;
+
+ this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) ||
+ (!policy->cur))
+ return -EINVAL;
+
+ if (policy->cpuinfo.transition_latency >
+ (TRANSITION_LATENCY_LIMIT * 1000))
+ return -EINVAL;
+ if (this_dbs_info->enable) /* Already enabled */
+ break;
+
+ down(&dbs_sem);
+ for_each_cpu_mask(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->cur_policy = policy;
+
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+ j_dbs_info->prev_cpu_idle_down
+ = j_dbs_info->prev_cpu_idle_up;
+ }
+ this_dbs_info->enable = 1;
+ sysfs_create_group(&policy->kobj, &dbs_attr_group);
+ dbs_enable++;
+ /*
+		 * Start the timer schedule work when this governor
+		 * is used for the first time
+ */
+ if (dbs_enable == 1) {
+ unsigned int latency;
+			/* policy latency is in ns. Convert it to us first */
+
+ latency = policy->cpuinfo.transition_latency;
+ if (latency < 1000)
+ latency = 1000;
+
+ def_sampling_rate = (latency / 1000) *
+ DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
+ dbs_tuners_ins.sampling_rate = def_sampling_rate;
+ dbs_tuners_ins.ignore_nice = 0;
+ dbs_tuners_ins.freq_step = 5;
+
+ dbs_timer_init();
+ }
+
+ up(&dbs_sem);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ down(&dbs_sem);
+ this_dbs_info->enable = 0;
+ sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+ dbs_enable--;
+ /*
+		 * Stop the timer schedule work when this governor
+		 * is no longer used by any CPU
+ */
+ if (dbs_enable == 0)
+ dbs_timer_exit();
+
+ up(&dbs_sem);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ down(&dbs_sem);
+ if (policy->max < this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(
+ this_dbs_info->cur_policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(
+ this_dbs_info->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ up(&dbs_sem);
+ break;
+ }
+ return 0;
+}
+
+static struct cpufreq_governor cpufreq_gov_dbs = {
+ .name = "conservative",
+ .governor = cpufreq_governor_dbs,
+ .owner = THIS_MODULE,
+};
+
+static int __init cpufreq_gov_dbs_init(void)
+{
+ return cpufreq_register_governor(&cpufreq_gov_dbs);
+}
+
+static void __exit cpufreq_gov_dbs_exit(void)
+{
+ /* Make sure that the scheduled work is indeed not running */
+ flush_scheduled_work();
+
+ cpufreq_unregister_governor(&cpufreq_gov_dbs);
+}
+
+
+MODULE_AUTHOR ("Alexander Clouter <alex-kernel@digriz.org.uk>");
+MODULE_DESCRIPTION ("'cpufreq_conservative' - A dynamic cpufreq governor for "
+ "Low Latency Frequency Transition capable processors "
+ "optimised for use in a battery environment");
+MODULE_LICENSE ("GPL");
+
+module_init(cpufreq_gov_dbs_init);
+module_exit(cpufreq_gov_dbs_exit);
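
The governor's tunables are exported through the sysfs attribute group registered above, so they appear in a per-policy 'conservative' directory once the governor is active. A hypothetical user-space example of adjusting one of them (assuming the governor is running on cpu0; the path and value are illustrative):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cpufreq/conservative/freq_step";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "10\n");	/* step 10% of max frequency per sample */
	fclose(f);
	return 0;
}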
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 8d83a21c6477..c1fc9c62bb51 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -34,13 +34,9 @@
*/
#define DEF_FREQUENCY_UP_THRESHOLD (80)
-#define MIN_FREQUENCY_UP_THRESHOLD (0)
+#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
-#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
-#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
-#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
-
/*
* The polling frequency of this governor depends on the capability of
* the processor. Default polling frequency is 1000 times the transition
@@ -55,9 +51,9 @@ static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
-#define DEF_SAMPLING_DOWN_FACTOR (10)
+#define DEF_SAMPLING_DOWN_FACTOR (1)
+#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
-#define sampling_rate_in_HZ(x) (((x * HZ) < (1000 * 1000))?1:((x * HZ) / (1000 * 1000)))
static void do_dbs_timer(void *data);
@@ -78,15 +74,23 @@ struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
- unsigned int down_threshold;
+ unsigned int ignore_nice;
};
static struct dbs_tuners dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
- .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+{
+ return kstat_cpu(cpu).cpustat.idle +
+ kstat_cpu(cpu).cpustat.iowait +
+ ( !dbs_tuners_ins.ignore_nice ?
+ kstat_cpu(cpu).cpustat.nice :
+ 0);
+}
+
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
@@ -115,7 +119,7 @@ static ssize_t show_##file_name \
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
+show_one(ignore_nice, ignore_nice);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
const char *buf, size_t count)
@@ -126,6 +130,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
if (ret != 1 )
return -EINVAL;
+ if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+
down(&dbs_sem);
dbs_tuners_ins.sampling_down_factor = input;
up(&dbs_sem);
@@ -161,8 +168,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
- input < MIN_FREQUENCY_UP_THRESHOLD ||
- input <= dbs_tuners_ins.down_threshold) {
+ input < MIN_FREQUENCY_UP_THRESHOLD) {
up(&dbs_sem);
return -EINVAL;
}
@@ -173,22 +179,35 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int input;
int ret;
+
+ unsigned int j;
+
ret = sscanf (buf, "%u", &input);
+ if ( ret != 1 )
+ return -EINVAL;
+ if ( input > 1 )
+ input = 1;
+
down(&dbs_sem);
- if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
- input < MIN_FREQUENCY_DOWN_THRESHOLD ||
- input >= dbs_tuners_ins.up_threshold) {
+ if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
up(&dbs_sem);
- return -EINVAL;
+ return count;
}
+ dbs_tuners_ins.ignore_nice = input;
- dbs_tuners_ins.down_threshold = input;
+ /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+ j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+ }
up(&dbs_sem);
return count;
@@ -201,7 +220,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
-define_one_rw(down_threshold);
+define_one_rw(ignore_nice);
static struct attribute * dbs_attributes[] = {
&sampling_rate_max.attr,
@@ -209,7 +228,7 @@ static struct attribute * dbs_attributes[] = {
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
- &down_threshold.attr,
+ &ignore_nice.attr,
NULL
};
@@ -222,9 +241,8 @@ static struct attribute_group dbs_attr_group = {
static void dbs_check_cpu(int cpu)
{
- unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
- unsigned int total_idle_ticks;
- unsigned int freq_down_step;
+ unsigned int idle_ticks, up_idle_ticks, total_ticks;
+ unsigned int freq_next;
unsigned int freq_down_sampling_rate;
static int down_skip[NR_CPUS];
struct cpu_dbs_info_s *this_dbs_info;
@@ -238,38 +256,25 @@ static void dbs_check_cpu(int cpu)
policy = this_dbs_info->cur_policy;
/*
- * The default safe range is 20% to 80%
- * Every sampling_rate, we check
- * - If current idle time is less than 20%, then we try to
- * increase frequency
- * Every sampling_rate*sampling_down_factor, we check
- * - If current idle time is more than 80%, then we try to
- * decrease frequency
+	 * Every sampling_rate, we check if current idle time is less
+	 * than 20% (default); if it is, we try to increase the frequency.
+	 * Every sampling_rate*sampling_down_factor, we look for the lowest
+	 * frequency which can sustain the load while keeping idle time over
+	 * 30%. If such a frequency exists, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
- * 5% of max_frequency
+ * 5% (default) of current frequency
*/
/* Check for frequency increase */
- total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
- idle_ticks = total_idle_ticks -
- this_dbs_info->prev_cpu_idle_up;
- this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
-
+ idle_ticks = UINT_MAX;
for_each_cpu_mask(j, policy->cpus) {
- unsigned int tmp_idle_ticks;
+ unsigned int tmp_idle_ticks, total_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
- if (j == cpu)
- continue;
-
j_dbs_info = &per_cpu(cpu_dbs_info, j);
- /* Check for frequency increase */
- total_idle_ticks = kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
+ total_idle_ticks = get_cpu_idle_time(j);
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_up;
j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -281,13 +286,23 @@ static void dbs_check_cpu(int cpu)
/* Scale idle ticks by 100 and compare with up and down ticks */
idle_ticks *= 100;
up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
- sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
if (idle_ticks < up_idle_ticks) {
+ down_skip[cpu] = 0;
+ for_each_cpu_mask(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->prev_cpu_idle_down =
+ j_dbs_info->prev_cpu_idle_up;
+ }
+ /* if we are already at full speed then break out early */
+ if (policy->cur == policy->max)
+ return;
+
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- down_skip[cpu] = 0;
- this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
return;
}
@@ -296,23 +311,14 @@ static void dbs_check_cpu(int cpu)
if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
return;
- total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
- idle_ticks = total_idle_ticks -
- this_dbs_info->prev_cpu_idle_down;
- this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+ idle_ticks = UINT_MAX;
for_each_cpu_mask(j, policy->cpus) {
- unsigned int tmp_idle_ticks;
+ unsigned int tmp_idle_ticks, total_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
- if (j == cpu)
- continue;
-
j_dbs_info = &per_cpu(cpu_dbs_info, j);
- /* Check for frequency increase */
- total_idle_ticks = kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
+ /* Check for frequency decrease */
+ total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_down;
j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -321,38 +327,37 @@ static void dbs_check_cpu(int cpu)
idle_ticks = tmp_idle_ticks;
}
- /* Scale idle ticks by 100 and compare with up and down ticks */
- idle_ticks *= 100;
down_skip[cpu] = 0;
+ /* if we cannot reduce the frequency anymore, break out early */
+ if (policy->cur == policy->min)
+ return;
+ /* Compute how many ticks there are between two measurements */
freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
dbs_tuners_ins.sampling_down_factor;
- down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
- sampling_rate_in_HZ(freq_down_sampling_rate);
+ total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
- if (idle_ticks > down_idle_ticks ) {
- freq_down_step = (5 * policy->max) / 100;
-
- /* max freq cannot be less than 100. But who knows.... */
- if (unlikely(freq_down_step == 0))
- freq_down_step = 5;
+ /*
+	 * The optimal frequency is the lowest frequency that can support
+	 * the current CPU usage without triggering the up policy. To be
+	 * safe, we stay 10 points under the threshold.
+ */
+ freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
+ freq_next = (freq_next * policy->cur) /
+ (dbs_tuners_ins.up_threshold - 10);
- __cpufreq_driver_target(policy,
- policy->cur - freq_down_step,
- CPUFREQ_RELATION_H);
- return;
- }
+ if (freq_next <= ((policy->cur * 95) / 100))
+ __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
}
static void do_dbs_timer(void *data)
{
int i;
down(&dbs_sem);
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- dbs_check_cpu(i);
+ for_each_online_cpu(i)
+ dbs_check_cpu(i);
schedule_delayed_work(&dbs_work,
- sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
up(&dbs_sem);
}
@@ -360,7 +365,7 @@ static inline void dbs_timer_init(void)
{
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_delayed_work(&dbs_work,
- sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;
}
@@ -397,12 +402,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
- j_dbs_info->prev_cpu_idle_up =
- kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
- j_dbs_info->prev_cpu_idle_down =
- kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+ j_dbs_info->prev_cpu_idle_down
+ = j_dbs_info->prev_cpu_idle_up;
}
this_dbs_info->enable = 1;
sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -422,6 +424,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
def_sampling_rate = (latency / 1000) *
DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
dbs_tuners_ins.sampling_rate = def_sampling_rate;
+ dbs_tuners_ins.ignore_nice = 0;
dbs_timer_init();
}
@@ -461,12 +464,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return 0;
}
-struct cpufreq_governor cpufreq_gov_dbs = {
+static struct cpufreq_governor cpufreq_gov_dbs = {
.name = "ondemand",
.governor = cpufreq_governor_dbs,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(cpufreq_gov_dbs);
static int __init cpufreq_gov_dbs_init(void)
{
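
The rewritten down-scaling path above no longer steps the frequency down blindly; it computes the lowest frequency that can sustain the measured load while staying 10 points under up_threshold. A standalone re-derivation of that freq_next arithmetic (plain C with example numbers, for illustration only):

#include <stdio.h>

static unsigned int freq_next(unsigned int total_ticks,
			      unsigned int idle_ticks,
			      unsigned int cur_freq,
			      unsigned int up_threshold)
{
	/* busy percentage over the down-sampling window */
	unsigned int load = ((total_ticks - idle_ticks) * 100) / total_ticks;

	/* lowest frequency keeping load 10 points under up_threshold */
	return (load * cur_freq) / (up_threshold - 10);
}

int main(void)
{
	/* e.g. 30% busy at 2 GHz with the default 80% up_threshold */
	printf("freq_next = %u kHz\n", freq_next(1000, 700, 2000000, 80));
	return 0;
}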
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 2084593937c6..741b6b191e6a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -19,6 +19,7 @@
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
+#include <asm/cputime.h>
static spinlock_t cpufreq_stats_lock;
@@ -29,20 +30,14 @@ static struct freq_attr _attr_##_name = {\
.show = _show,\
};
-static unsigned long
-delta_time(unsigned long old, unsigned long new)
-{
- return (old > new) ? (old - new): (new + ~old + 1);
-}
-
struct cpufreq_stats {
unsigned int cpu;
unsigned int total_trans;
- unsigned long long last_time;
+ unsigned long long last_time;
unsigned int max_state;
unsigned int state_num;
unsigned int last_index;
- unsigned long long *time_in_state;
+ cputime64_t *time_in_state;
unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
unsigned int *trans_table;
@@ -60,12 +55,16 @@ static int
cpufreq_stats_update (unsigned int cpu)
{
struct cpufreq_stats *stat;
+ unsigned long long cur_time;
+
+ cur_time = get_jiffies_64();
spin_lock(&cpufreq_stats_lock);
stat = cpufreq_stats_table[cpu];
if (stat->time_in_state)
- stat->time_in_state[stat->last_index] +=
- delta_time(stat->last_time, jiffies);
- stat->last_time = jiffies;
+ stat->time_in_state[stat->last_index] =
+ cputime64_add(stat->time_in_state[stat->last_index],
+ cputime_sub(cur_time, stat->last_time));
+ stat->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
return 0;
}
@@ -90,8 +89,8 @@ show_time_in_state(struct cpufreq_policy *policy, char *buf)
return 0;
cpufreq_stats_update(stat->cpu);
for (i = 0; i < stat->state_num; i++) {
- len += sprintf(buf + len, "%u %llu\n",
- stat->freq_table[i], stat->time_in_state[i]);
+ len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
+ (unsigned long long)cputime64_to_clock_t(stat->time_in_state[i]));
}
return len;
}
@@ -107,16 +106,30 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
if(!stat)
return 0;
cpufreq_stats_update(stat->cpu);
+ len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
+ len += snprintf(buf + len, PAGE_SIZE - len, " : ");
+ for (i = 0; i < stat->state_num; i++) {
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ stat->freq_table[i]);
+ }
+ if (len >= PAGE_SIZE)
+ return len;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+
for (i = 0; i < stat->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u:\t",
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
stat->freq_table[i]);
for (j = 0; j < stat->state_num; j++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%u\t",
+ len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
stat->trans_table[i*stat->max_state+j]);
}
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -197,7 +210,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
count++;
}
- alloc_size = count * sizeof(int) + count * sizeof(long long);
+ alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
alloc_size += count * count * sizeof(int);
@@ -224,7 +237,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
}
stat->state_num = j;
spin_lock(&cpufreq_stats_lock);
- stat->last_time = jiffies;
+ stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur);
spin_unlock(&cpufreq_stats_lock);
cpufreq_cpu_put(data);
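
The stats code above switches from 32-bit jiffies with the hand-rolled delta_time() helper to 64-bit timestamps (get_jiffies_64()) and cputime64_t accumulation. A small user-space illustration of the motivation: a 32-bit tick counter wraps (at HZ=1000, after roughly 49.7 days), so deltas depend on modular arithmetic, whereas a 64-bit counter never wraps in practice. The values below are made up for the demonstration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t old32 = 0xFFFFFFF0u, new32 = 0x00000010u;	/* wrapped */
	uint64_t old64 = 0x0FFFFFFF0ULL, new64 = 0x100000010ULL;

	/* unsigned modular subtraction survives a single wraparound... */
	printf("32-bit delta: %u ticks\n", new32 - old32);
	/* ...but the 64-bit counter simply never wraps in practice */
	printf("64-bit delta: %llu ticks\n",
	       (unsigned long long)(new64 - old64));
	return 0;
}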
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 6d5df6c2efa2..df1b721154d2 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/config.h>
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/efi.h>
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 47225e324356..4e0f13d1d060 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -72,6 +72,7 @@ static struct amd_ide_chip {
{ PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, 0x50, AMD_UDMA_133 },
+ { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 },
{ 0 }
};
@@ -487,6 +488,7 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
/* 12 */ DECLARE_NV_DEV("NFORCE3-250-SATA2"),
/* 13 */ DECLARE_NV_DEV("NFORCE-CK804"),
/* 14 */ DECLARE_NV_DEV("NFORCE-MCP04"),
+ /* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
};
static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -521,6 +523,7 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
#endif
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 9e9dab7fe86a..8132d946c384 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -1,7 +1,7 @@
/*
* CompactPCI Hot Plug Driver
*
- * Copyright (C) 2002 SOMA Networks, Inc.
+ * Copyright (C) 2002,2005 SOMA Networks, Inc.
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001 IBM Corp.
*
@@ -45,10 +45,10 @@
#define dbg(format, arg...) \
do { \
- if(cpci_debug) \
+ if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
MY_NAME , ## arg); \
- } while(0)
+ } while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -111,10 +111,8 @@ enable_slot(struct hotplug_slot *hotplug_slot)
dbg("%s - physical_slot = %s", __FUNCTION__, hotplug_slot->name);
- if(controller->ops->set_power) {
+ if (controller->ops->set_power)
retval = controller->ops->set_power(slot, 1);
- }
-
return retval;
}
@@ -126,37 +124,41 @@ disable_slot(struct hotplug_slot *hotplug_slot)
dbg("%s - physical_slot = %s", __FUNCTION__, hotplug_slot->name);
+ down_write(&list_rwsem);
+
/* Unconfigure device */
dbg("%s - unconfiguring slot %s",
__FUNCTION__, slot->hotplug_slot->name);
- if((retval = cpci_unconfigure_slot(slot))) {
+ if ((retval = cpci_unconfigure_slot(slot))) {
err("%s - could not unconfigure slot %s",
__FUNCTION__, slot->hotplug_slot->name);
- return retval;
+ goto disable_error;
}
dbg("%s - finished unconfiguring slot %s",
__FUNCTION__, slot->hotplug_slot->name);
/* Clear EXT (by setting it) */
- if(cpci_clear_ext(slot)) {
+ if (cpci_clear_ext(slot)) {
err("%s - could not clear EXT for slot %s",
__FUNCTION__, slot->hotplug_slot->name);
retval = -ENODEV;
+ goto disable_error;
}
cpci_led_on(slot);
- if(controller->ops->set_power) {
- retval = controller->ops->set_power(slot, 0);
- }
+ if (controller->ops->set_power)
+ if ((retval = controller->ops->set_power(slot, 0)))
+ goto disable_error;
- if(update_adapter_status(slot->hotplug_slot, 0)) {
+ if (update_adapter_status(slot->hotplug_slot, 0))
warn("failure to update adapter file");
- }
- if(slot->extracting) {
+ if (slot->extracting) {
slot->extracting = 0;
atomic_dec(&extracting);
}
+disable_error:
+ up_write(&list_rwsem);
return retval;
}
@@ -165,9 +167,8 @@ cpci_get_power_status(struct slot *slot)
{
u8 power = 1;
- if(controller->ops->get_power) {
+ if (controller->ops->get_power)
power = controller->ops->get_power(slot);
- }
return power;
}
@@ -237,9 +238,8 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
int status = -ENOMEM;
int i;
- if(!(controller && bus)) {
+ if (!(controller && bus))
return -ENODEV;
- }
/*
* Create a structure for each slot, and register that slot
@@ -316,32 +316,30 @@ int
cpci_hp_unregister_bus(struct pci_bus *bus)
{
struct slot *slot;
- struct list_head *tmp;
- struct list_head *next;
- int status;
+ struct slot *tmp;
+ int status = 0;
down_write(&list_rwsem);
- if(!slots) {
+ if (!slots) {
up_write(&list_rwsem);
return -1;
}
- list_for_each_safe(tmp, next, &slot_list) {
- slot = list_entry(tmp, struct slot, slot_list);
- if(slot->bus == bus) {
+ list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
+ if (slot->bus == bus) {
+ list_del(&slot->slot_list);
+ slots--;
+
dbg("deregistering slot %s", slot->hotplug_slot->name);
status = pci_hp_deregister(slot->hotplug_slot);
- if(status) {
+ if (status) {
err("pci_hp_deregister failed with error %d",
status);
- return status;
+ break;
}
-
- list_del(&slot->slot_list);
- slots--;
}
}
up_write(&list_rwsem);
- return 0;
+ return status;
}
/* This is the interrupt mode interrupt handler */
@@ -351,7 +349,7 @@ cpci_hp_intr(int irq, void *data, struct pt_regs *regs)
dbg("entered cpci_hp_intr");
/* Check to see if it was our interrupt */
- if((controller->irq_flags & SA_SHIRQ) &&
+ if ((controller->irq_flags & SA_SHIRQ) &&
!controller->ops->check_irq(controller->dev_id)) {
dbg("exited cpci_hp_intr, not our interrupt");
return IRQ_NONE;
@@ -373,38 +371,30 @@ cpci_hp_intr(int irq, void *data, struct pt_regs *regs)
* INS bits of the cold-inserted devices.
*/
static int
-init_slots(void)
+init_slots(int clear_ins)
{
struct slot *slot;
- struct list_head *tmp;
struct pci_dev* dev;
dbg("%s - enter", __FUNCTION__);
down_read(&list_rwsem);
- if(!slots) {
+ if (!slots) {
up_read(&list_rwsem);
return -1;
}
- list_for_each(tmp, &slot_list) {
- slot = list_entry(tmp, struct slot, slot_list);
+ list_for_each_entry(slot, &slot_list, slot_list) {
dbg("%s - looking at slot %s",
__FUNCTION__, slot->hotplug_slot->name);
- if(cpci_check_and_clear_ins(slot)) {
+ if (clear_ins && cpci_check_and_clear_ins(slot))
dbg("%s - cleared INS for slot %s",
__FUNCTION__, slot->hotplug_slot->name);
- dev = pci_find_slot(slot->bus->number, PCI_DEVFN(slot->number, 0));
- if(dev) {
- if(update_adapter_status(slot->hotplug_slot, 1)) {
- warn("failure to update adapter file");
- }
- if(update_latch_status(slot->hotplug_slot, 1)) {
- warn("failure to update latch file");
- }
- slot->dev = dev;
- } else {
- err("%s - no driver attached to device in slot %s",
- __FUNCTION__, slot->hotplug_slot->name);
- }
+ dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
+ if (dev) {
+ if (update_adapter_status(slot->hotplug_slot, 1))
+ warn("failure to update adapter file");
+ if (update_latch_status(slot->hotplug_slot, 1))
+ warn("failure to update latch file");
+ slot->dev = dev;
}
}
up_read(&list_rwsem);
@@ -416,26 +406,28 @@ static int
check_slots(void)
{
struct slot *slot;
- struct list_head *tmp;
int extracted;
int inserted;
u16 hs_csr;
down_read(&list_rwsem);
- if(!slots) {
+ if (!slots) {
up_read(&list_rwsem);
err("no slots registered, shutting down");
return -1;
}
extracted = inserted = 0;
- list_for_each(tmp, &slot_list) {
- slot = list_entry(tmp, struct slot, slot_list);
+ list_for_each_entry(slot, &slot_list, slot_list) {
dbg("%s - looking at slot %s",
__FUNCTION__, slot->hotplug_slot->name);
- if(cpci_check_and_clear_ins(slot)) {
- /* Some broken hardware (e.g. PLX 9054AB) asserts ENUM# twice... */
- if(slot->dev) {
- warn("slot %s already inserted", slot->hotplug_slot->name);
+ if (cpci_check_and_clear_ins(slot)) {
+ /*
+ * Some broken hardware (e.g. PLX 9054AB) asserts
+ * ENUM# twice...
+ */
+ if (slot->dev) {
+ warn("slot %s already inserted",
+ slot->hotplug_slot->name);
inserted++;
continue;
}
@@ -452,7 +444,7 @@ check_slots(void)
/* Configure device */
dbg("%s - configuring slot %s",
__FUNCTION__, slot->hotplug_slot->name);
- if(cpci_configure_slot(slot)) {
+ if (cpci_configure_slot(slot)) {
err("%s - could not configure slot %s",
__FUNCTION__, slot->hotplug_slot->name);
continue;
@@ -465,13 +457,11 @@ check_slots(void)
dbg("%s - slot %s HS_CSR (2) = %04x",
__FUNCTION__, slot->hotplug_slot->name, hs_csr);
- if(update_latch_status(slot->hotplug_slot, 1)) {
+ if (update_latch_status(slot->hotplug_slot, 1))
warn("failure to update latch file");
- }
- if(update_adapter_status(slot->hotplug_slot, 1)) {
+ if (update_adapter_status(slot->hotplug_slot, 1))
warn("failure to update adapter file");
- }
cpci_led_off(slot);
@@ -481,7 +471,7 @@ check_slots(void)
__FUNCTION__, slot->hotplug_slot->name, hs_csr);
inserted++;
- } else if(cpci_check_ext(slot)) {
+ } else if (cpci_check_ext(slot)) {
/* Process extraction request */
dbg("%s - slot %s extracted",
__FUNCTION__, slot->hotplug_slot->name);
@@ -491,27 +481,25 @@ check_slots(void)
dbg("%s - slot %s HS_CSR = %04x",
__FUNCTION__, slot->hotplug_slot->name, hs_csr);
- if(!slot->extracting) {
- if(update_latch_status(slot->hotplug_slot, 0)) {
+ if (!slot->extracting) {
+ if (update_latch_status(slot->hotplug_slot, 0)) {
warn("failure to update latch file");
-
}
- atomic_inc(&extracting);
slot->extracting = 1;
+ atomic_inc(&extracting);
}
extracted++;
- } else if(slot->extracting) {
+ } else if (slot->extracting) {
hs_csr = cpci_get_hs_csr(slot);
- if(hs_csr == 0xffff) {
+ if (hs_csr == 0xffff) {
/*
* Hmmm, we're likely hosed at this point, should we
* bother trying to tell the driver or not?
*/
err("card in slot %s was improperly removed",
slot->hotplug_slot->name);
- if(update_adapter_status(slot->hotplug_slot, 0)) {
+ if (update_adapter_status(slot->hotplug_slot, 0))
warn("failure to update adapter file");
- }
slot->extracting = 0;
atomic_dec(&extracting);
}
@@ -520,10 +508,9 @@ check_slots(void)
up_read(&list_rwsem);
dbg("inserted=%d, extracted=%d, extracting=%d",
inserted, extracted, atomic_read(&extracting));
- if(inserted || extracted) {
+ if (inserted || extracted)
return extracted;
- }
- else if(!atomic_read(&extracting)) {
+ else if (!atomic_read(&extracting)) {
err("cannot find ENUM# source, shutting down");
return -1;
}
@@ -541,12 +528,12 @@ event_thread(void *data)
unlock_kernel();
dbg("%s - event thread started", __FUNCTION__);
- while(1) {
+ while (1) {
dbg("event thread sleeping");
down_interruptible(&event_semaphore);
dbg("event thread woken, thread_finished = %d",
thread_finished);
- if(thread_finished || signal_pending(current))
+ if (thread_finished || signal_pending(current))
break;
do {
rc = check_slots();
@@ -558,7 +545,9 @@ event_thread(void *data)
thread_finished = 1;
break;
}
- } while(atomic_read(&extracting) != 0);
+ } while (atomic_read(&extracting) && !thread_finished);
+ if (thread_finished)
+ break;
/* Re-enable ENUM# interrupt */
dbg("%s - re-enabling irq", __FUNCTION__);
@@ -579,21 +568,21 @@ poll_thread(void *data)
daemonize("cpci_hp_polld");
unlock_kernel();
- while(1) {
- if(thread_finished || signal_pending(current))
+ while (1) {
+ if (thread_finished || signal_pending(current))
break;
- if(controller->ops->query_enum()) {
+ if (controller->ops->query_enum()) {
do {
rc = check_slots();
- if(rc > 0) {
+ if (rc > 0) {
/* Give userspace a chance to handle extraction */
msleep(500);
- } else if(rc < 0) {
+ } else if (rc < 0) {
dbg("%s - error checking slots", __FUNCTION__);
thread_finished = 1;
break;
}
- } while(atomic_read(&extracting) != 0);
+ } while (atomic_read(&extracting) && !thread_finished);
}
msleep(100);
}
@@ -612,12 +601,11 @@ cpci_start_thread(void)
init_MUTEX_LOCKED(&thread_exit);
thread_finished = 0;
- if(controller->irq) {
+ if (controller->irq)
pid = kernel_thread(event_thread, NULL, 0);
- } else {
+ else
pid = kernel_thread(poll_thread, NULL, 0);
- }
- if(pid < 0) {
+ if (pid < 0) {
err("Can't start up our thread");
return -1;
}
@@ -630,9 +618,8 @@ cpci_stop_thread(void)
{
thread_finished = 1;
dbg("thread finish command given");
- if(controller->irq) {
+ if (controller->irq)
up(&event_semaphore);
- }
dbg("wait for thread to exit");
down(&thread_exit);
}
@@ -642,45 +629,67 @@ cpci_hp_register_controller(struct cpci_hp_controller *new_controller)
{
int status = 0;
- if(!controller) {
- controller = new_controller;
- if(controller->irq) {
- if(request_irq(controller->irq,
- cpci_hp_intr,
- controller->irq_flags,
- MY_NAME, controller->dev_id)) {
- err("Can't get irq %d for the hotplug cPCI controller", controller->irq);
- status = -ENODEV;
- }
- dbg("%s - acquired controller irq %d", __FUNCTION__,
- controller->irq);
+ if (controller)
+ return -1;
+ if (!(new_controller && new_controller->ops))
+ return -EINVAL;
+ if (new_controller->irq) {
+ if (!(new_controller->ops->enable_irq &&
+ new_controller->ops->disable_irq))
+ status = -EINVAL;
+ if (request_irq(new_controller->irq,
+ cpci_hp_intr,
+ new_controller->irq_flags,
+ MY_NAME,
+ new_controller->dev_id)) {
+ err("Can't get irq %d for the hotplug cPCI controller",
+ new_controller->irq);
+ status = -ENODEV;
}
- } else {
- err("cPCI hotplug controller already registered");
- status = -1;
+ dbg("%s - acquired controller irq %d",
+ __FUNCTION__, new_controller->irq);
}
+ if (!status)
+ controller = new_controller;
return status;
}
+static void
+cleanup_slots(void)
+{
+ struct slot *slot;
+ struct slot *tmp;
+
+ /*
+ * Unregister all of our slots with the pci_hotplug subsystem,
+ * and free up all memory that we had allocated.
+ */
+ down_write(&list_rwsem);
+ if (!slots)
+ goto cleanup_null;
+ list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
+ list_del(&slot->slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+cleanup_null:
+ up_write(&list_rwsem);
+ return;
+}
+
int
cpci_hp_unregister_controller(struct cpci_hp_controller *old_controller)
{
int status = 0;
- if(controller) {
- if(atomic_read(&extracting) != 0) {
- return -EBUSY;
- }
- if(!thread_finished) {
+ if (controller) {
+ if (!thread_finished)
cpci_stop_thread();
- }
- if(controller->irq) {
+ if (controller->irq)
free_irq(controller->irq, controller->dev_id);
- }
controller = NULL;
- } else {
+ cleanup_slots();
+ } else
status = -ENODEV;
- }
return status;
}
@@ -691,32 +700,28 @@ cpci_hp_start(void)
int status;
dbg("%s - enter", __FUNCTION__);
- if(!controller) {
+ if (!controller)
return -ENODEV;
- }
down_read(&list_rwsem);
- if(list_empty(&slot_list)) {
+ if (list_empty(&slot_list)) {
up_read(&list_rwsem);
return -ENODEV;
}
up_read(&list_rwsem);
- if(first) {
- status = init_slots();
- if(status) {
- return status;
- }
+ status = init_slots(first);
+ if (first)
first = 0;
- }
+ if (status)
+ return status;
status = cpci_start_thread();
- if(status) {
+ if (status)
return status;
- }
dbg("%s - thread started", __FUNCTION__);
- if(controller->irq) {
+ if (controller->irq) {
/* Start enum interrupt processing */
dbg("%s - enabling irq", __FUNCTION__);
controller->ops->enable_irq();
@@ -728,13 +733,9 @@ cpci_hp_start(void)
int
cpci_hp_stop(void)
{
- if(!controller) {
+ if (!controller)
return -ENODEV;
- }
- if(atomic_read(&extracting) != 0) {
- return -EBUSY;
- }
- if(controller->irq) {
+ if (controller->irq) {
/* Stop enum interrupt processing */
dbg("%s - disabling irq", __FUNCTION__);
controller->ops->disable_irq();
@@ -743,34 +744,6 @@ cpci_hp_stop(void)
return 0;
}
-static void __exit
-cleanup_slots(void)
-{
- struct list_head *tmp;
- struct slot *slot;
-
- /*
- * Unregister all of our slots with the pci_hotplug subsystem,
- * and free up all memory that we had allocated.
- */
- down_write(&list_rwsem);
- if(!slots) {
- goto null_cleanup;
- }
- list_for_each(tmp, &slot_list) {
- slot = list_entry(tmp, struct slot, slot_list);
- list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot->name);
- kfree(slot->hotplug_slot);
- kfree(slot);
- }
- null_cleanup:
- up_write(&list_rwsem);
- return;
-}
-
int __init
cpci_hotplug_init(int debug)
{
@@ -784,7 +757,8 @@ cpci_hotplug_exit(void)
/*
* Clean everything up.
*/
- cleanup_slots();
+ cpci_hp_stop();
+ cpci_hp_unregister_controller(controller);
}
EXPORT_SYMBOL_GPL(cpci_hp_register_controller);
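
Several of the hunks above (disable_slot() in particular) replace early returns with a single exit label so that list_rwsem is always released on every path. A hypothetical user-space analogue of that pattern, using a pthreads rwlock in place of the kernel rwsem; step_one/step_two are illustrative stand-ins:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static int step_one(void) { return 0; }		/* stand-in work */
static int step_two(void) { return -1; }	/* pretend this fails */

static int do_guarded_work(void)
{
	int retval;

	pthread_rwlock_wrlock(&lock);

	retval = step_one();
	if (retval)
		goto out;		/* unlock on failure, too */

	retval = step_two();
out:
	pthread_rwlock_unlock(&lock);
	return retval;
}

int main(void)
{
	printf("retval = %d\n", do_guarded_work());
	return 0;
}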
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 69eb4fc54f2f..c878028ad215 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -1,7 +1,7 @@
/*
* CompactPCI Hot Plug Driver PCI functions
*
- * Copyright (C) 2002 by SOMA Networks, Inc.
+ * Copyright (C) 2002,2005 by SOMA Networks, Inc.
*
* All rights reserved.
*
@@ -38,10 +38,10 @@ extern int cpci_debug;
#define dbg(format, arg...) \
do { \
- if(cpci_debug) \
+ if (cpci_debug) \
printk (KERN_DEBUG "%s: " format "\n", \
MY_NAME , ## arg); \
- } while(0)
+ } while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -57,16 +57,15 @@ u8 cpci_get_attention_status(struct slot* slot)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
- if(!hs_cap) {
+ if (!hs_cap)
return 0;
- }
- if(pci_bus_read_config_word(slot->bus,
+ if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- &hs_csr)) {
+ &hs_csr))
return 0;
- }
+
return hs_csr & 0x0008 ? 1 : 0;
}
@@ -78,27 +77,22 @@ int cpci_set_attention_status(struct slot* slot, int status)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
- if(!hs_cap) {
+ if (!hs_cap)
return 0;
- }
-
- if(pci_bus_read_config_word(slot->bus,
+ if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- &hs_csr)) {
+ &hs_csr))
return 0;
- }
- if(status) {
+ if (status)
hs_csr |= HS_CSR_LOO;
- } else {
+ else
hs_csr &= ~HS_CSR_LOO;
- }
- if(pci_bus_write_config_word(slot->bus,
+ if (pci_bus_write_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- hs_csr)) {
+ hs_csr))
return 0;
- }
return 1;
}
@@ -110,16 +104,13 @@ u16 cpci_get_hs_csr(struct slot* slot)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
- if(!hs_cap) {
+ if (!hs_cap)
return 0xFFFF;
- }
-
- if(pci_bus_read_config_word(slot->bus,
+ if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- &hs_csr)) {
+ &hs_csr))
return 0xFFFF;
- }
return hs_csr;
}
@@ -132,24 +123,22 @@ int cpci_check_and_clear_ins(struct slot* slot)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
- if(!hs_cap) {
+ if (!hs_cap)
return 0;
- }
- if(pci_bus_read_config_word(slot->bus,
+ if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- &hs_csr)) {
+ &hs_csr))
return 0;
- }
- if(hs_csr & HS_CSR_INS) {
+ if (hs_csr & HS_CSR_INS) {
/* Clear INS (by setting it) */
- if(pci_bus_write_config_word(slot->bus,
+ if (pci_bus_write_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- hs_csr)) {
+ hs_csr))
ins = 0;
- }
- ins = 1;
+ else
+ ins = 1;
}
return ins;
}
@@ -163,18 +152,15 @@ int cpci_check_ext(struct slot* slot)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
- if(!hs_cap) {
+ if (!hs_cap)
return 0;
- }
- if(pci_bus_read_config_word(slot->bus,
+ if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- &hs_csr)) {
+ &hs_csr))
return 0;
- }
- if(hs_csr & HS_CSR_EXT) {
+ if (hs_csr & HS_CSR_EXT)
ext = 1;
- }
return ext;
}
@@ -186,23 +172,20 @@ int cpci_clear_ext(struct slot* slot)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
- if(!hs_cap) {
+ if (!hs_cap)
return -ENODEV;
- }
- if(pci_bus_read_config_word(slot->bus,
+ if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- &hs_csr)) {
+ &hs_csr))
return -ENODEV;
- }
- if(hs_csr & HS_CSR_EXT) {
+ if (hs_csr & HS_CSR_EXT) {
/* Clear EXT (by setting it) */
- if(pci_bus_write_config_word(slot->bus,
+ if (pci_bus_write_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- hs_csr)) {
+ hs_csr))
return -ENODEV;
- }
}
return 0;
}
@@ -215,18 +198,16 @@ int cpci_led_on(struct slot* slot)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
- if(!hs_cap) {
+ if (!hs_cap)
return -ENODEV;
- }
- if(pci_bus_read_config_word(slot->bus,
+ if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- &hs_csr)) {
+ &hs_csr))
return -ENODEV;
- }
- if((hs_csr & HS_CSR_LOO) != HS_CSR_LOO) {
+ if ((hs_csr & HS_CSR_LOO) != HS_CSR_LOO) {
hs_csr |= HS_CSR_LOO;
- if(pci_bus_write_config_word(slot->bus,
+ if (pci_bus_write_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
hs_csr)) {
@@ -246,18 +227,16 @@ int cpci_led_off(struct slot* slot)
hs_cap = pci_bus_find_capability(slot->bus,
slot->devfn,
PCI_CAP_ID_CHSWP);
- if(!hs_cap) {
+ if (!hs_cap)
return -ENODEV;
- }
- if(pci_bus_read_config_word(slot->bus,
+ if (pci_bus_read_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
- &hs_csr)) {
+ &hs_csr))
return -ENODEV;
- }
- if(hs_csr & HS_CSR_LOO) {
+ if (hs_csr & HS_CSR_LOO) {
hs_csr &= ~HS_CSR_LOO;
- if(pci_bus_write_config_word(slot->bus,
+ if (pci_bus_write_config_word(slot->bus,
slot->devfn,
hs_cap + 2,
hs_csr)) {
@@ -274,19 +253,6 @@ int cpci_led_off(struct slot* slot)
* Device configuration functions
*/
-static void cpci_enable_device(struct pci_dev *dev)
-{
- struct pci_bus *bus;
-
- pci_enable_device(dev);
- if(dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- bus = dev->subordinate;
- list_for_each_entry(dev, &bus->devices, bus_list) {
- cpci_enable_device(dev);
- }
- }
-}
-
int cpci_configure_slot(struct slot* slot)
{
unsigned char busnr;
@@ -294,14 +260,14 @@ int cpci_configure_slot(struct slot* slot)
dbg("%s - enter", __FUNCTION__);
- if(slot->dev == NULL) {
+ if (slot->dev == NULL) {
dbg("pci_dev null, finding %02x:%02x:%x",
slot->bus->number, PCI_SLOT(slot->devfn), PCI_FUNC(slot->devfn));
- slot->dev = pci_find_slot(slot->bus->number, slot->devfn);
+ slot->dev = pci_get_slot(slot->bus, slot->devfn);
}
/* Still NULL? Well then scan for it! */
- if(slot->dev == NULL) {
+ if (slot->dev == NULL) {
int n;
dbg("pci_dev still null");
@@ -311,10 +277,10 @@ int cpci_configure_slot(struct slot* slot)
*/
n = pci_scan_slot(slot->bus, slot->devfn);
dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n);
- if(n > 0)
+ if (n > 0)
pci_bus_add_devices(slot->bus);
- slot->dev = pci_find_slot(slot->bus->number, slot->devfn);
- if(slot->dev == NULL) {
+ slot->dev = pci_get_slot(slot->bus, slot->devfn);
+ if (slot->dev == NULL) {
err("Could not find PCI device for slot %02x", slot->number);
return 1;
}
@@ -329,8 +295,6 @@ int cpci_configure_slot(struct slot* slot)
pci_bus_assign_resources(slot->dev->bus);
- cpci_enable_device(slot->dev);
-
dbg("%s - exit", __FUNCTION__);
return 0;
}
@@ -341,15 +305,15 @@ int cpci_unconfigure_slot(struct slot* slot)
struct pci_dev *dev;
dbg("%s - enter", __FUNCTION__);
- if(!slot->dev) {
+ if (!slot->dev) {
err("No device for slot %02x\n", slot->number);
return -ENODEV;
}
for (i = 0; i < 8; i++) {
- dev = pci_find_slot(slot->bus->number,
+ dev = pci_get_slot(slot->bus,
PCI_DEVFN(PCI_SLOT(slot->devfn), i));
- if(dev) {
+ if (dev) {
pci_remove_bus_device(dev);
slot->dev = NULL;
}
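
The pci_find_slot() -> pci_get_slot() conversions above move to the reference-counted lookup: pci_get_slot() returns a pci_dev whose refcount has been raised, so the pointer stays valid while in use but must eventually be balanced with pci_dev_put(). A kernel-context sketch of that contract (fragment only, not a standalone program; the function name is illustrative):

#include <linux/pci.h>

static void inspect_slot(struct pci_bus *bus, unsigned int devfn)
{
	struct pci_dev *dev = pci_get_slot(bus, devfn);	/* takes a ref */

	if (!dev)
		return;

	/* ... use dev safely; the reference pins it ... */

	pci_dev_put(dev);	/* drop the reference taken above */
}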
diff --git a/drivers/pci/hotplug/shpchprm_acpi.c b/drivers/pci/hotplug/shpchprm_acpi.c
index 243a51d88b86..7957cdc72cd0 100644
--- a/drivers/pci/hotplug/shpchprm_acpi.c
+++ b/drivers/pci/hotplug/shpchprm_acpi.c
@@ -1626,7 +1626,7 @@ int shpchprm_set_hpp(
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->bus);
+ ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->slot_bus);
if (ab) {
if (ab->_hpp) {
@@ -1681,7 +1681,7 @@ void shpchprm_enable_card(
| PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
bcmd = bcommand = bcommand | PCI_BRIDGE_CTL_NO_ISA;
- ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->bus);
+ ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->slot_bus);
if (ab) {
if (ab->_hpp) {
if (ab->_hpp->enable_perr) {