author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-01 02:21:02 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-01 02:21:02 +0400
commit     3ed1c478eff8db80e234d5446cb378b503135888 (patch)
tree       e1c8e0f488ca49c49b5a31fe59add4254381dd4b /drivers
parent     151173e8ce9b95bbbbd7eedb9035cfaffbdb7cb2 (diff)
parent     371deb9500831ad1afbf9ea00e373f650deaed2f (diff)
download   linux-3ed1c478eff8db80e234d5446cb378b503135888.tar.xz
Merge tag 'pm+acpi-3.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management and ACPI updates from Rafael J Wysocki:

 - ARM big.LITTLE cpufreq driver from Viresh Kumar.

 - exynos5440 cpufreq driver from Amit Daniel Kachhap.

 - cpufreq core cleanup and code consolidation from Viresh Kumar and
   Stratos Karafotis.

 - cpufreq scalability improvement from Nathan Zimmer.

 - AMD "frequency sensitivity feedback" powersave bias for the ondemand
   cpufreq governor from Jacob Shin.

 - cpuidle code consolidation and cleanups from Daniel Lezcano.

 - ARM OMAP cpuidle fixes from Santosh Shilimkar and Daniel Lezcano.

 - ACPICA fixes and other improvements from Bob Moore, Jung-uk Kim,
   Lv Zheng, Yinghai Lu, Tang Chen, Colin Ian King, and Linn Crosetto.

 - ACPI core updates related to hotplug from Toshi Kani, Paul Bolle,
   Yasuaki Ishimatsu, and Rafael J Wysocki.

 - Intel Lynxpoint LPSS (Low-Power Subsystem) support improvements from
   Rafael J Wysocki and Andy Shevchenko.

* tag 'pm+acpi-3.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (192 commits)
  cpufreq: Revert incorrect commit 5800043
  cpufreq: MAINTAINERS: Add co-maintainer
  cpuidle: add maintainer entry
  ACPI / thermal: do not always return THERMAL_TREND_RAISING for active trip points
  ARM: s3c64xx: cpuidle: use init/exit common routine
  cpufreq: pxa2xx: initialize variables
  ACPI: video: correct acpi_video_bus_add error processing
  SH: cpuidle: use init/exit common routine
  ARM: S5pv210: compiling issue, ARM_S5PV210_CPUFREQ needs CONFIG_CPU_FREQ_TABLE=y
  ACPI: Fix wrong parameter passed to memblock_reserve
  cpuidle: fix comment format
  pnp: use %*phC to dump small buffers
  isapnp: remove debug leftovers
  ARM: imx: cpuidle: use init/exit common routine
  ARM: davinci: cpuidle: use init/exit common routine
  ARM: kirkwood: cpuidle: use init/exit common routine
  ARM: calxeda: cpuidle: use init/exit common routine
  ARM: tegra: cpuidle: use init/exit common routine for tegra3
  ARM: tegra: cpuidle: use init/exit common routine for tegra2
  ARM: OMAP4: cpuidle: use init/exit common routine
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 13
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/acpi_lpss.c | 292
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 328
-rw-r--r--  drivers/acpi/acpi_pad.c | 2
-rw-r--r--  drivers/acpi/acpi_platform.c | 40
-rw-r--r--  drivers/acpi/acpica/Makefile | 2
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 5
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 29
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 6
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 29
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 1305
-rw-r--r--  drivers/acpi/acpica/acutils.h | 28
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 10
-rw-r--r--  drivers/acpi/acpica/dswexec.c | 2
-rw-r--r--  drivers/acpi/acpica/evevent.c | 12
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 6
-rw-r--r--  drivers/acpi/acpica/evsci.c | 4
-rw-r--r--  drivers/acpi/acpica/evxface.c | 21
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 12
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 11
-rw-r--r--  drivers/acpi/acpica/exprep.c | 4
-rw-r--r--  drivers/acpi/acpica/exutils.c | 4
-rw-r--r--  drivers/acpi/acpica/hwacpi.c | 20
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 443
-rw-r--r--  drivers/acpi/acpica/nseval.c | 26
-rw-r--r--  drivers/acpi/acpica/nspredef.c | 213
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 10
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 381
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 16
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 8
-rw-r--r--  drivers/acpi/acpica/psargs.c | 2
-rw-r--r--  drivers/acpi/acpica/rscalc.c | 6
-rw-r--r--  drivers/acpi/acpica/rsdump.c | 8
-rw-r--r--  drivers/acpi/acpica/rslist.c | 8
-rw-r--r--  drivers/acpi/acpica/rsxface.c | 8
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 4
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 22
-rw-r--r--  drivers/acpi/acpica/utaddress.c | 4
-rw-r--r--  drivers/acpi/acpica/utcache.c | 18
-rw-r--r--  drivers/acpi/acpica/utdelete.c | 96
-rw-r--r--  drivers/acpi/acpica/utexcep.c | 26
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 2
-rw-r--r--  drivers/acpi/acpica/utmutex.c | 9
-rw-r--r--  drivers/acpi/acpica/utosi.c | 26
-rw-r--r--  drivers/acpi/acpica/utpredef.c | 399
-rw-r--r--  drivers/acpi/acpica/utxface.c | 17
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/bus.c | 5
-rw-r--r--  drivers/acpi/button.c | 1
-rw-r--r--  drivers/acpi/container.c | 152
-rw-r--r--  drivers/acpi/device_pm.c | 39
-rw-r--r--  drivers/acpi/fan.c | 8
-rw-r--r--  drivers/acpi/internal.h | 21
-rw-r--r--  drivers/acpi/osl.c | 4
-rw-r--r--  drivers/acpi/pci_link.c | 1
-rw-r--r--  drivers/acpi/pci_root.c | 4
-rw-r--r--  drivers/acpi/power.c | 60
-rw-r--r--  drivers/acpi/processor_idle.c | 1
-rw-r--r--  drivers/acpi/processor_thermal.c | 24
-rw-r--r--  drivers/acpi/processor_throttling.c | 3
-rw-r--r--  drivers/acpi/scan.c | 531
-rw-r--r--  drivers/acpi/sysfs.c | 66
-rw-r--r--  drivers/acpi/thermal.c | 16
-rw-r--r--  drivers/acpi/video.c | 318
-rw-r--r--  drivers/acpi/video_detect.c | 25
-rw-r--r--  drivers/base/power/domain.c | 6
-rw-r--r--  drivers/base/power/generic_ops.c | 2
-rw-r--r--  drivers/base/power/main.c | 2
-rw-r--r--  drivers/base/power/opp.c | 1
-rw-r--r--  drivers/base/power/runtime.c | 2
-rw-r--r--  drivers/clk/x86/Makefile | 2
-rw-r--r--  drivers/clk/x86/clk-lpss.c | 99
-rw-r--r--  drivers/clk/x86/clk-lpss.h | 36
-rw-r--r--  drivers/clk/x86/clk-lpt.c | 40
-rw-r--r--  drivers/cpufreq/Kconfig | 89
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 148
-rw-r--r--  drivers/cpufreq/Kconfig.powerpc | 18
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 17
-rw-r--r--  drivers/cpufreq/Makefile | 41
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/amd_freq_sensitivity.c | 148
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 278
-rw-r--r--  drivers/cpufreq/arm_big_little.h | 40
-rw-r--r--  drivers/cpufreq/arm_big_little_dt.c | 107
-rw-r--r--  drivers/cpufreq/at32ap-cpufreq.c | 123
-rw-r--r--  drivers/cpufreq/blackfin-cpufreq.c | 247
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 32
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c | 11
-rw-r--r--  drivers/cpufreq/cpufreq.c | 145
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 244
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 291
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 128
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 363
-rw-r--r--  drivers/cpufreq/cris-artpec3-cpufreq.c | 146
-rw-r--r--  drivers/cpufreq/cris-etraxfs-cpufreq.c | 142
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c | 231
-rw-r--r--  drivers/cpufreq/dbx500-cpufreq.c | 22
-rw-r--r--  drivers/cpufreq/e_powersaver.c | 11
-rw-r--r--  drivers/cpufreq/elanfreq.c | 10
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c | 9
-rw-r--r--  drivers/cpufreq/exynos5440-cpufreq.c | 481
-rw-r--r--  drivers/cpufreq/gx-suspmod.c | 11
-rw-r--r--  drivers/cpufreq/ia64-acpi-cpufreq.c | 438
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 12
-rw-r--r--  drivers/cpufreq/integrator-cpufreq.c | 220
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 21
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 18
-rw-r--r--  drivers/cpufreq/longhaul.c | 18
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 248
-rw-r--r--  drivers/cpufreq/maple-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 34
-rw-r--r--  drivers/cpufreq/p4-clockmod.c | 13
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/powernow-k6.c | 12
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 10
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 19
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c | 209
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.h | 24
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c | 115
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 156
-rw-r--r--  drivers/cpufreq/pxa2xx-cpufreq.c | 492
-rw-r--r--  drivers/cpufreq/pxa3xx-cpufreq.c | 254
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/sa1100-cpufreq.c | 247
-rw-r--r--  drivers/cpufreq/sa1110-cpufreq.c | 406
-rw-r--r--  drivers/cpufreq/sc520_freq.c | 10
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c | 189
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c | 408
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c | 269
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c | 28
-rw-r--r--  drivers/cpufreq/speedstep-ich.c | 12
-rw-r--r--  drivers/cpufreq/speedstep-smi.c | 5
-rw-r--r--  drivers/cpufreq/tegra-cpufreq.c | 292
-rw-r--r--  drivers/cpufreq/unicore2-cpufreq.c | 92
-rw-r--r--  drivers/cpuidle/Kconfig | 6
-rw-r--r--  drivers/cpuidle/Makefile | 2
-rw-r--r--  drivers/cpuidle/cpuidle-calxeda.c | 57
-rw-r--r--  drivers/cpuidle/cpuidle-kirkwood.c | 29
-rw-r--r--  drivers/cpuidle/cpuidle.c | 153
-rw-r--r--  drivers/cpuidle/driver.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 4
-rw-r--r--  drivers/idle/intel_idle.c | 4
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 3
-rw-r--r--  drivers/pnp/isapnp/core.c | 11
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c | 1
-rw-r--r--  drivers/pnp/pnpbios/proc.c | 5
150 files changed, 10754 insertions, 2810 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4bf68c8d4797..100bd724f648 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -298,14 +298,6 @@ config ACPI_DEBUG
Documentation/kernel-parameters.txt to control the type and
amount of debug output.
-config ACPI_DEBUG_FUNC_TRACE
- bool "Additionally enable ACPI function tracing"
- default n
- depends on ACPI_DEBUG
- help
- ACPI Debug Statements slow down ACPI processing. Function trace
- is about half of the penalty and is rarely useful.
-
config ACPI_PCI_SLOT
bool "PCI slot detection driver"
depends on SYSFS
@@ -334,7 +326,7 @@ config X86_PM_TIMER
config ACPI_CONTAINER
bool "Container and Module Devices"
- default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU || ACPI_HOTPLUG_IO)
+ default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
help
This driver supports ACPI Container and Module devices (IDs
ACPI0004, PNP0A05, and PNP0A06).
@@ -345,9 +337,8 @@ config ACPI_CONTAINER
the module will be called container.
config ACPI_HOTPLUG_MEMORY
- tristate "Memory Hotplug"
+ bool "Memory Hotplug"
depends on MEMORY_HOTPLUG
- default n
help
This driver supports ACPI memory hotplug. The driver
fields notifications on ACPI memory devices (PNP0C80),
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 474fcfeba66c..ecb743bf05a5 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -39,6 +39,7 @@ acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o
acpi-y += csrt.o
+acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o
acpi-y += acpi_platform.o
acpi-y += power.o
acpi-y += event.o
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
new file mode 100644
index 000000000000..b1c95422ce74
--- /dev/null
+++ b/drivers/acpi/acpi_lpss.c
@@ -0,0 +1,292 @@
+/*
+ * ACPI support for Intel Lynxpoint LPSS.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/clk-lpss.h>
+#include <linux/pm_runtime.h>
+
+#include "internal.h"
+
+ACPI_MODULE_NAME("acpi_lpss");
+
+#define LPSS_CLK_SIZE 0x04
+#define LPSS_LTR_SIZE 0x18
+
+/* Offsets relative to LPSS_PRIVATE_OFFSET */
+#define LPSS_GENERAL 0x08
+#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
+#define LPSS_SW_LTR 0x10
+#define LPSS_AUTO_LTR 0x14
+
+struct lpss_device_desc {
+ bool clk_required;
+ const char *clk_parent;
+ bool ltr_required;
+ unsigned int prv_offset;
+};
+
+struct lpss_private_data {
+ void __iomem *mmio_base;
+ resource_size_t mmio_size;
+ struct clk *clk;
+ const struct lpss_device_desc *dev_desc;
+};
+
+static struct lpss_device_desc lpt_dev_desc = {
+ .clk_required = true,
+ .clk_parent = "lpss_clk",
+ .prv_offset = 0x800,
+ .ltr_required = true,
+};
+
+static struct lpss_device_desc lpt_sdio_dev_desc = {
+ .prv_offset = 0x1000,
+ .ltr_required = true,
+};
+
+static const struct acpi_device_id acpi_lpss_device_ids[] = {
+ /* Lynxpoint LPSS devices */
+ { "INT33C0", (unsigned long)&lpt_dev_desc },
+ { "INT33C1", (unsigned long)&lpt_dev_desc },
+ { "INT33C2", (unsigned long)&lpt_dev_desc },
+ { "INT33C3", (unsigned long)&lpt_dev_desc },
+ { "INT33C4", (unsigned long)&lpt_dev_desc },
+ { "INT33C5", (unsigned long)&lpt_dev_desc },
+ { "INT33C6", (unsigned long)&lpt_sdio_dev_desc },
+ { "INT33C7", },
+
+ { }
+};
+
+static int is_memory(struct acpi_resource *res, void *not_used)
+{
+ struct resource r;
+ return !acpi_dev_resource_memory(res, &r);
+}
+
+/* LPSS main clock device. */
+static struct platform_device *lpss_clk_dev;
+
+static inline void lpt_register_clock_device(void)
+{
+ lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
+}
+
+static int register_device_clock(struct acpi_device *adev,
+ struct lpss_private_data *pdata)
+{
+ const struct lpss_device_desc *dev_desc = pdata->dev_desc;
+
+ if (!lpss_clk_dev)
+ lpt_register_clock_device();
+
+ if (!dev_desc->clk_parent || !pdata->mmio_base
+ || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
+ return -ENODATA;
+
+ pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev),
+ dev_desc->clk_parent, 0,
+ pdata->mmio_base + dev_desc->prv_offset,
+ 0, 0, NULL);
+ if (IS_ERR(pdata->clk))
+ return PTR_ERR(pdata->clk);
+
+ clk_register_clkdev(pdata->clk, NULL, dev_name(&adev->dev));
+ return 0;
+}
+
+static int acpi_lpss_create_device(struct acpi_device *adev,
+ const struct acpi_device_id *id)
+{
+ struct lpss_device_desc *dev_desc;
+ struct lpss_private_data *pdata;
+ struct resource_list_entry *rentry;
+ struct list_head resource_list;
+ int ret;
+
+ dev_desc = (struct lpss_device_desc *)id->driver_data;
+ if (!dev_desc)
+ return acpi_create_platform_device(adev, id);
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
+ if (ret < 0)
+ goto err_out;
+
+ list_for_each_entry(rentry, &resource_list, node)
+ if (resource_type(&rentry->res) == IORESOURCE_MEM) {
+ pdata->mmio_size = resource_size(&rentry->res);
+ pdata->mmio_base = ioremap(rentry->res.start,
+ pdata->mmio_size);
+ pdata->dev_desc = dev_desc;
+ break;
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (dev_desc->clk_required) {
+ ret = register_device_clock(adev, pdata);
+ if (ret) {
+ /*
+ * Skip the device, but don't terminate the namespace
+ * scan.
+ */
+ kfree(pdata);
+ return 0;
+ }
+ }
+
+ adev->driver_data = pdata;
+ ret = acpi_create_platform_device(adev, id);
+ if (ret > 0)
+ return ret;
+
+ adev->driver_data = NULL;
+
+ err_out:
+ kfree(pdata);
+ return ret;
+}
+
+static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
+{
+ struct acpi_device *adev;
+ struct lpss_private_data *pdata;
+ unsigned long flags;
+ int ret;
+
+ ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
+ if (WARN_ON(ret))
+ return ret;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ if (pm_runtime_suspended(dev)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ pdata = acpi_driver_data(adev);
+ if (WARN_ON(!pdata || !pdata->mmio_base)) {
+ ret = -ENODEV;
+ goto out;
+ }
+ *val = readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return ret;
+}
+
+static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 ltr_value = 0;
+ unsigned int reg;
+ int ret;
+
+ reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
+ ret = lpss_reg_read(dev, reg, &ltr_value);
+ if (ret)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
+}
+
+static ssize_t lpss_ltr_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 ltr_mode = 0;
+ char *outstr;
+ int ret;
+
+ ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
+ if (ret)
+ return ret;
+
+ outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
+ return sprintf(buf, "%s\n", outstr);
+}
+
+static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
+static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
+static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);
+
+static struct attribute *lpss_attrs[] = {
+ &dev_attr_auto_ltr.attr,
+ &dev_attr_sw_ltr.attr,
+ &dev_attr_ltr_mode.attr,
+ NULL,
+};
+
+static struct attribute_group lpss_attr_group = {
+ .attrs = lpss_attrs,
+ .name = "lpss_ltr",
+};
+
+static int acpi_lpss_platform_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct platform_device *pdev = to_platform_device(data);
+ struct lpss_private_data *pdata;
+ struct acpi_device *adev;
+ const struct acpi_device_id *id;
+ int ret = 0;
+
+ id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
+ if (!id || !id->driver_data)
+ return 0;
+
+ if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
+ return 0;
+
+ pdata = acpi_driver_data(adev);
+ if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
+ return 0;
+
+ if (pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
+ dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
+ return 0;
+ }
+
+ if (action == BUS_NOTIFY_ADD_DEVICE)
+ ret = sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group);
+ else if (action == BUS_NOTIFY_DEL_DEVICE)
+ sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
+
+ return ret;
+}
+
+static struct notifier_block acpi_lpss_nb = {
+ .notifier_call = acpi_lpss_platform_notify,
+};
+
+static struct acpi_scan_handler lpss_handler = {
+ .ids = acpi_lpss_device_ids,
+ .attach = acpi_lpss_create_device,
+};
+
+void __init acpi_lpss_init(void)
+{
+ if (!lpt_clk_init()) {
+ bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
+ acpi_scan_add_handler(&lpss_handler);
+ }
+}
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index da1f82b445e0..5e6301e94920 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -1,5 +1,7 @@
/*
- * Copyright (C) 2004 Intel Corporation <naveen.b.s@intel.com>
+ * Copyright (C) 2004, 2013 Intel Corporation
+ * Author: Naveen B S <naveen.b.s@intel.com>
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* All rights reserved.
*
@@ -25,14 +27,10 @@
* ranges.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/memory_hotplug.h>
-#include <linux/slab.h>
#include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/memory_hotplug.h>
+
+#include "internal.h"
#define ACPI_MEMORY_DEVICE_CLASS "memory"
#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
@@ -44,32 +42,28 @@
#define PREFIX "ACPI:memory_hp:"
ACPI_MODULE_NAME("acpi_memhotplug");
-MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>");
-MODULE_DESCRIPTION("Hotplug Mem Driver");
-MODULE_LICENSE("GPL");
/* Memory Device States */
#define MEMORY_INVALID_STATE 0
#define MEMORY_POWER_ON_STATE 1
#define MEMORY_POWER_OFF_STATE 2
-static int acpi_memory_device_add(struct acpi_device *device);
-static int acpi_memory_device_remove(struct acpi_device *device);
+static int acpi_memory_device_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used);
+static void acpi_memory_device_remove(struct acpi_device *device);
static const struct acpi_device_id memory_device_ids[] = {
{ACPI_MEMORY_DEVICE_HID, 0},
{"", 0},
};
-MODULE_DEVICE_TABLE(acpi, memory_device_ids);
-static struct acpi_driver acpi_memory_device_driver = {
- .name = "acpi_memhotplug",
- .class = ACPI_MEMORY_DEVICE_CLASS,
+static struct acpi_scan_handler memory_device_handler = {
.ids = memory_device_ids,
- .ops = {
- .add = acpi_memory_device_add,
- .remove = acpi_memory_device_remove,
- },
+ .attach = acpi_memory_device_add,
+ .detach = acpi_memory_device_remove,
+ .hotplug = {
+ .enabled = true,
+ },
};
struct acpi_memory_info {
@@ -79,7 +73,6 @@ struct acpi_memory_info {
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
unsigned int enabled:1;
- unsigned int failed:1;
};
struct acpi_memory_device {
@@ -153,48 +146,6 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
return 0;
}
-static int acpi_memory_get_device(acpi_handle handle,
- struct acpi_memory_device **mem_device)
-{
- struct acpi_device *device = NULL;
- int result = 0;
-
- acpi_scan_lock_acquire();
-
- acpi_bus_get_device(handle, &device);
- if (device)
- goto end;
-
- /*
- * Now add the notified device. This creates the acpi_device
- * and invokes .add function
- */
- result = acpi_bus_scan(handle);
- if (result) {
- acpi_handle_warn(handle, "ACPI namespace scan failed\n");
- result = -EINVAL;
- goto out;
- }
- result = acpi_bus_get_device(handle, &device);
- if (result) {
- acpi_handle_warn(handle, "Missing device object\n");
- result = -EINVAL;
- goto out;
- }
-
- end:
- *mem_device = acpi_driver_data(device);
- if (!(*mem_device)) {
- dev_err(&device->dev, "driver data not found\n");
- result = -ENODEV;
- goto out;
- }
-
- out:
- acpi_scan_lock_release();
- return result;
-}
-
static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
{
unsigned long long current_status;
@@ -249,13 +200,11 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
* returns -EEXIST. If add_memory() returns the other error, it
* means that this memory block is not used by the kernel.
*/
- if (result && result != -EEXIST) {
- info->failed = 1;
+ if (result && result != -EEXIST)
continue;
- }
- if (!result)
- info->enabled = 1;
+ info->enabled = 1;
+
/*
* Add num_enable even if add_memory() returns -EEXIST, so the
* device is bound to this driver.
@@ -286,16 +235,8 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
nid = acpi_get_node(mem_device->device->handle);
list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
- if (info->failed)
- /* The kernel does not use this memory block */
- continue;
-
if (!info->enabled)
- /*
- * The kernel uses this memory block, but it may be not
- * managed by us.
- */
- return -EBUSY;
+ continue;
if (nid < 0)
nid = memory_add_physaddr_to_nid(info->start_addr);
@@ -310,95 +251,21 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
return result;
}
-static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
-{
- struct acpi_memory_device *mem_device;
- struct acpi_device *device;
- struct acpi_eject_event *ej_event = NULL;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
- acpi_status status;
-
- switch (event) {
- case ACPI_NOTIFY_BUS_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "\nReceived BUS CHECK notification for device\n"));
- /* Fall Through */
- case ACPI_NOTIFY_DEVICE_CHECK:
- if (event == ACPI_NOTIFY_DEVICE_CHECK)
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "\nReceived DEVICE CHECK notification for device\n"));
- if (acpi_memory_get_device(handle, &mem_device)) {
- acpi_handle_err(handle, "Cannot find driver data\n");
- break;
- }
-
- ost_code = ACPI_OST_SC_SUCCESS;
- break;
-
- case ACPI_NOTIFY_EJECT_REQUEST:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "\nReceived EJECT REQUEST notification for device\n"));
-
- status = AE_ERROR;
- acpi_scan_lock_acquire();
-
- if (acpi_bus_get_device(handle, &device)) {
- acpi_handle_err(handle, "Device doesn't exist\n");
- goto unlock;
- }
- mem_device = acpi_driver_data(device);
- if (!mem_device) {
- acpi_handle_err(handle, "Driver Data is NULL\n");
- goto unlock;
- }
-
- ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
- if (!ej_event) {
- pr_err(PREFIX "No memory, dropping EJECT\n");
- goto unlock;
- }
-
- get_device(&device->dev);
- ej_event->device = device;
- ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
- /* The eject is carried out asynchronously. */
- status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
- ej_event);
- if (ACPI_FAILURE(status)) {
- put_device(&device->dev);
- kfree(ej_event);
- }
-
- unlock:
- acpi_scan_lock_release();
- if (ACPI_SUCCESS(status))
- return;
- default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
-
- /* non-hotplug event; possibly handled by other handler */
- return;
- }
-
- /* Inform firmware that the hotplug operation has completed */
- (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
-}
-
static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
{
if (!mem_device)
return;
acpi_memory_free_device_resources(mem_device);
+ mem_device->device->driver_data = NULL;
kfree(mem_device);
}
-static int acpi_memory_device_add(struct acpi_device *device)
+static int acpi_memory_device_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used)
{
+ struct acpi_memory_device *mem_device;
int result;
- struct acpi_memory_device *mem_device = NULL;
-
if (!device)
return -EINVAL;
@@ -423,147 +290,36 @@ static int acpi_memory_device_add(struct acpi_device *device)
/* Set the device state */
mem_device->state = MEMORY_POWER_ON_STATE;
- pr_debug("%s\n", acpi_device_name(device));
+ result = acpi_memory_check_device(mem_device);
+ if (result) {
+ acpi_memory_device_free(mem_device);
+ return 0;
+ }
- if (!acpi_memory_check_device(mem_device)) {
- /* call add_memory func */
- result = acpi_memory_enable_device(mem_device);
- if (result) {
- dev_err(&device->dev,
- "Error in acpi_memory_enable_device\n");
- acpi_memory_device_free(mem_device);
- }
+ result = acpi_memory_enable_device(mem_device);
+ if (result) {
+ dev_err(&device->dev, "acpi_memory_enable_device() error\n");
+ acpi_memory_device_free(mem_device);
+ return -ENODEV;
}
- return result;
+
+ dev_dbg(&device->dev, "Memory device configured by ACPI\n");
+ return 1;
}
-static int acpi_memory_device_remove(struct acpi_device *device)
+static void acpi_memory_device_remove(struct acpi_device *device)
{
- struct acpi_memory_device *mem_device = NULL;
- int result;
+ struct acpi_memory_device *mem_device;
if (!device || !acpi_driver_data(device))
- return -EINVAL;
+ return;
mem_device = acpi_driver_data(device);
-
- result = acpi_memory_remove_memory(mem_device);
- if (result)
- return result;
-
+ acpi_memory_remove_memory(mem_device);
acpi_memory_device_free(mem_device);
-
- return 0;
-}
-
-/*
- * Helper function to check for memory device
- */
-static acpi_status is_memory_device(acpi_handle handle)
-{
- char *hardware_id;
- acpi_status status;
- struct acpi_device_info *info;
-
- status = acpi_get_object_info(handle, &info);
- if (ACPI_FAILURE(status))
- return status;
-
- if (!(info->valid & ACPI_VALID_HID)) {
- kfree(info);
- return AE_ERROR;
- }
-
- hardware_id = info->hardware_id.string;
- if ((hardware_id == NULL) ||
- (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
- status = AE_ERROR;
-
- kfree(info);
- return status;
-}
-
-static acpi_status
-acpi_memory_register_notify_handler(acpi_handle handle,
- u32 level, void *ctxt, void **retv)
-{
- acpi_status status;
-
-
- status = is_memory_device(handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* continue */
-
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- acpi_memory_device_notify, NULL);
- /* continue */
- return AE_OK;
-}
-
-static acpi_status
-acpi_memory_deregister_notify_handler(acpi_handle handle,
- u32 level, void *ctxt, void **retv)
-{
- acpi_status status;
-
-
- status = is_memory_device(handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* continue */
-
- status = acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- acpi_memory_device_notify);
-
- return AE_OK; /* continue */
-}
-
-static int __init acpi_memory_device_init(void)
-{
- int result;
- acpi_status status;
-
-
- result = acpi_bus_register_driver(&acpi_memory_device_driver);
-
- if (result < 0)
- return -ENODEV;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- acpi_memory_register_notify_handler, NULL,
- NULL, NULL);
-
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));
- acpi_bus_unregister_driver(&acpi_memory_device_driver);
- return -ENODEV;
- }
-
- return 0;
}
-static void __exit acpi_memory_device_exit(void)
+void __init acpi_memory_hotplug_init(void)
{
- acpi_status status;
-
-
- /*
- * Adding this to un-install notification handlers for all the device
- * handles.
- */
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- acpi_memory_deregister_notify_handler, NULL,
- NULL, NULL);
-
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));
-
- acpi_bus_unregister_driver(&acpi_memory_device_driver);
-
- return;
+ acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory");
}
-
-module_init(acpi_memory_device_init);
-module_exit(acpi_memory_device_exit);
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 31de1043eea0..27bb6a91de5f 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -236,7 +236,7 @@ static int create_power_saving_task(void)
ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
(void *)(unsigned long)ps_tsk_num,
"acpi_pad/%d", ps_tsk_num);
- rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
+ rc = PTR_RET(ps_tsks[ps_tsk_num]);
if (!rc)
ps_tsk_num++;
else
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 26fce4b8a632..fafec5ddf17f 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -22,9 +22,6 @@
ACPI_MODULE_NAME("platform");
-/* Flags for acpi_create_platform_device */
-#define ACPI_PLATFORM_CLK BIT(0)
-
/*
* The following ACPI IDs are known to be suitable for representing as
* platform devices.
@@ -33,33 +30,9 @@ static const struct acpi_device_id acpi_platform_device_ids[] = {
{ "PNP0D40" },
- /* Haswell LPSS devices */
- { "INT33C0", ACPI_PLATFORM_CLK },
- { "INT33C1", ACPI_PLATFORM_CLK },
- { "INT33C2", ACPI_PLATFORM_CLK },
- { "INT33C3", ACPI_PLATFORM_CLK },
- { "INT33C4", ACPI_PLATFORM_CLK },
- { "INT33C5", ACPI_PLATFORM_CLK },
- { "INT33C6", ACPI_PLATFORM_CLK },
- { "INT33C7", ACPI_PLATFORM_CLK },
-
{ }
};
-static int acpi_create_platform_clks(struct acpi_device *adev)
-{
- static struct platform_device *pdev;
-
- /* Create Lynxpoint LPSS clocks */
- if (!pdev && !strncmp(acpi_device_hid(adev), "INT33C", 6)) {
- pdev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
- }
-
- return 0;
-}
-
/**
* acpi_create_platform_device - Create platform device for ACPI device node
* @adev: ACPI device node to create a platform device for.
@@ -71,10 +44,9 @@ static int acpi_create_platform_clks(struct acpi_device *adev)
*
* Name of the platform device will be the same as @adev's.
*/
-static int acpi_create_platform_device(struct acpi_device *adev,
- const struct acpi_device_id *id)
+int acpi_create_platform_device(struct acpi_device *adev,
+ const struct acpi_device_id *id)
{
- unsigned long flags = id->driver_data;
struct platform_device *pdev = NULL;
struct acpi_device *acpi_parent;
struct platform_device_info pdevinfo;
@@ -83,14 +55,6 @@ static int acpi_create_platform_device(struct acpi_device *adev,
struct resource *resources;
int count;
- if (flags & ACPI_PLATFORM_CLK) {
- int ret = acpi_create_platform_clks(adev);
- if (ret) {
- dev_err(&adev->dev, "failed to create clocks\n");
- return ret;
- }
- }
-
/* If the ACPI node already has a physical device attached, skip it. */
if (adev->physical_node_count)
return 0;
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a1b9bf5085a2..7ddf29eca9f5 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -83,6 +83,7 @@ acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
acpi-y += \
nsaccess.o \
nsalloc.o \
+ nsconvert.o \
nsdump.o \
nseval.o \
nsinit.o \
@@ -160,6 +161,7 @@ acpi-y += \
utobject.o \
utosi.o \
utownerid.o \
+ utpredef.o \
utresrc.o \
utstate.o \
utstring.o \
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ecb49927b817..07160928ca25 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -224,6 +224,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
*/
ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
/* Mutex for _OSI support */
@@ -413,10 +414,12 @@ ACPI_EXTERN u8 acpi_gbl_db_output_flags;
#ifdef ACPI_DISASSEMBLER
-u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
+ACPI_EXTERN u8 acpi_gbl_num_external_methods;
+ACPI_EXTERN u32 acpi_gbl_resolved_external_methods;
ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
#endif
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 805f419086ab..d5bfbd331bfd 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -294,6 +294,8 @@ acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state);
#define ACPI_BTYPE_OBJECTS_AND_REFS 0x0001FFFF /* ARG or LOCAL */
#define ACPI_BTYPE_ALL_OBJECTS 0x0000FFFF
+#pragma pack(1)
+
/*
* Information structure for ACPI predefined names.
* Each entry in the table contains the following items:
@@ -304,7 +306,7 @@ acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state);
*/
struct acpi_name_info {
char name[ACPI_NAME_SIZE];
- u8 param_count;
+ u16 argument_list;
u8 expected_btypes;
};
@@ -327,7 +329,7 @@ struct acpi_package_info {
u8 count1;
u8 object_type2;
u8 count2;
- u8 reserved;
+ u16 reserved;
};
/* Used for ACPI_PTYPE2_FIXED */
@@ -336,6 +338,7 @@ struct acpi_package_info2 {
u8 type;
u8 count;
u8 object_type[4];
+ u8 reserved;
};
/* Used for ACPI_PTYPE1_OPTION */
@@ -345,7 +348,7 @@ struct acpi_package_info3 {
u8 count;
u8 object_type[2];
u8 tail_object_type;
- u8 reserved;
+ u16 reserved;
};
union acpi_predefined_info {
@@ -355,6 +358,10 @@ union acpi_predefined_info {
struct acpi_package_info3 ret_info3;
};
+/* Reset to default packing */
+
+#pragma pack()
+
/* Data block used during object validation */
struct acpi_predefined_data {
@@ -363,6 +370,7 @@ struct acpi_predefined_data {
union acpi_operand_object *parent_package;
struct acpi_namespace_node *node;
u32 flags;
+ u32 return_btype;
u8 node_flags;
};
@@ -371,6 +379,20 @@ struct acpi_predefined_data {
#define ACPI_OBJECT_REPAIRED 1
#define ACPI_OBJECT_WRAPPED 2
+/* Return object auto-repair info */
+
+typedef acpi_status(*acpi_object_converter) (union acpi_operand_object
+ *original_object,
+ union acpi_operand_object
+ **converted_object);
+
+struct acpi_simple_repair_info {
+ char name[ACPI_NAME_SIZE];
+ u32 unexpected_btypes;
+ u32 package_index;
+ acpi_object_converter object_converter;
+};
+
/*
* Bitmapped return value types
* Note: the actual data types must be contiguous, a loop in nspredef.c
@@ -1037,6 +1059,7 @@ struct acpi_external_list {
u16 length;
u8 type;
u8 flags;
+ u8 resolved;
};
/* Values for Flags field above */
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index ed7943b9044f..53666bd9193d 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -322,10 +322,12 @@
* where a pointer to an object of type union acpi_operand_object can also
* appear. This macro is used to distinguish them.
*
- * The "Descriptor" field is the first field in both structures.
+ * The "DescriptorType" field is the second field in both structures.
*/
+#define ACPI_GET_DESCRIPTOR_PTR(d) (((union acpi_descriptor *)(void *)(d))->common.common_pointer)
+#define ACPI_SET_DESCRIPTOR_PTR(d, p) (((union acpi_descriptor *)(void *)(d))->common.common_pointer = (p))
#define ACPI_GET_DESCRIPTOR_TYPE(d) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type)
-#define ACPI_SET_DESCRIPTOR_TYPE(d, t) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = t)
+#define ACPI_SET_DESCRIPTOR_TYPE(d, t) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = (t))
/*
* Macros for the master AML opcode table
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 02cd5482ff8b..d2e491876bc0 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -167,6 +167,29 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent);
int acpi_ns_compare_names(char *name1, char *name2);
/*
+ * nsconvert - Dynamic object conversion routines
+ */
+acpi_status
+acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_convert_to_string(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_convert_to_unicode(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_convert_to_resource(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+/*
* nsdump - Namespace dump/print utilities
*/
#ifdef ACPI_FUTURE_USAGE
@@ -208,10 +231,6 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
acpi_status return_status,
union acpi_operand_object **return_object);
-const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
- acpi_namespace_node
- *node);
-
void
acpi_ns_check_parameter_count(char *pathname,
struct acpi_namespace_node *node,
@@ -289,7 +308,7 @@ acpi_ns_get_attached_data(struct acpi_namespace_node *node,
* predefined methods/objects
*/
acpi_status
-acpi_ns_repair_object(struct acpi_predefined_data *data,
+acpi_ns_simple_repair(struct acpi_predefined_data *data,
u32 expected_btypes,
u32 package_index,
union acpi_operand_object **return_object_ptr);
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 752cc40cdc1e..b22b70944fd6 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -56,7 +56,7 @@
* object type
* count
*
- * ACPI_PTYPE1_VAR: Variable-length length:
+ * ACPI_PTYPE1_VAR: Variable-length length. Zero-length package is allowed:
* object type (Int/Buf/Ref)
*
* ACPI_PTYPE1_OPTION: Package has some required and some optional elements
@@ -66,14 +66,16 @@
* 2) PTYPE2 packages contain a Variable-length number of sub-packages. Each
* of the different types describe the contents of each of the sub-packages.
*
- * ACPI_PTYPE2: Each subpackage contains 1 or 2 object types:
+ * ACPI_PTYPE2: Each subpackage contains 1 or 2 object types. Zero-length
+ * parent package is allowed:
* object type
* count
* object type
* count
* (Used for _ALR,_MLS,_PSS,_TRT,_TSS)
*
- * ACPI_PTYPE2_COUNT: Each subpackage has a count as first element:
+ * ACPI_PTYPE2_COUNT: Each subpackage has a count as first element.
+ * Zero-length parent package is allowed:
* object type
* (Used for _CSD,_PSD,_TSD)
*
@@ -84,17 +86,19 @@
* count
* (Used for _CST)
*
- * ACPI_PTYPE2_FIXED: Each subpackage is of Fixed-length
+ * ACPI_PTYPE2_FIXED: Each subpackage is of Fixed-length. Zero-length
+ * parent package is allowed.
* (Used for _PRT)
*
- * ACPI_PTYPE2_MIN: Each subpackage has a Variable-length but minimum length
+ * ACPI_PTYPE2_MIN: Each subpackage has a Variable-length but minimum length.
+ * Zero-length parent package is allowed:
* (Used for _HPX)
*
* ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
* (Used for _ART, _FPS)
*
* ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
- * followed by an optional element
+ * followed by an optional element. Zero-length parent package is allowed.
* object type
* count
* object type
@@ -116,8 +120,47 @@ enum acpi_return_package_types {
ACPI_PTYPE2_FIX_VAR = 10
};
+/* Support macros for users of the predefined info table */
+
+#define METHOD_PREDEF_ARGS_MAX 4
+#define METHOD_ARG_BIT_WIDTH 3
+#define METHOD_ARG_MASK 0x0007
+#define ARG_COUNT_IS_MINIMUM 0x8000
+#define METHOD_MAX_ARG_TYPE ACPI_TYPE_PACKAGE
+
+#define METHOD_GET_COUNT(arg_list) (arg_list & METHOD_ARG_MASK)
+#define METHOD_GET_NEXT_ARG(arg_list) (arg_list >> METHOD_ARG_BIT_WIDTH)
+
+/* Macros used to build the predefined info table */
+
+#define METHOD_0ARGS 0
+#define METHOD_1ARGS(a1) (1 | (a1 << 3))
+#define METHOD_2ARGS(a1,a2) (2 | (a1 << 3) | (a2 << 6))
+#define METHOD_3ARGS(a1,a2,a3) (3 | (a1 << 3) | (a2 << 6) | (a3 << 9))
+#define METHOD_4ARGS(a1,a2,a3,a4) (4 | (a1 << 3) | (a2 << 6) | (a3 << 9) | (a4 << 12))
+
+#define METHOD_RETURNS(type) (type)
+#define METHOD_NO_RETURN_VALUE 0
+
+#define PACKAGE_INFO(a,b,c,d,e,f) {{{(a),(b),(c),(d)}, ((((u16)(f)) << 8) | (e)), 0}}
+
+/* Support macros for the resource descriptor info table */
+
+#define WIDTH_1 0x0001
+#define WIDTH_2 0x0002
+#define WIDTH_3 0x0004
+#define WIDTH_8 0x0008
+#define WIDTH_16 0x0010
+#define WIDTH_32 0x0020
+#define WIDTH_64 0x0040
+#define VARIABLE_DATA 0x0080
+#define NUM_RESOURCE_WIDTHS 8
+
+#define WIDTH_ADDRESS WIDTH_16 | WIDTH_32 | WIDTH_64
+
#ifdef ACPI_CREATE_PREDEFINED_TABLE
-/*
+/******************************************************************************
+ *
* Predefined method/object information table.
*
* These are the names that can actually be evaluated via acpi_evaluate_object.
@@ -125,23 +168,24 @@ enum acpi_return_package_types {
*
* 1) Predefined/Reserved names that are never evaluated via
* acpi_evaluate_object:
- * _Lxx and _Exx GPE methods
- * _Qxx EC methods
- * _T_x compiler temporary variables
+ * _Lxx and _Exx GPE methods
+ * _Qxx EC methods
+ * _T_x compiler temporary variables
+ * _Wxx wake events
*
* 2) Predefined names that never actually exist within the AML code:
- * Predefined resource descriptor field names
+ * Predefined resource descriptor field names
*
* 3) Predefined names that are implemented within ACPICA:
- * _OSI
- *
- * 4) Some predefined names that are not documented within the ACPI spec.
- * _WDG, _WED
+ * _OSI
*
* The main entries in the table each contain the following items:
*
* name - The ACPI reserved name
- * param_count - Number of arguments to the method
+ * argument_list - Contains (in 16 bits), the number of required
+ * arguments to the method (3 bits), and a 3-bit type
+ * field for each argument (up to 4 arguments). The
+ * METHOD_?ARGS macros generate the correct packed data.
* expected_btypes - Allowed type(s) for the return value.
* 0 means that no return value is expected.
*
@@ -151,256 +195,511 @@ enum acpi_return_package_types {
* overall size of the stored data.
*
* Note: The additional braces are intended to promote portability.
- */
-static const union acpi_predefined_info predefined_names[] = {
- {{"_AC0", 0, ACPI_RTYPE_INTEGER}},
- {{"_AC1", 0, ACPI_RTYPE_INTEGER}},
- {{"_AC2", 0, ACPI_RTYPE_INTEGER}},
- {{"_AC3", 0, ACPI_RTYPE_INTEGER}},
- {{"_AC4", 0, ACPI_RTYPE_INTEGER}},
- {{"_AC5", 0, ACPI_RTYPE_INTEGER}},
- {{"_AC6", 0, ACPI_RTYPE_INTEGER}},
- {{"_AC7", 0, ACPI_RTYPE_INTEGER}},
- {{"_AC8", 0, ACPI_RTYPE_INTEGER}},
- {{"_AC9", 0, ACPI_RTYPE_INTEGER}},
- {{"_ADR", 0, ACPI_RTYPE_INTEGER}},
- {{"_AEI", 0, ACPI_RTYPE_BUFFER}},
- {{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_AL1", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_AL2", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_AL3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_AL4", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_AL5", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_AL6", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_AL7", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_AL8", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_AL9", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_ALC", 0, ACPI_RTYPE_INTEGER}},
- {{"_ALI", 0, ACPI_RTYPE_INTEGER}},
- {{"_ALP", 0, ACPI_RTYPE_INTEGER}},
- {{"_ALR", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2 (Ints) */
- {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 2,0}, 0,0}},
-
- {{"_ALT", 0, ACPI_RTYPE_INTEGER}},
- {{"_ART", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(rev), n Pkg (2 Ref/11 Int) */
- {{{ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER},
- 11, 0}},
-
- {{"_BBN", 0, ACPI_RTYPE_INTEGER}},
- {{"_BCL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
-
- {{"_BCM", 1, 0}},
- {{"_BCT", 1, ACPI_RTYPE_INTEGER}},
- {{"_BDN", 0, ACPI_RTYPE_INTEGER}},
- {{"_BFS", 1, 0}},
- {{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4, 0}},
-
- {{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,
- 0}},
-
- {{"_BLT", 3, 0}},
- {{"_BMA", 1, ACPI_RTYPE_INTEGER}},
- {{"_BMC", 1, 0}},
- {{"_BMD", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (5 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
-
- {{"_BMS", 1, ACPI_RTYPE_INTEGER}},
- {{"_BQC", 0, ACPI_RTYPE_INTEGER}},
- {{"_BST", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}},
-
- {{"_BTM", 1, ACPI_RTYPE_INTEGER}},
- {{"_BTP", 1, 0}},
- {{"_CBA", 0, ACPI_RTYPE_INTEGER}}, /* See PCI firmware spec 3.0 */
- {{"_CDM", 0, ACPI_RTYPE_INTEGER}},
- {{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0, 0}, 0,
- 0}},
-
- {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
-
- {{"_CPC", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Bufs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
- 0}},
-
- {{"_CRS", 0, ACPI_RTYPE_BUFFER}},
- {{"_CRT", 0, ACPI_RTYPE_INTEGER}},
- {{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
- {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
-
- {{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
- {{{ACPI_PTYPE2_PKG_COUNT, ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,
- 0}},
-
- {{"_CWS", 1, ACPI_RTYPE_INTEGER}},
- {{"_DCK", 1, ACPI_RTYPE_INTEGER}},
- {{"_DCS", 0, ACPI_RTYPE_INTEGER}},
- {{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
- {{"_DDN", 0, ACPI_RTYPE_STRING}},
- {{"_DEP", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
-
- {{"_DGS", 0, ACPI_RTYPE_INTEGER}},
- {{"_DIS", 0, 0}},
-
- {{"_DLM", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
- {{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
- ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
-
- {{"_DMA", 0, ACPI_RTYPE_BUFFER}},
- {{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
-
- {{"_DOS", 1, 0}},
- {{"_DSM", 4, ACPI_RTYPE_ALL}}, /* Must return a type, but it can be of any type */
- {{"_DSS", 1, 0}},
- {{"_DSW", 3, 0}},
- {{"_DTI", 1, 0}},
- {{"_EC_", 0, ACPI_RTYPE_INTEGER}},
- {{"_EDL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs)*/
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_EJ0", 1, 0}},
- {{"_EJ1", 1, 0}},
- {{"_EJ2", 1, 0}},
- {{"_EJ3", 1, 0}},
- {{"_EJ4", 1, 0}},
- {{"_EJD", 0, ACPI_RTYPE_STRING}},
- {{"_EVT", 1, 0}},
- {{"_FDE", 0, ACPI_RTYPE_BUFFER}},
- {{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
-
- {{"_FDM", 1, 0}},
- {{"_FIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0}, 0, 0}},
-
- {{"_FIX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
-
- {{"_FPS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(rev), n Pkg (5 Int) */
- {{{ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 5, 0}, 0, 0}},
-
- {{"_FSL", 1, 0}},
- {{"_FST", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
-
- {{"_GAI", 0, ACPI_RTYPE_INTEGER}},
- {{"_GCP", 0, ACPI_RTYPE_INTEGER}},
- {{"_GHL", 0, ACPI_RTYPE_INTEGER}},
- {{"_GLK", 0, ACPI_RTYPE_INTEGER}},
- {{"_GPD", 0, ACPI_RTYPE_INTEGER}},
- {{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
- {{"_GRT", 0, ACPI_RTYPE_BUFFER}},
- {{"_GSB", 0, ACPI_RTYPE_INTEGER}},
- {{"_GTF", 0, ACPI_RTYPE_BUFFER}},
- {{"_GTM", 0, ACPI_RTYPE_BUFFER}},
- {{"_GTS", 1, 0}},
- {{"_GWS", 1, ACPI_RTYPE_INTEGER}},
- {{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
- {{"_HOT", 0, ACPI_RTYPE_INTEGER}},
- {{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}},
+ *
+ * Note2: Table is used by the kernel-resident subsystem, the iASL compiler,
+ * and the acpi_help utility.
+ *
+ * TBD: _PRT - currently ignore reversed entries. Attempt to fix in nsrepair.
+ * Possibly fixing package elements like _BIF, etc.
+ *
+ *****************************************************************************/
+
+const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
+ {{"_AC0", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AC1", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AC2", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AC3", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AC4", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AC5", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AC6", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AC7", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AC8", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AC9", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_ADR", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_AEI", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_AL0", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_AL1", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_AL2", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_AL3", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_AL4", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_AL5", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_AL6", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_AL7", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_AL8", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_AL9", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_ALC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_ALI", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_ALP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_ALR", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 2 (Ints) */
+ PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 2, 0, 0, 0),
+
+ {{"_ALT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_ART", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(rev), n Pkg (2 Ref/11 Int) */
+ PACKAGE_INFO(ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_REFERENCE, 2,
+ ACPI_RTYPE_INTEGER, 11, 0),
+
+ {{"_BBN", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_BCL", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
+
+ {{"_BCM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_BCT", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_BDN", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_BFS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_BIF", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (9 Int),(4 Str) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9,
+ ACPI_RTYPE_STRING, 4, 0),
+
+ {{"_BIX", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (16 Int),(4 Str) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,
+ ACPI_RTYPE_STRING, 4, 0),
+
+ {{"_BLT",
+ METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_BMA", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_BMC", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_BMD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (5 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+ {{"_BMS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_BQC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_BST", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+
+ {{"_BTM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_BTP", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_CBA", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See PCI firmware spec 3.0 */
+
+ {{"_CDM", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_CID", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints/Strs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,
+ 0, 0, 0),
+
+ {{"_CLS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (3 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0, 0, 0),
+
+ {{"_CPC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints/Bufs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0,
+ 0, 0, 0),
+
+ {{"_CRS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_CRT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_CSD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(n), n-1 Int) */
+ PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
+
+ {{"_CST", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
+ PACKAGE_INFO(ACPI_PTYPE2_PKG_COUNT, ACPI_RTYPE_BUFFER, 1,
+ ACPI_RTYPE_INTEGER, 3, 0),
+
+ {{"_CWS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_DCK", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_DCS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_DDC", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER)}},
+
+ {{"_DDN", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_STRING)}},
+
+ {{"_DEP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_DGS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_DIS", METHOD_0ARGS,
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_DLM", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
+ PACKAGE_INFO(ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
+ ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER, 0, 0),
+
+ {{"_DMA", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_DOD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
+
+ {{"_DOS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_DSM",
+ METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
+ ACPI_TYPE_PACKAGE),
+ METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
+
+ {{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_DSW",
+ METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_DTI", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_EC_", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_EDL", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_EJ0", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_EJ1", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_EJ2", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_EJ3", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_EJ4", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_EJD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_STRING)}},
+
+ {{"_ERR",
+ METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_STRING, ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* Internal use only, used by ACPICA test suites */
+
+ {{"_EVT", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_FDE", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_FDI", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (16 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, 0, 0, 0),
+
+ {{"_FDM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_FIF", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+
+ {{"_FIX", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
+
+ {{"_FPS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(rev), n Pkg (5 Int)) */
+ PACKAGE_INFO(ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+ {{"_FSL", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_FST", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (3 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0, 0, 0),
+
+ {{"_GAI", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_GCP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_GHL", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_GLK", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_GPD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_GPE", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* _GPE method, not _GPE scope */
+
+ {{"_GRT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_GSB", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_GTF", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_GTM", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_GTS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_GWS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_HID", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING)}},
+
+ {{"_HOT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_HPP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
/*
- * For _HPX, a single package is returned, containing a Variable-length number
+ * For _HPX, a single package is returned, containing a variable-length number
* of sub-packages. Each sub-package contains a PCI record setting.
 * There are several different types of record settings, of different
* lengths, but all elements of all settings are Integers.
*/
- {{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
- {{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
-
- {{"_HRV", 0, ACPI_RTYPE_INTEGER}},
- {{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
- {{"_INI", 0, 0}},
- {{"_IRC", 0, 0}},
- {{"_LCK", 1, 0}},
- {{"_LID", 0, ACPI_RTYPE_INTEGER}},
- {{"_MAT", 0, ACPI_RTYPE_BUFFER}},
- {{"_MBM", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (8 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0}, 0, 0}},
-
- {{"_MLS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Str/1 Buf) */
- {{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, ACPI_RTYPE_BUFFER}, 1, 0}},
-
- {{"_MSG", 1, 0}},
- {{"_MSM", 4, ACPI_RTYPE_INTEGER}},
- {{"_NTT", 0, ACPI_RTYPE_INTEGER}},
- {{"_OFF", 0, 0}},
- {{"_ON_", 0, 0}},
- {{"_OS_", 0, ACPI_RTYPE_STRING}},
- {{"_OSC", 4, ACPI_RTYPE_BUFFER}},
- {{"_OST", 3, 0}},
- {{"_PAI", 1, ACPI_RTYPE_INTEGER}},
- {{"_PCL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_PCT", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Buf) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2,0}, 0,0}},
-
- {{"_PDC", 1, 0}},
- {{"_PDL", 0, ACPI_RTYPE_INTEGER}},
- {{"_PIC", 1, 0}},
- {{"_PIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int),(3 Str) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, ACPI_RTYPE_STRING}, 3, 0}},
-
- {{"_PLD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Bufs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_BUFFER, 0,0}, 0,0}},
-
- {{"_PMC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (11 Int),(3 Str) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 11, ACPI_RTYPE_STRING}, 3,
- 0}},
-
- {{"_PMD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
-
- {{"_PMM", 0, ACPI_RTYPE_INTEGER}},
- {{"_PPC", 0, ACPI_RTYPE_INTEGER}},
- {{"_PPE", 0, ACPI_RTYPE_INTEGER}}, /* See dig64 spec */
- {{"_PR0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_PR1", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_PR2", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
-
- {{"_PRE", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
-
- {{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
-
- {{"_PRS", 0, ACPI_RTYPE_BUFFER}},
+ {{"_HPX", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (var Ints) */
+ PACKAGE_INFO(ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+ {{"_HRV", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_IFT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See IPMI spec */
+
+ {{"_INI", METHOD_0ARGS,
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_IRC", METHOD_0ARGS,
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_LCK", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_LID", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_MAT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_MBM", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (8 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0, 0, 0),
+
+ {{"_MLS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (1 Str/1 Buf) */
+ PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, ACPI_RTYPE_BUFFER, 1,
+ 0),
+
+ {{"_MSG", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_MSM",
+ METHOD_4ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
+ ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_NTT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_OFF", METHOD_0ARGS,
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_ON_", METHOD_0ARGS,
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_OS_", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_STRING)}},
+
+ {{"_OSC",
+ METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
+ ACPI_TYPE_BUFFER),
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_OST",
+ METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_BUFFER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PAI", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_PCL", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_PCT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Buf) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2, 0, 0, 0),
+
+ {{"_PDC", METHOD_1ARGS(ACPI_TYPE_BUFFER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PDL", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_PIC", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PIF", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (3 Int),(3 Str) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3,
+ ACPI_RTYPE_STRING, 3, 0),
+
+ {{"_PLD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Bufs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_BUFFER, 0, 0, 0, 0),
+
+ {{"_PMC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (11 Int),(3 Str) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 11,
+ ACPI_RTYPE_STRING, 3, 0),
+
+ {{"_PMD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_PMM", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_PPC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_PPE", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See dig64 spec */
+
+ {{"_PR0", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_PR1", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_PR2", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_PR3", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_PRE", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_PRL", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_PRS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
/*
* For _PRT, many BIOSs reverse the 3rd and 4th Package elements (Source
@@ -410,47 +709,89 @@ static const union acpi_predefined_info predefined_names[] = {
* warning, add the ACPI_RTYPE_REFERENCE type to the 4th element (index 3)
* in the statement below.
*/
- {{"_PRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (4): Int,Int,Int/Ref,Int */
- {{{ACPI_PTYPE2_FIXED, 4, ACPI_RTYPE_INTEGER,ACPI_RTYPE_INTEGER},
- ACPI_RTYPE_INTEGER | ACPI_RTYPE_REFERENCE,
- ACPI_RTYPE_INTEGER}},
-
- {{"_PRW", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each: Pkg/Int,Int,[Variable-length Refs] (Pkg is Ref/Int) */
- {{{ACPI_PTYPE1_OPTION, 2, ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE,
- ACPI_RTYPE_INTEGER}, ACPI_RTYPE_REFERENCE,0}},
-
- {{"_PS0", 0, 0}},
- {{"_PS1", 0, 0}},
- {{"_PS2", 0, 0}},
- {{"_PS3", 0, 0}},
- {{"_PSC", 0, ACPI_RTYPE_INTEGER}},
- {{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
- {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
-
- {{"_PSE", 1, 0}},
- {{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_PSR", 0, ACPI_RTYPE_INTEGER}},
- {{"_PSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (6 Int) */
- {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 6,0}, 0,0}},
-
- {{"_PSV", 0, ACPI_RTYPE_INTEGER}},
- {{"_PSW", 1, 0}},
- {{"_PTC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Buf) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2,0}, 0,0}},
-
- {{"_PTP", 2, ACPI_RTYPE_INTEGER}},
- {{"_PTS", 1, 0}},
- {{"_PUR", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0}, 0, 0}},
-
- {{"_PXM", 0, ACPI_RTYPE_INTEGER}},
- {{"_REG", 2, 0}},
- {{"_REV", 0, ACPI_RTYPE_INTEGER}},
- {{"_RMV", 0, ACPI_RTYPE_INTEGER}},
- {{"_ROM", 2, ACPI_RTYPE_BUFFER}},
- {{"_RTV", 0, ACPI_RTYPE_INTEGER}},
+ {{"_PRT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (4): Int,Int,Int/Ref,Int */
+ PACKAGE_INFO(ACPI_PTYPE2_FIXED, 4, ACPI_RTYPE_INTEGER,
+ ACPI_RTYPE_INTEGER,
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_REFERENCE,
+ ACPI_RTYPE_INTEGER),
+
+ {{"_PRW", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: Pkg/Int,Int,[Variable-length Refs] (Pkg is Ref/Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_OPTION, 2,
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE,
+ ACPI_RTYPE_INTEGER, ACPI_RTYPE_REFERENCE, 0),
+
+ {{"_PS0", METHOD_0ARGS,
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PS1", METHOD_0ARGS,
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PS2", METHOD_0ARGS,
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PS3", METHOD_0ARGS,
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PSC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_PSD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (5 Int) with count */
+ PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
+
+ {{"_PSE", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PSL", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_PSR", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_PSS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each (6 Int) */
+ PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 6, 0, 0, 0),
+
+ {{"_PSV", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_PSW", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PTC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Buf) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2, 0, 0, 0),
+
+ {{"_PTP", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_PTS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_PUR", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0),
+
+ {{"_PXM", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_REG", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_REV", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_RMV", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_ROM", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_RTV", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
/*
* For _S0_ through _S5_, the ACPI spec defines a return Package
@@ -458,111 +799,285 @@ static const union acpi_predefined_info predefined_names[] = {
 * Allow this by making the objects variable-length, but all elements
* must be Integers.
*/
- {{"_S0_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
-
- {{"_S1_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
-
- {{"_S2_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
-
- {{"_S3_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
-
- {{"_S4_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
-
- {{"_S5_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
-
- {{"_S1D", 0, ACPI_RTYPE_INTEGER}},
- {{"_S2D", 0, ACPI_RTYPE_INTEGER}},
- {{"_S3D", 0, ACPI_RTYPE_INTEGER}},
- {{"_S4D", 0, ACPI_RTYPE_INTEGER}},
- {{"_S0W", 0, ACPI_RTYPE_INTEGER}},
- {{"_S1W", 0, ACPI_RTYPE_INTEGER}},
- {{"_S2W", 0, ACPI_RTYPE_INTEGER}},
- {{"_S3W", 0, ACPI_RTYPE_INTEGER}},
- {{"_S4W", 0, ACPI_RTYPE_INTEGER}},
- {{"_SBS", 0, ACPI_RTYPE_INTEGER}},
- {{"_SCP", 0x13, 0}}, /* Acpi 1.0 allowed 1 arg. Acpi 3.0 expanded to 3 args. Allow both. */
- /* Note: the 3-arg definition may be removed for ACPI 4.0 */
- {{"_SDD", 1, 0}},
- {{"_SEG", 0, ACPI_RTYPE_INTEGER}},
- {{"_SHL", 1, ACPI_RTYPE_INTEGER}},
- {{"_SLI", 0, ACPI_RTYPE_BUFFER}},
- {{"_SPD", 1, ACPI_RTYPE_INTEGER}},
- {{"_SRS", 1, 0}},
- {{"_SRT", 1, ACPI_RTYPE_INTEGER}},
- {{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
- {{"_SST", 1, 0}},
- {{"_STA", 0, ACPI_RTYPE_INTEGER}},
- {{"_STM", 3, 0}},
- {{"_STP", 2, ACPI_RTYPE_INTEGER}},
- {{"_STR", 0, ACPI_RTYPE_BUFFER}},
- {{"_STV", 2, ACPI_RTYPE_INTEGER}},
- {{"_SUB", 0, ACPI_RTYPE_STRING}},
- {{"_SUN", 0, ACPI_RTYPE_INTEGER}},
- {{"_SWS", 0, ACPI_RTYPE_INTEGER}},
- {{"_TC1", 0, ACPI_RTYPE_INTEGER}},
- {{"_TC2", 0, ACPI_RTYPE_INTEGER}},
- {{"_TDL", 0, ACPI_RTYPE_INTEGER}},
- {{"_TIP", 1, ACPI_RTYPE_INTEGER}},
- {{"_TIV", 1, ACPI_RTYPE_INTEGER}},
- {{"_TMP", 0, ACPI_RTYPE_INTEGER}},
- {{"_TPC", 0, ACPI_RTYPE_INTEGER}},
- {{"_TPT", 1, 0}},
- {{"_TRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2 Ref/6 Int */
- {{{ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER}, 6, 0}},
-
- {{"_TSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int with count */
- {{{ACPI_PTYPE2_COUNT,ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
-
- {{"_TSP", 0, ACPI_RTYPE_INTEGER}},
- {{"_TSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int */
- {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
-
- {{"_TST", 0, ACPI_RTYPE_INTEGER}},
- {{"_TTS", 1, 0}},
- {{"_TZD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
-
- {{"_TZM", 0, ACPI_RTYPE_REFERENCE}},
- {{"_TZP", 0, ACPI_RTYPE_INTEGER}},
- {{"_UID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
- {{"_UPC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}},
-
- {{"_UPD", 0, ACPI_RTYPE_INTEGER}},
- {{"_UPP", 0, ACPI_RTYPE_INTEGER}},
- {{"_VPO", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S0_", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
+
+ {{"_S1_", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
+
+ {{"_S2_", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
+
+ {{"_S3_", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
+
+ {{"_S4_", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
+
+ {{"_S5_", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
+
+ {{"_S1D", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_S2D", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_S3D", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_S4D", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_S0W", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_S1W", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_S2W", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_S3W", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_S4W", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_SBS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_SCP", METHOD_1ARGS(ACPI_TYPE_INTEGER) | ARG_COUNT_IS_MINIMUM,
+ METHOD_NO_RETURN_VALUE}}, /* Acpi 1.0 allowed 1 integer arg. Acpi 3.0 expanded to 3 args. Allow both. */
+
+ {{"_SDD", METHOD_1ARGS(ACPI_TYPE_BUFFER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_SEG", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_SHL", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_SLI", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_SPD", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_SRS", METHOD_1ARGS(ACPI_TYPE_BUFFER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_SRT", METHOD_1ARGS(ACPI_TYPE_BUFFER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_SRV", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See IPMI spec */
+
+ {{"_SST", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_STA", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_STM",
+ METHOD_3ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_BUFFER, ACPI_TYPE_BUFFER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_STP", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_STR", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_STV", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_SUB", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_STRING)}},
+
+ {{"_SUN", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_SWS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TC1", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TC2", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TDL", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TIP", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TIV", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TMP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TPC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TPT", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_TRT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 2 Ref/6 Int */
+ PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER,
+ 6, 0),
+
+ {{"_TSD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 5 Int with count */
+ PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+ {{"_TSP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TSS", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 5 Int */
+ PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+ {{"_TST", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_TTS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_NO_RETURN_VALUE}},
+
+ {{"_TZD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
+ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+ {{"_TZM", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_REFERENCE)}},
+
+ {{"_TZP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_UID", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING)}},
+
+ {{"_UPC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+
+ {{"_UPD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_UPP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+ {{"_VPO", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
/* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */
- {{"_WAK", 1,
- ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */
+ {{"_WAK", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER |
+ ACPI_RTYPE_PACKAGE)}},
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0), /* Fixed-length (2 Int), but is optional */
/* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */
- {{"_WDG", 0, ACPI_RTYPE_BUFFER}},
- {{"_WED", 1,
- ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}},
+ {{"_WDG", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_WED", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
+ ACPI_RTYPE_BUFFER)}},
- {{{0, 0, 0, 0}, 0, 0}} /* Table terminator */
+ PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
};
+#else
+extern const union acpi_predefined_info acpi_gbl_predefined_methods[];
+#endif
-#if 0
+#if (defined ACPI_CREATE_RESOURCE_TABLE && defined ACPI_APPLICATION)
+/******************************************************************************
+ *
+ * Predefined names for use in Resource Descriptors. These names do not
+ * appear in the global Predefined Name table (since these names never
+ * appear in actual AML byte code, only in the original ASL)
+ *
+ * Note: Used by iASL compiler and acpi_help utility only.
+ *
+ *****************************************************************************/
- /* This is an internally implemented control method, no need to check */
-{ {
-"_OSI", 1, ACPI_RTYPE_INTEGER}},
+const union acpi_predefined_info acpi_gbl_resource_names[] = {
+ {{"_ADR", WIDTH_16 | WIDTH_64, 0}},
+ {{"_ALN", WIDTH_8 | WIDTH_16 | WIDTH_32, 0}},
+ {{"_ASI", WIDTH_8, 0}},
+ {{"_ASZ", WIDTH_8, 0}},
+ {{"_ATT", WIDTH_64, 0}},
+ {{"_BAS", WIDTH_16 | WIDTH_32, 0}},
+ {{"_BM_", WIDTH_1, 0}},
+ {{"_DBT", WIDTH_16, 0}}, /* Acpi 5.0 */
+ {{"_DEC", WIDTH_1, 0}},
+ {{"_DMA", WIDTH_8, 0}},
+ {{"_DPL", WIDTH_1, 0}}, /* Acpi 5.0 */
+ {{"_DRS", WIDTH_16, 0}}, /* Acpi 5.0 */
+ {{"_END", WIDTH_1, 0}}, /* Acpi 5.0 */
+ {{"_FLC", WIDTH_2, 0}}, /* Acpi 5.0 */
+ {{"_GRA", WIDTH_ADDRESS, 0}},
+ {{"_HE_", WIDTH_1, 0}},
+ {{"_INT", WIDTH_16 | WIDTH_32, 0}},
+ {{"_IOR", WIDTH_2, 0}}, /* Acpi 5.0 */
+ {{"_LEN", WIDTH_8 | WIDTH_ADDRESS, 0}},
+ {{"_LIN", WIDTH_8, 0}}, /* Acpi 5.0 */
+ {{"_LL_", WIDTH_1, 0}},
+ {{"_MAF", WIDTH_1, 0}},
+ {{"_MAX", WIDTH_ADDRESS, 0}},
+ {{"_MEM", WIDTH_2, 0}},
+ {{"_MIF", WIDTH_1, 0}},
+ {{"_MIN", WIDTH_ADDRESS, 0}},
+ {{"_MOD", WIDTH_1, 0}}, /* Acpi 5.0 */
+ {{"_MTP", WIDTH_2, 0}},
+ {{"_PAR", WIDTH_8, 0}}, /* Acpi 5.0 */
+ {{"_PHA", WIDTH_1, 0}}, /* Acpi 5.0 */
+ {{"_PIN", WIDTH_16, 0}}, /* Acpi 5.0 */
+ {{"_PPI", WIDTH_8, 0}}, /* Acpi 5.0 */
+ {{"_POL", WIDTH_1 | WIDTH_2, 0}}, /* Acpi 5.0 */
+ {{"_RBO", WIDTH_8, 0}},
+ {{"_RBW", WIDTH_8, 0}},
+ {{"_RNG", WIDTH_1, 0}},
+ {{"_RT_", WIDTH_8, 0}}, /* Acpi 3.0 */
+ {{"_RW_", WIDTH_1, 0}},
+ {{"_RXL", WIDTH_16, 0}}, /* Acpi 5.0 */
+ {{"_SHR", WIDTH_2, 0}},
+ {{"_SIZ", WIDTH_2, 0}},
+ {{"_SLV", WIDTH_1, 0}}, /* Acpi 5.0 */
+ {{"_SPE", WIDTH_32, 0}}, /* Acpi 5.0 */
+ {{"_STB", WIDTH_2, 0}}, /* Acpi 5.0 */
+ {{"_TRA", WIDTH_ADDRESS, 0}},
+ {{"_TRS", WIDTH_1, 0}},
+ {{"_TSF", WIDTH_8, 0}}, /* Acpi 3.0 */
+ {{"_TTP", WIDTH_1, 0}},
+ {{"_TXL", WIDTH_16, 0}}, /* Acpi 5.0 */
+ {{"_TYP", WIDTH_2 | WIDTH_16, 0}},
+ {{"_VEN", VARIABLE_DATA, 0}}, /* Acpi 5.0 */
+ PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
+};
- /* TBD: */
- _PRT - currently ignore reversed entries. attempt to fix here?
- think about possibly fixing package elements like _BIF, etc.
+static const union acpi_predefined_info acpi_gbl_scope_names[] = {
+ {{"_GPE", 0, 0}},
+ {{"_PR_", 0, 0}},
+ {{"_SB_", 0, 0}},
+ {{"_SI_", 0, 0}},
+ {{"_TZ_", 0, 0}},
+ PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
+};
+#else
+extern const union acpi_predefined_info acpi_gbl_resource_names[];
#endif
#endif
-#endif
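
For orientation only, a minimal usage sketch that is not part of the patch: it shows how an entry of the rewritten predefined-name table above can be looked up and its expected return types decoded, using the helpers that the acutils.h hunk below declares. The buffer size and the name variable are assumptions made for the example.

    /* Editorial sketch; the buffer size is an assumption, not a documented limit */
    const union acpi_predefined_info *info;
    char name[] = "_BIF";
    char types[48];

    info = acpi_ut_match_predefined_method(name);
    if (info) {
            /* Decode the expected_btypes bitmap into a readable string */
            acpi_ut_get_expected_return_types(types, info->info.expected_btypes);
    }
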
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 0082fa0a6139..202f4f12d3e2 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -113,9 +113,10 @@ struct acpi_pkg_info {
u32 num_packages;
};
+/* Object reference counts */
+
#define REF_INCREMENT (u16) 0
#define REF_DECREMENT (u16) 1
-#define REF_FORCE_DELETE (u16) 2
/* acpi_ut_dump_buffer */
@@ -421,7 +422,7 @@ acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
*/
acpi_status acpi_ut_initialize_interfaces(void);
-void acpi_ut_interface_terminate(void);
+acpi_status acpi_ut_interface_terminate(void);
acpi_status acpi_ut_install_interface(acpi_string interface_name);
@@ -432,6 +433,26 @@ struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name);
acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
/*
+ * utpredef - support for predefined names
+ */
+const union acpi_predefined_info *acpi_ut_get_next_predefined_method(const union
+ acpi_predefined_info
+ *this_name);
+
+const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name);
+
+const union acpi_predefined_info *acpi_ut_match_resource_name(char *name);
+
+void
+acpi_ut_display_predefined_method(char *buffer,
+ const union acpi_predefined_info *this_name,
+ u8 multi_line);
+
+void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes);
+
+u32 acpi_ut_get_resource_bit_width(char *buffer, u16 types);
+
+/*
* utstate - Generic state creation/cache routines
*/
void
@@ -483,7 +504,8 @@ acpi_ut_short_divide(u64 in_dividend,
/*
* utmisc
*/
-const char *acpi_ut_validate_exception(acpi_status status);
+const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status
+ status);
u8 acpi_ut_is_pci_root_bridge(char *id);
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 4d8c992a51d8..99778997c35a 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -178,7 +178,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
if (!op) {
ACPI_ERROR((AE_INFO, "Null Op"));
- return_VALUE(TRUE);
+ return_UINT8(TRUE);
}
/*
@@ -210,7 +210,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
"At Method level, result of [%s] not used\n",
acpi_ps_get_opcode_name(op->common.
aml_opcode)));
- return_VALUE(FALSE);
+ return_UINT8(FALSE);
}
/* Get info on the parent. The root_op is AML_SCOPE */
@@ -219,7 +219,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
if (parent_info->class == AML_CLASS_UNKNOWN) {
ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
- return_VALUE(FALSE);
+ return_UINT8(FALSE);
}
/*
@@ -307,7 +307,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
- return_VALUE(TRUE);
+ return_UINT8(TRUE);
result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
@@ -316,7 +316,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
- return_VALUE(FALSE);
+ return_UINT8(FALSE);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 44f8325c2bae..e2199a947470 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -693,7 +693,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unimplemented opcode, class=0x%X type=0x%X Opcode=-0x%X Op=%p",
+ "Unimplemented opcode, class=0x%X type=0x%X Opcode=0x%X Op=%p",
op_class, op_type, op->common.aml_opcode,
op));
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index b8ea0b26cde3..83cd45f4a870 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -257,6 +257,8 @@ u32 acpi_ev_fixed_event_detect(void)
*
* DESCRIPTION: Clears the status bit for the requested event, calls the
* handler that previously registered for the event.
+ * NOTE: If there is no handler for the event, the event is
+ * disabled to prevent further interrupts.
*
******************************************************************************/
@@ -271,17 +273,17 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
status_register_id, ACPI_CLEAR_STATUS);
/*
- * Make sure we've got a handler. If not, report an error. The event is
- * disabled to prevent further interrupts.
+ * Make sure that a handler exists. If not, report an error
+ * and disable the event to prevent further interrupts.
*/
- if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
+ if (!acpi_gbl_fixed_event_handlers[event].handler) {
(void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
enable_register_id,
ACPI_DISABLE_EVENT);
ACPI_ERROR((AE_INFO,
- "No installed handler for fixed event [0x%08X]",
- event));
+ "No installed handler for fixed event - %s (%u), disabling",
+ acpi_ut_get_event_name(event), event));
return (ACPI_INTERRUPT_NOT_HANDLED);
}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index b9adb9a7ed85..a493b528f8f9 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -707,7 +707,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE%02X", gpe_number));
- return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
@@ -724,7 +724,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE%02X", gpe_number));
- return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
/*
@@ -784,7 +784,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
break;
}
- return_VALUE(ACPI_INTERRUPT_HANDLED);
+ return_UINT32(ACPI_INTERRUPT_HANDLED);
}
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index f4b43bede015..b905acf7aacd 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -89,7 +89,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
- return_VALUE(interrupt_handled);
+ return_UINT32(interrupt_handled);
}
/*******************************************************************************
@@ -120,7 +120,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
- return_VALUE(interrupt_handled);
+ return_UINT32(interrupt_handled);
}
/******************************************************************************
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ddffd6847914..ca5fba99c33b 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -467,9 +467,9 @@ acpi_install_fixed_event_handler(u32 event,
return_ACPI_STATUS(status);
}
- /* Don't allow two handlers. */
+ /* Do not allow multiple handlers */
- if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
+ if (acpi_gbl_fixed_event_handlers[event].handler) {
status = AE_ALREADY_EXISTS;
goto cleanup;
}
@@ -483,8 +483,9 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
- event));
+ ACPI_WARNING((AE_INFO,
+ "Could not enable fixed event - %s (%u)",
+ acpi_ut_get_event_name(event), event));
/* Remove the handler */
@@ -492,7 +493,8 @@ acpi_install_fixed_event_handler(u32 event,
acpi_gbl_fixed_event_handlers[event].context = NULL;
} else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Enabled fixed event %X, Handler=%p\n", event,
+ "Enabled fixed event %s (%X), Handler=%p\n",
+ acpi_ut_get_event_name(event), event,
handler));
}
@@ -544,11 +546,12 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO,
- "Could not write to fixed event enable register 0x%X",
- event));
+ "Could not disable fixed event - %s (%u)",
+ acpi_ut_get_event_name(event), event));
} else {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
- event));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Disabled fixed event - %s (%X)\n",
+ acpi_ut_get_event_name(event), event));
}
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index d6e4e42316db..7039606a0ba8 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -74,6 +74,12 @@ acpi_status acpi_enable(void)
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
+ /* If the Hardware Reduced flag is set, machine is always in acpi mode */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Check current mode */
if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
@@ -126,6 +132,12 @@ acpi_status acpi_disable(void)
ACPI_FUNCTION_TRACE(acpi_disable);
+ /* If the Hardware Reduced flag is set, machine is always in acpi mode */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"System is already in legacy (non-ACPI) mode\n"));
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index e491e46f17df..b0838a4ea53e 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -257,7 +257,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
union acpi_operand_object *return_desc = NULL;
u64 index;
acpi_status status = AE_OK;
- acpi_size length;
+ acpi_size length = 0;
ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
@@ -320,7 +320,6 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
* NOTE: A length of zero is ok, and will create a zero-length, null
* terminated string.
*/
- length = 0;
while ((length < operand[0]->buffer.length) &&
(length < operand[1]->integer.value) &&
(operand[0]->buffer.pointer[length])) {
@@ -376,6 +375,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
case ACPI_TYPE_STRING:
if (index >= operand[0]->string.length) {
+ length = operand[0]->string.length;
status = AE_AML_STRING_LIMIT;
}
@@ -386,6 +386,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
case ACPI_TYPE_BUFFER:
if (index >= operand[0]->buffer.length) {
+ length = operand[0]->buffer.length;
status = AE_AML_BUFFER_LIMIT;
}
@@ -396,6 +397,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
case ACPI_TYPE_PACKAGE:
if (index >= operand[0]->package.count) {
+ length = operand[0]->package.count;
status = AE_AML_PACKAGE_LIMIT;
}
@@ -414,8 +416,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Index (0x%8.8X%8.8X) is beyond end of object",
- ACPI_FORMAT_UINT64(index)));
+ "Index (0x%X%8.8X) is beyond end of object (length 0x%X)",
+ ACPI_FORMAT_UINT64(index),
+ (u32)length));
goto cleanup;
}
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index d6eab81f54fb..6b728aef2dca 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -276,7 +276,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
/* Invalid field access type */
ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
- return_VALUE(0);
+ return_UINT32(0);
}
if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
@@ -289,7 +289,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
}
*return_byte_alignment = byte_alignment;
- return_VALUE(bit_length);
+ return_UINT32(bit_length);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index b205cbb4b50c..99dc7b287d55 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -340,7 +340,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
/* u64 is unsigned, so we don't worry about a '-' prefix */
if (value == 0) {
- return_VALUE(1);
+ return_UINT32(1);
}
current_value = value;
@@ -354,7 +354,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
num_digits++;
}
- return_VALUE(num_digits);
+ return_UINT32(num_digits);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index deb3f61e2bd1..579c3a53ac87 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -66,6 +66,12 @@ acpi_status acpi_hw_set_mode(u32 mode)
ACPI_FUNCTION_TRACE(hw_set_mode);
+ /* If the Hardware Reduced flag is set, machine is always in acpi mode */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
* system does not support mode transition.
@@ -146,23 +152,29 @@ u32 acpi_hw_get_mode(void)
ACPI_FUNCTION_TRACE(hw_get_mode);
+ /* If the Hardware Reduced flag is set, machine is always in acpi mode */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_UINT32(ACPI_SYS_MODE_ACPI);
+ }
+
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
* system does not support mode transition.
*/
if (!acpi_gbl_FADT.smi_command) {
- return_VALUE(ACPI_SYS_MODE_ACPI);
+ return_UINT32(ACPI_SYS_MODE_ACPI);
}
status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value);
if (ACPI_FAILURE(status)) {
- return_VALUE(ACPI_SYS_MODE_LEGACY);
+ return_UINT32(ACPI_SYS_MODE_LEGACY);
}
if (value) {
- return_VALUE(ACPI_SYS_MODE_ACPI);
+ return_UINT32(ACPI_SYS_MODE_ACPI);
} else {
- return_VALUE(ACPI_SYS_MODE_LEGACY);
+ return_UINT32(ACPI_SYS_MODE_LEGACY);
}
}
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
new file mode 100644
index 000000000000..8f79a9d2d50e
--- /dev/null
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -0,0 +1,443 @@
+/******************************************************************************
+ *
+ * Module Name: nsconvert - Object conversions for objects returned by
+ * predefined methods
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+#include "acpredef.h"
+#include "amlresrc.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsconvert")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_integer
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ acpi_status status;
+ u64 value = 0;
+ u32 i;
+
+ switch (original_object->common.type) {
+ case ACPI_TYPE_STRING:
+
+ /* String-to-Integer conversion */
+
+ status = acpi_ut_strtoul64(original_object->string.pointer,
+ ACPI_ANY_BASE, &value);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ /* Buffer-to-Integer conversion. Max buffer size is 64 bits. */
+
+ if (original_object->buffer.length > 8) {
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ /* Extract each buffer byte to create the integer */
+
+ for (i = 0; i < original_object->buffer.length; i++) {
+ value |=
+ ((u64)original_object->buffer.
+ pointer[i] << (i * 8));
+ }
+ break;
+
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ new_object = acpi_ut_create_integer_object(value);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ *return_object = new_object;
+ return (AE_OK);
+}
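
A concrete illustration of the Buffer case above, with assumed example bytes: each byte is packed into the 64-bit result LSB-first, so a two-byte buffer { 0x34, 0x12 } yields the integer 0x1234.

    /* Editorial example (not part of the patch): LSB-first byte packing */
    u8 bytes[2] = { 0x34, 0x12 };       /* hypothetical returned buffer data */
    u64 value = 0;
    u32 i;

    for (i = 0; i < 2; i++) {
            value |= ((u64)bytes[i] << (i * 8));
    }
    /* value == 0x1234 */
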
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_string
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert an Integer/Buffer object to a String.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_convert_to_string(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ acpi_size length;
+ acpi_status status;
+
+ switch (original_object->common.type) {
+ case ACPI_TYPE_INTEGER:
+ /*
+ * Integer-to-String conversion. Commonly, convert
+	 * an integer of value 0 to a NULL string. The last element of
+	 * _BIF and _BIX packages occasionally needs this fix.
+ */
+ if (original_object->integer.value == 0) {
+
+ /* Allocate a new NULL string object */
+
+ new_object = acpi_ut_create_string_object(0);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+ } else {
+ status =
+ acpi_ex_convert_to_string(original_object,
+ &new_object,
+ ACPI_IMPLICIT_CONVERT_HEX);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ /*
+ * Buffer-to-String conversion. Use a to_string
+ * conversion, no transform performed on the buffer data. The best
+ * example of this is the _BIF method, where the string data from
+ * the battery is often (incorrectly) returned as buffer object(s).
+ */
+ length = 0;
+ while ((length < original_object->buffer.length) &&
+ (original_object->buffer.pointer[length])) {
+ length++;
+ }
+
+ /* Allocate a new string object */
+
+ new_object = acpi_ut_create_string_object(length);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ /*
+ * Copy the raw buffer data with no transform. String is already NULL
+ * terminated at Length+1.
+ */
+ ACPI_MEMCPY(new_object->string.pointer,
+ original_object->buffer.pointer, length);
+ break;
+
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ *return_object = new_object;
+ return (AE_OK);
+}
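
A short worked example of the Buffer case, with assumed data: the copied length stops at the first NUL byte or at the end of the buffer, whichever comes first, so anything after the NUL is dropped.

    /* Editorial example: battery string data wrongly returned as a Buffer */
    u8 raw[] = { 'O', 'E', 'M', 0x00, 0x7F };   /* hypothetical data */
    /* Resulting String object: "OEM", length 3; the 0x7F byte is not copied */
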
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_buffer
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert an Integer/String/Package object to a Buffer.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ acpi_status status;
+ union acpi_operand_object **elements;
+ u32 *dword_buffer;
+ u32 count;
+ u32 i;
+
+ switch (original_object->common.type) {
+ case ACPI_TYPE_INTEGER:
+ /*
+ * Integer-to-Buffer conversion.
+ * Convert the Integer to a packed-byte buffer. _MAT and other
+ * objects need this sometimes, if a read has been performed on a
+ * Field object that is less than or equal to the global integer
+ * size (32 or 64 bits).
+ */
+ status =
+ acpi_ex_convert_to_buffer(original_object, &new_object);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ /* String-to-Buffer conversion. Simple data copy */
+
+ new_object =
+ acpi_ut_create_buffer_object(original_object->string.
+ length);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ ACPI_MEMCPY(new_object->buffer.pointer,
+ original_object->string.pointer,
+ original_object->string.length);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * This case is often seen for predefined names that must return a
+ * Buffer object with multiple DWORD integers within. For example,
+ * _FDE and _GTM. The Package can be converted to a Buffer.
+ */
+
+ /* All elements of the Package must be integers */
+
+ elements = original_object->package.elements;
+ count = original_object->package.count;
+
+ for (i = 0; i < count; i++) {
+ if ((!*elements) ||
+ ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
+ return (AE_AML_OPERAND_TYPE);
+ }
+ elements++;
+ }
+
+ /* Create the new buffer object to replace the Package */
+
+ new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count));
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Copy the package elements (integers) to the buffer as DWORDs */
+
+ elements = original_object->package.elements;
+ dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);
+
+ for (i = 0; i < count; i++) {
+ *dword_buffer = (u32)(*elements)->integer.value;
+ dword_buffer++;
+ elements++;
+ }
+ break;
+
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ *return_object = new_object;
+ return (AE_OK);
+}
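
A worked example of the Package case, with assumed input on a little-endian host: each Integer element is truncated to a DWORD by the (u32) cast above and stored in order.

    /* Editorial example: Package {1, 2, 3} becomes a 12-byte Buffer:   */
    /*   01 00 00 00  02 00 00 00  03 00 00 00                          */
    /* Integers wider than 32 bits lose their upper bits in this path.  */
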
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_unicode
+ *
+ * PARAMETERS: original_object - ASCII String Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a String object to a Unicode string Buffer.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_convert_to_unicode(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ char *ascii_string;
+ u16 *unicode_buffer;
+ u32 unicode_length;
+ u32 i;
+
+ if (!original_object) {
+ return (AE_OK);
+ }
+
+ /* If a Buffer was returned, it must be at least two bytes long */
+
+ if (original_object->common.type == ACPI_TYPE_BUFFER) {
+ if (original_object->buffer.length < 2) {
+ return (AE_AML_OPERAND_VALUE);
+ }
+
+ *return_object = NULL;
+ return (AE_OK);
+ }
+
+ /*
+ * The original object is an ASCII string. Convert this string to
+ * a unicode buffer.
+ */
+ ascii_string = original_object->string.pointer;
+ unicode_length = (original_object->string.length * 2) + 2;
+
+ /* Create a new buffer object for the Unicode data */
+
+ new_object = acpi_ut_create_buffer_object(unicode_length);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ unicode_buffer = ACPI_CAST_PTR(u16, new_object->buffer.pointer);
+
+ /* Convert ASCII to Unicode */
+
+ for (i = 0; i < original_object->string.length; i++) {
+ unicode_buffer[i] = (u16)ascii_string[i];
+ }
+
+ *return_object = new_object;
+ return (AE_OK);
+}
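
A worked example with an assumed input string: a 3-character ASCII String produces an 8-byte Buffer, since unicode_length = (3 * 2) + 2; the final two bytes are left zero by the buffer allocation and serve as the 16-bit terminator.

    /* Editorial example: "ABC" -> 41 00 42 00 43 00 00 00 (little-endian host) */
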
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_resource
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful
+ *
+ * DESCRIPTION: Attempt to convert an Integer object to a resource_template
+ * Buffer.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_convert_to_resource(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ u8 *buffer;
+
+ /*
+ * We can fix the following cases for an expected resource template:
+ * 1. No return value (interpreter slack mode is disabled)
+ * 2. A "Return (Zero)" statement
+ * 3. A "Return empty buffer" statement
+ *
+ * We will return a buffer containing a single end_tag
+ * resource descriptor.
+ */
+ if (original_object) {
+ switch (original_object->common.type) {
+ case ACPI_TYPE_INTEGER:
+
+ /* We can only repair an Integer==0 */
+
+ if (original_object->integer.value) {
+ return (AE_AML_OPERAND_TYPE);
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ if (original_object->buffer.length) {
+
+ /* Additional checks can be added in the future */
+
+ *return_object = NULL;
+ return (AE_OK);
+ }
+ break;
+
+ case ACPI_TYPE_STRING:
+ default:
+
+ return (AE_AML_OPERAND_TYPE);
+ }
+ }
+
+ /* Create the new buffer object for the resource descriptor */
+
+ new_object = acpi_ut_create_buffer_object(2);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ buffer = ACPI_CAST_PTR(u8, new_object->buffer.pointer);
+
+ /* Initialize the Buffer with a single end_tag descriptor */
+
+ buffer[0] = (ACPI_RESOURCE_NAME_END_TAG | ASL_RDESC_END_TAG_SIZE);
+ buffer[1] = 0x00;
+
+ *return_object = new_object;
+ return (AE_OK);
+}
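
A worked example of the repaired template, with the usual ACPICA macro values assumed (ACPI_RESOURCE_NAME_END_TAG as 0x78 and ASL_RDESC_END_TAG_SIZE as 0x01; both are defined elsewhere in the tree):

    /* Editorial example, assuming the macro values named above:      */
    /*   buffer[0] = 0x78 | 0x01 = 0x79  (small end_tag descriptor)   */
    /*   buffer[1] = 0x00                (checksum byte; 0 = ignore)  */
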
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 1538f3eb2a8f..b61db69d5675 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -98,17 +98,21 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
info->return_object = NULL;
info->param_count = 0;
- /*
- * Get the actual namespace node for the target object. Handles these cases:
- *
- * 1) Null node, Pathname (absolute path)
- * 2) Node, Pathname (path relative to Node)
- * 3) Node, Null Pathname
- */
- status = acpi_ns_get_node(info->prefix_node, info->pathname,
- ACPI_NS_NO_UPSEARCH, &info->resolved_node);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (!info->resolved_node) {
+ /*
+ * Get the actual namespace node for the target object if we need to.
+ * Handles these cases:
+ *
+ * 1) Null node, Pathname (absolute path)
+ * 2) Node, Pathname (path relative to Node)
+ * 3) Node, Null Pathname
+ */
+ status = acpi_ns_get_node(info->prefix_node, info->pathname,
+ ACPI_NS_NO_UPSEARCH,
+ &info->resolved_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
}
/*
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 224c30053401..8a52916148cb 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -76,19 +76,7 @@ static acpi_status
acpi_ns_check_reference(struct acpi_predefined_data *data,
union acpi_operand_object *return_object);
-static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes);
-
-/*
- * Names for the types that can be returned by the predefined objects.
- * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
- */
-static const char *acpi_rtype_names[] = {
- "/Integer",
- "/String",
- "/Buffer",
- "/Package",
- "/Reference",
-};
+static u32 acpi_ns_get_bitmapped_type(union acpi_operand_object *return_object);
/*******************************************************************************
*
@@ -112,7 +100,6 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
acpi_status return_status,
union acpi_operand_object **return_object_ptr)
{
- union acpi_operand_object *return_object = *return_object_ptr;
acpi_status status = AE_OK;
const union acpi_predefined_info *predefined;
char *pathname;
@@ -120,7 +107,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
/* Match the name for this method/object against the predefined list */
- predefined = acpi_ns_check_for_predefined_name(node);
+ predefined = acpi_ut_match_predefined_method(node->name.ascii);
/* Get the full pathname to the object, for use in warning messages */
@@ -152,25 +139,6 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
}
/*
- * If there is no return value, check if we require a return value for
- * this predefined name. Either one return value is expected, or none,
- * for both methods and other objects.
- *
- * Exit now if there is no return object. Warning if one was expected.
- */
- if (!return_object) {
- if ((predefined->info.expected_btypes) &&
- (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
- ACPI_WARN_PREDEFINED((AE_INFO, pathname,
- ACPI_WARN_ALWAYS,
- "Missing expected return value"));
-
- status = AE_AML_NO_RETURN_VALUE;
- }
- goto cleanup;
- }
-
- /*
* Return value validation and possible repair.
*
* 1) Don't perform return value validation/repair if this feature
@@ -310,8 +278,10 @@ acpi_ns_check_parameter_count(char *pathname,
* Validate the user-supplied parameter count.
* Allow two different legal argument counts (_SCP, etc.)
*/
- required_params_current = predefined->info.param_count & 0x0F;
- required_params_old = predefined->info.param_count >> 4;
+ required_params_current =
+ predefined->info.argument_list & METHOD_ARG_MASK;
+ required_params_old =
+ predefined->info.argument_list >> METHOD_ARG_BIT_WIDTH;
if (user_param_count != ACPI_UINT32_MAX) {
if ((user_param_count != required_params_current) &&
@@ -340,52 +310,6 @@ acpi_ns_check_parameter_count(char *pathname,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_check_for_predefined_name
- *
- * PARAMETERS: node - Namespace node for the method/object
- *
- * RETURN: Pointer to entry in predefined table. NULL indicates not found.
- *
- * DESCRIPTION: Check an object name against the predefined object list.
- *
- ******************************************************************************/
-
-const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
- acpi_namespace_node
- *node)
-{
- const union acpi_predefined_info *this_name;
-
- /* Quick check for a predefined name, first character must be underscore */
-
- if (node->name.ascii[0] != '_') {
- return (NULL);
- }
-
- /* Search info table for a predefined method/object name */
-
- this_name = predefined_names;
- while (this_name->info.name[0]) {
- if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
- return (this_name);
- }
-
- /*
- * Skip next entry in the table if this name returns a Package
- * (next entry contains the package info)
- */
- if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) {
- this_name++;
- }
-
- this_name++;
- }
-
- return (NULL); /* Not found */
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_check_object_type
*
* PARAMETERS: data - Pointer to validation data structure
@@ -410,28 +334,12 @@ acpi_ns_check_object_type(struct acpi_predefined_data *data,
{
union acpi_operand_object *return_object = *return_object_ptr;
acpi_status status = AE_OK;
- u32 return_btype;
char type_buffer[48]; /* Room for 5 types */
- /*
- * If we get a NULL return_object here, it is a NULL package element.
- * Since all extraneous NULL package elements were removed earlier by a
- * call to acpi_ns_remove_null_elements, this is an unexpected NULL element.
- * We will attempt to repair it.
- */
- if (!return_object) {
- status = acpi_ns_repair_null_element(data, expected_btypes,
- package_index,
- return_object_ptr);
- if (ACPI_SUCCESS(status)) {
- return (AE_OK); /* Repair was successful */
- }
- goto type_error_exit;
- }
-
/* A Namespace node should not get here, but make sure */
- if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
+ if (return_object &&
+ ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
"Invalid return type - Found a Namespace node [%4.4s] type %s",
return_object->node.name.ascii,
@@ -448,59 +356,31 @@ acpi_ns_check_object_type(struct acpi_predefined_data *data,
* from all of the predefined names (including elements of returned
* packages)
*/
- switch (return_object->common.type) {
- case ACPI_TYPE_INTEGER:
- return_btype = ACPI_RTYPE_INTEGER;
- break;
-
- case ACPI_TYPE_BUFFER:
- return_btype = ACPI_RTYPE_BUFFER;
- break;
-
- case ACPI_TYPE_STRING:
- return_btype = ACPI_RTYPE_STRING;
- break;
+ data->return_btype = acpi_ns_get_bitmapped_type(return_object);
+ if (data->return_btype == ACPI_RTYPE_ANY) {
- case ACPI_TYPE_PACKAGE:
- return_btype = ACPI_RTYPE_PACKAGE;
- break;
-
- case ACPI_TYPE_LOCAL_REFERENCE:
- return_btype = ACPI_RTYPE_REFERENCE;
- break;
-
- default:
/* Not one of the supported objects, must be incorrect */
-
goto type_error_exit;
}
- /* Is the object one of the expected types? */
-
- if (return_btype & expected_btypes) {
-
- /* For reference objects, check that the reference type is correct */
-
- if (return_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
- status = acpi_ns_check_reference(data, return_object);
- }
+ /* For reference objects, check that the reference type is correct */
+ if ((data->return_btype & expected_btypes) == ACPI_RTYPE_REFERENCE) {
+ status = acpi_ns_check_reference(data, return_object);
return (status);
}
- /* Type mismatch -- attempt repair of the returned object */
+ /* Attempt simple repair of the returned object if necessary */
- status = acpi_ns_repair_object(data, expected_btypes,
+ status = acpi_ns_simple_repair(data, expected_btypes,
package_index, return_object_ptr);
- if (ACPI_SUCCESS(status)) {
- return (AE_OK); /* Repair was successful */
- }
+ return (status);
type_error_exit:
/* Create a string with all expected types for this predefined object */
- acpi_ns_get_expected_types(type_buffer, expected_btypes);
+ acpi_ut_get_expected_return_types(type_buffer, expected_btypes);
if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
@@ -558,36 +438,55 @@ acpi_ns_check_reference(struct acpi_predefined_data *data,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_get_expected_types
+ * FUNCTION: acpi_ns_get_bitmapped_type
*
- * PARAMETERS: buffer - Pointer to where the string is returned
- * expected_btypes - Bitmap of expected return type(s)
+ * PARAMETERS: return_object - Object returned from method/obj evaluation
*
- * RETURN: Buffer is populated with type names.
+ * RETURN: Object return type. ACPI_RTYPE_ANY indicates that the object
+ * type is not supported. ACPI_RTYPE_NONE indicates that no
+ * object was returned (return_object is NULL).
*
- * DESCRIPTION: Translate the expected types bitmap into a string of ascii
- * names of expected types, for use in warning messages.
+ * DESCRIPTION: Convert object type into a bitmapped object return type.
*
******************************************************************************/
-static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes)
+static u32 acpi_ns_get_bitmapped_type(union acpi_operand_object *return_object)
{
- u32 this_rtype;
- u32 i;
- u32 j;
+ u32 return_btype;
- j = 1;
- buffer[0] = 0;
- this_rtype = ACPI_RTYPE_INTEGER;
+ if (!return_object) {
+ return (ACPI_RTYPE_NONE);
+ }
- for (i = 0; i < ACPI_NUM_RTYPES; i++) {
+ /* Map acpi_object_type to internal bitmapped type */
- /* If one of the expected types, concatenate the name of this type */
+ switch (return_object->common.type) {
+ case ACPI_TYPE_INTEGER:
+ return_btype = ACPI_RTYPE_INTEGER;
+ break;
- if (expected_btypes & this_rtype) {
- ACPI_STRCAT(buffer, &acpi_rtype_names[i][j]);
- j = 0; /* Use name separator from now on */
- }
- this_rtype <<= 1; /* Next Rtype */
+ case ACPI_TYPE_BUFFER:
+ return_btype = ACPI_RTYPE_BUFFER;
+ break;
+
+ case ACPI_TYPE_STRING:
+ return_btype = ACPI_RTYPE_STRING;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ return_btype = ACPI_RTYPE_PACKAGE;
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ return_btype = ACPI_RTYPE_REFERENCE;
+ break;
+
+ default:
+ /* Not one of the supported objects, must be incorrect */
+
+ return_btype = ACPI_RTYPE_ANY;
+ break;
}
+
+ return (return_btype);
}
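
The reason for mapping concrete object types onto single bits is that a predefined name can declare several acceptable return types in one mask, and the membership test then collapses to a bitwise AND. A minimal sketch with illustrative bit values (the actual ACPI_RTYPE_* constants in the ACPICA headers may differ):

enum {
	RTYPE_INTEGER   = 1 << 0,
	RTYPE_STRING    = 1 << 1,
	RTYPE_BUFFER    = 1 << 2,
	RTYPE_PACKAGE   = 1 << 3,
	RTYPE_REFERENCE = 1 << 4,
};

static int return_type_is_acceptable(unsigned int return_btype,
				     unsigned int expected_btypes)
{
	return (return_btype & expected_btypes) != 0;
}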
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index a40155467d2e..77cdd539de16 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -112,9 +112,15 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
elements = return_object->package.elements;
count = return_object->package.count;
- /* The package must have at least one element, else invalid */
-
+ /*
+ * Most packages must have at least one element. The only exception
+ * is the variable-length package (ACPI_PTYPE1_VAR).
+ */
if (!count) {
+ if (package->ret_info.type == ACPI_PTYPE1_VAR) {
+ return (AE_OK);
+ }
+
ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
"Return Package has no elements (empty)"));
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 9e833353c06a..18f02e4ece01 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -46,6 +46,7 @@
#include "acnamesp.h"
#include "acinterp.h"
#include "acpredef.h"
+#include "amlresrc.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsrepair")
@@ -71,6 +72,11 @@ ACPI_MODULE_NAME("nsrepair")
* Buffer -> String
* Buffer -> Package of Integers
* Package -> Package of one Package
+ *
+ * Additional conversions that are available:
+ * Convert a null return or zero return value to an end_tag descriptor
+ * Convert an ASCII string to a Unicode buffer
+ *
* An incorrect standalone object is wrapped with required outer package
*
* Additional possible repairs:
@@ -78,21 +84,51 @@ ACPI_MODULE_NAME("nsrepair")
*
******************************************************************************/
/* Local prototypes */
-static acpi_status
-acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
- union acpi_operand_object **return_object);
-
-static acpi_status
-acpi_ns_convert_to_string(union acpi_operand_object *original_object,
- union acpi_operand_object **return_object);
+static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
+ acpi_namespace_node
+ *node,
+ u32
+ return_btype,
+ u32
+ package_index);
-static acpi_status
-acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
- union acpi_operand_object **return_object);
+/*
+ * Special but simple repairs for some names.
+ *
+ * 2nd argument: Unexpected types that can be repaired
+ */
+static const struct acpi_simple_repair_info acpi_object_repair_info[] = {
+ /* Resource descriptor conversions */
+
+ {"_CRS",
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
+ ACPI_RTYPE_NONE,
+ ACPI_NOT_PACKAGE_ELEMENT,
+ acpi_ns_convert_to_resource},
+ {"_DMA",
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
+ ACPI_RTYPE_NONE,
+ ACPI_NOT_PACKAGE_ELEMENT,
+ acpi_ns_convert_to_resource},
+ {"_PRS",
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
+ ACPI_RTYPE_NONE,
+ ACPI_NOT_PACKAGE_ELEMENT,
+ acpi_ns_convert_to_resource},
+
+ /* Unicode conversions */
+
+ {"_MLS", ACPI_RTYPE_STRING, 1,
+ acpi_ns_convert_to_unicode},
+ {"_STR", ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER,
+ ACPI_NOT_PACKAGE_ELEMENT,
+ acpi_ns_convert_to_unicode},
+ {{0, 0, 0, 0}, 0, 0, NULL} /* Table terminator */
+};
/*******************************************************************************
*
- * FUNCTION: acpi_ns_repair_object
+ * FUNCTION: acpi_ns_simple_repair
*
* PARAMETERS: data - Pointer to validation data structure
* expected_btypes - Object types expected
@@ -110,16 +146,54 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
******************************************************************************/
acpi_status
-acpi_ns_repair_object(struct acpi_predefined_data *data,
+acpi_ns_simple_repair(struct acpi_predefined_data *data,
u32 expected_btypes,
u32 package_index,
union acpi_operand_object **return_object_ptr)
{
union acpi_operand_object *return_object = *return_object_ptr;
- union acpi_operand_object *new_object;
+ union acpi_operand_object *new_object = NULL;
acpi_status status;
+ const struct acpi_simple_repair_info *predefined;
+
+ ACPI_FUNCTION_NAME(ns_simple_repair);
+
+ /*
+ * Special repairs for certain names that are in the repair table.
+ * Check if this name is in the list of repairable names.
+ */
+ predefined = acpi_ns_match_simple_repair(data->node,
+ data->return_btype,
+ package_index);
+ if (predefined) {
+ if (!return_object) {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
+ ACPI_WARN_ALWAYS,
+ "Missing expected return value"));
+ }
+
+ status =
+ predefined->object_converter(return_object, &new_object);
+ if (ACPI_FAILURE(status)) {
+
+ /* A fatal error occurred during a conversion */
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During return object analysis"));
+ return (status);
+ }
+ if (new_object) {
+ goto object_repaired;
+ }
+ }
- ACPI_FUNCTION_NAME(ns_repair_object);
+ /*
+ * Attempt a simple object repair only if the returned type is not
+ * one of the expected types.
+ */
+ if (data->return_btype & expected_btypes) {
+ return (AE_OK);
+ }
/*
* At this point, we know that the type of the returned object was not
@@ -127,6 +201,24 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
* repair the object by converting it to one of the expected object
* types for this predefined name.
*/
+
+ /*
+ * If there is no return value, check if we require a return value for
+ * this predefined name. Either one return value is expected, or none,
+ * for both methods and other objects.
+ *
+ * Exit now if there is no return object. Warning if one was expected.
+ */
+ if (!return_object) {
+ if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
+ ACPI_WARN_ALWAYS,
+ "Missing expected return value"));
+
+ return (AE_AML_NO_RETURN_VALUE);
+ }
+ }
+
if (expected_btypes & ACPI_RTYPE_INTEGER) {
status = acpi_ns_convert_to_integer(return_object, &new_object);
if (ACPI_SUCCESS(status)) {
@@ -216,254 +308,51 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
return (AE_OK);
}
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_convert_to_integer
- *
- * PARAMETERS: original_object - Object to be converted
- * return_object - Where the new converted object is returned
- *
- * RETURN: Status. AE_OK if conversion was successful.
- *
- * DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
- union acpi_operand_object **return_object)
-{
- union acpi_operand_object *new_object;
- acpi_status status;
- u64 value = 0;
- u32 i;
-
- switch (original_object->common.type) {
- case ACPI_TYPE_STRING:
-
- /* String-to-Integer conversion */
-
- status = acpi_ut_strtoul64(original_object->string.pointer,
- ACPI_ANY_BASE, &value);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
- case ACPI_TYPE_BUFFER:
-
- /* Buffer-to-Integer conversion. Max buffer size is 64 bits. */
-
- if (original_object->buffer.length > 8) {
- return (AE_AML_OPERAND_TYPE);
- }
-
- /* Extract each buffer byte to create the integer */
-
- for (i = 0; i < original_object->buffer.length; i++) {
- value |=
- ((u64) original_object->buffer.
- pointer[i] << (i * 8));
- }
- break;
-
- default:
- return (AE_AML_OPERAND_TYPE);
- }
-
- new_object = acpi_ut_create_integer_object(value);
- if (!new_object) {
- return (AE_NO_MEMORY);
- }
-
- *return_object = new_object;
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_convert_to_string
- *
- * PARAMETERS: original_object - Object to be converted
- * return_object - Where the new converted object is returned
- *
- * RETURN: Status. AE_OK if conversion was successful.
- *
- * DESCRIPTION: Attempt to convert a Integer/Buffer object to a String.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_convert_to_string(union acpi_operand_object *original_object,
- union acpi_operand_object **return_object)
-{
- union acpi_operand_object *new_object;
- acpi_size length;
- acpi_status status;
-
- switch (original_object->common.type) {
- case ACPI_TYPE_INTEGER:
- /*
- * Integer-to-String conversion. Commonly, convert
- * an integer of value 0 to a NULL string. The last element of
- * _BIF and _BIX packages occasionally need this fix.
- */
- if (original_object->integer.value == 0) {
-
- /* Allocate a new NULL string object */
-
- new_object = acpi_ut_create_string_object(0);
- if (!new_object) {
- return (AE_NO_MEMORY);
- }
- } else {
- status =
- acpi_ex_convert_to_string(original_object,
- &new_object,
- ACPI_IMPLICIT_CONVERT_HEX);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
- break;
-
- case ACPI_TYPE_BUFFER:
- /*
- * Buffer-to-String conversion. Use a to_string
- * conversion, no transform performed on the buffer data. The best
- * example of this is the _BIF method, where the string data from
- * the battery is often (incorrectly) returned as buffer object(s).
- */
- length = 0;
- while ((length < original_object->buffer.length) &&
- (original_object->buffer.pointer[length])) {
- length++;
- }
-
- /* Allocate a new string object */
-
- new_object = acpi_ut_create_string_object(length);
- if (!new_object) {
- return (AE_NO_MEMORY);
- }
-
- /*
- * Copy the raw buffer data with no transform. String is already NULL
- * terminated at Length+1.
- */
- ACPI_MEMCPY(new_object->string.pointer,
- original_object->buffer.pointer, length);
- break;
-
- default:
- return (AE_AML_OPERAND_TYPE);
- }
-
- *return_object = new_object;
- return (AE_OK);
-}
-
-/*******************************************************************************
+/******************************************************************************
*
- * FUNCTION: acpi_ns_convert_to_buffer
+ * FUNCTION: acpi_ns_match_simple_repair
*
- * PARAMETERS: original_object - Object to be converted
- * return_object - Where the new converted object is returned
+ * PARAMETERS: node - Namespace node for the method/object
+ * return_btype - Object type that was returned
+ * package_index - Index of object within parent package (if
+ * applicable - ACPI_NOT_PACKAGE_ELEMENT
+ * otherwise)
*
- * RETURN: Status. AE_OK if conversion was successful.
+ * RETURN: Pointer to entry in repair table. NULL indicates not found.
*
- * DESCRIPTION: Attempt to convert a Integer/String/Package object to a Buffer.
+ * DESCRIPTION: Check an object name against the repairable object list.
*
- ******************************************************************************/
+ *****************************************************************************/
-static acpi_status
-acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
- union acpi_operand_object **return_object)
+static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
+ acpi_namespace_node
+ *node,
+ u32
+ return_btype,
+ u32
+ package_index)
{
- union acpi_operand_object *new_object;
- acpi_status status;
- union acpi_operand_object **elements;
- u32 *dword_buffer;
- u32 count;
- u32 i;
+ const struct acpi_simple_repair_info *this_name;
- switch (original_object->common.type) {
- case ACPI_TYPE_INTEGER:
- /*
- * Integer-to-Buffer conversion.
- * Convert the Integer to a packed-byte buffer. _MAT and other
- * objects need this sometimes, if a read has been performed on a
- * Field object that is less than or equal to the global integer
- * size (32 or 64 bits).
- */
- status =
- acpi_ex_convert_to_buffer(original_object, &new_object);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
+ /* Search info table for a repairable predefined method/object name */
- case ACPI_TYPE_STRING:
+ this_name = acpi_object_repair_info;
+ while (this_name->object_converter) {
+ if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) {
- /* String-to-Buffer conversion. Simple data copy */
-
- new_object =
- acpi_ut_create_buffer_object(original_object->string.
- length);
- if (!new_object) {
- return (AE_NO_MEMORY);
- }
+ /* Check if we can actually repair this name/type combination */
- ACPI_MEMCPY(new_object->buffer.pointer,
- original_object->string.pointer,
- original_object->string.length);
- break;
-
- case ACPI_TYPE_PACKAGE:
- /*
- * This case is often seen for predefined names that must return a
- * Buffer object with multiple DWORD integers within. For example,
- * _FDE and _GTM. The Package can be converted to a Buffer.
- */
-
- /* All elements of the Package must be integers */
-
- elements = original_object->package.elements;
- count = original_object->package.count;
-
- for (i = 0; i < count; i++) {
- if ((!*elements) ||
- ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
- return (AE_AML_OPERAND_TYPE);
+ if ((return_btype & this_name->unexpected_btypes) &&
+ (package_index == this_name->package_index)) {
+ return (this_name);
}
- elements++;
- }
-
- /* Create the new buffer object to replace the Package */
- new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count));
- if (!new_object) {
- return (AE_NO_MEMORY);
+ return (NULL);
}
-
- /* Copy the package elements (integers) to the buffer as DWORDs */
-
- elements = original_object->package.elements;
- dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);
-
- for (i = 0; i < count; i++) {
- *dword_buffer = (u32) (*elements)->integer.value;
- dword_buffer++;
- elements++;
- }
- break;
-
- default:
- return (AE_AML_OPERAND_TYPE);
+ this_name++;
}
- *return_object = new_object;
- return (AE_OK);
+ return (NULL); /* Name was not found in the repair table */
}
/*******************************************************************************
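
The lookup above is a plain linear walk over a terminator-ended table, dispatching to a converter only when both the name and the returned (unexpected) type match. A self-contained sketch of the shape of that dispatch (the struct layout, the 0x07 mask, and the converter signature here are hypothetical, not the ACPICA definitions):

#include <string.h>

typedef int (*converter_fn)(const void *in, void **out);

struct repair_entry {
	char name[5];                  /* 4-character ACPI name plus NUL */
	unsigned int repairable_types; /* bitmask of convertible (unexpected) types */
	converter_fn converter;
};

static int to_resource(const void *in, void **out) { (void)in; *out = NULL; return 0; }

static const struct repair_entry repair_table[] = {
	{ "_CRS", 0x07, to_resource },
	{ "", 0, NULL }                /* table terminator */
};

static converter_fn find_repair(const char *name, unsigned int return_btype)
{
	const struct repair_entry *e;

	for (e = repair_table; e->converter; e++) {
		if (strncmp(name, e->name, 4))
			continue;

		/* Name matches; repair only if the returned type is repairable */

		return (return_btype & e->repairable_types) ? e->converter : NULL;
	}
	return NULL;
}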
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index ba4d98287c6a..149e9b9c2c1b 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -66,9 +66,9 @@ typedef struct acpi_repair_info {
/* Local prototypes */
-static const struct acpi_repair_info *acpi_ns_match_repairable_name(struct
- acpi_namespace_node
- *node);
+static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct
+ acpi_namespace_node
+ *node);
static acpi_status
acpi_ns_repair_ALR(struct acpi_predefined_data *data,
@@ -175,7 +175,7 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
/* Check if this name is in the list of repairable names */
- predefined = acpi_ns_match_repairable_name(node);
+ predefined = acpi_ns_match_complex_repair(node);
if (!predefined) {
return (validate_status);
}
@@ -186,7 +186,7 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
/******************************************************************************
*
- * FUNCTION: acpi_ns_match_repairable_name
+ * FUNCTION: acpi_ns_match_complex_repair
*
* PARAMETERS: node - Namespace node for the method/object
*
@@ -196,9 +196,9 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
*
*****************************************************************************/
-static const struct acpi_repair_info *acpi_ns_match_repairable_name(struct
- acpi_namespace_node
- *node)
+static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct
+ acpi_namespace_node
+ *node)
{
const struct acpi_repair_info *this_name;
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 686420df684f..2808586fad30 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -112,10 +112,10 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node parameter"));
- return_VALUE(ACPI_TYPE_ANY);
+ return_UINT8(ACPI_TYPE_ANY);
}
- return_VALUE(node->type);
+ return_UINT8(node->type);
}
/*******************************************************************************
@@ -140,10 +140,10 @@ u32 acpi_ns_local(acpi_object_type type)
/* Type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
- return_VALUE(ACPI_NS_NORMAL);
+ return_UINT32(ACPI_NS_NORMAL);
}
- return_VALUE(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
+ return_UINT32(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index f51308cdbc65..9f25a3d4e992 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -108,7 +108,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
/* Byte 0 is a special case, either bits [0:3] or [0:5] are used */
package_length |= (aml[0] & byte_zero_mask);
- return_VALUE(package_length);
+ return_UINT32(package_length);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 7816d4eef04e..72077fa1eea5 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -202,6 +202,12 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
}
+ /* Sanity check the length. It must not be zero, or we loop forever */
+
+ if (!resource->length) {
+ return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
+ }
+
/* Get the base size of the (external stream) resource descriptor */
total_size = acpi_gbl_aml_resource_sizes[resource->type];
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index cab51445189d..b5fc0db2e87b 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -385,6 +385,14 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
return;
}
+ /* Sanity check the length. It must not be zero, or we loop forever */
+
+ if (!resource_list->length) {
+ acpi_os_printf
+ ("Invalid zero length descriptor in resource list\n");
+ return;
+ }
+
/* Dump the resource descriptor */
if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index ee2e206fc6c8..6053aa182093 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -178,6 +178,14 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
return_ACPI_STATUS(AE_BAD_DATA);
}
+ /* Sanity check the length. It must not be zero, or we loop forever */
+
+ if (!resource->length) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid zero length descriptor in resource list\n"));
+ return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
+ }
+
/* Perform the conversion */
if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 15d6eaef0e28..c0e5d2d3ce67 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -563,13 +563,19 @@ acpi_walk_resource_buffer(struct acpi_buffer * buffer,
while (resource < resource_end) {
- /* Sanity check the resource */
+ /* Sanity check the resource type */
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
status = AE_AML_INVALID_RESOURCE_TYPE;
break;
}
+ /* Sanity check the length. It must not be zero, or we loop forever */
+
+ if (!resource->length) {
+ return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
+ }
+
/* Invoke the user function, abort on any error returned */
status = user_function(resource, context);
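
The zero-length checks added in rscalc.c, rsdump.c, rslist.c and here in rsxface.c all guard the same pattern: the walker advances through a variable-length list by adding each descriptor's length to the cursor, so a descriptor claiming length zero would pin the cursor in place forever. A simplified standalone sketch of that loop shape (not ACPICA code):

#include <stddef.h>
#include <stdint.h>

struct record {
	uint32_t type;
	uint32_t length;   /* total size of this record in bytes, must be nonzero */
};

static int walk_records(uint8_t *buf, size_t size,
			int (*fn)(struct record *rec, void *ctx), void *ctx)
{
	uint8_t *cursor = buf;
	uint8_t *end = buf + size;

	while (cursor + sizeof(struct record) <= end) {
		struct record *rec = (struct record *)cursor;

		if (!rec->length)
			return -1;          /* zero length would loop forever */
		if (fn(rec, ctx))
			break;              /* callback asked to stop */

		cursor += rec->length;      /* advance by this record's length */
	}
	return 0;
}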
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 74181bf181ec..33b00d22300a 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -559,8 +559,12 @@ static void acpi_tb_validate_fadt(void)
/*
* For each extended field, check for length mismatch between the
* legacy length field and the corresponding 64-bit X length field.
+ * Note: If the legacy length field is > 0xFF bits, ignore this
+ * check. (GPE registers can be larger than the 64-bit GAS structure
+ * can accommodate, 0xFF bits).
*/
if (address64->address &&
+ (ACPI_MUL_8(length) <= ACPI_UINT8_MAX) &&
(address64->bit_width != ACPI_MUL_8(length))) {
ACPI_BIOS_WARNING((AE_INFO,
"32/64X length mismatch in FADT/%s: %u/%u",
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index b35a5e6d653a..ad11162482ff 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Module Name: tbxface - ACPI table oriented external interfaces
+ * Module Name: tbxface - ACPI table-oriented external interfaces
*
*****************************************************************************/
@@ -80,7 +80,7 @@ acpi_status acpi_allocate_root_table(u32 initial_table_count)
* array is dynamically allocated.
* initial_table_count - Size of initial_table_array, in number of
* struct acpi_table_desc structures
- * allow_realloc - Flag to tell Table Manager if resize of
+ * allow_resize - Flag to tell Table Manager if resize of
* pre-allocated array is allowed. Ignored
* if initial_table_array is NULL.
*
@@ -107,8 +107,8 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
ACPI_FUNCTION_TRACE(acpi_initialize_tables);
/*
- * Set up the Root Table Array
- * Allocate the table array if requested
+ * Set up the Root Table Array and allocate the table array
+ * if requested
*/
if (!initial_table_array) {
status = acpi_allocate_root_table(initial_table_count);
@@ -305,9 +305,10 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
* instance - Which instance (for SSDTs)
* out_table - Where the pointer to the table is returned
*
- * RETURN: Status and pointer to table
+ * RETURN: Status and pointer to the requested table
*
- * DESCRIPTION: Finds and verifies an ACPI table.
+ * DESCRIPTION: Finds and verifies an ACPI table. Table must be in the
+ * RSDT/XSDT.
*
******************************************************************************/
acpi_status
@@ -375,9 +376,10 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
* PARAMETERS: table_index - Table index
* table - Where the pointer to the table is returned
*
- * RETURN: Status and pointer to the table
+ * RETURN: Status and pointer to the requested table
*
- * DESCRIPTION: Obtain a table by an index into the global table list.
+ * DESCRIPTION: Obtain a table by an index into the global table list. Also
+ * used internally.
*
******************************************************************************/
acpi_status
@@ -432,7 +434,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
*
* RETURN: Status
*
- * DESCRIPTION: Install table event handler
+ * DESCRIPTION: Install a global table event handler.
*
******************************************************************************/
acpi_status
@@ -479,7 +481,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
*
* RETURN: Status
*
- * DESCRIPTION: Remove table event handler
+ * DESCRIPTION: Remove a table event handler
*
******************************************************************************/
acpi_status acpi_remove_table_handler(acpi_table_handler handler)
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 698b9d385516..e0a2e2779c2e 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -214,7 +214,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
(space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
- return_VALUE(0);
+ return_UINT32(0);
}
range_info = acpi_gbl_address_range_list[space_id];
@@ -256,7 +256,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
range_info = range_info->next;
}
- return_VALUE(overlap_count);
+ return_UINT32(overlap_count);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index e0e8579deaac..a877a9647fd9 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -85,7 +85,6 @@ acpi_os_create_cache(char *cache_name,
/* Populate the cache object and return it */
ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
- cache->link_offset = 8;
cache->list_name = cache_name;
cache->object_size = object_size;
cache->max_depth = max_depth;
@@ -108,7 +107,7 @@ acpi_os_create_cache(char *cache_name,
acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
{
- char *next;
+ void *next;
acpi_status status;
ACPI_FUNCTION_ENTRY();
@@ -128,10 +127,7 @@ acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
/* Delete and unlink one cached state object */
- next = *(ACPI_CAST_INDIRECT_PTR(char,
- &(((char *)cache->
- list_head)[cache->
- link_offset])));
+ next = ACPI_GET_DESCRIPTOR_PTR(cache->list_head);
ACPI_FREE(cache->list_head);
cache->list_head = next;
@@ -221,10 +217,7 @@ acpi_os_release_object(struct acpi_memory_list * cache, void *object)
/* Put the object at the head of the cache list */
- *(ACPI_CAST_INDIRECT_PTR(char,
- &(((char *)object)[cache->
- link_offset]))) =
- cache->list_head;
+ ACPI_SET_DESCRIPTOR_PTR(object, cache->list_head);
cache->list_head = object;
cache->current_depth++;
@@ -272,10 +265,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
/* There is an object available, use it */
object = cache->list_head;
- cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char,
- &(((char *)
- object)[cache->
- link_offset])));
+ cache->list_head = ACPI_GET_DESCRIPTOR_PTR(object);
cache->current_depth--;
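
The link_offset bookkeeping could be dropped because the cache now threads its free list through a fixed, known slot in each cached object (the descriptor pointer), so pushing and popping become plain pointer assignments. A user-space sketch of such an intrusive free-list cache (illustrative layout, not the ACPICA structures):

#include <stdlib.h>

struct cached_obj {
	struct cached_obj *next;   /* reused as the free-list link while cached */
	char payload[56];
};

struct obj_cache {
	struct cached_obj *list_head;
	unsigned int depth;
};

static struct cached_obj *cache_acquire(struct obj_cache *cache)
{
	struct cached_obj *obj = cache->list_head;

	if (!obj)
		return calloc(1, sizeof(*obj));   /* cache empty, allocate fresh */

	cache->list_head = obj->next;             /* pop from the free list */
	cache->depth--;
	return obj;
}

static void cache_release(struct obj_cache *cache, struct cached_obj *obj)
{
	obj->next = cache->list_head;             /* push onto the free list */
	cache->list_head = obj;
	cache->depth++;
}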
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 2541de420249..29b930250b6f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -359,19 +359,20 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
* FUNCTION: acpi_ut_update_ref_count
*
* PARAMETERS: object - Object whose ref count is to be updated
- * action - What to do
+ * action - What to do (REF_INCREMENT or REF_DECREMENT)
*
- * RETURN: New ref count
+ * RETURN: None. Sets new reference count within the object
*
- * DESCRIPTION: Modify the ref count and return it.
+ * DESCRIPTION: Modify the reference count for an internal acpi object
*
******************************************************************************/
static void
acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
{
- u16 count;
- u16 new_count;
+ u16 original_count;
+ u16 new_count = 0;
+ acpi_cpu_flags lock_flags;
ACPI_FUNCTION_NAME(ut_update_ref_count);
@@ -379,76 +380,79 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
return;
}
- count = object->common.reference_count;
- new_count = count;
-
/*
- * Perform the reference count action (increment, decrement, force delete)
+ * Always get the reference count lock. Note: Interpreter and/or
+ * Namespace is not always locked when this function is called.
*/
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_reference_count_lock);
+ original_count = object->common.reference_count;
+
+ /* Perform the reference count action (increment, decrement) */
+
switch (action) {
case REF_INCREMENT:
- new_count++;
+ new_count = original_count + 1;
object->common.reference_count = new_count;
+ acpi_os_release_lock(acpi_gbl_reference_count_lock, lock_flags);
+
+ /* The current reference count should never be zero here */
+
+ if (!original_count) {
+ ACPI_WARNING((AE_INFO,
+ "Obj %p, Reference Count was zero before increment\n",
+ object));
+ }
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
- "Obj %p Refs=%X, [Incremented]\n",
- object, new_count));
+ "Obj %p Type %.2X Refs %.2X [Incremented]\n",
+ object, object->common.type, new_count));
break;
case REF_DECREMENT:
- if (count < 1) {
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
- "Obj %p Refs=%X, can't decrement! (Set to 0)\n",
- object, new_count));
-
- new_count = 0;
- } else {
- new_count--;
+ /* The current reference count must be non-zero */
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
- "Obj %p Refs=%X, [Decremented]\n",
- object, new_count));
+ if (original_count) {
+ new_count = original_count - 1;
+ object->common.reference_count = new_count;
}
- if (object->common.type == ACPI_TYPE_METHOD) {
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
- "Method Obj %p Refs=%X, [Decremented]\n",
- object, new_count));
- }
+ acpi_os_release_lock(acpi_gbl_reference_count_lock, lock_flags);
- object->common.reference_count = new_count;
- if (new_count == 0) {
- acpi_ut_delete_internal_obj(object);
+ if (!original_count) {
+ ACPI_WARNING((AE_INFO,
+ "Obj %p, Reference Count is already zero, cannot decrement\n",
+ object));
}
- break;
-
- case REF_FORCE_DELETE:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
- "Obj %p Refs=%X, Force delete! (Set to 0)\n",
- object, count));
+ "Obj %p Type %.2X Refs %.2X [Decremented]\n",
+ object, object->common.type, new_count));
- new_count = 0;
- object->common.reference_count = new_count;
- acpi_ut_delete_internal_obj(object);
+ /* Actually delete the object on a reference count of zero */
+
+ if (new_count == 0) {
+ acpi_ut_delete_internal_obj(object);
+ }
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action));
- break;
+ acpi_os_release_lock(acpi_gbl_reference_count_lock, lock_flags);
+ ACPI_ERROR((AE_INFO, "Unknown Reference Count action (0x%X)",
+ action));
+ return;
}
/*
* Sanity check the reference count, for debug purposes only.
* (A deleted object will have a huge reference count)
*/
- if (count > ACPI_MAX_REFERENCE_COUNT) {
+ if (new_count > ACPI_MAX_REFERENCE_COUNT) {
ACPI_WARNING((AE_INFO,
- "Large Reference Count (0x%X) in object %p",
- count, object));
+ "Large Reference Count (0x%X) in object %p, Type=0x%.2X",
+ new_count, object, object->common.type));
}
}
@@ -458,8 +462,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*
* PARAMETERS: object - Increment ref count for this object
* and all sub-objects
- * action - Either REF_INCREMENT or REF_DECREMENT or
- * REF_FORCE_DELETE
+ * action - Either REF_INCREMENT or REF_DECREMENT
*
* RETURN: Status
*
@@ -714,7 +717,6 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
/*
* Allow a NULL pointer to be passed in, just ignore it. This saves
* each caller from having to check. Also, ignore NS nodes.
- *
*/
if (!object ||
(ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
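
The reference-count rework above serializes every update behind a dedicated lock, because callers do not always hold the interpreter or namespace locks, and it frees the object only after a genuine transition to zero, outside the lock. A user-space analogue of the pattern with a pthread mutex (a sketch, not the kernel code):

#include <pthread.h>
#include <stdlib.h>

struct refobj {
	unsigned short refcount;
};

static pthread_mutex_t ref_lock = PTHREAD_MUTEX_INITIALIZER;

static void ref_get(struct refobj *obj)
{
	pthread_mutex_lock(&ref_lock);
	obj->refcount++;
	pthread_mutex_unlock(&ref_lock);
}

static void ref_put(struct refobj *obj)
{
	int hit_zero = 0;

	pthread_mutex_lock(&ref_lock);
	if (obj->refcount && --obj->refcount == 0)
		hit_zero = 1;
	pthread_mutex_unlock(&ref_lock);

	if (hit_zero)
		free(obj);   /* destroy only on a real 1 -> 0 transition, outside the lock */
}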
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index a0ab7c02e87c..b543a144941a 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -64,7 +64,7 @@ ACPI_MODULE_NAME("utexcep")
******************************************************************************/
const char *acpi_format_exception(acpi_status status)
{
- const char *exception = NULL;
+ const struct acpi_exception_info *exception;
ACPI_FUNCTION_ENTRY();
@@ -76,10 +76,10 @@ const char *acpi_format_exception(acpi_status status)
ACPI_ERROR((AE_INFO,
"Unknown exception code: 0x%8.8X", status));
- exception = "UNKNOWN_STATUS_CODE";
+ return ("UNKNOWN_STATUS_CODE");
}
- return (ACPI_CAST_PTR(const char, exception));
+ return (exception->name);
}
ACPI_EXPORT_SYMBOL(acpi_format_exception)
@@ -97,10 +97,10 @@ ACPI_EXPORT_SYMBOL(acpi_format_exception)
* an ASCII string.
*
******************************************************************************/
-const char *acpi_ut_validate_exception(acpi_status status)
+const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status status)
{
u32 sub_status;
- const char *exception = NULL;
+ const struct acpi_exception_info *exception = NULL;
ACPI_FUNCTION_ENTRY();
@@ -113,35 +113,35 @@ const char *acpi_ut_validate_exception(acpi_status status)
case AE_CODE_ENVIRONMENTAL:
if (sub_status <= AE_CODE_ENV_MAX) {
- exception = acpi_gbl_exception_names_env[sub_status];
+ exception = &acpi_gbl_exception_names_env[sub_status];
}
break;
case AE_CODE_PROGRAMMER:
if (sub_status <= AE_CODE_PGM_MAX) {
- exception = acpi_gbl_exception_names_pgm[sub_status];
+ exception = &acpi_gbl_exception_names_pgm[sub_status];
}
break;
case AE_CODE_ACPI_TABLES:
if (sub_status <= AE_CODE_TBL_MAX) {
- exception = acpi_gbl_exception_names_tbl[sub_status];
+ exception = &acpi_gbl_exception_names_tbl[sub_status];
}
break;
case AE_CODE_AML:
if (sub_status <= AE_CODE_AML_MAX) {
- exception = acpi_gbl_exception_names_aml[sub_status];
+ exception = &acpi_gbl_exception_names_aml[sub_status];
}
break;
case AE_CODE_CONTROL:
if (sub_status <= AE_CODE_CTRL_MAX) {
- exception = acpi_gbl_exception_names_ctrl[sub_status];
+ exception = &acpi_gbl_exception_names_ctrl[sub_status];
}
break;
@@ -149,5 +149,9 @@ const char *acpi_ut_validate_exception(acpi_status status)
break;
}
- return (ACPI_CAST_PTR(const char, exception));
+ if (!exception || !exception->name) {
+ return (NULL);
+ }
+
+ return (exception);
}
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ffecf4b4f0dd..f736448a8606 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -359,6 +359,8 @@ acpi_status acpi_ut_init_globals(void)
#ifdef ACPI_DISASSEMBLER
acpi_gbl_external_list = NULL;
+ acpi_gbl_num_external_methods = 0;
+ acpi_gbl_resolved_external_methods = 0;
#endif
#ifdef ACPI_DEBUG_OUTPUT
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 22feb99b8e35..08c323245584 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -81,7 +81,7 @@ acpi_status acpi_ut_mutex_initialize(void)
}
}
- /* Create the spinlocks for use at interrupt level */
+ /* Create the spinlocks for use at interrupt level or for speed */
status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
if (ACPI_FAILURE (status)) {
@@ -93,7 +93,13 @@ acpi_status acpi_ut_mutex_initialize(void)
return_ACPI_STATUS (status);
}
+ status = acpi_os_create_lock(&acpi_gbl_reference_count_lock);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
/* Mutex for _OSI support */
+
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -136,6 +142,7 @@ void acpi_ut_mutex_terminate(void)
acpi_os_delete_lock(acpi_gbl_gpe_lock);
acpi_os_delete_lock(acpi_gbl_hardware_lock);
+ acpi_os_delete_lock(acpi_gbl_reference_count_lock);
/* Delete the reader/writer lock */
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 36a7d361d7cb..b15acebb96a1 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -108,9 +108,14 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
acpi_status acpi_ut_initialize_interfaces(void)
{
+ acpi_status status;
u32 i;
- (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;
/* Link the static list of supported interfaces */
@@ -132,20 +137,24 @@ acpi_status acpi_ut_initialize_interfaces(void)
*
* PARAMETERS: None
*
- * RETURN: None
+ * RETURN: Status
*
* DESCRIPTION: Delete all interfaces in the global list. Sets
* acpi_gbl_supported_interfaces to NULL.
*
******************************************************************************/
-void acpi_ut_interface_terminate(void)
+acpi_status acpi_ut_interface_terminate(void)
{
+ acpi_status status;
struct acpi_interface_info *next_interface;
- (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
- next_interface = acpi_gbl_supported_interfaces;
+ status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ next_interface = acpi_gbl_supported_interfaces;
while (next_interface) {
acpi_gbl_supported_interfaces = next_interface->next;
@@ -160,6 +169,7 @@ void acpi_ut_interface_terminate(void)
}
acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (AE_OK);
}
/*******************************************************************************
@@ -315,6 +325,7 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
union acpi_operand_object *return_desc;
struct acpi_interface_info *interface_info;
acpi_interface_handler interface_handler;
+ acpi_status status;
u32 return_value;
ACPI_FUNCTION_TRACE(ut_osi_implementation);
@@ -336,7 +347,10 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
/* Default return value is 0, NOT SUPPORTED */
return_value = 0;
- (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
/* Lookup the interface in the global _OSI list */
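
Several call sites in utosi.c and utxface.c stop discarding the result of acpi_os_acquire_mutex() and instead bail out when the lock cannot be taken, so failures (for example during shutdown) no longer fall through to touching the shared _OSI state unlocked. A user-space analogue of the pattern (pthreads, not the kernel code):

#include <pthread.h>

static pthread_mutex_t osi_mutex = PTHREAD_MUTEX_INITIALIZER;

static int with_osi_lock(void (*work)(void *), void *ctx)
{
	int status = pthread_mutex_lock(&osi_mutex);

	if (status)
		return status;   /* propagate the failure instead of ignoring it */

	work(ctx);
	pthread_mutex_unlock(&osi_mutex);
	return 0;
}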
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
new file mode 100644
index 000000000000..29459479148f
--- /dev/null
+++ b/drivers/acpi/acpica/utpredef.c
@@ -0,0 +1,399 @@
+/******************************************************************************
+ *
+ * Module Name: utpredef - support functions for predefined names
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acpredef.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utpredef")
+
+/*
+ * Names for the types that can be returned by the predefined objects.
+ * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
+ */
+static const char *ut_rtype_names[] = {
+ "/Integer",
+ "/String",
+ "/Buffer",
+ "/Package",
+ "/Reference",
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_next_predefined_method
+ *
+ * PARAMETERS: this_name - Entry in the predefined method/name table
+ *
+ * RETURN: Pointer to next entry in predefined table.
+ *
+ * DESCRIPTION: Get the next entry in the predefined method table. Handles the
+ * cases where a package info entry follows a method name that
+ * returns a package.
+ *
+ ******************************************************************************/
+
+const union acpi_predefined_info *acpi_ut_get_next_predefined_method(const union
+ acpi_predefined_info
+ *this_name)
+{
+
+ /*
+ * Skip next entry in the table if this name returns a Package
+ * (next entry contains the package info)
+ */
+ if ((this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) &&
+ (this_name->info.expected_btypes != ACPI_RTYPE_ALL)) {
+ this_name++;
+ }
+
+ this_name++;
+ return (this_name);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_match_predefined_method
+ *
+ * PARAMETERS: name - Name to find
+ *
+ * RETURN: Pointer to entry in predefined table. NULL indicates not found.
+ *
+ * DESCRIPTION: Check an object name against the predefined object list.
+ *
+ ******************************************************************************/
+
+const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name)
+{
+ const union acpi_predefined_info *this_name;
+
+ /* Quick check for a predefined name, first character must be underscore */
+
+ if (name[0] != '_') {
+ return (NULL);
+ }
+
+ /* Search info table for a predefined method/object name */
+
+ this_name = acpi_gbl_predefined_methods;
+ while (this_name->info.name[0]) {
+ if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+ return (this_name);
+ }
+
+ this_name = acpi_ut_get_next_predefined_method(this_name);
+ }
+
+ return (NULL); /* Not found */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_expected_return_types
+ *
+ * PARAMETERS: buffer - Where the formatted string is returned
+ * expected_btypes - Bitfield of expected data types
+ *
+ * RETURN: Formatted string in Buffer.
+ *
+ * DESCRIPTION: Format the expected object types into a printable string.
+ *
+ ******************************************************************************/
+
+void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes)
+{
+ u32 this_rtype;
+ u32 i;
+ u32 j;
+
+ j = 1;
+ buffer[0] = 0;
+ this_rtype = ACPI_RTYPE_INTEGER;
+
+ for (i = 0; i < ACPI_NUM_RTYPES; i++) {
+
+ /* If one of the expected types, concatenate the name of this type */
+
+ if (expected_btypes & this_rtype) {
+ ACPI_STRCAT(buffer, &ut_rtype_names[i][j]);
+ j = 0; /* Use name separator from now on */
+ }
+
+ this_rtype <<= 1; /* Next Rtype */
+ }
+}
+
+/*******************************************************************************
+ *
+ * The remaining functions are used by iASL and acpi_help only
+ *
+ ******************************************************************************/
+
+#if (defined ACPI_ASL_COMPILER || defined ACPI_HELP_APP)
+#include <stdio.h>
+#include <string.h>
+
+/* Local prototypes */
+
+static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types);
+
+/* Types that can be returned externally by a predefined name */
+
+static const char *ut_external_type_names[] = /* Indexed by ACPI_TYPE_* */
+{
+ ", UNSUPPORTED-TYPE",
+ ", Integer",
+ ", String",
+ ", Buffer",
+ ", Package"
+};
+
+/* Bit widths for resource descriptor predefined names */
+
+static const char *ut_resource_type_names[] = {
+ "/1",
+ "/2",
+ "/3",
+ "/8",
+ "/16",
+ "/32",
+ "/64",
+ "/variable",
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_match_resource_name
+ *
+ * PARAMETERS: name - Name to find
+ *
+ * RETURN: Pointer to entry in the resource table. NULL indicates not
+ * found.
+ *
+ * DESCRIPTION: Check an object name against the predefined resource
+ * descriptor object list.
+ *
+ ******************************************************************************/
+
+const union acpi_predefined_info *acpi_ut_match_resource_name(char *name)
+{
+ const union acpi_predefined_info *this_name;
+
+ /* Quick check for a predefined name, first character must be underscore */
+
+ if (name[0] != '_') {
+ return (NULL);
+ }
+
+ /* Search info table for a predefined method/object name */
+
+ this_name = acpi_gbl_resource_names;
+ while (this_name->info.name[0]) {
+ if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+ return (this_name);
+ }
+
+ this_name++;
+ }
+
+ return (NULL); /* Not found */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_display_predefined_method
+ *
+ * PARAMETERS: buffer - Scratch buffer for this function
+ * this_name - Entry in the predefined method/name table
+ * multi_line - TRUE if output should be on >1 line
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Display information about a predefined method. Number and
+ * type of the input arguments, and expected type(s) for the
+ * return value, if any.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_display_predefined_method(char *buffer,
+ const union acpi_predefined_info *this_name,
+ u8 multi_line)
+{
+ u32 arg_count;
+
+ /*
+ * Get the argument count and the string buffer
+ * containing all argument types
+ */
+ arg_count = acpi_ut_get_argument_types(buffer,
+ this_name->info.argument_list);
+
+ if (multi_line) {
+ printf(" ");
+ }
+
+ printf("%4.4s Requires %s%u argument%s",
+ this_name->info.name,
+ (this_name->info.argument_list & ARG_COUNT_IS_MINIMUM) ?
+ "(at least) " : "", arg_count, arg_count != 1 ? "s" : "");
+
+ /* Display the types for any arguments */
+
+ if (arg_count > 0) {
+ printf(" (%s)", buffer);
+ }
+
+ if (multi_line) {
+ printf("\n ");
+ }
+
+ /* Get the return value type(s) allowed */
+
+ if (this_name->info.expected_btypes) {
+ acpi_ut_get_expected_return_types(buffer,
+ this_name->info.
+ expected_btypes);
+ printf(" Return value types: %s\n", buffer);
+ } else {
+ printf(" No return value\n");
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_argument_types
+ *
+ * PARAMETERS: buffer - Where to return the formatted types
+ * argument_types - Types field for this method
+ *
+ * RETURN: count - the number of arguments required for this method
+ *
+ * DESCRIPTION: Format the required data types for this method (Integer,
+ * String, Buffer, or Package) and return the required argument
+ * count.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types)
+{
+ u16 this_argument_type;
+ u16 sub_index;
+ u16 arg_count;
+ u32 i;
+
+ *buffer = 0;
+ sub_index = 2;
+
+ /* First field in the types list is the count of args to follow */
+
+ arg_count = (argument_types & METHOD_ARG_MASK);
+ argument_types >>= METHOD_ARG_BIT_WIDTH;
+
+ if (arg_count > METHOD_PREDEF_ARGS_MAX) {
+ printf("**** Invalid argument count (%u) "
+ "in predefined info structure\n", arg_count);
+ return (arg_count);
+ }
+
+ /* Get each argument from the list, convert to ascii, store to buffer */
+
+ for (i = 0; i < arg_count; i++) {
+ this_argument_type = (argument_types & METHOD_ARG_MASK);
+ if (!this_argument_type
+ || (this_argument_type > METHOD_MAX_ARG_TYPE)) {
+ printf("**** Invalid argument type (%u) "
+ "in predefined info structure\n",
+ this_argument_type);
+ return (arg_count);
+ }
+
+ strcat(buffer,
+ ut_external_type_names[this_argument_type] + sub_index);
+
+ /* Shift to next argument type field */
+
+ argument_types >>= METHOD_ARG_BIT_WIDTH;
+ sub_index = 0;
+ }
+
+ return (arg_count);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_resource_bit_width
+ *
+ * PARAMETERS: buffer - Where the formatted string is returned
+ * types - Bitfield of expected data types
+ *
+ * RETURN: Count of return types. Formatted string in Buffer.
+ *
+ * DESCRIPTION: Format the resource bit widths into a printable string.
+ *
+ ******************************************************************************/
+
+u32 acpi_ut_get_resource_bit_width(char *buffer, u16 types)
+{
+ u32 i;
+ u16 sub_index;
+ u32 found;
+
+ *buffer = 0;
+ sub_index = 1;
+ found = 0;
+
+ for (i = 0; i < NUM_RESOURCE_WIDTHS; i++) {
+ if (types & 1) {
+ strcat(buffer, &(ut_resource_type_names[i][sub_index]));
+ sub_index = 0;
+ found++;
+ }
+
+ types >>= 1;
+ }
+
+ return (found);
+}
+#endif
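
The argument_list field consumed by acpi_ut_get_argument_types() above is a packed u16: the low bits carry the argument count and each following METHOD_ARG_BIT_WIDTH-bit field carries one argument type, which is why the loop masks and then shifts on every iteration. Below is a rough stand-alone sketch of that decoding; the field width, mask value and type names are stand-ins, the authoritative definitions live in drivers/acpi/acpica/acpredef.h.

    #include <stdio.h>

    #define ARG_BIT_WIDTH   3       /* assumed width of one packed field */
    #define ARG_MASK        0x0007  /* assumed per-field mask */

    static const char *type_names[] = {
            "None", "Integer", "String", "Buffer", "Package"
    };

    int main(void)
    {
            /* 2 arguments: type 1 (Integer) then type 4 (Package) */
            unsigned int packed = 2 | (1 << 3) | (4 << 6);
            unsigned int count = packed & ARG_MASK;
            unsigned int i;

            packed >>= ARG_BIT_WIDTH;
            printf("%u argument(s):", count);
            for (i = 0; i < count; i++) {
                    printf(" %s", type_names[packed & ARG_MASK]);
                    packed >>= ARG_BIT_WIDTH;
            }
            printf("\n");
            return 0;
    }

Running this prints "2 argument(s): Integer Package", mirroring the mask-and-shift walk performed by the kernel function.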
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 48efb446258c..6505774f223e 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -287,7 +287,10 @@ acpi_status acpi_install_interface(acpi_string interface_name)
return (AE_BAD_PARAMETER);
}
- (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
/* Check if the interface name is already in the global list */
@@ -336,7 +339,10 @@ acpi_status acpi_remove_interface(acpi_string interface_name)
return (AE_BAD_PARAMETER);
}
- (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
status = acpi_ut_remove_interface(interface_name);
@@ -362,9 +368,12 @@ ACPI_EXPORT_SYMBOL(acpi_remove_interface)
****************************************************************************/
acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
{
- acpi_status status = AE_OK;
+ acpi_status status;
- (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
if (handler && acpi_gbl_interface_handler) {
status = AE_ALREADY_EXISTS;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index c5cd5b5513e6..0cc384b72943 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -146,7 +146,7 @@ struct acpi_battery {
#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat)
-inline int acpi_battery_present(struct acpi_battery *battery)
+static inline int acpi_battery_present(struct acpi_battery *battery)
{
return battery->device->status.battery_present;
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 01708a165368..292de3cab9cc 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -288,13 +288,12 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
}
out_success:
context->ret.length = out_obj->buffer.length;
- context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL);
+ context->ret.pointer = kmemdup(out_obj->buffer.pointer,
+ context->ret.length, GFP_KERNEL);
if (!context->ret.pointer) {
status = AE_NO_MEMORY;
goto out_kfree;
}
- memcpy(context->ret.pointer, out_obj->buffer.pointer,
- context->ret.length);
status = AE_OK;
out_kfree:
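
The bus.c hunk above replaces an open-coded kmalloc()+memcpy() pair with kmemdup(), which allocates and copies in one call and returns NULL on allocation failure, so the existing error path is unchanged. A minimal sketch of the equivalence (not the exact bus.c context):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Open-coded form, as acpi_run_osc() had it before this patch. */
    static void *dup_buffer_old(const void *src, size_t len)
    {
            void *dst = kmalloc(len, GFP_KERNEL);

            if (dst)
                    memcpy(dst, src, len);
            return dst;
    }

    /* kmemdup() collapses the allocate-and-copy into a single call. */
    static void *dup_buffer_new(const void *src, size_t len)
    {
            return kmemdup(src, len, GFP_KERNEL);
    }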
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 86c7d5445c38..92a659aa6396 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <acpi/button.h>
#define PREFIX "ACPI: "
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 5523ba7d764d..e23151667655 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -1,12 +1,12 @@
/*
- * acpi_container.c - ACPI Generic Container Driver
- * ($Revision: )
+ * container.c - ACPI Generic Container Driver
*
* Copyright (C) 2004 Anil S Keshavamurthy (anil.s.keshavamurthy@intel.com)
* Copyright (C) 2004 Keiichiro Tokunaga (tokunaga.keiich@jp.fujitsu.com)
* Copyright (C) 2004 Motoyuki Ito (motoyuki@soft.fujitsu.com)
- * Copyright (C) 2004 Intel Corp.
* Copyright (C) 2004 FUJITSU LIMITED
+ * Copyright (C) 2004, 2013 Intel Corp.
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -26,14 +26,11 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
+
+#include "internal.h"
#define PREFIX "ACPI: "
@@ -50,141 +47,20 @@ static const struct acpi_device_id container_device_ids[] = {
static int container_device_attach(struct acpi_device *device,
const struct acpi_device_id *not_used)
{
- /*
- * FIXME: This is necessary, so that acpi_eject_store() doesn't return
- * -ENODEV for containers.
- */
+ /* This is necessary for container hotplug to work. */
return 1;
}
-static struct acpi_scan_handler container_device_handler = {
+static struct acpi_scan_handler container_handler = {
.ids = container_device_ids,
.attach = container_device_attach,
+ .hotplug = {
+ .enabled = true,
+ .mode = AHM_CONTAINER,
+ },
};
-static int is_device_present(acpi_handle handle)
-{
- acpi_handle temp;
- acpi_status status;
- unsigned long long sta;
-
-
- status = acpi_get_handle(handle, "_STA", &temp);
- if (ACPI_FAILURE(status))
- return 1; /* _STA not found, assume device present */
-
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- if (ACPI_FAILURE(status))
- return 0; /* Firmware error */
-
- return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT);
-}
-
-static void container_notify_cb(acpi_handle handle, u32 type, void *context)
-{
- struct acpi_device *device = NULL;
- int result;
- int present;
- acpi_status status;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
-
- acpi_scan_lock_acquire();
-
- switch (type) {
- case ACPI_NOTIFY_BUS_CHECK:
- /* Fall through */
- case ACPI_NOTIFY_DEVICE_CHECK:
- pr_debug("Container driver received %s event\n",
- (type == ACPI_NOTIFY_BUS_CHECK) ?
- "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
-
- present = is_device_present(handle);
- status = acpi_bus_get_device(handle, &device);
- if (!present) {
- if (ACPI_SUCCESS(status)) {
- /* device exist and this is a remove request */
- device->flags.eject_pending = 1;
- kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- goto out;
- }
- break;
- }
-
- if (!ACPI_FAILURE(status) || device)
- break;
-
- result = acpi_bus_scan(handle);
- if (result) {
- acpi_handle_warn(handle, "Failed to add container\n");
- break;
- }
- result = acpi_bus_get_device(handle, &device);
- if (result) {
- acpi_handle_warn(handle, "Missing device object\n");
- break;
- }
-
- kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
- ost_code = ACPI_OST_SC_SUCCESS;
- break;
-
- case ACPI_NOTIFY_EJECT_REQUEST:
- if (!acpi_bus_get_device(handle, &device) && device) {
- device->flags.eject_pending = 1;
- kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- goto out;
- }
- break;
-
- default:
- /* non-hotplug event; possibly handled by other handler */
- goto out;
- }
-
- /* Inform firmware that the hotplug operation has completed */
- (void) acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
-
- out:
- acpi_scan_lock_release();
-}
-
-static bool is_container(acpi_handle handle)
-{
- struct acpi_device_info *info;
- bool ret = false;
-
- if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
- return false;
-
- if (info->valid & ACPI_VALID_HID) {
- const struct acpi_device_id *id;
-
- for (id = container_device_ids; id->id[0]; id++) {
- ret = !strcmp((char *)id->id, info->hardware_id.string);
- if (ret)
- break;
- }
- }
- kfree(info);
- return ret;
-}
-
-static acpi_status acpi_container_register_notify_handler(acpi_handle handle,
- u32 lvl, void *ctxt,
- void **retv)
-{
- if (is_container(handle))
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- container_notify_cb, NULL);
-
- return AE_OK;
-}
-
void __init acpi_container_init(void)
{
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
- acpi_container_register_notify_handler, NULL,
- NULL, NULL);
-
- acpi_scan_add_handler(&container_device_handler);
+ acpi_scan_add_handler_with_hotplug(&container_handler, "container");
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index dd314ef9bff1..96de787e6104 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -145,27 +145,36 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
}
/*
- * Get the device's power state either directly (via _PSC) or
- * indirectly (via power resources).
+ * Get the device's power state from power resources settings and _PSC,
+ * if available.
*/
+ if (device->power.flags.power_resources) {
+ int error = acpi_power_get_inferred_state(device, &result);
+ if (error)
+ return error;
+ }
if (device->power.flags.explicit_get) {
+ acpi_handle handle = device->handle;
unsigned long long psc;
- acpi_status status = acpi_evaluate_integer(device->handle,
- "_PSC", NULL, &psc);
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, "_PSC", NULL, &psc);
if (ACPI_FAILURE(status))
return -ENODEV;
- result = psc;
- }
- /* The test below covers ACPI_STATE_UNKNOWN too. */
- if (result <= ACPI_STATE_D2) {
- ; /* Do nothing. */
- } else if (device->power.flags.power_resources) {
- int error = acpi_power_get_inferred_state(device, &result);
- if (error)
- return error;
- } else if (result == ACPI_STATE_D3_HOT) {
- result = ACPI_STATE_D3;
+ /*
+ * The power resources settings may indicate a power state
+ * shallower than the actual power state of the device.
+ *
+ * Moreover, on systems predating ACPI 4.0, if the device
+ * doesn't depend on any power resources and _PSC returns 3,
+ * that means "power off". We need to maintain compatibility
+ * with those systems.
+ */
+ if (psc > result && psc < ACPI_STATE_D3_COLD)
+ result = psc;
+ else if (result == ACPI_STATE_UNKNOWN)
+ result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_COLD : psc;
}
/*
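
The device_pm.c rework above changes how the current power state is resolved: the state inferred from power resources (if any) is taken first, _PSC is then allowed to deepen it as long as it reports something shallower than D3cold, and on systems without power resources a legacy _PSC value of 3 is treated as D3cold. A small stand-alone sketch of that rule, using assumed numeric values for the ACPI_STATE_* constants (D0=0 ... D3cold=4, unknown=0xff):

    #include <stdio.h>

    enum { D0, D1, D2, D3_HOT, D3_COLD, UNKNOWN = 0xff };  /* assumed values */

    /* Rough model of the merged power-resource/_PSC decision after this patch. */
    static int resolve_power_state(int inferred, int psc)
    {
            int result = inferred;              /* from power resources, or UNKNOWN */

            if (psc > result && psc < D3_COLD)  /* _PSC reports a deeper state */
                    result = psc;
            else if (result == UNKNOWN)         /* no power resources at all */
                    result = psc > D2 ? D3_COLD : psc;  /* legacy "3" means off */
            return result;
    }

    int main(void)
    {
            printf("%d\n", resolve_power_state(UNKNOWN, 3)); /* 4: treated as D3cold */
            printf("%d\n", resolve_power_state(D1, D2));     /* 2: _PSC is deeper */
            printf("%d\n", resolve_power_state(D2, D0));     /* 2: inferred state kept */
            return 0;
    }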
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index f815da82c765..8d1c0105e113 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -174,9 +174,13 @@ static int acpi_fan_add(struct acpi_device *device)
static int acpi_fan_remove(struct acpi_device *device)
{
- struct thermal_cooling_device *cdev = acpi_driver_data(device);
+ struct thermal_cooling_device *cdev;
+
+ if (!device)
+ return -EINVAL;
- if (!device || !cdev)
+ cdev = acpi_driver_data(device);
+ if (!cdev)
return -EINVAL;
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3c94a732b4b3..6f1afd9118c8 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,6 +41,17 @@ void acpi_container_init(void);
#else
static inline void acpi_container_init(void) {}
#endif
+#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
+void acpi_memory_hotplug_init(void);
+#else
+static inline void acpi_memory_hotplug_init(void) {}
+#endif
+
+void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
+ const char *name);
+int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
+ const char *hotplug_profile_name);
+void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val);
#ifdef CONFIG_DEBUG_FS
extern struct dentry *acpi_debugfs_dir;
@@ -48,6 +59,11 @@ int acpi_debugfs_init(void);
#else
static inline void acpi_debugfs_init(void) { return; }
#endif
+#ifdef CONFIG_X86_INTEL_LPSS
+void acpi_lpss_init(void);
+#else
+static inline void acpi_lpss_init(void) {}
+#endif
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
@@ -60,7 +76,7 @@ int acpi_device_add(struct acpi_device *device,
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, unsigned long long sta);
void acpi_device_add_finalize(struct acpi_device *device);
-void acpi_free_ids(struct acpi_device *device);
+void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
/* --------------------------------------------------------------------------
Power Resource
@@ -131,4 +147,7 @@ static inline void suspend_nvs_restore(void) {}
-------------------------------------------------------------------------- */
struct platform_device;
+int acpi_create_platform_device(struct acpi_device *adev,
+ const struct acpi_device_id *id);
+
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 586e7e993d3d..e72186340fec 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -641,7 +641,7 @@ void __init acpi_initrd_override(void *data, size_t size)
* Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
* works fine.
*/
- memblock_reserve(acpi_tables_addr, acpi_tables_addr + all_tables_size);
+ memblock_reserve(acpi_tables_addr, all_tables_size);
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
p = early_ioremap(acpi_tables_addr, all_tables_size);
@@ -1555,7 +1555,7 @@ int acpi_check_resource_conflict(const struct resource *res)
else
space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
- length = res->end - res->start + 1;
+ length = resource_size(res);
if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
warn = 1;
clash = acpi_check_address_range(space_id, res->start, length, warn);
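
The osl.c fix above matters because memblock_reserve() takes a (base, size) pair; passing acpi_tables_addr + all_tables_size as the second argument reserved a region roughly as large as the base address itself rather than just the override table blob. A trivial arithmetic illustration with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 0x1000000UL;  /* hypothetical blob address */
            unsigned long size = 0x4000UL;     /* hypothetical blob size */

            /* Buggy call reserved [base, base + (base + size)) -- far too much. */
            printf("old: 0x%lx bytes reserved\n", base + size);
            /* Fixed call reserves exactly the blob: [base, base + size). */
            printf("new: 0x%lx bytes reserved\n", size);
            return 0;
    }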
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index ab764ed34a50..2652a614deeb 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -354,6 +354,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
}
resource->end.type = ACPI_RESOURCE_TYPE_END_TAG;
+ resource->end.length = sizeof(struct acpi_resource);
/* Attempt to set the resource */
status = acpi_set_current_resources(link->device->handle, &buffer);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ac8688b89705..1dd6f6c85874 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -169,8 +169,8 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
*control &= OSC_PCI_CONTROL_MASKS;
capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
} else {
- /* Run _OSC query for all possible controls. */
- capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+ /* Run _OSC query only with existing controls. */
+ capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
}
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 34f5ef11d427..f962047c6c85 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -459,57 +459,79 @@ static struct attribute_group attr_groups[] = {
},
};
-static void acpi_power_hide_list(struct acpi_device *adev, int state)
+static struct attribute_group wakeup_attr_group = {
+ .name = "power_resources_wakeup",
+ .attrs = attrs,
+};
+
+static void acpi_power_hide_list(struct acpi_device *adev,
+ struct list_head *resources,
+ struct attribute_group *attr_group)
{
- struct acpi_device_power_state *ps = &adev->power.states[state];
struct acpi_power_resource_entry *entry;
- if (list_empty(&ps->resources))
+ if (list_empty(resources))
return;
- list_for_each_entry_reverse(entry, &ps->resources, node) {
+ list_for_each_entry_reverse(entry, resources, node) {
struct acpi_device *res_dev = &entry->resource->device;
sysfs_remove_link_from_group(&adev->dev.kobj,
- attr_groups[state].name,
+ attr_group->name,
dev_name(&res_dev->dev));
}
- sysfs_remove_group(&adev->dev.kobj, &attr_groups[state]);
+ sysfs_remove_group(&adev->dev.kobj, attr_group);
}
-static void acpi_power_expose_list(struct acpi_device *adev, int state)
+static void acpi_power_expose_list(struct acpi_device *adev,
+ struct list_head *resources,
+ struct attribute_group *attr_group)
{
- struct acpi_device_power_state *ps = &adev->power.states[state];
struct acpi_power_resource_entry *entry;
int ret;
- if (list_empty(&ps->resources))
+ if (list_empty(resources))
return;
- ret = sysfs_create_group(&adev->dev.kobj, &attr_groups[state]);
+ ret = sysfs_create_group(&adev->dev.kobj, attr_group);
if (ret)
return;
- list_for_each_entry(entry, &ps->resources, node) {
+ list_for_each_entry(entry, resources, node) {
struct acpi_device *res_dev = &entry->resource->device;
ret = sysfs_add_link_to_group(&adev->dev.kobj,
- attr_groups[state].name,
+ attr_group->name,
&res_dev->dev.kobj,
dev_name(&res_dev->dev));
if (ret) {
- acpi_power_hide_list(adev, state);
+ acpi_power_hide_list(adev, resources, attr_group);
break;
}
}
}
+static void acpi_power_expose_hide(struct acpi_device *adev,
+ struct list_head *resources,
+ struct attribute_group *attr_group,
+ bool expose)
+{
+ if (expose)
+ acpi_power_expose_list(adev, resources, attr_group);
+ else
+ acpi_power_hide_list(adev, resources, attr_group);
+}
+
void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
{
struct acpi_device_power_state *ps;
struct acpi_power_resource_entry *entry;
int state;
+ if (adev->wakeup.flags.valid)
+ acpi_power_expose_hide(adev, &adev->wakeup.resources,
+ &wakeup_attr_group, add);
+
if (!adev->power.flags.power_resources)
return;
@@ -523,12 +545,10 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
acpi_power_remove_dependent(resource, adev);
}
- for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
- if (add)
- acpi_power_expose_list(adev, state);
- else
- acpi_power_hide_list(adev, state);
- }
+ for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
+ acpi_power_expose_hide(adev,
+ &adev->power.states[state].resources,
+ &attr_groups[state], add);
}
int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
@@ -824,7 +844,7 @@ static void acpi_release_power_resource(struct device *dev)
list_del(&resource->list_node);
mutex_unlock(&power_resource_list_lock);
- acpi_free_ids(device);
+ acpi_free_pnp_ids(&device->pnp);
kfree(resource);
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ee255c60bdac..f0df2c9434d2 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -918,7 +918,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle",
.owner = THIS_MODULE,
- .en_core_tk_irqen = 1,
};
/**
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 641b5450a0db..e8e652710e65 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -218,9 +218,13 @@ processor_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct acpi_device *device = cdev->devdata;
- struct acpi_processor *pr = acpi_driver_data(device);
+ struct acpi_processor *pr;
- if (!device || !pr)
+ if (!device)
+ return -EINVAL;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
return -EINVAL;
*state = acpi_processor_max_state(pr);
@@ -232,9 +236,13 @@ processor_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *cur_state)
{
struct acpi_device *device = cdev->devdata;
- struct acpi_processor *pr = acpi_driver_data(device);
+ struct acpi_processor *pr;
- if (!device || !pr)
+ if (!device)
+ return -EINVAL;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
return -EINVAL;
*cur_state = cpufreq_get_cur_state(pr->id);
@@ -248,11 +256,15 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct acpi_device *device = cdev->devdata;
- struct acpi_processor *pr = acpi_driver_data(device);
+ struct acpi_processor *pr;
int result = 0;
int max_pstate;
- if (!device || !pr)
+ if (!device)
+ return -EINVAL;
+
+ pr = acpi_driver_data(device);
+ if (!pr)
return -EINVAL;
max_pstate = cpufreq_get_max_state(pr->id);
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 1d02b7b5ade0..e7dd2c1fee79 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -211,9 +211,10 @@ err_ret:
*/
void acpi_processor_throttling_init(void)
{
- if (acpi_processor_update_tsd_coord())
+ if (acpi_processor_update_tsd_coord()) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Assume no T-state coordination\n"));
+ }
return;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index f54d1985e594..fe158fd4f1df 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -63,6 +63,19 @@ int acpi_scan_add_handler(struct acpi_scan_handler *handler)
return 0;
}
+int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
+ const char *hotplug_profile_name)
+{
+ int error;
+
+ error = acpi_scan_add_handler(handler);
+ if (error)
+ return error;
+
+ acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name);
+ return 0;
+}
+
/*
* Creates hid/cid(s) string needed for modalias and uevent
* e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
@@ -107,32 +120,20 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-/**
- * acpi_bus_hot_remove_device: hot-remove a device and its children
- * @context: struct acpi_eject_event pointer (freed in this func)
- *
- * Hot-remove a device and its children. This function frees up the
- * memory space passed by arg context, so that the caller may call
- * this function asynchronously through acpi_os_hotplug_execute().
- */
-void acpi_bus_hot_remove_device(void *context)
+static int acpi_scan_hot_remove(struct acpi_device *device)
{
- struct acpi_eject_event *ej_event = context;
- struct acpi_device *device = ej_event->device;
acpi_handle handle = device->handle;
- acpi_handle temp;
+ acpi_handle not_used;
struct acpi_object_list arg_list;
union acpi_object arg;
- acpi_status status = AE_OK;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
-
- mutex_lock(&acpi_scan_lock);
+ acpi_status status;
+ unsigned long long sta;
/* If there is no handle, the device node has been unregistered. */
- if (!device->handle) {
+ if (!handle) {
dev_dbg(&device->dev, "ACPI handle missing\n");
put_device(&device->dev);
- goto out;
+ return -EINVAL;
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -143,7 +144,7 @@ void acpi_bus_hot_remove_device(void *context)
put_device(&device->dev);
device = NULL;
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) {
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &not_used))) {
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
@@ -161,18 +162,205 @@ void acpi_bus_hot_remove_device(void *context)
*/
status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
if (ACPI_FAILURE(status)) {
- if (status != AE_NOT_FOUND)
- acpi_handle_warn(handle, "Eject failed\n");
+ if (status == AE_NOT_FOUND) {
+ return -ENODEV;
+ } else {
+ acpi_handle_warn(handle, "Eject failed (0x%x)\n",
+ status);
+ return -EIO;
+ }
+ }
- /* Tell the firmware the hot-remove operation has failed. */
- acpi_evaluate_hotplug_ost(handle, ej_event->event,
- ost_code, NULL);
+ /*
+ * Verify if eject was indeed successful. If not, log an error
+ * message. No need to call _OST since _EJ0 call was made OK.
+ */
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_warn(handle,
+ "Status check after eject failed (0x%x)\n", status);
+ } else if (sta & ACPI_STA_DEVICE_ENABLED) {
+ acpi_handle_warn(handle,
+ "Eject incomplete - status 0x%llx\n", sta);
+ }
+
+ return 0;
+}
+
+static void acpi_bus_device_eject(void *context)
+{
+ acpi_handle handle = context;
+ struct acpi_device *device = NULL;
+ struct acpi_scan_handler *handler;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+
+ mutex_lock(&acpi_scan_lock);
+
+ acpi_bus_get_device(handle, &device);
+ if (!device)
+ goto err_out;
+
+ handler = device->handler;
+ if (!handler || !handler->hotplug.enabled) {
+ ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
+ goto err_out;
+ }
+ acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
+ ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+ if (handler->hotplug.mode == AHM_CONTAINER) {
+ device->flags.eject_pending = true;
+ kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+ } else {
+ int error;
+
+ get_device(&device->dev);
+ error = acpi_scan_hot_remove(device);
+ if (error)
+ goto err_out;
}
out:
mutex_unlock(&acpi_scan_lock);
- kfree(context);
return;
+
+ err_out:
+ acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ost_code,
+ NULL);
+ goto out;
+}
+
+static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
+{
+ struct acpi_device *device = NULL;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+ int error;
+
+ mutex_lock(&acpi_scan_lock);
+
+ acpi_bus_get_device(handle, &device);
+ if (device) {
+ dev_warn(&device->dev, "Attempt to re-insert\n");
+ goto out;
+ }
+ acpi_evaluate_hotplug_ost(handle, ost_source,
+ ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
+ error = acpi_bus_scan(handle);
+ if (error) {
+ acpi_handle_warn(handle, "Namespace scan failure\n");
+ goto out;
+ }
+ error = acpi_bus_get_device(handle, &device);
+ if (error) {
+ acpi_handle_warn(handle, "Missing device node object\n");
+ goto out;
+ }
+ ost_code = ACPI_OST_SC_SUCCESS;
+ if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
+ kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
+
+ out:
+ acpi_evaluate_hotplug_ost(handle, ost_source, ost_code, NULL);
+ mutex_unlock(&acpi_scan_lock);
+}
+
+static void acpi_scan_bus_check(void *context)
+{
+ acpi_scan_bus_device_check((acpi_handle)context,
+ ACPI_NOTIFY_BUS_CHECK);
+}
+
+static void acpi_scan_device_check(void *context)
+{
+ acpi_scan_bus_device_check((acpi_handle)context,
+ ACPI_NOTIFY_DEVICE_CHECK);
+}
+
+static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
+{
+ u32 ost_status;
+
+ switch (type) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ acpi_handle_debug(handle,
+ "ACPI_NOTIFY_BUS_CHECK event: unsupported\n");
+ ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
+ break;
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ acpi_handle_debug(handle,
+ "ACPI_NOTIFY_DEVICE_CHECK event: unsupported\n");
+ ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
+ break;
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ acpi_handle_debug(handle,
+ "ACPI_NOTIFY_EJECT_REQUEST event: unsupported\n");
+ ost_status = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
+ break;
+ default:
+ /* non-hotplug event; possibly handled by other handler */
+ return;
+ }
+
+ acpi_evaluate_hotplug_ost(handle, type, ost_status, NULL);
+}
+
+static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
+{
+ acpi_osd_exec_callback callback;
+ struct acpi_scan_handler *handler = data;
+ acpi_status status;
+
+ if (!handler->hotplug.enabled)
+ return acpi_hotplug_unsupported(handle, type);
+
+ switch (type) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
+ callback = acpi_scan_bus_check;
+ break;
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
+ callback = acpi_scan_device_check;
+ break;
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
+ callback = acpi_bus_device_eject;
+ break;
+ default:
+ /* non-hotplug event; possibly handled by other handler */
+ return;
+ }
+ status = acpi_os_hotplug_execute(callback, handle);
+ if (ACPI_FAILURE(status))
+ acpi_evaluate_hotplug_ost(handle, type,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE,
+ NULL);
+}
+
+/**
+ * acpi_bus_hot_remove_device: hot-remove a device and its children
+ * @context: struct acpi_eject_event pointer (freed in this func)
+ *
+ * Hot-remove a device and its children. This function frees up the
+ * memory space passed by arg context, so that the caller may call
+ * this function asynchronously through acpi_os_hotplug_execute().
+ */
+void acpi_bus_hot_remove_device(void *context)
+{
+ struct acpi_eject_event *ej_event = context;
+ struct acpi_device *device = ej_event->device;
+ acpi_handle handle = device->handle;
+ int error;
+
+ mutex_lock(&acpi_scan_lock);
+
+ error = acpi_scan_hot_remove(device);
+ if (error && handle)
+ acpi_evaluate_hotplug_ost(handle, ej_event->event,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE,
+ NULL);
+
+ mutex_unlock(&acpi_scan_lock);
+ kfree(context);
}
EXPORT_SYMBOL(acpi_bus_hot_remove_device);
@@ -206,51 +394,61 @@ static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
- int ret = count;
- acpi_status status;
- acpi_object_type type = 0;
struct acpi_device *acpi_device = to_acpi_device(d);
struct acpi_eject_event *ej_event;
+ acpi_object_type not_used;
+ acpi_status status;
+ u32 ost_source;
+ int ret;
- if ((!count) || (buf[0] != '1')) {
+ if (!count || buf[0] != '1')
return -EINVAL;
- }
- if (!acpi_device->driver && !acpi_device->handler) {
- ret = -ENODEV;
- goto err;
- }
- status = acpi_get_type(acpi_device->handle, &type);
- if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
- ret = -ENODEV;
- goto err;
- }
- ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
- if (!ej_event) {
- ret = -ENOMEM;
- goto err;
- }
+ if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
+ && !acpi_device->driver)
+ return -ENODEV;
+
+ status = acpi_get_type(acpi_device->handle, &not_used);
+ if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
+ return -ENODEV;
+
+ mutex_lock(&acpi_scan_lock);
- get_device(&acpi_device->dev);
- ej_event->device = acpi_device;
if (acpi_device->flags.eject_pending) {
- /* event originated from ACPI eject notification */
- ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
+ /* ACPI eject notification event. */
+ ost_source = ACPI_NOTIFY_EJECT_REQUEST;
acpi_device->flags.eject_pending = 0;
} else {
- /* event originated from user */
- ej_event->event = ACPI_OST_EC_OSPM_EJECT;
- (void) acpi_evaluate_hotplug_ost(acpi_device->handle,
- ej_event->event, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+ /* Eject initiated by user space. */
+ ost_source = ACPI_OST_EC_OSPM_EJECT;
}
-
+ ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
+ if (!ej_event) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source,
+ ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+ ej_event->device = acpi_device;
+ ej_event->event = ost_source;
+ get_device(&acpi_device->dev);
status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
if (ACPI_FAILURE(status)) {
put_device(&acpi_device->dev);
kfree(ej_event);
+ ret = status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
+ goto err_out;
}
-err:
+ ret = count;
+
+ out:
+ mutex_unlock(&acpi_scan_lock);
return ret;
+
+ err_out:
+ acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+ goto out;
}
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
@@ -376,7 +574,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
- if (dev->flags.bus_address)
+ if (dev->pnp.type.bus_address)
result = device_create_file(&dev->dev, &dev_attr_adr);
if (dev->pnp.unique_id)
result = device_create_file(&dev->dev, &dev_attr_uid);
@@ -449,7 +647,7 @@ static void acpi_device_remove_files(struct acpi_device *dev)
if (dev->pnp.unique_id)
device_remove_file(&dev->dev, &dev_attr_uid);
- if (dev->flags.bus_address)
+ if (dev->pnp.type.bus_address)
device_remove_file(&dev->dev, &dev_attr_adr);
device_remove_file(&dev->dev, &dev_attr_modalias);
device_remove_file(&dev->dev, &dev_attr_hid);
@@ -512,17 +710,6 @@ int acpi_match_device_ids(struct acpi_device *device,
}
EXPORT_SYMBOL(acpi_match_device_ids);
-void acpi_free_ids(struct acpi_device *device)
-{
- struct acpi_hardware_id *id, *tmp;
-
- list_for_each_entry_safe(id, tmp, &device->pnp.ids, list) {
- kfree(id->id);
- kfree(id);
- }
- kfree(device->pnp.unique_id);
-}
-
static void acpi_free_power_resources_lists(struct acpi_device *device)
{
int i;
@@ -543,7 +730,7 @@ static void acpi_device_release(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
- acpi_free_ids(acpi_dev);
+ acpi_free_pnp_ids(&acpi_dev->pnp);
acpi_free_power_resources_lists(acpi_dev);
kfree(acpi_dev);
}
@@ -1256,19 +1443,17 @@ static void acpi_device_get_busid(struct acpi_device *device)
}
/*
- * acpi_bay_match - see if a device is an ejectable driver bay
+ * acpi_bay_match - see if an acpi object is an ejectable driver bay
*
* If an acpi object is ejectable and has one of the ACPI ATA methods defined,
* then we can safely call it an ejectable drive bay
*/
-static int acpi_bay_match(struct acpi_device *device){
+static int acpi_bay_match(acpi_handle handle)
+{
acpi_status status;
- acpi_handle handle;
acpi_handle tmp;
acpi_handle phandle;
- handle = device->handle;
-
status = acpi_get_handle(handle, "_EJ0", &tmp);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -1292,12 +1477,12 @@ static int acpi_bay_match(struct acpi_device *device){
}
/*
- * acpi_dock_match - see if a device has a _DCK method
+ * acpi_dock_match - see if an acpi object has a _DCK method
*/
-static int acpi_dock_match(struct acpi_device *device)
+static int acpi_dock_match(acpi_handle handle)
{
acpi_handle tmp;
- return acpi_get_handle(device->handle, "_DCK", &tmp);
+ return acpi_get_handle(handle, "_DCK", &tmp);
}
const char *acpi_device_hid(struct acpi_device *device)
@@ -1312,7 +1497,7 @@ const char *acpi_device_hid(struct acpi_device *device)
}
EXPORT_SYMBOL(acpi_device_hid);
-static void acpi_add_id(struct acpi_device *device, const char *dev_id)
+static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
{
struct acpi_hardware_id *id;
@@ -1326,7 +1511,8 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id)
return;
}
- list_add_tail(&id->list, &device->pnp.ids);
+ list_add_tail(&id->list, &pnp->ids);
+ pnp->type.hardware_id = 1;
}
/*
@@ -1334,7 +1520,7 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id)
* lacks the SMBUS01 HID and the methods do not have the necessary "_"
* prefix. Work around this.
*/
-static int acpi_ibm_smbus_match(struct acpi_device *device)
+static int acpi_ibm_smbus_match(acpi_handle handle)
{
acpi_handle h_dummy;
struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -1344,7 +1530,7 @@ static int acpi_ibm_smbus_match(struct acpi_device *device)
return -ENODEV;
/* Look for SMBS object */
- result = acpi_get_name(device->handle, ACPI_SINGLE_NAME, &path);
+ result = acpi_get_name(handle, ACPI_SINGLE_NAME, &path);
if (result)
return result;
@@ -1355,48 +1541,50 @@ static int acpi_ibm_smbus_match(struct acpi_device *device)
/* Does it have the necessary (but misnamed) methods? */
result = -ENODEV;
- if (ACPI_SUCCESS(acpi_get_handle(device->handle, "SBI", &h_dummy)) &&
- ACPI_SUCCESS(acpi_get_handle(device->handle, "SBR", &h_dummy)) &&
- ACPI_SUCCESS(acpi_get_handle(device->handle, "SBW", &h_dummy)))
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "SBI", &h_dummy)) &&
+ ACPI_SUCCESS(acpi_get_handle(handle, "SBR", &h_dummy)) &&
+ ACPI_SUCCESS(acpi_get_handle(handle, "SBW", &h_dummy)))
result = 0;
out:
kfree(path.pointer);
return result;
}
-static void acpi_device_set_id(struct acpi_device *device)
+static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
+ int device_type)
{
acpi_status status;
struct acpi_device_info *info;
struct acpi_pnp_device_id_list *cid_list;
int i;
- switch (device->device_type) {
+ switch (device_type) {
case ACPI_BUS_TYPE_DEVICE:
- if (ACPI_IS_ROOT_DEVICE(device)) {
- acpi_add_id(device, ACPI_SYSTEM_HID);
+ if (handle == ACPI_ROOT_OBJECT) {
+ acpi_add_id(pnp, ACPI_SYSTEM_HID);
break;
}
- status = acpi_get_object_info(device->handle, &info);
+ status = acpi_get_object_info(handle, &info);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__);
+ pr_err(PREFIX "%s: Error reading device info\n",
+ __func__);
return;
}
if (info->valid & ACPI_VALID_HID)
- acpi_add_id(device, info->hardware_id.string);
+ acpi_add_id(pnp, info->hardware_id.string);
if (info->valid & ACPI_VALID_CID) {
cid_list = &info->compatible_id_list;
for (i = 0; i < cid_list->count; i++)
- acpi_add_id(device, cid_list->ids[i].string);
+ acpi_add_id(pnp, cid_list->ids[i].string);
}
if (info->valid & ACPI_VALID_ADR) {
- device->pnp.bus_address = info->address;
- device->flags.bus_address = 1;
+ pnp->bus_address = info->address;
+ pnp->type.bus_address = 1;
}
if (info->valid & ACPI_VALID_UID)
- device->pnp.unique_id = kstrdup(info->unique_id.string,
+ pnp->unique_id = kstrdup(info->unique_id.string,
GFP_KERNEL);
kfree(info);
@@ -1405,40 +1593,50 @@ static void acpi_device_set_id(struct acpi_device *device)
* Some devices don't reliably have _HIDs & _CIDs, so add
* synthetic HIDs to make sure drivers can find them.
*/
- if (acpi_is_video_device(device))
- acpi_add_id(device, ACPI_VIDEO_HID);
- else if (ACPI_SUCCESS(acpi_bay_match(device)))
- acpi_add_id(device, ACPI_BAY_HID);
- else if (ACPI_SUCCESS(acpi_dock_match(device)))
- acpi_add_id(device, ACPI_DOCK_HID);
- else if (!acpi_ibm_smbus_match(device))
- acpi_add_id(device, ACPI_SMBUS_IBM_HID);
- else if (list_empty(&device->pnp.ids) &&
- ACPI_IS_ROOT_DEVICE(device->parent)) {
- acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
- strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
- strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
+ if (acpi_is_video_device(handle))
+ acpi_add_id(pnp, ACPI_VIDEO_HID);
+ else if (ACPI_SUCCESS(acpi_bay_match(handle)))
+ acpi_add_id(pnp, ACPI_BAY_HID);
+ else if (ACPI_SUCCESS(acpi_dock_match(handle)))
+ acpi_add_id(pnp, ACPI_DOCK_HID);
+ else if (!acpi_ibm_smbus_match(handle))
+ acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
+ else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
+ acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
+ strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
+ strcpy(pnp->device_class, ACPI_BUS_CLASS);
}
break;
case ACPI_BUS_TYPE_POWER:
- acpi_add_id(device, ACPI_POWER_HID);
+ acpi_add_id(pnp, ACPI_POWER_HID);
break;
case ACPI_BUS_TYPE_PROCESSOR:
- acpi_add_id(device, ACPI_PROCESSOR_OBJECT_HID);
+ acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID);
break;
case ACPI_BUS_TYPE_THERMAL:
- acpi_add_id(device, ACPI_THERMAL_HID);
+ acpi_add_id(pnp, ACPI_THERMAL_HID);
break;
case ACPI_BUS_TYPE_POWER_BUTTON:
- acpi_add_id(device, ACPI_BUTTON_HID_POWERF);
+ acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF);
break;
case ACPI_BUS_TYPE_SLEEP_BUTTON:
- acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
+ acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
break;
}
}
+void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
+{
+ struct acpi_hardware_id *id, *tmp;
+
+ list_for_each_entry_safe(id, tmp, &pnp->ids, list) {
+ kfree(id->id);
+ kfree(id);
+ }
+ kfree(pnp->unique_id);
+}
+
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, unsigned long long sta)
{
@@ -1448,7 +1646,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device->parent = acpi_bus_get_parent(handle);
STRUCT_TO_INT(device->status) = sta;
acpi_device_get_busid(device);
- acpi_device_set_id(device);
+ acpi_set_pnp_ids(handle, &device->pnp, type);
acpi_bus_get_flags(device);
device->flags.match_driver = false;
device_initialize(&device->dev);
@@ -1536,6 +1734,75 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
return 0;
}
+static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
+ char *idstr,
+ const struct acpi_device_id **matchid)
+{
+ const struct acpi_device_id *devid;
+
+ for (devid = handler->ids; devid->id[0]; devid++)
+ if (!strcmp((char *)devid->id, idstr)) {
+ if (matchid)
+ *matchid = devid;
+
+ return true;
+ }
+
+ return false;
+}
+
+static struct acpi_scan_handler *acpi_scan_match_handler(char *idstr,
+ const struct acpi_device_id **matchid)
+{
+ struct acpi_scan_handler *handler;
+
+ list_for_each_entry(handler, &acpi_scan_handlers_list, list_node)
+ if (acpi_scan_handler_matching(handler, idstr, matchid))
+ return handler;
+
+ return NULL;
+}
+
+void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
+{
+ if (!!hotplug->enabled == !!val)
+ return;
+
+ mutex_lock(&acpi_scan_lock);
+
+ hotplug->enabled = val;
+
+ mutex_unlock(&acpi_scan_lock);
+}
+
+static void acpi_scan_init_hotplug(acpi_handle handle, int type)
+{
+ struct acpi_device_pnp pnp = {};
+ struct acpi_hardware_id *hwid;
+ struct acpi_scan_handler *handler;
+
+ INIT_LIST_HEAD(&pnp.ids);
+ acpi_set_pnp_ids(handle, &pnp, type);
+
+ if (!pnp.type.hardware_id)
+ return;
+
+ /*
+ * This relies on the fact that acpi_install_notify_handler() will not
+ * install the same notify handler routine twice for the same handle.
+ */
+ list_for_each_entry(hwid, &pnp.ids, list) {
+ handler = acpi_scan_match_handler(hwid->id, NULL);
+ if (handler) {
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ acpi_hotplug_notify_cb, handler);
+ break;
+ }
+ }
+
+ acpi_free_pnp_ids(&pnp);
+}
+
static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
void *not_used, void **return_value)
{
@@ -1558,6 +1825,8 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
}
+ acpi_scan_init_hotplug(handle, type);
+
if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
!(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
struct acpi_device_wakeup wakeup;
@@ -1583,42 +1852,26 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
}
-static int acpi_scan_do_attach_handler(struct acpi_device *device, char *id)
+static int acpi_scan_attach_handler(struct acpi_device *device)
{
- struct acpi_scan_handler *handler;
+ struct acpi_hardware_id *hwid;
+ int ret = 0;
- list_for_each_entry(handler, &acpi_scan_handlers_list, list_node) {
+ list_for_each_entry(hwid, &device->pnp.ids, list) {
const struct acpi_device_id *devid;
+ struct acpi_scan_handler *handler;
- for (devid = handler->ids; devid->id[0]; devid++) {
- int ret;
-
- if (strcmp((char *)devid->id, id))
- continue;
-
+ handler = acpi_scan_match_handler(hwid->id, &devid);
+ if (handler) {
ret = handler->attach(device, devid);
if (ret > 0) {
device->handler = handler;
- return ret;
+ break;
} else if (ret < 0) {
- return ret;
+ break;
}
}
}
- return 0;
-}
-
-static int acpi_scan_attach_handler(struct acpi_device *device)
-{
- struct acpi_hardware_id *hwid;
- int ret = 0;
-
- list_for_each_entry(hwid, &device->pnp.ids, list) {
- ret = acpi_scan_do_attach_handler(device, hwid->id);
- if (ret)
- break;
-
- }
return ret;
}
@@ -1788,8 +2041,10 @@ int __init acpi_scan_init(void)
acpi_pci_root_init();
acpi_pci_link_init();
acpi_platform_init();
+ acpi_lpss_init();
acpi_csrt_init();
acpi_container_init();
+ acpi_memory_hotplug_init();
mutex_lock(&acpi_scan_lock);
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 41c0504470db..fcae5fa2e1b3 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -7,6 +7,8 @@
#include <linux/moduleparam.h>
#include <acpi/acpi_drivers.h>
+#include "internal.h"
+
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("sysfs");
@@ -249,6 +251,7 @@ module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *dynamic_tables_kobj;
+static struct kobject *hotplug_kobj;
struct acpi_table_attr {
struct bin_attribute attr;
@@ -716,6 +719,67 @@ acpi_show_profile(struct device *dev, struct device_attribute *attr,
static const struct device_attribute pm_profile_attr =
__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
+static ssize_t hotplug_enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
+
+ return sprintf(buf, "%d\n", hotplug->enabled);
+}
+
+static ssize_t hotplug_enabled_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
+ unsigned int val;
+
+ if (kstrtouint(buf, 10, &val) || val > 1)
+ return -EINVAL;
+
+ acpi_scan_hotplug_enabled(hotplug, val);
+ return size;
+}
+
+static struct kobj_attribute hotplug_enabled_attr =
+ __ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
+ hotplug_enabled_store);
+
+static struct attribute *hotplug_profile_attrs[] = {
+ &hotplug_enabled_attr.attr,
+ NULL
+};
+
+static struct kobj_type acpi_hotplug_profile_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = hotplug_profile_attrs,
+};
+
+void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
+ const char *name)
+{
+ int error;
+
+ if (!hotplug_kobj)
+ goto err_out;
+
+ kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype);
+ error = kobject_set_name(&hotplug->kobj, "%s", name);
+ if (error)
+ goto err_out;
+
+ hotplug->kobj.parent = hotplug_kobj;
+ error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL);
+ if (error)
+ goto err_out;
+
+ kobject_uevent(&hotplug->kobj, KOBJ_ADD);
+ return;
+
+ err_out:
+ pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
+}
+
int __init acpi_sysfs_init(void)
{
int result;
@@ -723,6 +787,8 @@ int __init acpi_sysfs_init(void)
result = acpi_tables_sysfs_init();
if (result)
return result;
+
+ hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
return result;
}
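
Together with the scan.c changes, the sysfs code above exposes one directory per hotplug profile under the new "hotplug" kobject, i.e. /sys/firmware/acpi/hotplug/<profile>/enabled, where <profile> is the name passed to acpi_scan_add_handler_with_hotplug() ("container" in this series). A user-space sketch that enables the container profile, assuming that path:

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/firmware/acpi/hotplug/container/enabled";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            fputs("1\n", f);        /* write "0" to disable the profile again */
            return fclose(f) ? 1 : 0;
    }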
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 8470771e5eae..a33821ca3895 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -723,9 +723,19 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
return -EINVAL;
if (type == THERMAL_TRIP_ACTIVE) {
- /* aggressive active cooling */
- *trend = THERMAL_TREND_RAISING;
- return 0;
+ unsigned long trip_temp;
+ unsigned long temp = KELVIN_TO_MILLICELSIUS(tz->temperature,
+ tz->kelvin_offset);
+ if (thermal_get_trip_temp(thermal, trip, &trip_temp))
+ return -EINVAL;
+
+ if (temp > trip_temp) {
+ *trend = THERMAL_TREND_RAISING;
+ return 0;
+ } else {
+ /* Fall back on default trend */
+ return -EINVAL;
+ }
}
/*
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 313f959413dc..c3932d0876e0 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -167,7 +167,8 @@ struct acpi_video_device_flags {
u8 dvi:1;
u8 bios:1;
u8 unknown:1;
- u8 reserved:2;
+ u8 notify:1;
+ u8 reserved:1;
};
struct acpi_video_device_cap {
@@ -222,7 +223,7 @@ static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
int level);
static int acpi_video_device_lcd_get_level_current(
struct acpi_video_device *device,
- unsigned long long *level, int init);
+ unsigned long long *level, bool raw);
static int acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event);
static int acpi_video_switch_brightness(struct acpi_video_device *device,
@@ -236,7 +237,7 @@ static int acpi_video_get_brightness(struct backlight_device *bd)
struct acpi_video_device *vd =
(struct acpi_video_device *)bl_get_data(bd);
- if (acpi_video_device_lcd_get_level_current(vd, &cur_level, 0))
+ if (acpi_video_device_lcd_get_level_current(vd, &cur_level, false))
return -EINVAL;
for (i = 2; i < vd->brightness->count; i++) {
if (vd->brightness->levels[i] == cur_level)
@@ -281,7 +282,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev, unsig
unsigned long long level;
int offset;
- if (acpi_video_device_lcd_get_level_current(video, &level, 0))
+ if (acpi_video_device_lcd_get_level_current(video, &level, false))
return -EINVAL;
for (offset = 2; offset < video->brightness->count; offset++)
if (level == video->brightness->levels[offset]) {
@@ -447,12 +448,45 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
},
},
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP Pavilion dm4",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
+ },
+ },
{}
};
+static unsigned long long
+acpi_video_bqc_value_to_level(struct acpi_video_device *device,
+ unsigned long long bqc_value)
+{
+ unsigned long long level;
+
+ if (device->brightness->flags._BQC_use_index) {
+ /*
+ * _BQC returns an index that doesn't account for
+ * the first 2 items with special meaning, so we need
+ * to compensate for that by offsetting ourselves
+ */
+ if (device->brightness->flags._BCL_reversed)
+ bqc_value = device->brightness->count - 3 - bqc_value;
+
+ level = device->brightness->levels[bqc_value + 2];
+ } else {
+ level = bqc_value;
+ }
+
+ level += bqc_offset_aml_bug_workaround;
+
+ return level;
+}
+
static int
acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
- unsigned long long *level, int init)
+ unsigned long long *level, bool raw)
{
acpi_status status = AE_OK;
int i;
@@ -463,29 +497,30 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
status = acpi_evaluate_integer(device->dev->handle, buf,
NULL, level);
if (ACPI_SUCCESS(status)) {
- if (device->brightness->flags._BQC_use_index) {
- if (device->brightness->flags._BCL_reversed)
- *level = device->brightness->count
- - 3 - (*level);
- *level = device->brightness->levels[*level + 2];
-
+ if (raw) {
+ /*
+			 * Caller has indicated they want the raw
+			 * value returned by _BQC, so don't mess
+			 * with the value any further.
+ */
+ return 0;
}
- *level += bqc_offset_aml_bug_workaround;
+
+ *level = acpi_video_bqc_value_to_level(device, *level);
+
for (i = 2; i < device->brightness->count; i++)
if (device->brightness->levels[i] == *level) {
device->brightness->curr = *level;
return 0;
}
- if (!init) {
- /*
- * BQC returned an invalid level.
- * Stop using it.
- */
- ACPI_WARNING((AE_INFO,
- "%s returned an invalid level",
- buf));
- device->cap._BQC = device->cap._BCQ = 0;
- }
+ /*
+ * BQC returned an invalid level.
+ * Stop using it.
+ */
+ ACPI_WARNING((AE_INFO,
+ "%s returned an invalid level",
+ buf));
+ device->cap._BQC = device->cap._BCQ = 0;
} else {
/* Fixme:
* should we return an error or ignore this failure?
@@ -598,6 +633,56 @@ acpi_video_cmp_level(const void *a, const void *b)
}
/*
+ * Decides if _BQC/_BCQ for this system is usable
+ *
+ * We do this by setting a brightness level and then reading back the
+ * current level.  If the value read does not match, check whether _BQC
+ * returns an index rather than a level; if it does not, clear the
+ * _BQC/_BCQ capability.
+ */
+static int acpi_video_bqc_quirk(struct acpi_video_device *device,
+ int max_level, int current_level)
+{
+ struct acpi_video_device_brightness *br = device->brightness;
+ int result;
+ unsigned long long level;
+ int test_level;
+
+ /* don't mess with existing known broken systems */
+ if (bqc_offset_aml_bug_workaround)
+ return 0;
+
+ /*
+	 * Some systems always report the current brightness level as the
+	 * maximum through _BQC; we need to test another value for them.
+ */
+ test_level = current_level == max_level ? br->levels[2] : max_level;
+
+ result = acpi_video_device_lcd_set_level(device, test_level);
+ if (result)
+ return result;
+
+ result = acpi_video_device_lcd_get_level_current(device, &level, true);
+ if (result)
+ return result;
+
+ if (level != test_level) {
+ /* buggy _BQC found, need to find out if it uses index */
+ if (level < br->count) {
+ if (br->flags._BCL_reversed)
+ level = br->count - 3 - level;
+ if (br->levels[level + 2] == test_level)
+ br->flags._BQC_use_index = 1;
+ }
+
+ if (!br->flags._BQC_use_index)
+ device->cap._BQC = device->cap._BCQ = 0;
+ }
+
+ return 0;
+}
+
+
+/*
* Arg:
* device : video output device (LCD, CRT, ..)
*
@@ -703,42 +788,36 @@ acpi_video_init_brightness(struct acpi_video_device *device)
if (!device->cap._BQC)
goto set_level;
- result = acpi_video_device_lcd_get_level_current(device, &level_old, 1);
- if (result)
- goto out_free_levels;
-
- /*
- * Set the level to maximum and check if _BQC uses indexed value
- */
- result = acpi_video_device_lcd_set_level(device, max_level);
+ result = acpi_video_device_lcd_get_level_current(device,
+ &level_old, true);
if (result)
goto out_free_levels;
- result = acpi_video_device_lcd_get_level_current(device, &level, 0);
+ result = acpi_video_bqc_quirk(device, max_level, level_old);
if (result)
goto out_free_levels;
+ /*
+	 * cap._BQC may have been cleared because _BQC was found to be broken
+	 * in acpi_video_bqc_quirk(), so check again here.
+ */
+ if (!device->cap._BQC)
+ goto set_level;
- br->flags._BQC_use_index = (level == max_level ? 0 : 1);
-
- if (!br->flags._BQC_use_index) {
+ if (use_bios_initial_backlight) {
+ level = acpi_video_bqc_value_to_level(device, level_old);
/*
- * Set the backlight to the initial state.
- * On some buggy laptops, _BQC returns an uninitialized value
- * when invoked for the first time, i.e. level_old is invalid.
- * set the backlight to max_level in this case
+ * On some buggy laptops, _BQC returns an uninitialized
+ * value when invoked for the first time, i.e.
+ * level_old is invalid (no matter whether it's a level
+ * or an index). Set the backlight to max_level in this case.
*/
- if (use_bios_initial_backlight) {
- for (i = 2; i < br->count; i++)
- if (level_old == br->levels[i])
- level = level_old;
- }
- goto set_level;
+ for (i = 2; i < br->count; i++)
+ if (level_old == br->levels[i])
+ break;
+ if (i == br->count)
+ level = max_level;
}
- if (br->flags._BCL_reversed)
- level_old = (br->count - 1) - level_old;
- level = br->levels[level_old];
-
set_level:
result = acpi_video_device_lcd_set_level(device, level);
if (result)
@@ -996,53 +1075,51 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_device *data;
struct acpi_video_device_attrib* attribute;
- if (!device || !video)
- return -EINVAL;
-
status =
acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
- if (ACPI_SUCCESS(status)) {
-
- data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
- device->driver_data = data;
-
- data->device_id = device_id;
- data->video = video;
- data->dev = device;
+	/* Some devices omit _ADR; skip them instead of failing */
+ if (ACPI_FAILURE(status))
+ return 0;
- attribute = acpi_video_get_device_attr(video, device_id);
+ data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- if((attribute != NULL) && attribute->device_id_scheme) {
- switch (attribute->display_type) {
- case ACPI_VIDEO_DISPLAY_CRT:
- data->flags.crt = 1;
- break;
- case ACPI_VIDEO_DISPLAY_TV:
- data->flags.tvout = 1;
- break;
- case ACPI_VIDEO_DISPLAY_DVI:
- data->flags.dvi = 1;
- break;
- case ACPI_VIDEO_DISPLAY_LCD:
- data->flags.lcd = 1;
- break;
- default:
- data->flags.unknown = 1;
- break;
- }
- if(attribute->bios_can_detect)
- data->flags.bios = 1;
- } else {
- /* Check for legacy IDs */
- device_type = acpi_video_get_device_type(video,
- device_id);
- /* Ignore bits 16 and 18-20 */
- switch (device_type & 0xffe2ffff) {
+ strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+ device->driver_data = data;
+
+ data->device_id = device_id;
+ data->video = video;
+ data->dev = device;
+
+ attribute = acpi_video_get_device_attr(video, device_id);
+
+ if((attribute != NULL) && attribute->device_id_scheme) {
+ switch (attribute->display_type) {
+ case ACPI_VIDEO_DISPLAY_CRT:
+ data->flags.crt = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_TV:
+ data->flags.tvout = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_DVI:
+ data->flags.dvi = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LCD:
+ data->flags.lcd = 1;
+ break;
+ default:
+ data->flags.unknown = 1;
+ break;
+ }
+ if(attribute->bios_can_detect)
+ data->flags.bios = 1;
+ } else {
+ /* Check for legacy IDs */
+ device_type = acpi_video_get_device_type(video, device_id);
+ /* Ignore bits 16 and 18-20 */
+ switch (device_type & 0xffe2ffff) {
case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
data->flags.crt = 1;
break;
@@ -1054,34 +1131,24 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
break;
default:
data->flags.unknown = 1;
- }
}
+ }
- acpi_video_device_bind(video, data);
- acpi_video_device_find_cap(data);
-
- status = acpi_install_notify_handler(device->handle,
- ACPI_DEVICE_NOTIFY,
- acpi_video_device_notify,
- data);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Error installing notify handler\n");
- if(data->brightness)
- kfree(data->brightness->levels);
- kfree(data->brightness);
- kfree(data);
- return -ENODEV;
- }
+ acpi_video_device_bind(video, data);
+ acpi_video_device_find_cap(data);
- mutex_lock(&video->device_list_lock);
- list_add_tail(&data->entry, &video->video_device_list);
- mutex_unlock(&video->device_list_lock);
+ status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_video_device_notify, data);
+ if (ACPI_FAILURE(status))
+ dev_err(&device->dev, "Error installing notify handler\n");
+ else
+ data->flags.notify = 1;
- return 0;
- }
+ mutex_lock(&video->device_list_lock);
+ list_add_tail(&data->entry, &video->video_device_list);
+ mutex_unlock(&video->device_list_lock);
- return -ENOENT;
+ return status;
}
/*
@@ -1268,7 +1335,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
goto out;
result = acpi_video_device_lcd_get_level_current(device,
- &level_current, 0);
+ &level_current,
+ false);
if (result)
goto out;
@@ -1373,9 +1441,8 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
status = acpi_video_bus_get_one_device(dev, video);
if (status) {
- printk(KERN_WARNING PREFIX
- "Can't attach device\n");
- continue;
+ dev_err(&dev->dev, "Can't attach device\n");
+ break;
}
}
return status;
@@ -1388,13 +1455,14 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
if (!device || !device->video)
return -ENOENT;
- status = acpi_remove_notify_handler(device->dev->handle,
- ACPI_DEVICE_NOTIFY,
- acpi_video_device_notify);
- if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING PREFIX
- "Can't remove video notify handler\n");
+ if (device->flags.notify) {
+ status = acpi_remove_notify_handler(device->dev->handle,
+ ACPI_DEVICE_NOTIFY, acpi_video_device_notify);
+ if (ACPI_FAILURE(status))
+ dev_err(&device->dev->dev,
+ "Can't remove video notify handler\n");
}
+
if (device->backlight) {
backlight_device_unregister(device->backlight);
device->backlight = NULL;
@@ -1676,7 +1744,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
error = acpi_video_bus_get_devices(video, device);
if (error)
- goto err_free_video;
+ goto err_put_video;
video->input = input = input_allocate_device();
if (!input) {
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 4ac2593234e7..66f67626f02e 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -67,40 +67,37 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
return 0;
}
-/* Returns true if the device is a video device which can be handled by
- * video.ko.
+/* Returns true if the ACPI object is a video device which can be
+ * handled by video.ko.
* The device will get a Linux specific CID added in scan.c to
* identify the device as an ACPI graphics device
* Be aware that the graphics device may not be physically present
* Use acpi_video_get_capabilities() to detect general ACPI video
* capabilities of present cards
*/
-long acpi_is_video_device(struct acpi_device *device)
+long acpi_is_video_device(acpi_handle handle)
{
acpi_handle h_dummy;
long video_caps = 0;
- if (!device)
- return 0;
-
/* Is this device able to support video switching ? */
- if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
- ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_DOD", &h_dummy)) ||
+ ACPI_SUCCESS(acpi_get_handle(handle, "_DOS", &h_dummy)))
video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
/* Is this device able to retrieve a video ROM ? */
- if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_ROM", &h_dummy)))
video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
/* Is this device able to configure which video head to be POSTed ? */
- if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy)) &&
- ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy)) &&
- ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy)))
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_VPO", &h_dummy)) &&
+ ACPI_SUCCESS(acpi_get_handle(handle, "_GPD", &h_dummy)) &&
+ ACPI_SUCCESS(acpi_get_handle(handle, "_SPD", &h_dummy)))
video_caps |= ACPI_VIDEO_DEVICE_POSTING;
/* Only check for backlight functionality if one of the above hit. */
if (video_caps)
- acpi_walk_namespace(ACPI_TYPE_DEVICE, device->handle,
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL,
&video_caps, NULL);
@@ -127,7 +124,7 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!dev)
return AE_OK;
pci_dev_put(dev);
- *cap |= acpi_is_video_device(acpi_dev);
+ *cap |= acpi_is_video_device(handle);
}
return AE_OK;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9a6b05a35603..7072404c8b6d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -920,7 +920,7 @@ static int pm_genpd_prepare(struct device *dev)
pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
- pm_runtime_put_sync(dev);
+ pm_runtime_put(dev);
return -EBUSY;
}
@@ -961,7 +961,7 @@ static int pm_genpd_prepare(struct device *dev)
pm_runtime_enable(dev);
}
- pm_runtime_put_sync(dev);
+ pm_runtime_put(dev);
return ret;
}
@@ -1327,7 +1327,7 @@ static void pm_genpd_complete(struct device *dev)
pm_generic_complete(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- pm_runtime_idle(dev);
+ pm_request_idle(dev);
}
}
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index d03d290f31c2..bfd898b8988e 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -324,6 +324,6 @@ void pm_generic_complete(struct device *dev)
* Let runtime PM try to suspend devices that haven't been in use before
* going into the system-wide sleep state we're resuming from.
*/
- pm_runtime_idle(dev);
+ pm_request_idle(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 15beb500a4e4..5a9b6569dd74 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -756,7 +756,7 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
- pm_runtime_put_sync(dev);
+ pm_runtime_put(dev);
}
/**
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 32ee0fc7ea54..f0077cb8e249 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -55,6 +55,7 @@
* @rate: Frequency in hertz
* @u_volt: Nominal voltage in microvolts corresponding to this OPP
* @dev_opp: points back to the device_opp struct this opp belongs to
+ * @head: RCU callback head used for deferred freeing
*
* This structure stores the OPP information for a given device.
*/
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1244930e3d7a..ef13ad08afb2 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1400,5 +1400,5 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
if (dev->power.irq_safe && dev->parent)
- pm_runtime_put_sync(dev->parent);
+ pm_runtime_put(dev->parent);
}
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index f9ba4fab0ddc..04781389d0fb 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -1,2 +1,2 @@
-clk-x86-lpss-objs := clk-lpss.o clk-lpt.o
+clk-x86-lpss-objs := clk-lpt.o
obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
diff --git a/drivers/clk/x86/clk-lpss.c b/drivers/clk/x86/clk-lpss.c
deleted file mode 100644
index b5e229f3c3d9..000000000000
--- a/drivers/clk/x86/clk-lpss.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Intel Low Power Subsystem clocks.
- *
- * Copyright (C) 2013, Intel Corporation
- * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
- * Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/acpi.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-
-static int clk_lpss_is_mmio_resource(struct acpi_resource *res, void *data)
-{
- struct resource r;
- return !acpi_dev_resource_memory(res, &r);
-}
-
-static acpi_status clk_lpss_find_mmio(acpi_handle handle, u32 level,
- void *data, void **retval)
-{
- struct resource_list_entry *rentry;
- struct list_head resource_list;
- struct acpi_device *adev;
- const char *uid = data;
- int ret;
-
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
-
- if (uid) {
- if (!adev->pnp.unique_id)
- return AE_OK;
- if (strcmp(uid, adev->pnp.unique_id))
- return AE_OK;
- }
-
- INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list,
- clk_lpss_is_mmio_resource, NULL);
- if (ret < 0)
- return AE_NO_MEMORY;
-
- list_for_each_entry(rentry, &resource_list, node)
- if (resource_type(&rentry->res) == IORESOURCE_MEM) {
- *(struct resource *)retval = rentry->res;
- break;
- }
-
- acpi_dev_free_resource_list(&resource_list);
- return AE_OK;
-}
-
-/**
- * clk_register_lpss_gate - register LPSS clock gate
- * @name: name of this clock gate
- * @parent_name: parent clock name
- * @hid: ACPI _HID of the device
- * @uid: ACPI _UID of the device (optional)
- * @offset: LPSS PRV_CLOCK_PARAMS offset
- *
- * Creates and registers LPSS clock gate.
- */
-struct clk *clk_register_lpss_gate(const char *name, const char *parent_name,
- const char *hid, const char *uid,
- unsigned offset)
-{
- struct resource res = { };
- void __iomem *mmio_base;
- acpi_status status;
- struct clk *clk;
-
- /*
- * First try to look the device and its mmio resource from the
- * ACPI namespace.
- */
- status = acpi_get_devices(hid, clk_lpss_find_mmio, (void *)uid,
- (void **)&res);
- if (ACPI_FAILURE(status) || !res.start)
- return ERR_PTR(-ENODEV);
-
- mmio_base = ioremap(res.start, resource_size(&res));
- if (!mmio_base)
- return ERR_PTR(-ENOMEM);
-
- clk = clk_register_gate(NULL, name, parent_name, 0, mmio_base + offset,
- 0, 0, NULL);
- if (IS_ERR(clk))
- iounmap(mmio_base);
-
- return clk;
-}
diff --git a/drivers/clk/x86/clk-lpss.h b/drivers/clk/x86/clk-lpss.h
deleted file mode 100644
index e9460f442297..000000000000
--- a/drivers/clk/x86/clk-lpss.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Intel Low Power Subsystem clock.
- *
- * Copyright (C) 2013, Intel Corporation
- * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
- * Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __CLK_LPSS_H
-#define __CLK_LPSS_H
-
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-
-#ifdef CONFIG_ACPI
-extern struct clk *clk_register_lpss_gate(const char *name,
- const char *parent_name,
- const char *hid, const char *uid,
- unsigned offset);
-#else
-static inline struct clk *clk_register_lpss_gate(const char *name,
- const char *parent_name,
- const char *hid,
- const char *uid,
- unsigned offset)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
-
-#endif /* __CLK_LPSS_H */
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
index 81298aeef7e3..5cf4f4686406 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpt.c
@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -18,8 +17,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include "clk-lpss.h"
-
#define PRV_CLOCK_PARAMS 0x800
static int lpt_clk_probe(struct platform_device *pdev)
@@ -34,40 +31,6 @@ static int lpt_clk_probe(struct platform_device *pdev)
/* Shared DMA clock */
clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
-
- /* SPI clocks */
- clk = clk_register_lpss_gate("spi0_clk", "lpss_clk", "INT33C0", NULL,
- PRV_CLOCK_PARAMS);
- if (!IS_ERR(clk))
- clk_register_clkdev(clk, NULL, "INT33C0:00");
-
- clk = clk_register_lpss_gate("spi1_clk", "lpss_clk", "INT33C1", NULL,
- PRV_CLOCK_PARAMS);
- if (!IS_ERR(clk))
- clk_register_clkdev(clk, NULL, "INT33C1:00");
-
- /* I2C clocks */
- clk = clk_register_lpss_gate("i2c0_clk", "lpss_clk", "INT33C2", NULL,
- PRV_CLOCK_PARAMS);
- if (!IS_ERR(clk))
- clk_register_clkdev(clk, NULL, "INT33C2:00");
-
- clk = clk_register_lpss_gate("i2c1_clk", "lpss_clk", "INT33C3", NULL,
- PRV_CLOCK_PARAMS);
- if (!IS_ERR(clk))
- clk_register_clkdev(clk, NULL, "INT33C3:00");
-
- /* UART clocks */
- clk = clk_register_lpss_gate("uart0_clk", "lpss_clk", "INT33C4", NULL,
- PRV_CLOCK_PARAMS);
- if (!IS_ERR(clk))
- clk_register_clkdev(clk, NULL, "INT33C4:00");
-
- clk = clk_register_lpss_gate("uart1_clk", "lpss_clk", "INT33C5", NULL,
- PRV_CLOCK_PARAMS);
- if (!IS_ERR(clk))
- clk_register_clkdev(clk, NULL, "INT33C5:00");
-
return 0;
}
@@ -79,8 +42,7 @@ static struct platform_driver lpt_clk_driver = {
.probe = lpt_clk_probe,
};
-static int __init lpt_clk_init(void)
+int __init lpt_clk_init(void)
{
return platform_driver_register(&lpt_clk_driver);
}
-arch_initcall(lpt_clk_init);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index cbcb21e32771..a1488f58f6ca 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -205,10 +205,99 @@ depends on ARM
source "drivers/cpufreq/Kconfig.arm"
endmenu
+menu "AVR32 CPU frequency scaling drivers"
+depends on AVR32
+
+config AVR32_AT32AP_CPUFREQ
+ bool "CPU frequency driver for AT32AP"
+ depends on PLATFORM_AT32AP
+ default n
+ help
+ This enables the CPU frequency driver for AT32AP processors.
+ If in doubt, say N.
+
+endmenu
+
+menu "CPUFreq processor drivers"
+depends on IA64
+
+config IA64_ACPI_CPUFREQ
+ tristate "ACPI Processor P-States driver"
+ select CPU_FREQ_TABLE
+ depends on ACPI_PROCESSOR
+ help
+ This driver adds a CPUFreq driver which utilizes the ACPI
+ Processor Performance States.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+endmenu
+
+menu "MIPS CPUFreq processor drivers"
+depends on MIPS
+
+config LOONGSON2_CPUFREQ
+ tristate "Loongson2 CPUFreq Driver"
+ select CPU_FREQ_TABLE
+ help
+	  This option adds a CPUFreq driver for Loongson processors which
+	  support software configurable cpu frequency.
+
+	  Loongson2F and its successors support this feature.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+endmenu
+
menu "PowerPC CPU frequency scaling drivers"
depends on PPC32 || PPC64
source "drivers/cpufreq/Kconfig.powerpc"
endmenu
+menu "SPARC CPU frequency scaling drivers"
+depends on SPARC64
+config SPARC_US3_CPUFREQ
+ tristate "UltraSPARC-III CPU Frequency driver"
+ select CPU_FREQ_TABLE
+ help
+ This adds the CPUFreq driver for UltraSPARC-III processors.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If in doubt, say N.
+
+config SPARC_US2E_CPUFREQ
+ tristate "UltraSPARC-IIe CPU Frequency driver"
+ select CPU_FREQ_TABLE
+ help
+ This adds the CPUFreq driver for UltraSPARC-IIe processors.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If in doubt, say N.
+endmenu
+
+menu "SH CPU Frequency scaling"
+depends on SUPERH
+config SH_CPU_FREQ
+ tristate "SuperH CPU Frequency driver"
+ select CPU_FREQ_TABLE
+ help
+ This adds the cpufreq driver for SuperH. Any CPU that supports
+ clock rate rounding through the clock framework can use this
+ driver. While it will make the kernel slightly larger, this is
+ harmless for CPUs that don't support rate rounding. The driver
+ will also generate a notice in the boot log before disabling
+ itself if the CPU in question is not capable of rate rounding.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If unsure, say N.
+endmenu
+
endif
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 030ddf6dd3f1..f3af18b9acc5 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,93 @@
# ARM CPU Frequency scaling drivers
#
+config ARM_BIG_LITTLE_CPUFREQ
+ tristate
+ depends on ARM_CPU_TOPOLOGY
+
+config ARM_DT_BL_CPUFREQ
+ tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
+ select ARM_BIG_LITTLE_CPUFREQ
+ depends on OF && HAVE_CLK
+ help
+	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+	  This gets frequency tables from DT.
+
+config ARM_EXYNOS_CPUFREQ
+ bool "SAMSUNG EXYNOS SoCs"
+ depends on ARCH_EXYNOS
+ default y
+ help
+ This adds the CPUFreq driver common part for Samsung
+ EXYNOS SoCs.
+
+ If in doubt, say N.
+
+config ARM_EXYNOS4210_CPUFREQ
+ def_bool CPU_EXYNOS4210
+ help
+ This adds the CPUFreq driver for Samsung EXYNOS4210
+ SoC (S5PV310 or S5PC210).
+
+config ARM_EXYNOS4X12_CPUFREQ
+ def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+ help
+ This adds the CPUFreq driver for Samsung EXYNOS4X12
+ SoC (EXYNOS4212 or EXYNOS4412).
+
+config ARM_EXYNOS5250_CPUFREQ
+ def_bool SOC_EXYNOS5250
+ help
+ This adds the CPUFreq driver for Samsung EXYNOS5250
+ SoC.
+
+config ARM_EXYNOS5440_CPUFREQ
+ def_bool SOC_EXYNOS5440
+ depends on HAVE_CLK && PM_OPP && OF
+ help
+ This adds the CPUFreq driver for Samsung EXYNOS5440
+	  SoC. The exynos5440 clock controller differs from previous
+	  exynos controllers, so this driver does not use the common
+	  exynos framework.
+
+config ARM_HIGHBANK_CPUFREQ
+ tristate "Calxeda Highbank-based"
+ depends on ARCH_HIGHBANK
+ select CPU_FREQ_TABLE
+ select GENERIC_CPUFREQ_CPU0
+ select PM_OPP
+ select REGULATOR
+
+ default m
+ help
+ This adds the CPUFreq driver for Calxeda Highbank SoC
+ based boards.
+
+ If in doubt, say N.
+
+config ARM_IMX6Q_CPUFREQ
+ tristate "Freescale i.MX6Q cpufreq support"
+ depends on SOC_IMX6Q
+ depends on REGULATOR_ANATOP
+ help
+ This adds cpufreq driver support for Freescale i.MX6Q SOC.
+
+ If in doubt, say N.
+
+config ARM_INTEGRATOR
+ tristate "CPUfreq driver for ARM Integrator CPUs"
+ depends on ARCH_INTEGRATOR
+ default y
+ help
+ This enables the CPUfreq driver for ARM Integrator CPUs.
+ If in doubt, say Y.
+
+config ARM_KIRKWOOD_CPUFREQ
+ def_bool ARCH_KIRKWOOD && OF
+ help
+ This adds the CPUFreq driver for Marvell Kirkwood
+ SoCs.
+
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
@@ -42,6 +129,7 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210
+ select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S5PV210 and
@@ -49,48 +137,11 @@ config ARM_S5PV210_CPUFREQ
If in doubt, say N.
-config ARM_EXYNOS_CPUFREQ
- bool "SAMSUNG EXYNOS SoCs"
- depends on ARCH_EXYNOS
- default y
- help
- This adds the CPUFreq driver common part for Samsung
- EXYNOS SoCs.
-
- If in doubt, say N.
+config ARM_SA1100_CPUFREQ
+ bool
-config ARM_EXYNOS4210_CPUFREQ
- def_bool CPU_EXYNOS4210
- help
- This adds the CPUFreq driver for Samsung EXYNOS4210
- SoC (S5PV310 or S5PC210).
-
-config ARM_EXYNOS4X12_CPUFREQ
- def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
- help
- This adds the CPUFreq driver for Samsung EXYNOS4X12
- SoC (EXYNOS4212 or EXYNOS4412).
-
-config ARM_EXYNOS5250_CPUFREQ
- def_bool SOC_EXYNOS5250
- help
- This adds the CPUFreq driver for Samsung EXYNOS5250
- SoC.
-
-config ARM_KIRKWOOD_CPUFREQ
- def_bool ARCH_KIRKWOOD && OF
- help
- This adds the CPUFreq driver for Marvell Kirkwood
- SoCs.
-
-config ARM_IMX6Q_CPUFREQ
- tristate "Freescale i.MX6Q cpufreq support"
- depends on SOC_IMX6Q
- depends on REGULATOR_ANATOP
- help
- This adds cpufreq driver support for Freescale i.MX6Q SOC.
-
- If in doubt, say N.
+config ARM_SA1110_CPUFREQ
+ bool
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
@@ -98,18 +149,3 @@ config ARM_SPEAR_CPUFREQ
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
-
-config ARM_HIGHBANK_CPUFREQ
- tristate "Calxeda Highbank-based"
- depends on ARCH_HIGHBANK
- select CPU_FREQ_TABLE
- select GENERIC_CPUFREQ_CPU0
- select PM_OPP
- select REGULATOR
-
- default m
- help
- This adds the CPUFreq driver for Calxeda Highbank SoC
- based boards.
-
- If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index e76992f79683..9c926ca0d718 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,3 +1,21 @@
+config CPU_FREQ_CBE
+ tristate "CBE frequency scaling"
+ depends on CBE_RAS && PPC_CELL
+ default m
+ help
+ This adds the cpufreq driver for Cell BE processors.
+ For details, take a look at <file:Documentation/cpu-freq/>.
+	  If you don't have such a processor, say N.
+
+config CPU_FREQ_CBE_PMI
+ bool "CBE frequency scaling using PMI interface"
+ depends on CPU_FREQ_CBE
+ default n
+ help
+ Select this, if you want to use the PMI interface to switch
+ frequencies. Using PMI, the processor will not only be able to run at
+ lower speed, but also at lower core voltage.
+
config CPU_FREQ_MAPLE
bool "Support for Maple 970FX Evaluation Board"
depends on PPC_MAPLE
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index d7dc0ed6adb0..2b8a8c374548 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -129,6 +129,23 @@ config X86_POWERNOW_K8
For details, take a look at <file:Documentation/cpu-freq/>.
+config X86_AMD_FREQ_SENSITIVITY
+ tristate "AMD frequency sensitivity feedback powersave bias"
+ depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
+ help
+ This adds AMD-specific powersave bias function to the ondemand
+ governor, which allows it to make more power-conscious frequency
+	  change decisions based on feedback from hardware (available on AMD
+ Family 16h and above).
+
+ Hardware feedback tells software how "sensitive" to frequency changes
+ the CPUs' workloads are. CPU-bound workloads will be more sensitive
+ -- they will perform better as frequency increases. Memory/IO-bound
+ workloads will be less sensitive -- they will not necessarily perform
+ better as frequency increases.
+
+ If in doubt, say N.
+
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
depends on X86_32 && PCI
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 863fd1865d45..315b9231feb1 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -41,23 +41,54 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
+obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
+# big LITTLE per platform glues. Keep DT_BL_CPUFREQ as the last entry in all big
+# LITTLE drivers, so that it is probed last.
+obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+
+obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
-obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
-obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
-obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
+obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
+obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
+obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o
+obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o
+obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
+obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
+obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
-obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
-obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
+obj-$(CONFIG_ARCH_TEGRA) += tegra-cpufreq.o
##################################################################################
# PowerPC platform drivers
+obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
+ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
+obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
+
+##################################################################################
+# Other platform drivers
+obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o
+obj-$(CONFIG_BLACKFIN) += blackfin-cpufreq.o
+obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
+obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
+obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
+obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
+obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
+obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
+obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
+obj-$(CONFIG_UNICORE32) += unicore2-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 57a8774f0b4e..11b8b4b54ceb 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -423,7 +423,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
struct drv_cmd cmd;
unsigned int next_state = 0; /* Index into freq_table */
unsigned int next_perf_state = 0; /* Index into perf table */
- unsigned int i;
int result = 0;
pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
@@ -486,10 +485,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = perf->states[perf->state].core_frequency * 1000;
freqs.new = data->freq_table[next_state].frequency;
- for_each_cpu(i, policy->cpus) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
drv_write(&cmd);
@@ -502,10 +498,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
}
}
- for_each_cpu(i, policy->cpus) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
perf->state = next_perf_state;
out:
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
new file mode 100644
index 000000000000..f6b79ab0070b
--- /dev/null
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -0,0 +1,148 @@
+/*
+ * amd_freq_sensitivity.c: AMD frequency sensitivity feedback powersave bias
+ * for the ondemand governor.
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Jacob Shin <jacob.shin@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/percpu-defs.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+
+#include "cpufreq_governor.h"
+
+#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080
+#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081
+#define CLASS_CODE_SHIFT 56
+#define POWERSAVE_BIAS_MAX 1000
+#define POWERSAVE_BIAS_DEF 400
+
+struct cpu_data_t {
+ u64 actual;
+ u64 reference;
+ unsigned int freq_prev;
+};
+
+static DEFINE_PER_CPU(struct cpu_data_t, cpu_data);
+
+static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
+ unsigned int freq_next,
+ unsigned int relation)
+{
+ int sensitivity;
+ long d_actual, d_reference;
+ struct msr actual, reference;
+ struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
+ struct dbs_data *od_data = policy->governor_data;
+ struct od_dbs_tuners *od_tuners = od_data->tuners;
+ struct od_cpu_dbs_info_s *od_info =
+ od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
+
+ if (!od_info->freq_table)
+ return freq_next;
+
+ rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
+ &actual.l, &actual.h);
+ rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
+ &reference.l, &reference.h);
+ actual.h &= 0x00ffffff;
+ reference.h &= 0x00ffffff;
+
+ /* counter wrapped around, so stay on current frequency */
+ if (actual.q < data->actual || reference.q < data->reference) {
+ freq_next = policy->cur;
+ goto out;
+ }
+
+ d_actual = actual.q - data->actual;
+ d_reference = reference.q - data->reference;
+
+ /* divide by 0, so stay on current frequency as well */
+ if (d_reference == 0) {
+ freq_next = policy->cur;
+ goto out;
+ }
+
+ sensitivity = POWERSAVE_BIAS_MAX -
+ (POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);
+
+ clamp(sensitivity, 0, POWERSAVE_BIAS_MAX);
+
+ /* this workload is not CPU bound, so choose a lower freq */
+ if (sensitivity < od_tuners->powersave_bias) {
+ if (data->freq_prev == policy->cur)
+ freq_next = policy->cur;
+
+ if (freq_next > policy->cur)
+ freq_next = policy->cur;
+ else if (freq_next < policy->cur)
+ freq_next = policy->min;
+ else {
+ unsigned int index;
+
+ cpufreq_frequency_table_target(policy,
+ od_info->freq_table, policy->cur - 1,
+ CPUFREQ_RELATION_H, &index);
+ freq_next = od_info->freq_table[index].frequency;
+ }
+
+ data->freq_prev = freq_next;
+ } else
+ data->freq_prev = 0;
+
+out:
+ data->actual = actual.q;
+ data->reference = reference.q;
+ return freq_next;
+}
+
+static int __init amd_freq_sensitivity_init(void)
+{
+ u64 val;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return -ENODEV;
+
+ if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ return -ENODEV;
+
+ if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+ return -ENODEV;
+
+ if (!(val >> CLASS_CODE_SHIFT))
+ return -ENODEV;
+
+ od_register_powersave_bias_handler(amd_powersave_bias_target,
+ POWERSAVE_BIAS_DEF);
+ return 0;
+}
+late_initcall(amd_freq_sensitivity_init);
+
+static void __exit amd_freq_sensitivity_exit(void)
+{
+ od_unregister_powersave_bias_handler();
+}
+module_exit(amd_freq_sensitivity_exit);
+
+static const struct x86_cpu_id amd_freq_sensitivity_ids[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_PROC_FEEDBACK),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids);
+
+MODULE_AUTHOR("Jacob Shin <jacob.shin@amd.com>");
+MODULE_DESCRIPTION("AMD frequency sensitivity feedback powersave bias for "
+ "the ondemand governor.");
+MODULE_LICENSE("GPL");
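
For orientation, here is a rough standalone sketch of the powersave-bias arithmetic the new amd_freq_sensitivity.c module performs; the delta values are made up for illustration and are not part of the patch. With d_reference = 1000 and d_actual = 250, sensitivity works out to 1000 - (1000 * 750 / 1000) = 250, which is below the default bias of 400, so the governor would pick a lower frequency for that (not CPU-bound) interval.

/* Standalone sketch of the sensitivity calculation; deltas are made up. */
#include <stdio.h>

#define POWERSAVE_BIAS_MAX	1000
#define POWERSAVE_BIAS_DEF	400

int main(void)
{
	long d_actual = 250;		/* delta of the "actual" performance counter */
	long d_reference = 1000;	/* delta of the "reference" performance counter */
	int sensitivity;

	sensitivity = POWERSAVE_BIAS_MAX -
		(POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);

	/* 250 < 400: workload is not CPU bound, so a lower frequency would be chosen. */
	printf("sensitivity = %d (default bias %d)\n", sensitivity, POWERSAVE_BIAS_DEF);
	return 0;
}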
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
new file mode 100644
index 000000000000..dbdf677d2f36
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.c
@@ -0,0 +1,278 @@
+/*
+ * ARM big.LITTLE Platforms CPUFreq support
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/of_platform.h>
+#include <linux/opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+/* Currently we support only two clusters */
+#define MAX_CLUSTERS 2
+
+static struct cpufreq_arm_bL_ops *arm_bL_ops;
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
+static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
+
+static int cpu_to_cluster(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static unsigned int bL_cpufreq_get(unsigned int cpu)
+{
+ u32 cur_cluster = cpu_to_cluster(cpu);
+
+ return clk_get_rate(clk[cur_cluster]) / 1000;
+}
+
+/* Validate policy frequency range */
+static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+ return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+}
+
+/* Set clock frequency */
+static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
+ int ret = 0;
+
+ cur_cluster = cpu_to_cluster(policy->cpu);
+
+ freqs.old = bL_cpufreq_get(policy->cpu);
+
+ /* Determine valid target frequency using freq_table */
+ cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
+ target_freq, relation, &freq_tab_idx);
+ freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
+
+ pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
+ __func__, cpu, cur_cluster, freqs.old, target_freq,
+ freqs.new);
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
+ if (ret) {
+ pr_err("clk_set_rate failed: %d\n", ret);
+ return ret;
+ }
+
+ policy->cur = freqs.new;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+
+ if (!atomic_dec_return(&cluster_usage[cluster])) {
+ clk_put(clk[cluster]);
+ opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
+ }
+}
+
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ char name[14] = "cpu-cluster.";
+ int ret;
+
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ ret = arm_bL_ops->init_opp_table(cpu_dev);
+ if (ret) {
+ dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
+ __func__, cpu_dev->id, ret);
+ goto atomic_dec;
+ }
+
+ ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
+ __func__, cpu_dev->id, ret);
+ goto atomic_dec;
+ }
+
+ name[12] = cluster + '0';
+ clk[cluster] = clk_get_sys(name, NULL);
+ if (!IS_ERR(clk[cluster])) {
+ dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
+ __func__, clk[cluster], freq_table[cluster],
+ cluster);
+ return 0;
+ }
+
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+ __func__, cpu_dev->id, cluster);
+ ret = PTR_ERR(clk[cluster]);
+ opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+atomic_dec:
+ atomic_dec(&cluster_usage[cluster]);
+ dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+ cluster);
+ return ret;
+}
+
+/* Per-CPU initialization */
+static int bL_cpufreq_init(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ struct device *cpu_dev;
+ int ret;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ ret = get_cluster_clk_and_freq_table(cpu_dev);
+ if (ret)
+ return ret;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ if (ret) {
+ dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
+ policy->cpu, cur_cluster);
+ put_cluster_clk_and_freq_table(cpu_dev);
+ return ret;
+ }
+
+ cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+
+ if (arm_bL_ops->get_transition_latency)
+ policy->cpuinfo.transition_latency =
+ arm_bL_ops->get_transition_latency(cpu_dev);
+ else
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+ policy->cur = bL_cpufreq_get(policy->cpu);
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+ dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
+ return 0;
+}
+
+static int bL_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ put_cluster_clk_and_freq_table(cpu_dev);
+ dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
+
+ return 0;
+}
+
+/* Export freq_table to sysfs */
+static struct freq_attr *bL_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver bL_cpufreq_driver = {
+ .name = "arm-big-little",
+ .flags = CPUFREQ_STICKY,
+ .verify = bL_cpufreq_verify_policy,
+ .target = bL_cpufreq_set_target,
+ .get = bL_cpufreq_get,
+ .init = bL_cpufreq_init,
+ .exit = bL_cpufreq_exit,
+ .have_governor_per_policy = true,
+ .attr = bL_cpufreq_attr,
+};
+
+int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
+{
+ int ret;
+
+ if (arm_bL_ops) {
+ pr_debug("%s: Already registered: %s, exiting\n", __func__,
+ arm_bL_ops->name);
+ return -EBUSY;
+ }
+
+ if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
+ pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
+ return -ENODEV;
+ }
+
+ arm_bL_ops = ops;
+
+ ret = cpufreq_register_driver(&bL_cpufreq_driver);
+ if (ret) {
+ pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+ __func__, ops->name, ret);
+ arm_bL_ops = NULL;
+ } else {
+ pr_info("%s: Registered platform driver: %s\n", __func__,
+ ops->name);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bL_cpufreq_register);
+
+void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
+{
+ if (arm_bL_ops != ops) {
+ pr_err("%s: Registered with: %s, can't unregister, exiting\n",
+ __func__, arm_bL_ops->name);
+ return;
+ }
+
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ pr_info("%s: Un-registered platform driver: %s\n", __func__,
+ arm_bL_ops->name);
+ arm_bL_ops = NULL;
+}
+EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
new file mode 100644
index 000000000000..70f18fc12d4a
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.h
@@ -0,0 +1,40 @@
+/*
+ * ARM big.LITTLE platform's CPUFreq header file
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef CPUFREQ_ARM_BIG_LITTLE_H
+#define CPUFREQ_ARM_BIG_LITTLE_H
+
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct cpufreq_arm_bL_ops {
+ char name[CPUFREQ_NAME_LEN];
+ int (*get_transition_latency)(struct device *cpu_dev);
+
+ /*
+ * This must set opp table for cpu_dev in a similar way as done by
+ * of_init_opp_table().
+ */
+ int (*init_opp_table)(struct device *cpu_dev);
+};
+
+int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
+void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
+
+#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
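
As a rough illustration of the contract this header defines (a hypothetical platform glue, not part of the patch; my_init_opp_table() is a placeholder for whatever populates the OPP table for the CPU device), a platform registers itself roughly as below. The arm_big_little_dt.c driver added next is the real in-tree instance of this pattern.

/* Hypothetical platform glue for the arm_big_little cpufreq driver. */
#include <linux/module.h>
#include "arm_big_little.h"

static int my_init_opp_table(struct device *cpu_dev)
{
	/* Populate the OPP table for cpu_dev, e.g. from platform data. */
	return 0;
}

static struct cpufreq_arm_bL_ops my_bL_ops = {
	.name		= "my-bl",
	.init_opp_table	= my_init_opp_table,
};

static int __init my_bL_init(void)
{
	return bL_cpufreq_register(&my_bL_ops);
}
module_init(my_bL_init);

static void __exit my_bL_exit(void)
{
	bL_cpufreq_unregister(&my_bL_ops);
}
module_exit(my_bL_exit);

MODULE_LICENSE("GPL");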
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
new file mode 100644
index 000000000000..44be3115375c
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -0,0 +1,107 @@
+/*
+ * Generic big.LITTLE CPUFreq Interface driver
+ *
+ * It provides necessary ops to arm_big_little cpufreq driver and gets
+ * Frequency information from Device Tree. Freq table in DT must be in KHz.
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/opp.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "arm_big_little.h"
+
+static int dt_init_opp_table(struct device *cpu_dev)
+{
+ struct device_node *np, *parent;
+ int count = 0, ret;
+
+ parent = of_find_node_by_path("/cpus");
+ if (!parent) {
+ pr_err("failed to find OF /cpus\n");
+ return -ENOENT;
+ }
+
+ for_each_child_of_node(parent, np) {
+ if (count++ != cpu_dev->id)
+ continue;
+ if (!of_get_property(np, "operating-points", NULL)) {
+ ret = -ENODATA;
+ } else {
+ cpu_dev->of_node = np;
+ ret = of_init_opp_table(cpu_dev);
+ }
+ of_node_put(np);
+ of_node_put(parent);
+
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static int dt_get_transition_latency(struct device *cpu_dev)
+{
+ struct device_node *np, *parent;
+ u32 transition_latency = CPUFREQ_ETERNAL;
+ int count = 0;
+
+ parent = of_find_node_by_path("/cpus");
+ if (!parent) {
+ pr_err("failed to find OF /cpus\n");
+ return -ENOENT;
+ }
+
+ for_each_child_of_node(parent, np) {
+ if (count++ != cpu_dev->id)
+ continue;
+
+ of_property_read_u32(np, "clock-latency", &transition_latency);
+ of_node_put(np);
+ of_node_put(parent);
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static struct cpufreq_arm_bL_ops dt_bL_ops = {
+ .name = "dt-bl",
+ .get_transition_latency = dt_get_transition_latency,
+ .init_opp_table = dt_init_opp_table,
+};
+
+static int generic_bL_init(void)
+{
+ return bL_cpufreq_register(&dt_bL_ops);
+}
+module_init(generic_bL_init);
+
+static void generic_bL_exit(void)
+{
+ return bL_cpufreq_unregister(&dt_bL_ops);
+}
+module_exit(generic_bL_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
new file mode 100644
index 000000000000..654488723cb5
--- /dev/null
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2004-2007 Atmel Corporation
+ *
+ * Based on MIPS implementation arch/mips/kernel/time.c
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*#define DEBUG*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+static struct clk *cpuclk;
+
+static int at32_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+ return 0;
+}
+
+static unsigned int at32_get_speed(unsigned int cpu)
+{
+ /* No SMP support */
+ if (cpu)
+ return 0;
+ return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000);
+}
+
+static unsigned int ref_freq;
+static unsigned long loops_per_jiffy_ref;
+
+static int at32_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ long freq;
+
+ /* Convert target_freq from kHz to Hz */
+ freq = clk_round_rate(cpuclk, target_freq * 1000);
+
+ /* Check if policy->min <= new_freq <= policy->max */
+ if(freq < (policy->min * 1000) || freq > (policy->max * 1000))
+ return -EINVAL;
+
+ pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
+
+ freqs.old = at32_get_speed(0);
+ freqs.new = (freq + 500) / 1000;
+ freqs.flags = 0;
+
+ if (!ref_freq) {
+ ref_freq = freqs.old;
+ loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
+ }
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ if (freqs.old < freqs.new)
+ boot_cpu_data.loops_per_jiffy = cpufreq_scale(
+ loops_per_jiffy_ref, ref_freq, freqs.new);
+ clk_set_rate(cpuclk, freq);
+ if (freqs.new < freqs.old)
+ boot_cpu_data.loops_per_jiffy = cpufreq_scale(
+ loops_per_jiffy_ref, ref_freq, freqs.new);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ pr_debug("cpufreq: set frequency %lu Hz\n", freq);
+
+ return 0;
+}
+
+static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ cpuclk = clk_get(NULL, "cpu");
+ if (IS_ERR(cpuclk)) {
+ pr_debug("cpufreq: could not get CPU clk\n");
+ return PTR_ERR(cpuclk);
+ }
+
+ policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+ policy->cpuinfo.transition_latency = 0;
+ policy->cur = at32_get_speed(0);
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ printk("cpufreq: AT32AP CPU frequency driver\n");
+
+ return 0;
+}
+
+static struct cpufreq_driver at32_driver = {
+ .name = "at32ap",
+ .owner = THIS_MODULE,
+ .init = at32_cpufreq_driver_init,
+ .verify = at32_verify_speed,
+ .target = at32_set_target,
+ .get = at32_get_speed,
+ .flags = CPUFREQ_STICKY,
+};
+
+static int __init at32_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&at32_driver);
+}
+late_initcall(at32_cpufreq_init);
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
new file mode 100644
index 000000000000..995511e80bef
--- /dev/null
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -0,0 +1,247 @@
+/*
+ * Blackfin core clock scaling
+ *
+ * Copyright 2008-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <asm/blackfin.h>
+#include <asm/time.h>
+#include <asm/dpmc.h>
+
+
+/* this is the table of CCLK frequencies, in Hz */
+/* .index is the entry in the auxiliary dpm_state_table[] */
+static struct cpufreq_frequency_table bfin_freq_table[] = {
+ {
+ .frequency = CPUFREQ_TABLE_END,
+ .index = 0,
+ },
+ {
+ .frequency = CPUFREQ_TABLE_END,
+ .index = 1,
+ },
+ {
+ .frequency = CPUFREQ_TABLE_END,
+ .index = 2,
+ },
+ {
+ .frequency = CPUFREQ_TABLE_END,
+ .index = 0,
+ },
+};
+
+static struct bfin_dpm_state {
+ unsigned int csel; /* system clock divider */
+ unsigned int tscale; /* change the divider on the core timer interrupt */
+} dpm_state_table[3];
+
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+/*
+ * normalized to maximum frequency offset for CYCLES,
+ * used in time-ts cycles clock source, but could be used
+ * somewhere also.
+ */
+unsigned long long __bfin_cycles_off;
+unsigned int __bfin_cycles_mod;
+#endif
+
+/**************************************************************************/
+static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
+{
+
+ unsigned long csel, min_cclk;
+ int index;
+
+ /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
+#if ANOMALY_05000273 || ANOMALY_05000274 || \
+ (!(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) \
+ && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
+ min_cclk = sclk * 2;
+#else
+ min_cclk = sclk;
+#endif
+
+#ifndef CONFIG_BF60x
+ csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
+#else
+ csel = bfin_read32(CGU0_DIV) & 0x1F;
+#endif
+
+ for (index = 0; (cclk >> index) >= min_cclk && csel <= 3 && index < 3; index++, csel++) {
+ bfin_freq_table[index].frequency = cclk >> index;
+#ifndef CONFIG_BF60x
+ dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
+#else
+ dpm_state_table[index].csel = csel;
+#endif
+ dpm_state_table[index].tscale = (TIME_SCALE >> index) - 1;
+
+ pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
+ bfin_freq_table[index].frequency,
+ dpm_state_table[index].csel,
+ dpm_state_table[index].tscale);
+ }
+ return;
+}
+
+static void bfin_adjust_core_timer(void *info)
+{
+ unsigned int tscale;
+ unsigned int index = *(unsigned int *)info;
+
+ /* we have to adjust the core timer, because it is using cclk */
+ tscale = dpm_state_table[index].tscale;
+ bfin_write_TSCALE(tscale);
+ return;
+}
+
+static unsigned int bfin_getfreq_khz(unsigned int cpu)
+{
+ /* Both CoreA/B have the same core clock */
+ return get_cclk() / 1000;
+}
+
+#ifdef CONFIG_BF60x
+unsigned long cpu_set_cclk(int cpu, unsigned long new)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(NULL, "CCLK");
+ if (IS_ERR(clk))
+ return -ENODEV;
+
+ ret = clk_set_rate(clk, new);
+ clk_put(clk);
+ return ret;
+}
+#endif
+
+static int bfin_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+#ifndef CONFIG_BF60x
+ unsigned int plldiv;
+#endif
+ unsigned int index;
+ unsigned long cclk_hz;
+ struct cpufreq_freqs freqs;
+ static unsigned long lpj_ref;
+ static unsigned int lpj_ref_freq;
+ int ret = 0;
+
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+ cycles_t cycles;
+#endif
+
+ if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq,
+ relation, &index))
+ return -EINVAL;
+
+ cclk_hz = bfin_freq_table[index].frequency;
+
+ freqs.old = bfin_getfreq_khz(0);
+ freqs.new = cclk_hz;
+
+ pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
+ cclk_hz, target_freq, freqs.old);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+#ifndef CONFIG_BF60x
+ plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
+ bfin_write_PLL_DIV(plldiv);
+#else
+ ret = cpu_set_cclk(policy->cpu, freqs.new * 1000);
+ if (ret != 0) {
+ WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
+ return ret;
+ }
+#endif
+ on_each_cpu(bfin_adjust_core_timer, &index, 1);
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+ cycles = get_cycles();
+ SSYNC();
+ cycles += 10; /* ~10 cycles we lose after get_cycles() */
+ __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
+ __bfin_cycles_mod = index;
+#endif
+ if (!lpj_ref_freq) {
+ lpj_ref = loops_per_jiffy;
+ lpj_ref_freq = freqs.old;
+ }
+ if (freqs.new != freqs.old) {
+ loops_per_jiffy = cpufreq_scale(lpj_ref,
+ lpj_ref_freq, freqs.new);
+ }
+
+ /* TODO: just test case for cycles clock source, remove later */
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ pr_debug("cpufreq: done\n");
+ return ret;
+}
+
+static int bfin_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, bfin_freq_table);
+}
+
+static int __bfin_cpu_init(struct cpufreq_policy *policy)
+{
+
+ unsigned long cclk, sclk;
+
+ cclk = get_cclk() / 1000;
+ sclk = get_sclk() / 1000;
+
+ if (policy->cpu == CPUFREQ_CPU)
+ bfin_init_tables(cclk, sclk);
+
+ policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
+
+ policy->cur = cclk;
+ cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
+ return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
+}
+
+static struct freq_attr *bfin_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver bfin_driver = {
+ .verify = bfin_verify_speed,
+ .target = bfin_target,
+ .get = bfin_getfreq_khz,
+ .init = __bfin_cpu_init,
+ .name = "bfin cpufreq",
+ .owner = THIS_MODULE,
+ .attr = bfin_freq_attr,
+};
+
+static int __init bfin_cpu_init(void)
+{
+ return cpufreq_register_driver(&bfin_driver);
+}
+
+static void __exit bfin_cpu_exit(void)
+{
+ cpufreq_unregister_driver(&bfin_driver);
+}
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("cpufreq driver for Blackfin");
+MODULE_LICENSE("GPL");
+
+module_init(bfin_cpu_init);
+module_exit(bfin_cpu_exit);
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 37d23a0f8c56..3ab8294eab04 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -44,8 +44,9 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
{
struct cpufreq_freqs freqs;
struct opp *opp;
- unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0;
- unsigned int index, cpu;
+ unsigned long volt = 0, volt_old = 0, tol = 0;
+ long freq_Hz;
+ unsigned int index;
int ret;
ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
@@ -65,10 +66,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new)
return 0;
- for_each_online_cpu(cpu) {
- freqs.cpu = cpu;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (cpu_reg) {
rcu_read_lock();
@@ -76,7 +74,9 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
- return PTR_ERR(opp);
+ freqs.new = freqs.old;
+ ret = PTR_ERR(opp);
+ goto post_notify;
}
volt = opp_get_voltage(opp);
rcu_read_unlock();
@@ -94,7 +94,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
if (ret) {
pr_err("failed to scale voltage up: %d\n", ret);
freqs.new = freqs.old;
- return ret;
+ goto post_notify;
}
}
@@ -103,7 +103,8 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
pr_err("failed to set clock rate: %d\n", ret);
if (cpu_reg)
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- return ret;
+ freqs.new = freqs.old;
+ goto post_notify;
}
/* scaling down? scale voltage after frequency */
@@ -113,25 +114,19 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
pr_err("failed to scale voltage down: %d\n", ret);
clk_set_rate(cpu_clk, freqs.old * 1000);
freqs.new = freqs.old;
- return ret;
}
}
- for_each_online_cpu(cpu) {
- freqs.cpu = cpu;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- }
+post_notify:
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return 0;
+ return ret;
}
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- if (policy->cpu != 0)
- return -EINVAL;
-
ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (ret) {
pr_err("invalid frequency table: %d\n", ret);
@@ -262,6 +257,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
}
of_node_put(np);
+ of_node_put(parent);
return 0;
out_free_table:
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index 13d311ee08b3..af1542d41440 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -263,7 +263,6 @@ static int nforce2_target(struct cpufreq_policy *policy,
freqs.old = nforce2_get(policy->cpu);
freqs.new = target_fsb * fid * 100;
- freqs.cpu = 0; /* Only one CPU on nForce2 platforms */
if (freqs.old == freqs.new)
return 0;
@@ -271,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
freqs.old, freqs.new);
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Disable IRQs */
/* local_irq_save(flags); */
@@ -286,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
/* Enable IRQs */
/* local_irq_restore(flags); */
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}
@@ -360,12 +359,10 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
min_fsb = NFORCE2_MIN_FSB;
/* cpuinfo and default policy values */
- policy->cpuinfo.min_freq = min_fsb * fid * 100;
- policy->cpuinfo.max_freq = max_fsb * fid * 100;
+ policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
+ policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = nforce2_get(policy->cpu);
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
return 0;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b02824d092e7..1b8a48eaf90f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -45,7 +45,7 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
-static DEFINE_SPINLOCK(cpufreq_driver_lock);
+static DEFINE_RWLOCK(cpufreq_driver_lock);
/*
* cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
@@ -128,6 +128,11 @@ void disable_cpufreq(void)
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
+bool have_governor_per_policy(void)
+{
+ return cpufreq_driver->have_governor_per_policy;
+}
+
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
struct cpufreq_policy *data;
@@ -137,7 +142,7 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
goto err_out;
/* get the cpufreq driver */
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
if (!cpufreq_driver)
goto err_out_unlock;
@@ -155,13 +160,13 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
if (!sysfs && !kobject_get(&data->kobj))
goto err_out_put_module;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
return data;
err_out_put_module:
module_put(cpufreq_driver->owner);
err_out_unlock:
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
return NULL;
}
@@ -244,19 +249,9 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
#endif
-/**
- * cpufreq_notify_transition - call notifier chain and adjust_jiffies
- * on frequency transition.
- *
- * This function calls the transition notifiers and the "adjust_jiffies"
- * function. It is called twice on all CPU frequency changes that have
- * external effects.
- */
-void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
+void __cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, unsigned int state)
{
- struct cpufreq_policy *policy;
- unsigned long flags;
-
BUG_ON(irqs_disabled());
if (cpufreq_disabled())
@@ -266,10 +261,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
switch (state) {
case CPUFREQ_PRECHANGE:
@@ -303,6 +294,20 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
break;
}
}
+/**
+ * cpufreq_notify_transition - call notifier chain and adjust_jiffies
+ * on frequency transition.
+ *
+ * This function calls the transition notifiers and the "adjust_jiffies"
+ * function. It is called twice on all CPU frequency changes that have
+ * external effects.
+ */
+void cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, unsigned int state)
+{
+ for_each_cpu(freqs->cpu, policy->cpus)
+ __cpufreq_notify_transition(policy, freqs, state);
+}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
@@ -765,12 +770,12 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
goto err_out_kobj_put;
}
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
ret = cpufreq_add_dev_symlink(cpu, policy);
if (ret)
@@ -803,27 +808,30 @@ static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
struct device *dev)
{
struct cpufreq_policy *policy;
- int ret = 0;
+ int ret = 0, has_target = !!cpufreq_driver->target;
unsigned long flags;
policy = cpufreq_cpu_get(sibling);
WARN_ON(!policy);
- __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (has_target)
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
lock_policy_rwsem_write(sibling);
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
cpumask_set_cpu(cpu, policy->cpus);
per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
per_cpu(cpufreq_cpu_data, cpu) = policy;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
unlock_policy_rwsem_write(sibling);
- __cpufreq_governor(policy, CPUFREQ_GOV_START);
- __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ if (has_target) {
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ }
ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
if (ret) {
@@ -871,15 +879,15 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
#ifdef CONFIG_HOTPLUG_CPU
/* Check if this cpu was hot-unplugged earlier and has siblings */
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_online_cpu(sibling) {
struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
return cpufreq_add_policy_cpu(cpu, sibling, dev);
}
}
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif
@@ -952,10 +960,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return 0;
err_out_unregister:
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
@@ -1008,12 +1016,12 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
data = per_cpu(cpufreq_cpu_data, cpu);
per_cpu(cpufreq_cpu_data, cpu) = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!data) {
pr_debug("%s: No cpu_data found\n", __func__);
@@ -1031,7 +1039,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
WARN_ON(lock_policy_rwsem_write(cpu));
cpus = cpumask_weight(data->cpus);
- cpumask_clear_cpu(cpu, data->cpus);
+
+ if (cpus > 1)
+ cpumask_clear_cpu(cpu, data->cpus);
unlock_policy_rwsem_write(cpu);
if (cpu != data->cpu) {
@@ -1047,9 +1057,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
WARN_ON(lock_policy_rwsem_write(cpu));
cpumask_set_cpu(cpu, data->cpus);
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
per_cpu(cpufreq_cpu_data, cpu) = data;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
unlock_policy_rwsem_write(cpu);
@@ -1070,6 +1080,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
+ if (cpufreq_driver->target)
+ __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
lock_policy_rwsem_read(cpu);
kobj = &data->kobj;
cmp = &data->kobj_unregister;
@@ -1134,16 +1147,23 @@ static void handle_update(struct work_struct *work)
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
unsigned int new_freq)
{
+ struct cpufreq_policy *policy;
struct cpufreq_freqs freqs;
+ unsigned long flags;
+
pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
"core thinks of %u, is %u kHz.\n", old_freq, new_freq);
- freqs.cpu = cpu;
freqs.old = old_freq;
freqs.new = new_freq;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
@@ -1544,10 +1564,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->cpu, event);
ret = policy->governor->governor(policy, event);
- if (event == CPUFREQ_GOV_START)
- policy->governor->initialized++;
- else if (event == CPUFREQ_GOV_STOP)
- policy->governor->initialized--;
+ if (!ret) {
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ policy->governor->initialized++;
+ else if (event == CPUFREQ_GOV_POLICY_EXIT)
+ policy->governor->initialized--;
+ }
/* we keep one module reference alive for
each CPU governed by this CPU */
@@ -1651,7 +1673,7 @@ EXPORT_SYMBOL(cpufreq_get_policy);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy)
{
- int ret = 0;
+ int ret = 0, failed = 1;
pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
policy->min, policy->max);
@@ -1705,18 +1727,31 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
pr_debug("governor switch\n");
/* end old governor */
- if (data->governor)
+ if (data->governor) {
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ __cpufreq_governor(data,
+ CPUFREQ_GOV_POLICY_EXIT);
+ }
/* start new governor */
data->governor = policy->governor;
- if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
+ if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
+ if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
+ failed = 0;
+ else
+ __cpufreq_governor(data,
+ CPUFREQ_GOV_POLICY_EXIT);
+ }
+
+ if (failed) {
/* new governor failed, so re-start old one */
pr_debug("starting governor %s failed\n",
data->governor->name);
if (old_gov) {
data->governor = old_gov;
__cpufreq_governor(data,
+ CPUFREQ_GOV_POLICY_INIT);
+ __cpufreq_governor(data,
CPUFREQ_GOV_START);
}
ret = -EINVAL;
@@ -1848,13 +1883,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
return -EBUSY;
}
cpufreq_driver = driver_data;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
@@ -1886,9 +1921,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
@@ -1914,9 +1949,9 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
subsys_interface_unregister(&cpufreq_interface);
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
return 0;
}
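
[Editor's example] The cpufreq.c hunks convert cpufreq_driver_lock from a spinlock to an rwlock: hot lookup paths such as __cpufreq_cpu_get() and the hotplug sibling scan take it for reading and can run concurrently, while driver registration/unregistration and updates to the per-CPU policy table take it for writing. A minimal pthreads sketch of that read-mostly pattern follows; the names are illustrative, not the kernel primitives.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t driver_lock = PTHREAD_RWLOCK_INITIALIZER;
static const char *driver_name;	/* models the cpufreq_driver pointer */

/* many CPUs can look the driver up at once */
static const char *driver_get(void)
{
	const char *name;

	pthread_rwlock_rdlock(&driver_lock);
	name = driver_name;
	pthread_rwlock_unlock(&driver_lock);
	return name;
}

/* registration is rare and takes the lock exclusively */
static int driver_register(const char *name)
{
	int ret = 0;

	pthread_rwlock_wrlock(&driver_lock);
	if (driver_name)
		ret = -1;	/* -EBUSY in the kernel version */
	else
		driver_name = name;
	pthread_rwlock_unlock(&driver_lock);
	return ret;
}

int main(void)
{
	driver_register("cpufreq-cpu0");
	printf("driver: %s\n", driver_get());
	printf("second register: %d\n", driver_register("acpi-cpufreq"));
	return 0;
}
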
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 4fd0006b1291..0ceb2eff5a7e 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-defs.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
@@ -28,25 +29,29 @@
/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
+#define DEF_FREQUENCY_STEP (5)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
-static struct dbs_data cs_dbs_data;
static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
-static struct cs_dbs_tuners cs_tuners = {
- .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
- .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
- .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
- .ignore_nice = 0,
- .freq_step = 5,
-};
+static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
+ struct cpufreq_policy *policy)
+{
+ unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
+
+ /* max freq cannot be less than 100. But who knows... */
+ if (unlikely(freq_target == 0))
+ freq_target = DEF_FREQUENCY_STEP;
+
+ return freq_target;
+}
/*
* Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency Every sampling_rate *
- * sampling_down_factor, we check, if current idle time is more than 80%, then
- * we try to decrease frequency
+ * (default), then we try to increase frequency. Every sampling_rate *
+ * sampling_down_factor, we check, if current idle time is more than 80%
+ * (default), then we try to decrease frequency
*
* Any frequency increase takes it to the maximum frequency. Frequency reduction
* happens at minimum steps of 5% (default) of maximum frequency
@@ -55,30 +60,25 @@ static void cs_check_cpu(int cpu, unsigned int load)
{
struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
- unsigned int freq_target;
+ struct dbs_data *dbs_data = policy->governor_data;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
/*
* break out if we 'cannot' reduce the speed as the user might
* want freq_step to be zero
*/
- if (cs_tuners.freq_step == 0)
+ if (cs_tuners->freq_step == 0)
return;
/* Check for frequency increase */
- if (load > cs_tuners.up_threshold) {
+ if (load > cs_tuners->up_threshold) {
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
if (dbs_info->requested_freq == policy->max)
return;
- freq_target = (cs_tuners.freq_step * policy->max) / 100;
-
- /* max freq cannot be less than 100. But who knows.... */
- if (unlikely(freq_target == 0))
- freq_target = 5;
-
- dbs_info->requested_freq += freq_target;
+ dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
if (dbs_info->requested_freq > policy->max)
dbs_info->requested_freq = policy->max;
@@ -87,45 +87,48 @@ static void cs_check_cpu(int cpu, unsigned int load)
return;
}
- /*
- * The optimal frequency is the frequency that is the lowest that can
- * support the current CPU usage without triggering the up policy. To be
- * safe, we focus 10 points under the threshold.
- */
- if (load < (cs_tuners.down_threshold - 10)) {
- freq_target = (cs_tuners.freq_step * policy->max) / 100;
-
- dbs_info->requested_freq -= freq_target;
- if (dbs_info->requested_freq < policy->min)
- dbs_info->requested_freq = policy->min;
+ /* if sampling_down_factor is active break out early */
+ if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
+ return;
+ dbs_info->down_skip = 0;
+ /* Check for frequency decrease */
+ if (load < cs_tuners->down_threshold) {
/*
* if we cannot reduce the frequency anymore, break out early
*/
if (policy->cur == policy->min)
return;
+ dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
+ if (dbs_info->requested_freq < policy->min)
+ dbs_info->requested_freq = policy->min;
+
__cpufreq_driver_target(policy, dbs_info->requested_freq,
- CPUFREQ_RELATION_H);
+ CPUFREQ_RELATION_L);
return;
}
}
static void cs_dbs_timer(struct work_struct *work)
{
- struct delayed_work *dw = to_delayed_work(work);
struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
struct cs_cpu_dbs_info_s, cdbs.work.work);
unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
cpu);
- int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
+ struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
+ bool modify_all = true;
mutex_lock(&core_dbs_info->cdbs.timer_mutex);
- if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
- dbs_check_cpu(&cs_dbs_data, cpu);
+ if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
+ modify_all = false;
+ else
+ dbs_check_cpu(dbs_data, cpu);
- schedule_delayed_work_on(smp_processor_id(), dw, delay);
+ gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
@@ -154,16 +157,12 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
}
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
-}
+static struct common_dbs_data cs_dbs_cdata;
-static ssize_t store_sampling_down_factor(struct kobject *a,
- struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -171,13 +170,14 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- cs_tuners.sampling_down_factor = input;
+ cs_tuners->sampling_down_factor = input;
return count;
}
-static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -185,43 +185,46 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate);
+ cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
return count;
}
-static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
- if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold)
+ if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
- cs_tuners.up_threshold = input;
+ cs_tuners->up_threshold = input;
return count;
}
-static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= cs_tuners.up_threshold)
+ input >= cs_tuners->up_threshold)
return -EINVAL;
- cs_tuners.down_threshold = input;
+ cs_tuners->down_threshold = input;
return count;
}
-static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input, j;
int ret;
@@ -232,27 +235,28 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- if (input == cs_tuners.ignore_nice) /* nothing to do */
+ if (input == cs_tuners->ignore_nice) /* nothing to do */
return count;
- cs_tuners.ignore_nice = input;
+ cs_tuners->ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
struct cs_cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->cdbs.prev_cpu_wall);
- if (cs_tuners.ignore_nice)
+ &dbs_info->cdbs.prev_cpu_wall, 0);
+ if (cs_tuners->ignore_nice)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
}
-static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -267,43 +271,88 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
* no need to test here if freq_step is zero as the user might actually
* want this, they would be crazy though :)
*/
- cs_tuners.freq_step = input;
+ cs_tuners->freq_step = input;
return count;
}
-show_one(cs, sampling_rate, sampling_rate);
-show_one(cs, sampling_down_factor, sampling_down_factor);
-show_one(cs, up_threshold, up_threshold);
-show_one(cs, down_threshold, down_threshold);
-show_one(cs, ignore_nice_load, ignore_nice);
-show_one(cs, freq_step, freq_step);
-
-define_one_global_rw(sampling_rate);
-define_one_global_rw(sampling_down_factor);
-define_one_global_rw(up_threshold);
-define_one_global_rw(down_threshold);
-define_one_global_rw(ignore_nice_load);
-define_one_global_rw(freq_step);
-define_one_global_ro(sampling_rate_min);
-
-static struct attribute *dbs_attributes[] = {
- &sampling_rate_min.attr,
- &sampling_rate.attr,
- &sampling_down_factor.attr,
- &up_threshold.attr,
- &down_threshold.attr,
- &ignore_nice_load.attr,
- &freq_step.attr,
+show_store_one(cs, sampling_rate);
+show_store_one(cs, sampling_down_factor);
+show_store_one(cs, up_threshold);
+show_store_one(cs, down_threshold);
+show_store_one(cs, ignore_nice);
+show_store_one(cs, freq_step);
+declare_show_sampling_rate_min(cs);
+
+gov_sys_pol_attr_rw(sampling_rate);
+gov_sys_pol_attr_rw(sampling_down_factor);
+gov_sys_pol_attr_rw(up_threshold);
+gov_sys_pol_attr_rw(down_threshold);
+gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(freq_step);
+gov_sys_pol_attr_ro(sampling_rate_min);
+
+static struct attribute *dbs_attributes_gov_sys[] = {
+ &sampling_rate_min_gov_sys.attr,
+ &sampling_rate_gov_sys.attr,
+ &sampling_down_factor_gov_sys.attr,
+ &up_threshold_gov_sys.attr,
+ &down_threshold_gov_sys.attr,
+ &ignore_nice_gov_sys.attr,
+ &freq_step_gov_sys.attr,
NULL
};
-static struct attribute_group cs_attr_group = {
- .attrs = dbs_attributes,
+static struct attribute_group cs_attr_group_gov_sys = {
+ .attrs = dbs_attributes_gov_sys,
+ .name = "conservative",
+};
+
+static struct attribute *dbs_attributes_gov_pol[] = {
+ &sampling_rate_min_gov_pol.attr,
+ &sampling_rate_gov_pol.attr,
+ &sampling_down_factor_gov_pol.attr,
+ &up_threshold_gov_pol.attr,
+ &down_threshold_gov_pol.attr,
+ &ignore_nice_gov_pol.attr,
+ &freq_step_gov_pol.attr,
+ NULL
+};
+
+static struct attribute_group cs_attr_group_gov_pol = {
+ .attrs = dbs_attributes_gov_pol,
.name = "conservative",
};
/************************** sysfs end ************************/
+static int cs_init(struct dbs_data *dbs_data)
+{
+ struct cs_dbs_tuners *tuners;
+
+ tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
+ if (!tuners) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
+ tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ tuners->ignore_nice = 0;
+ tuners->freq_step = DEF_FREQUENCY_STEP;
+
+ dbs_data->tuners = tuners;
+ dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
+ mutex_init(&dbs_data->mutex);
+ return 0;
+}
+
+static void cs_exit(struct dbs_data *dbs_data)
+{
+ kfree(dbs_data->tuners);
+}
+
define_get_cpu_dbs_routines(cs_cpu_dbs_info);
static struct notifier_block cs_cpufreq_notifier_block = {
@@ -314,21 +363,23 @@ static struct cs_ops cs_ops = {
.notifier_block = &cs_cpufreq_notifier_block,
};
-static struct dbs_data cs_dbs_data = {
+static struct common_dbs_data cs_dbs_cdata = {
.governor = GOV_CONSERVATIVE,
- .attr_group = &cs_attr_group,
- .tuners = &cs_tuners,
+ .attr_group_gov_sys = &cs_attr_group_gov_sys,
+ .attr_group_gov_pol = &cs_attr_group_gov_pol,
.get_cpu_cdbs = get_cpu_cdbs,
.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
.gov_dbs_timer = cs_dbs_timer,
.gov_check_cpu = cs_check_cpu,
.gov_ops = &cs_ops,
+ .init = cs_init,
+ .exit = cs_exit,
};
static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
- return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
+ return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
@@ -343,7 +394,6 @@ struct cpufreq_governor cpufreq_gov_conservative = {
static int __init cpufreq_gov_dbs_init(void)
{
- mutex_init(&cs_dbs_data.mutex);
return cpufreq_register_governor(&cpufreq_gov_conservative);
}
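
[Editor's example] The conservative-governor hunks drop the single static cs_tuners instance in favor of a tuners object allocated per dbs_data in cs_init() and released in cs_exit(); that ownership change is what makes per-policy tunables possible. A small sketch of the init/exit ownership pattern, using simplified hypothetical types rather than the real dbs_data layout:

#include <stdio.h>
#include <stdlib.h>

struct tuners {
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int freq_step;
};

struct gov_data {
	struct tuners *tuners;	/* owned by init/exit below */
};

/* allocate and seed the tunables for one governor instance */
static int gov_init(struct gov_data *gd)
{
	struct tuners *t = calloc(1, sizeof(*t));

	if (!t)
		return -1;

	t->up_threshold = 80;
	t->down_threshold = 20;
	t->freq_step = 5;
	gd->tuners = t;
	return 0;
}

static void gov_exit(struct gov_data *gd)
{
	free(gd->tuners);
	gd->tuners = NULL;
}

int main(void)
{
	struct gov_data a, b;	/* e.g. one instance per policy */

	if (gov_init(&a) || gov_init(&b))
		return 1;
	a.tuners->up_threshold = 90;	/* tuning one does not affect the other */
	printf("a=%u b=%u\n", a.tuners->up_threshold, b.tuners->up_threshold);
	gov_exit(&a);
	gov_exit(&b);
	return 0;
}
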
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5a76086ff09b..443442df113b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -22,12 +22,29 @@
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "cpufreq_governor.h"
+static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+ if (have_governor_per_policy())
+ return &policy->kobj;
+ else
+ return cpufreq_global_kobject;
+}
+
+static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
+{
+ if (have_governor_per_policy())
+ return dbs_data->cdata->attr_group_gov_pol;
+ else
+ return dbs_data->cdata->attr_group_gov_sys;
+}
+
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
u64 idle_time;
@@ -50,13 +67,13 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
return cputime_to_usecs(idle_time);
}
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall)
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
+ u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
if (idle_time == -1ULL)
return get_cpu_idle_time_jiffy(cpu, wall);
- else
+ else if (!io_busy)
idle_time += get_cpu_iowait_time_us(cpu, wall);
return idle_time;
@@ -65,7 +82,7 @@ EXPORT_SYMBOL_GPL(get_cpu_idle_time);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
- struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+ struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
struct cpufreq_policy *policy;
@@ -73,7 +90,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
unsigned int ignore_nice;
unsigned int j;
- if (dbs_data->governor == GOV_ONDEMAND)
+ if (dbs_data->cdata->governor == GOV_ONDEMAND)
ignore_nice = od_tuners->ignore_nice;
else
ignore_nice = cs_tuners->ignore_nice;
@@ -83,13 +100,22 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
/* Get Absolute Load (in terms of freq for ondemand gov) */
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_common_info *j_cdbs;
- u64 cur_wall_time, cur_idle_time, cur_iowait_time;
- unsigned int idle_time, wall_time, iowait_time;
+ u64 cur_wall_time, cur_idle_time;
+ unsigned int idle_time, wall_time;
unsigned int load;
+ int io_busy = 0;
- j_cdbs = dbs_data->get_cpu_cdbs(j);
+ j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);
- cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+ /*
+ * For the purpose of ondemand, waiting for disk IO is
+ * an indication that you're performance critical, and
+ * not that the system is actually idle. So do not add
+ * the iowait time to the cpu idle time.
+ */
+ if (dbs_data->cdata->governor == GOV_ONDEMAND)
+ io_busy = od_tuners->io_is_busy;
+ cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
wall_time = (unsigned int)
(cur_wall_time - j_cdbs->prev_cpu_wall);
@@ -117,35 +143,12 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
- if (dbs_data->governor == GOV_ONDEMAND) {
- struct od_cpu_dbs_info_s *od_j_dbs_info =
- dbs_data->get_cpu_dbs_info_s(cpu);
-
- cur_iowait_time = get_cpu_iowait_time_us(j,
- &cur_wall_time);
- if (cur_iowait_time == -1ULL)
- cur_iowait_time = 0;
-
- iowait_time = (unsigned int) (cur_iowait_time -
- od_j_dbs_info->prev_cpu_iowait);
- od_j_dbs_info->prev_cpu_iowait = cur_iowait_time;
-
- /*
- * For the purpose of ondemand, waiting for disk IO is
- * an indication that you're performance critical, and
- * not that the system is actually idle. So subtract the
- * iowait time from the cpu idle time.
- */
- if (od_tuners->io_is_busy && idle_time >= iowait_time)
- idle_time -= iowait_time;
- }
-
if (unlikely(!wall_time || wall_time < idle_time))
continue;
load = 100 * (wall_time - idle_time) / wall_time;
- if (dbs_data->governor == GOV_ONDEMAND) {
+ if (dbs_data->cdata->governor == GOV_ONDEMAND) {
int freq_avg = __cpufreq_driver_getavg(policy, j);
if (freq_avg <= 0)
freq_avg = policy->cur;
@@ -157,24 +160,42 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
max_load = load;
}
- dbs_data->gov_check_cpu(cpu, max_load);
+ dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);
-static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
- unsigned int sampling_rate)
+static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
+ unsigned int delay)
{
- int delay = delay_for_sampling_rate(sampling_rate);
- struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+ struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
- schedule_delayed_work_on(cpu, &cdbs->work, delay);
+ mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
}
-static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+ unsigned int delay, bool all_cpus)
{
- struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+ int i;
- cancel_delayed_work_sync(&cdbs->work);
+ if (!all_cpus) {
+ __gov_queue_work(smp_processor_id(), dbs_data, delay);
+ } else {
+ for_each_cpu(i, policy->cpus)
+ __gov_queue_work(i, dbs_data, delay);
+ }
+}
+EXPORT_SYMBOL_GPL(gov_queue_work);
+
+static inline void gov_cancel_work(struct dbs_data *dbs_data,
+ struct cpufreq_policy *policy)
+{
+ struct cpu_dbs_common_info *cdbs;
+ int i;
+
+ for_each_cpu(i, policy->cpus) {
+ cdbs = dbs_data->cdata->get_cpu_cdbs(i);
+ cancel_delayed_work_sync(&cdbs->work);
+ }
}
/* Will return if we need to evaluate cpu load again or not */
@@ -196,31 +217,130 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
}
EXPORT_SYMBOL_GPL(need_load_eval);
-int cpufreq_governor_dbs(struct dbs_data *dbs_data,
- struct cpufreq_policy *policy, unsigned int event)
+static void set_sampling_rate(struct dbs_data *dbs_data,
+ unsigned int sampling_rate)
{
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ cs_tuners->sampling_rate = sampling_rate;
+ } else {
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ od_tuners->sampling_rate = sampling_rate;
+ }
+}
+
+int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ struct common_dbs_data *cdata, unsigned int event)
+{
+ struct dbs_data *dbs_data;
struct od_cpu_dbs_info_s *od_dbs_info = NULL;
struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
- struct cs_ops *cs_ops = NULL;
struct od_ops *od_ops = NULL;
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ struct od_dbs_tuners *od_tuners = NULL;
+ struct cs_dbs_tuners *cs_tuners = NULL;
struct cpu_dbs_common_info *cpu_cdbs;
- unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
+ unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
+ int io_busy = 0;
int rc;
- cpu_cdbs = dbs_data->get_cpu_cdbs(cpu);
+ if (have_governor_per_policy())
+ dbs_data = policy->governor_data;
+ else
+ dbs_data = cdata->gdbs_data;
+
+ WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
+
+ switch (event) {
+ case CPUFREQ_GOV_POLICY_INIT:
+ if (have_governor_per_policy()) {
+ WARN_ON(dbs_data);
+ } else if (dbs_data) {
+ policy->governor_data = dbs_data;
+ return 0;
+ }
+
+ dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
+ if (!dbs_data) {
+ pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ dbs_data->cdata = cdata;
+ rc = cdata->init(dbs_data);
+ if (rc) {
+ pr_err("%s: POLICY_INIT: init() failed\n", __func__);
+ kfree(dbs_data);
+ return rc;
+ }
+
+ rc = sysfs_create_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr(dbs_data));
+ if (rc) {
+ cdata->exit(dbs_data);
+ kfree(dbs_data);
+ return rc;
+ }
+
+ policy->governor_data = dbs_data;
- if (dbs_data->governor == GOV_CONSERVATIVE) {
- cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
- sampling_rate = &cs_tuners->sampling_rate;
+ /* policy latency is in nS. Convert it to uS first */
+ latency = policy->cpuinfo.transition_latency / 1000;
+ if (latency == 0)
+ latency = 1;
+
+ /* Bring kernel and HW constraints together */
+ dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+ MIN_LATENCY_MULTIPLIER * latency);
+ set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
+ latency * LATENCY_MULTIPLIER));
+
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+
+ cpufreq_register_notifier(cs_ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
+ if (!have_governor_per_policy())
+ cdata->gdbs_data = dbs_data;
+
+ return 0;
+ case CPUFREQ_GOV_POLICY_EXIT:
+ if ((policy->governor->initialized == 1) ||
+ have_governor_per_policy()) {
+ sysfs_remove_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr(dbs_data));
+
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+
+ cpufreq_unregister_notifier(cs_ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
+ cdata->exit(dbs_data);
+ kfree(dbs_data);
+ cdata->gdbs_data = NULL;
+ }
+
+ policy->governor_data = NULL;
+ return 0;
+ }
+
+ cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ cs_tuners = dbs_data->tuners;
+ cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+ sampling_rate = cs_tuners->sampling_rate;
ignore_nice = cs_tuners->ignore_nice;
- cs_ops = dbs_data->gov_ops;
} else {
- od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
- sampling_rate = &od_tuners->sampling_rate;
+ od_tuners = dbs_data->tuners;
+ od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+ sampling_rate = od_tuners->sampling_rate;
ignore_nice = od_tuners->ignore_nice;
- od_ops = dbs_data->gov_ops;
+ od_ops = dbs_data->cdata->gov_ops;
+ io_busy = od_tuners->io_is_busy;
}
switch (event) {
@@ -232,96 +352,53 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_common_info *j_cdbs =
- dbs_data->get_cpu_cdbs(j);
+ dbs_data->cdata->get_cpu_cdbs(j);
j_cdbs->cpu = j;
j_cdbs->cur_policy = policy;
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
- &j_cdbs->prev_cpu_wall);
+ &j_cdbs->prev_cpu_wall, io_busy);
if (ignore_nice)
j_cdbs->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
mutex_init(&j_cdbs->timer_mutex);
INIT_DEFERRABLE_WORK(&j_cdbs->work,
- dbs_data->gov_dbs_timer);
- }
-
- if (!policy->governor->initialized) {
- rc = sysfs_create_group(cpufreq_global_kobject,
- dbs_data->attr_group);
- if (rc) {
- mutex_unlock(&dbs_data->mutex);
- return rc;
- }
+ dbs_data->cdata->gov_dbs_timer);
}
/*
* conservative does not implement micro like ondemand
* governor, thus we are bound to jiffes/HZ
*/
- if (dbs_data->governor == GOV_CONSERVATIVE) {
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
cs_dbs_info->down_skip = 0;
cs_dbs_info->enable = 1;
cs_dbs_info->requested_freq = policy->cur;
-
- if (!policy->governor->initialized) {
- cpufreq_register_notifier(cs_ops->notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- dbs_data->min_sampling_rate =
- MIN_SAMPLING_RATE_RATIO *
- jiffies_to_usecs(10);
- }
} else {
od_dbs_info->rate_mult = 1;
od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
od_ops->powersave_bias_init_cpu(cpu);
-
- if (!policy->governor->initialized)
- od_tuners->io_is_busy = od_ops->io_busy();
}
- if (policy->governor->initialized)
- goto unlock;
-
- /* policy latency is in nS. Convert it to uS first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
-
- /* Bring kernel and HW constraints together */
- dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
- MIN_LATENCY_MULTIPLIER * latency);
- *sampling_rate = max(dbs_data->min_sampling_rate, latency *
- LATENCY_MULTIPLIER);
-unlock:
mutex_unlock(&dbs_data->mutex);
/* Initiate timer time stamp */
cpu_cdbs->time_stamp = ktime_get();
- for_each_cpu(j, policy->cpus)
- dbs_timer_init(dbs_data, j, *sampling_rate);
+ gov_queue_work(dbs_data, policy,
+ delay_for_sampling_rate(sampling_rate), true);
break;
case CPUFREQ_GOV_STOP:
- if (dbs_data->governor == GOV_CONSERVATIVE)
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
cs_dbs_info->enable = 0;
- for_each_cpu(j, policy->cpus)
- dbs_timer_exit(dbs_data, j);
+ gov_cancel_work(dbs_data, policy);
mutex_lock(&dbs_data->mutex);
mutex_destroy(&cpu_cdbs->timer_mutex);
- if (policy->governor->initialized == 1) {
- sysfs_remove_group(cpufreq_global_kobject,
- dbs_data->attr_group);
- if (dbs_data->governor == GOV_CONSERVATIVE)
- cpufreq_unregister_notifier(cs_ops->notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- }
mutex_unlock(&dbs_data->mutex);
break;
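
[Editor's example] In the common governor code, get_cpu_idle_time() now takes an io_busy argument: when it is set (ondemand with io_is_busy), time spent waiting on IO is treated as busy time rather than folded into idle, which raises the computed load. A toy calculation of that effect — the numbers below are made up, purely illustrative:

#include <stdio.h>

/* load in percent over one sampling window, all times in microseconds */
static unsigned int window_load(unsigned long long wall,
				unsigned long long idle,
				unsigned long long iowait,
				int io_busy)
{
	/* with io_busy the iowait time is NOT counted as idle */
	unsigned long long effective_idle = io_busy ? idle : idle + iowait;

	if (!wall || wall < effective_idle)
		return 0;
	return (unsigned int)(100 * (wall - effective_idle) / wall);
}

int main(void)
{
	/* 100 ms window: 40 ms idle, 30 ms waiting on disk IO */
	printf("io_is_busy=0 -> load %u%%\n",
	       window_load(100000, 40000, 30000, 0));	/* 30%% busy */
	printf("io_is_busy=1 -> load %u%%\n",
	       window_load(100000, 40000, 30000, 1));	/* 60%% busy */
	return 0;
}
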
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index cc4bd2f6838a..8ac33538d0bd 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -34,20 +34,81 @@
*/
#define MIN_SAMPLING_RATE_RATIO (2)
#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (100)
+#define MIN_LATENCY_MULTIPLIER (20)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
-/* Macro creating sysfs show routines */
-#define show_one(_gov, file_name, object) \
-static ssize_t show_##file_name \
+/*
+ * Macro for creating governors sysfs routines
+ *
+ * - gov_sys: One governor instance per whole system
+ * - gov_pol: One governor instance per policy
+ */
+
+/* Create attributes */
+#define gov_sys_attr_ro(_name) \
+static struct global_attr _name##_gov_sys = \
+__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
+
+#define gov_sys_attr_rw(_name) \
+static struct global_attr _name##_gov_sys = \
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_ro(_name) \
+static struct freq_attr _name##_gov_pol = \
+__ATTR(_name, 0444, show_##_name##_gov_pol, NULL)
+
+#define gov_pol_attr_rw(_name) \
+static struct freq_attr _name##_gov_pol = \
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name) \
+ gov_sys_attr_rw(_name); \
+ gov_pol_attr_rw(_name)
+
+#define gov_sys_pol_attr_ro(_name) \
+ gov_sys_attr_ro(_name); \
+ gov_pol_attr_ro(_name)
+
+/* Create show/store routines */
+#define show_one(_gov, file_name) \
+static ssize_t show_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, char *buf) \
{ \
- return sprintf(buf, "%u\n", _gov##_tuners.object); \
+ struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
+ return sprintf(buf, "%u\n", tuners->file_name); \
+} \
+ \
+static ssize_t show_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, char *buf) \
+{ \
+ struct dbs_data *dbs_data = policy->governor_data; \
+ struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
+ return sprintf(buf, "%u\n", tuners->file_name); \
+}
+
+#define store_one(_gov, file_name) \
+static ssize_t store_##file_name##_gov_sys \
+(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
+{ \
+ struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
+ return store_##file_name(dbs_data, buf, count); \
+} \
+ \
+static ssize_t store_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, const char *buf, size_t count) \
+{ \
+ struct dbs_data *dbs_data = policy->governor_data; \
+ return store_##file_name(dbs_data, buf, count); \
}
+#define show_store_one(_gov, file_name) \
+show_one(_gov, file_name); \
+store_one(_gov, file_name)
+
+/* create helper routines */
#define define_get_cpu_dbs_routines(_dbs_info) \
static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \
{ \
@@ -87,7 +148,6 @@ struct cpu_dbs_common_info {
struct od_cpu_dbs_info_s {
struct cpu_dbs_common_info cdbs;
- u64 prev_cpu_iowait;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
unsigned int freq_lo_jiffies;
@@ -103,7 +163,7 @@ struct cs_cpu_dbs_info_s {
unsigned int enable:1;
};
-/* Governers sysfs tunables */
+/* Per policy Governers sysfs tunables */
struct od_dbs_tuners {
unsigned int ignore_nice;
unsigned int sampling_rate;
@@ -123,31 +183,42 @@ struct cs_dbs_tuners {
unsigned int freq_step;
};
-/* Per Governer data */
-struct dbs_data {
+/* Common Governer data across policies */
+struct dbs_data;
+struct common_dbs_data {
/* Common across governors */
#define GOV_ONDEMAND 0
#define GOV_CONSERVATIVE 1
int governor;
- unsigned int min_sampling_rate;
- struct attribute_group *attr_group;
- void *tuners;
+ struct attribute_group *attr_group_gov_sys; /* one governor - system */
+ struct attribute_group *attr_group_gov_pol; /* one governor - policy */
- /* dbs_mutex protects dbs_enable in governor start/stop */
- struct mutex mutex;
+ /* Common data for platforms that don't set have_governor_per_policy */
+ struct dbs_data *gdbs_data;
struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
void *(*get_cpu_dbs_info_s)(int cpu);
void (*gov_dbs_timer)(struct work_struct *work);
void (*gov_check_cpu)(int cpu, unsigned int load);
+ int (*init)(struct dbs_data *dbs_data);
+ void (*exit)(struct dbs_data *dbs_data);
/* Governor specific ops, see below */
void *gov_ops;
};
+/* Governer Per policy data */
+struct dbs_data {
+ struct common_dbs_data *cdata;
+ unsigned int min_sampling_rate;
+ void *tuners;
+
+ /* dbs_mutex protects dbs_enable in governor start/stop */
+ struct mutex mutex;
+};
+
/* Governor specific ops, will be passed to dbs_data->gov_ops */
struct od_ops {
- int (*io_busy)(void);
void (*powersave_bias_init_cpu)(int cpu);
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation);
@@ -169,10 +240,31 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
return delay;
}
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
+#define declare_show_sampling_rate_min(_gov) \
+static ssize_t show_sampling_rate_min_gov_sys \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
+ return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
+} \
+ \
+static ssize_t show_sampling_rate_min_gov_pol \
+(struct cpufreq_policy *policy, char *buf) \
+{ \
+ struct dbs_data *dbs_data = policy->governor_data; \
+ return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
unsigned int sampling_rate);
-int cpufreq_governor_dbs(struct dbs_data *dbs_data,
- struct cpufreq_policy *policy, unsigned int event);
+int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ struct common_dbs_data *cdata, unsigned int event);
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+ unsigned int delay, bool all_cpus);
+void od_register_powersave_bias_handler(unsigned int (*f)
+ (struct cpufreq_policy *, unsigned int, unsigned int),
+ unsigned int powersave_bias);
+void od_unregister_powersave_bias_handler(void);
#endif /* _CPUFREQ_GOVERNOR_H */
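
[Editor's example] The header changes above replace the old show_one()/define_one_global_rw() pairs with show_store_one() and gov_sys_pol_attr_rw(), which stamp out two accessors per tunable: a _gov_sys variant reading the global gdbs_data and a _gov_pol variant reading the policy's own governor_data. A compact userspace illustration of that token-pasting idiom, with simplified stand-in structs rather than the real sysfs plumbing:

#include <stdio.h>

struct tuners { unsigned int sampling_rate; };

static struct tuners global_tuners = { .sampling_rate = 10000 };

struct policy { struct tuners *tuners; };

/* one macro invocation generates both the global and the per-policy reader */
#define show_one(name)							\
static unsigned int show_##name##_gov_sys(void)			\
{									\
	return global_tuners.name;					\
}									\
static unsigned int show_##name##_gov_pol(struct policy *p)		\
{									\
	return p->tuners->name;						\
}

show_one(sampling_rate)

int main(void)
{
	struct tuners mine = { .sampling_rate = 50000 };
	struct policy pol = { .tuners = &mine };

	printf("gov_sys: %u\n", show_sampling_rate_gov_sys());
	printf("gov_pol: %u\n", show_sampling_rate_gov_pol(&pol));
	return 0;
}
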
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f3eb26cd848f..b0ffef96bf77 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -20,9 +20,11 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tick.h>
#include <linux/types.h>
+#include <linux/cpu.h>
#include "cpufreq_governor.h"
@@ -37,22 +39,14 @@
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
-static struct dbs_data od_dbs_data;
static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
+static struct od_ops od_ops;
+
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif
-static struct od_dbs_tuners od_tuners = {
- .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
- .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
- .adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
- DEF_FREQUENCY_DOWN_DIFFERENTIAL,
- .ignore_nice = 0,
- .powersave_bias = 0,
-};
-
static void ondemand_powersave_bias_init_cpu(int cpu)
{
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -89,7 +83,7 @@ static int should_io_be_busy(void)
* Returns the freq_hi to be used right now and will set freq_hi_jiffies,
* freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
*/
-static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
+static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation)
{
unsigned int freq_req, freq_reduc, freq_avg;
@@ -98,6 +92,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
unsigned int jiffies_total, jiffies_hi, jiffies_lo;
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
policy->cpu);
+ struct dbs_data *dbs_data = policy->governor_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
if (!dbs_info->freq_table) {
dbs_info->freq_lo = 0;
@@ -108,7 +104,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
relation, &index);
freq_req = dbs_info->freq_table[index].frequency;
- freq_reduc = freq_req * od_tuners.powersave_bias / 1000;
+ freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
@@ -127,7 +123,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
dbs_info->freq_lo_jiffies = 0;
return freq_lo;
}
- jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate);
+ jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
jiffies_hi += ((freq_hi - freq_lo) / 2);
jiffies_hi /= (freq_hi - freq_lo);
@@ -148,12 +144,16 @@ static void ondemand_powersave_bias_init(void)
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
- if (od_tuners.powersave_bias)
- freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+ struct dbs_data *dbs_data = p->governor_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+ if (od_tuners->powersave_bias)
+ freq = od_ops.powersave_bias_target(p, freq,
+ CPUFREQ_RELATION_H);
else if (p->cur == p->max)
return;
- __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
+ __cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
@@ -170,15 +170,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
{
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+ struct dbs_data *dbs_data = policy->governor_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
dbs_info->freq_lo = 0;
/* Check for frequency increase */
- if (load_freq > od_tuners.up_threshold * policy->cur) {
+ if (load_freq > od_tuners->up_threshold * policy->cur) {
/* If switching to max speed, apply sampling_down_factor */
if (policy->cur < policy->max)
dbs_info->rate_mult =
- od_tuners.sampling_down_factor;
+ od_tuners->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
return;
}
@@ -193,9 +195,10 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
* support the current CPU usage without triggering the up policy. To be
* safe, we focus 10 points under the threshold.
*/
- if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
+ if (load_freq < od_tuners->adj_up_threshold
+ * policy->cur) {
unsigned int freq_next;
- freq_next = load_freq / od_tuners.adj_up_threshold;
+ freq_next = load_freq / od_tuners->adj_up_threshold;
/* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1;
@@ -203,65 +206,62 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
if (freq_next < policy->min)
freq_next = policy->min;
- if (!od_tuners.powersave_bias) {
+ if (!od_tuners->powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);
- } else {
- int freq = powersave_bias_target(policy, freq_next,
- CPUFREQ_RELATION_L);
- __cpufreq_driver_target(policy, freq,
- CPUFREQ_RELATION_L);
+ return;
}
+
+ freq_next = od_ops.powersave_bias_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
}
}
static void od_dbs_timer(struct work_struct *work)
{
- struct delayed_work *dw = to_delayed_work(work);
struct od_cpu_dbs_info_s *dbs_info =
container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
cpu);
- int delay, sample_type = core_dbs_info->sample_type;
- bool eval_load;
+ struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ int delay = 0, sample_type = core_dbs_info->sample_type;
+ bool modify_all = true;
mutex_lock(&core_dbs_info->cdbs.timer_mutex);
- eval_load = need_load_eval(&core_dbs_info->cdbs,
- od_tuners.sampling_rate);
+ if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
+ modify_all = false;
+ goto max_delay;
+ }
/* Common NORMAL_SAMPLE setup */
core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
if (sample_type == OD_SUB_SAMPLE) {
delay = core_dbs_info->freq_lo_jiffies;
- if (eval_load)
- __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
- core_dbs_info->freq_lo,
- CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+ core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
} else {
- if (eval_load)
- dbs_check_cpu(&od_dbs_data, cpu);
+ dbs_check_cpu(dbs_data, cpu);
if (core_dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
core_dbs_info->sample_type = OD_SUB_SAMPLE;
delay = core_dbs_info->freq_hi_jiffies;
- } else {
- delay = delay_for_sampling_rate(od_tuners.sampling_rate
- * core_dbs_info->rate_mult);
}
}
- schedule_delayed_work_on(smp_processor_id(), dw, delay);
+max_delay:
+ if (!delay)
+ delay = delay_for_sampling_rate(od_tuners->sampling_rate
+ * core_dbs_info->rate_mult);
+
+ gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
/************************** sysfs interface ************************/
-
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
-}
+static struct common_dbs_data od_dbs_cdata;
/**
* update_sampling_rate - update sampling rate effective immediately if needed.
@@ -276,12 +276,14 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
* reducing the sampling rate, we need to make the new value effective
* immediately.
*/
-static void update_sampling_rate(unsigned int new_rate)
+static void update_sampling_rate(struct dbs_data *dbs_data,
+ unsigned int new_rate)
{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
int cpu;
- od_tuners.sampling_rate = new_rate = max(new_rate,
- od_dbs_data.min_sampling_rate);
+ od_tuners->sampling_rate = new_rate = max(new_rate,
+ dbs_data->min_sampling_rate);
for_each_online_cpu(cpu) {
struct cpufreq_policy *policy;
@@ -314,42 +316,54 @@ static void update_sampling_rate(unsigned int new_rate)
cancel_delayed_work_sync(&dbs_info->cdbs.work);
mutex_lock(&dbs_info->cdbs.timer_mutex);
- schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
- usecs_to_jiffies(new_rate));
+ gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
+ usecs_to_jiffies(new_rate), true);
}
mutex_unlock(&dbs_info->cdbs.timer_mutex);
}
}
-static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- update_sampling_rate(input);
+
+ update_sampling_rate(dbs_data, input);
return count;
}
-static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
+ unsigned int j;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- od_tuners.io_is_busy = !!input;
+ od_tuners->io_is_busy = !!input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ for_each_online_cpu(j) {
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ j);
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
+ }
return count;
}
-static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -359,23 +373,24 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
return -EINVAL;
}
/* Calculate the new adj_up_threshold */
- od_tuners.adj_up_threshold += input;
- od_tuners.adj_up_threshold -= od_tuners.up_threshold;
+ od_tuners->adj_up_threshold += input;
+ od_tuners->adj_up_threshold -= od_tuners->up_threshold;
- od_tuners.up_threshold = input;
+ od_tuners->up_threshold = input;
return count;
}
-static ssize_t store_sampling_down_factor(struct kobject *a,
- struct attribute *b, const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input, j;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- od_tuners.sampling_down_factor = input;
+ od_tuners->sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
for_each_online_cpu(j) {
@@ -386,9 +401,10 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
return count;
}
-static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
@@ -401,18 +417,18 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- if (input == od_tuners.ignore_nice) { /* nothing to do */
+ if (input == od_tuners->ignore_nice) { /* nothing to do */
return count;
}
- od_tuners.ignore_nice = input;
+ od_tuners->ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
struct od_cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->cdbs.prev_cpu_wall);
- if (od_tuners.ignore_nice)
+ &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
+ if (od_tuners->ignore_nice)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -420,9 +436,10 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
return count;
}
-static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -433,68 +450,179 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
if (input > 1000)
input = 1000;
- od_tuners.powersave_bias = input;
+ od_tuners->powersave_bias = input;
ondemand_powersave_bias_init();
return count;
}
-show_one(od, sampling_rate, sampling_rate);
-show_one(od, io_is_busy, io_is_busy);
-show_one(od, up_threshold, up_threshold);
-show_one(od, sampling_down_factor, sampling_down_factor);
-show_one(od, ignore_nice_load, ignore_nice);
-show_one(od, powersave_bias, powersave_bias);
-
-define_one_global_rw(sampling_rate);
-define_one_global_rw(io_is_busy);
-define_one_global_rw(up_threshold);
-define_one_global_rw(sampling_down_factor);
-define_one_global_rw(ignore_nice_load);
-define_one_global_rw(powersave_bias);
-define_one_global_ro(sampling_rate_min);
-
-static struct attribute *dbs_attributes[] = {
- &sampling_rate_min.attr,
- &sampling_rate.attr,
- &up_threshold.attr,
- &sampling_down_factor.attr,
- &ignore_nice_load.attr,
- &powersave_bias.attr,
- &io_is_busy.attr,
+show_store_one(od, sampling_rate);
+show_store_one(od, io_is_busy);
+show_store_one(od, up_threshold);
+show_store_one(od, sampling_down_factor);
+show_store_one(od, ignore_nice);
+show_store_one(od, powersave_bias);
+declare_show_sampling_rate_min(od);
+
+gov_sys_pol_attr_rw(sampling_rate);
+gov_sys_pol_attr_rw(io_is_busy);
+gov_sys_pol_attr_rw(up_threshold);
+gov_sys_pol_attr_rw(sampling_down_factor);
+gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(powersave_bias);
+gov_sys_pol_attr_ro(sampling_rate_min);
+
+static struct attribute *dbs_attributes_gov_sys[] = {
+ &sampling_rate_min_gov_sys.attr,
+ &sampling_rate_gov_sys.attr,
+ &up_threshold_gov_sys.attr,
+ &sampling_down_factor_gov_sys.attr,
+ &ignore_nice_gov_sys.attr,
+ &powersave_bias_gov_sys.attr,
+ &io_is_busy_gov_sys.attr,
+ NULL
+};
+
+static struct attribute_group od_attr_group_gov_sys = {
+ .attrs = dbs_attributes_gov_sys,
+ .name = "ondemand",
+};
+
+static struct attribute *dbs_attributes_gov_pol[] = {
+ &sampling_rate_min_gov_pol.attr,
+ &sampling_rate_gov_pol.attr,
+ &up_threshold_gov_pol.attr,
+ &sampling_down_factor_gov_pol.attr,
+ &ignore_nice_gov_pol.attr,
+ &powersave_bias_gov_pol.attr,
+ &io_is_busy_gov_pol.attr,
NULL
};
-static struct attribute_group od_attr_group = {
- .attrs = dbs_attributes,
+static struct attribute_group od_attr_group_gov_pol = {
+ .attrs = dbs_attributes_gov_pol,
.name = "ondemand",
};
/************************** sysfs end ************************/
+static int od_init(struct dbs_data *dbs_data)
+{
+ struct od_dbs_tuners *tuners;
+ u64 idle_time;
+ int cpu;
+
+ tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
+ if (!tuners) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ cpu = get_cpu();
+ idle_time = get_cpu_idle_time_us(cpu, NULL);
+ put_cpu();
+ if (idle_time != -1ULL) {
+ /* Idle micro accounting is supported. Use finer thresholds */
+ tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+ tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
+ MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+ /*
+ * In nohz/micro accounting case we set the minimum frequency
+ * not depending on HZ, but fixed (very low). The deferred
+ * timer might skip some samples if idle/sleeping as needed.
+ */
+ dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+ } else {
+ tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
+ DEF_FREQUENCY_DOWN_DIFFERENTIAL;
+
+ /* For correct statistics, we need 10 ticks for each measure */
+ dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
+ }
+
+ tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ tuners->ignore_nice = 0;
+ tuners->powersave_bias = 0;
+ tuners->io_is_busy = should_io_be_busy();
+
+ dbs_data->tuners = tuners;
+ pr_info("%s: tuners %p\n", __func__, tuners);
+ mutex_init(&dbs_data->mutex);
+ return 0;
+}
+
+static void od_exit(struct dbs_data *dbs_data)
+{
+ kfree(dbs_data->tuners);
+}
+
define_get_cpu_dbs_routines(od_cpu_dbs_info);
static struct od_ops od_ops = {
- .io_busy = should_io_be_busy,
.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
- .powersave_bias_target = powersave_bias_target,
+ .powersave_bias_target = generic_powersave_bias_target,
.freq_increase = dbs_freq_increase,
};
-static struct dbs_data od_dbs_data = {
+static struct common_dbs_data od_dbs_cdata = {
.governor = GOV_ONDEMAND,
- .attr_group = &od_attr_group,
- .tuners = &od_tuners,
+ .attr_group_gov_sys = &od_attr_group_gov_sys,
+ .attr_group_gov_pol = &od_attr_group_gov_pol,
.get_cpu_cdbs = get_cpu_cdbs,
.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
.gov_dbs_timer = od_dbs_timer,
.gov_check_cpu = od_check_cpu,
.gov_ops = &od_ops,
+ .init = od_init,
+ .exit = od_exit,
};
+static void od_set_powersave_bias(unsigned int powersave_bias)
+{
+ struct cpufreq_policy *policy;
+ struct dbs_data *dbs_data;
+ struct od_dbs_tuners *od_tuners;
+ unsigned int cpu;
+ cpumask_t done;
+
+ cpumask_clear(&done);
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, &done))
+ continue;
+
+ policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
+ dbs_data = policy->governor_data;
+ od_tuners = dbs_data->tuners;
+ od_tuners->powersave_bias = powersave_bias;
+
+ cpumask_or(&done, &done, policy->cpus);
+ }
+ put_online_cpus();
+}
+
+void od_register_powersave_bias_handler(unsigned int (*f)
+ (struct cpufreq_policy *, unsigned int, unsigned int),
+ unsigned int powersave_bias)
+{
+ od_ops.powersave_bias_target = f;
+ od_set_powersave_bias(powersave_bias);
+}
+EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
+
+void od_unregister_powersave_bias_handler(void)
+{
+ od_ops.powersave_bias_target = generic_powersave_bias_target;
+ od_set_powersave_bias(0);
+}
+EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
+
static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
- return cpufreq_governor_dbs(&od_dbs_data, policy, event);
+ return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
@@ -509,29 +637,6 @@ struct cpufreq_governor cpufreq_gov_ondemand = {
static int __init cpufreq_gov_dbs_init(void)
{
- u64 idle_time;
- int cpu = get_cpu();
-
- mutex_init(&od_dbs_data.mutex);
- idle_time = get_cpu_idle_time_us(cpu, NULL);
- put_cpu();
- if (idle_time != -1ULL) {
- /* Idle micro accounting is supported. Use finer thresholds */
- od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
- MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
- /*
- * In nohz/micro accounting case we set the minimum frequency
- * not depending on HZ, but fixed (very low). The deferred
- * timer might skip some samples if idle/sleeping as needed.
- */
- od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
- } else {
- /* For correct statistics, we need 10 ticks for each measure */
- od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
- jiffies_to_usecs(10);
- }
-
return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
new file mode 100644
index 000000000000..ee142c490575
--- /dev/null
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -0,0 +1,146 @@
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <hwregs/reg_map.h>
+#include <hwregs/reg_rdwr.h>
+#include <hwregs/clkgen_defs.h>
+#include <hwregs/ddr2_defs.h>
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data);
+
+static struct notifier_block cris_sdram_freq_notifier_block = {
+ .notifier_call = cris_sdram_freq_notifier
+};
+
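+/* two operating points: 6000 kHz (clkgen PLL off) and 200000 kHz (PLL on) */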
+static struct cpufreq_frequency_table cris_freq_table[] = {
+ {0x01, 6000},
+ {0x02, 200000},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
+{
+ reg_clkgen_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
+ return clk_ctrl.pll ? 200000 : 6000;
+}
+
+static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
+ unsigned int state)
+{
+ struct cpufreq_freqs freqs;
+ reg_clkgen_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
+
+ freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
+ freqs.new = cris_freq_table[state].frequency;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ local_irq_disable();
+
+ /* Even though we may be SMP they will share the same clock
+ * so all settings are made on CPU0. */
+ if (cris_freq_table[state].frequency == 200000)
+ clk_ctrl.pll = 1;
+ else
+ clk_ctrl.pll = 0;
+ REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
+
+ local_irq_enable();
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+};
+
+static int cris_freq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
+}
+
+static int cris_freq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int newstate = 0;
+
+ if (cpufreq_frequency_table_target(policy, cris_freq_table,
+ target_freq, relation, &newstate))
+ return -EINVAL;
+
+ cris_freq_set_cpu_state(policy, newstate);
+
+ return 0;
+}
+
+static int cris_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = 1000000; /* 1ms */
+ policy->cur = cris_freq_get_cpu_frequency(0);
+
+ result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
+ if (result)
+ return (result);
+
+ cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
+
+ return 0;
+}
+
+
+static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+
+static struct freq_attr *cris_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver cris_freq_driver = {
+ .get = cris_freq_get_cpu_frequency,
+ .verify = cris_freq_verify,
+ .target = cris_freq_target,
+ .init = cris_freq_cpu_init,
+ .exit = cris_freq_cpu_exit,
+ .name = "cris_freq",
+ .owner = THIS_MODULE,
+ .attr = cris_freq_attr,
+};
+
+static int __init cris_freq_init(void)
+{
+ int ret;
+ ret = cpufreq_register_driver(&cris_freq_driver);
+ cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ return ret;
+}
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ int i;
+ struct cpufreq_freqs *freqs = data;
+ if (val == CPUFREQ_PRECHANGE) {
+ reg_ddr2_rw_cfg cfg =
+ REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg);
+ cfg.ref_interval = (freqs->new == 200000 ? 1560 : 46);
+
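+		/* crude busy-wait delay before the new DDR2 refresh interval is written */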
+ if (freqs->new == 200000)
+ for (i = 0; i < 50000; i++);
+		REG_WR(ddr2, regi_ddr2_ctrl, rw_cfg, cfg);
+ }
+ return 0;
+}
+
+
+module_init(cris_freq_init);
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
new file mode 100644
index 000000000000..12952235d5db
--- /dev/null
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -0,0 +1,142 @@
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <hwregs/reg_map.h>
+#include <arch/hwregs/reg_rdwr.h>
+#include <arch/hwregs/config_defs.h>
+#include <arch/hwregs/bif_core_defs.h>
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data);
+
+static struct notifier_block cris_sdram_freq_notifier_block = {
+ .notifier_call = cris_sdram_freq_notifier
+};
+
+static struct cpufreq_frequency_table cris_freq_table[] = {
+ {0x01, 6000},
+ {0x02, 200000},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
+{
+ reg_config_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
+ return clk_ctrl.pll ? 200000 : 6000;
+}
+
+static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
+ unsigned int state)
+{
+ struct cpufreq_freqs freqs;
+ reg_config_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
+
+ freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
+ freqs.new = cris_freq_table[state].frequency;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ local_irq_disable();
+
+ /* Even though we may be SMP they will share the same clock
+ * so all settings are made on CPU0. */
+ if (cris_freq_table[state].frequency == 200000)
+ clk_ctrl.pll = 1;
+ else
+ clk_ctrl.pll = 0;
+ REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl);
+
+ local_irq_enable();
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+};
+
+static int cris_freq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
+}
+
+static int cris_freq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ unsigned int newstate = 0;
+
+ if (cpufreq_frequency_table_target
+ (policy, cris_freq_table, target_freq, relation, &newstate))
+ return -EINVAL;
+
+ cris_freq_set_cpu_state(policy, newstate);
+
+ return 0;
+}
+
+static int cris_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = 1000000; /* 1ms */
+ policy->cur = cris_freq_get_cpu_frequency(0);
+
+ result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
+ if (result)
+ return (result);
+
+ cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
+
+ return 0;
+}
+
+static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *cris_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver cris_freq_driver = {
+ .get = cris_freq_get_cpu_frequency,
+ .verify = cris_freq_verify,
+ .target = cris_freq_target,
+ .init = cris_freq_cpu_init,
+ .exit = cris_freq_cpu_exit,
+ .name = "cris_freq",
+ .owner = THIS_MODULE,
+ .attr = cris_freq_attr,
+};
+
+static int __init cris_freq_init(void)
+{
+ int ret;
+ ret = cpufreq_register_driver(&cris_freq_driver);
+ cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ return ret;
+}
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ int i;
+ struct cpufreq_freqs *freqs = data;
+ if (val == CPUFREQ_PRECHANGE) {
+ reg_bif_core_rw_sdram_timing timing =
+ REG_RD(bif_core, regi_bif_core, rw_sdram_timing);
+ timing.cpd = (freqs->new == 200000 ? 0 : 1);
+
+ if (freqs->new == 200000)
+ for (i = 0; i < 50000; i++) ;
+ REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing);
+ }
+ return 0;
+}
+
+module_init(cris_freq_init);
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
new file mode 100644
index 000000000000..c33c76c360fa
--- /dev/null
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -0,0 +1,231 @@
+/*
+ * CPU frequency scaling for DaVinci
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Updated to support OMAP3
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+
+#include <mach/hardware.h>
+#include <mach/cpufreq.h>
+#include <mach/common.h>
+
+struct davinci_cpufreq {
+ struct device *dev;
+ struct clk *armclk;
+ struct clk *asyncclk;
+ unsigned long asyncrate;
+};
+static struct davinci_cpufreq cpufreq;
+
+static int davinci_verify_speed(struct cpufreq_policy *policy)
+{
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct cpufreq_frequency_table *freq_table = pdata->freq_table;
+ struct clk *armclk = cpufreq.armclk;
+
+ if (freq_table)
+ return cpufreq_frequency_table_verify(policy, freq_table);
+
+ if (policy->cpu)
+ return -EINVAL;
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
+ policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+ return 0;
+}
+
+static unsigned int davinci_getspeed(unsigned int cpu)
+{
+ if (cpu)
+ return 0;
+
+ return clk_get_rate(cpufreq.armclk) / 1000;
+}
+
+static int davinci_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ int ret = 0;
+ unsigned int idx;
+ struct cpufreq_freqs freqs;
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct clk *armclk = cpufreq.armclk;
+
+ freqs.old = davinci_getspeed(0);
+ freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;
+
+ if (freqs.old == freqs.new)
+ return ret;
+
+ dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);
+
+ ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
+ freqs.new, relation, &idx);
+ if (ret)
+ return -EINVAL;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ /* if moving to higher frequency, up the voltage beforehand */
+ if (pdata->set_voltage && freqs.new > freqs.old) {
+ ret = pdata->set_voltage(idx);
+ if (ret)
+ goto out;
+ }
+
+ ret = clk_set_rate(armclk, idx);
+ if (ret)
+ goto out;
+
+ if (cpufreq.asyncclk) {
+ ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
+ if (ret)
+ goto out;
+ }
+
+ /* if moving to lower freq, lower the voltage after lowering freq */
+ if (pdata->set_voltage && freqs.new < freqs.old)
+ pdata->set_voltage(idx);
+
+out:
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+static int davinci_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct cpufreq_frequency_table *freq_table = pdata->freq_table;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ /* Finish platform specific initialization */
+ if (pdata->init) {
+ result = pdata->init();
+ if (result)
+ return result;
+ }
+
+ policy->cur = davinci_getspeed(0);
+
+ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (result) {
+ pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
+ __func__);
+ return result;
+ }
+
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+ /*
+ * Time measurement across the target() function yields ~1500-1800us
+ * time taken with no drivers on notification list.
+ * Setting the latency to 2000 us to accommodate addition of drivers
+ * to pre/post change notification list.
+ */
+ policy->cpuinfo.transition_latency = 2000 * 1000;
+ return 0;
+}
+
+static int davinci_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *davinci_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver davinci_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = davinci_verify_speed,
+ .target = davinci_target,
+ .get = davinci_getspeed,
+ .init = davinci_cpu_init,
+ .exit = davinci_cpu_exit,
+ .name = "davinci",
+ .attr = davinci_cpufreq_attr,
+};
+
+static int __init davinci_cpufreq_probe(struct platform_device *pdev)
+{
+ struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
+ struct clk *asyncclk;
+
+ if (!pdata)
+ return -EINVAL;
+ if (!pdata->freq_table)
+ return -EINVAL;
+
+ cpufreq.dev = &pdev->dev;
+
+ cpufreq.armclk = clk_get(NULL, "arm");
+ if (IS_ERR(cpufreq.armclk)) {
+ dev_err(cpufreq.dev, "Unable to get ARM clock\n");
+ return PTR_ERR(cpufreq.armclk);
+ }
+
+ asyncclk = clk_get(cpufreq.dev, "async");
+ if (!IS_ERR(asyncclk)) {
+ cpufreq.asyncclk = asyncclk;
+ cpufreq.asyncrate = clk_get_rate(asyncclk);
+ }
+
+ return cpufreq_register_driver(&davinci_driver);
+}
+
+static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
+{
+ clk_put(cpufreq.armclk);
+
+ if (cpufreq.asyncclk)
+ clk_put(cpufreq.asyncclk);
+
+ return cpufreq_unregister_driver(&davinci_driver);
+}
+
+static struct platform_driver davinci_cpufreq_driver = {
+ .driver = {
+ .name = "cpufreq-davinci",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(davinci_cpufreq_remove),
+};
+
+int __init davinci_cpufreq_init(void)
+{
+ return platform_driver_probe(&davinci_cpufreq_driver,
+ davinci_cpufreq_probe);
+}
+
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 72f0c3efa76e..6ec6539ae041 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -37,12 +37,6 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
unsigned int idx;
int ret;
- /* scale the target frequency to one of the extremes supported */
- if (target_freq < policy->cpuinfo.min_freq)
- target_freq = policy->cpuinfo.min_freq;
- if (target_freq > policy->cpuinfo.max_freq)
- target_freq = policy->cpuinfo.max_freq;
-
/* Lookup the next frequency */
if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
relation, &idx))
@@ -55,8 +49,7 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
return 0;
/* pre-change notification */
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* update armss clk frequency */
ret = clk_set_rate(armss_clk, freqs.new * 1000);
@@ -68,8 +61,7 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
}
/* post change notification */
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}
@@ -79,15 +71,15 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
int i = 0;
unsigned long freq = clk_get_rate(armss_clk) / 1000;
- while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
- if (freq <= freq_table[i].frequency)
+	/* The value is rounded to the closest frequency in the defined table. */
+ while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) {
+ if (freq < freq_table[i].frequency +
+ (freq_table[i + 1].frequency - freq_table[i].frequency) / 2)
return freq_table[i].frequency;
i++;
}
- /* We could not find a corresponding frequency. */
- pr_err("dbx500-cpufreq: Failed to find cpufreq speed\n");
- return 0;
+ return freq_table[i].frequency;
}
static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 3fffbe6025cd..37380fb92621 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -104,7 +104,7 @@ static unsigned int eps_get(unsigned int cpu)
}
static int eps_set_state(struct eps_cpu_data *centaur,
- unsigned int cpu,
+ struct cpufreq_policy *policy,
u32 dest_state)
{
struct cpufreq_freqs freqs;
@@ -112,10 +112,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
int err = 0;
int i;
- freqs.old = eps_get(cpu);
+ freqs.old = eps_get(policy->cpu);
freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
- freqs.cpu = cpu;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Wait while CPU is busy */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
@@ -162,7 +161,7 @@ postchange:
current_multiplier);
}
#endif
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return err;
}
@@ -190,7 +189,7 @@ static int eps_target(struct cpufreq_policy *policy,
/* Make frequency transition */
dest_state = centaur->freq_table[newstate].index & 0xffff;
- ret = eps_set_state(centaur, cpu, dest_state);
+ ret = eps_set_state(centaur, policy, dest_state);
if (ret)
printk(KERN_ERR "eps: Timeout!\n");
return ret;
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 960671fd3d7e..658d860344b0 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -117,15 +117,15 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
* There is no return value.
*/
-static void elanfreq_set_cpu_state(unsigned int state)
+static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
+ unsigned int state)
{
struct cpufreq_freqs freqs;
freqs.old = elanfreq_get_cpu_frequency(0);
freqs.new = elan_multiplier[state].clock;
- freqs.cpu = 0; /* elanfreq.c is UP only driver */
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
elan_multiplier[state].clock);
@@ -161,7 +161,7 @@ static void elanfreq_set_cpu_state(unsigned int state)
udelay(10000);
local_irq_enable();
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
};
@@ -188,7 +188,7 @@ static int elanfreq_target(struct cpufreq_policy *policy,
target_freq, relation, &newstate))
return -EINVAL;
- elanfreq_set_cpu_state(newstate);
+ elanfreq_set_cpu_state(policy, newstate);
return 0;
}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 78057a357ddb..475b4f607f0d 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -70,7 +70,6 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
freqs.old = policy->cur;
freqs.new = target_freq;
- freqs.cpu = policy->cpu;
if (freqs.new == freqs.old)
goto out;
@@ -105,8 +104,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
}
arm_volt = volt_table[index];
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* When the new frequency is higher than current frequency */
if ((freqs.new > freqs.old) && !safe_arm_volt) {
@@ -131,8 +129,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
exynos_info->set_freq(old_index, index);
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
/* When the new frequency is lower than current frequency */
if ((freqs.new < freqs.old) ||
@@ -297,7 +294,7 @@ static int __init exynos_cpufreq_init(void)
else if (soc_is_exynos5250())
ret = exynos5250_cpufreq_init(exynos_info);
else
- pr_err("%s: CPU type not found\n", __func__);
+ return 0;
if (ret)
goto err_vdd_arm;
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
new file mode 100644
index 000000000000..0c74018eda47
--- /dev/null
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * EXYNOS5440 - CPU frequency scaling support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/opp.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Register definitions */
+#define XMU_DVFS_CTRL 0x0060
+#define XMU_PMU_P0_7 0x0064
+#define XMU_C0_3_PSTATE 0x0090
+#define XMU_P_LIMIT 0x00a0
+#define XMU_P_STATUS 0x00a4
+#define XMU_PMUEVTEN 0x00d0
+#define XMU_PMUIRQEN 0x00d4
+#define XMU_PMUIRQ 0x00d8
+
+/* PMU mask and shift definitions */
+#define P_VALUE_MASK 0x7
+
+#define XMU_DVFS_CTRL_EN_SHIFT 0
+
+#define P0_7_CPUCLKDEV_SHIFT 21
+#define P0_7_CPUCLKDEV_MASK 0x7
+#define P0_7_ATBCLKDEV_SHIFT 18
+#define P0_7_ATBCLKDEV_MASK 0x7
+#define P0_7_CSCLKDEV_SHIFT 15
+#define P0_7_CSCLKDEV_MASK 0x7
+#define P0_7_CPUEMA_SHIFT 28
+#define P0_7_CPUEMA_MASK 0xf
+#define P0_7_L2EMA_SHIFT 24
+#define P0_7_L2EMA_MASK 0xf
+#define P0_7_VDD_SHIFT 8
+#define P0_7_VDD_MASK 0x7f
+#define P0_7_FREQ_SHIFT 0
+#define P0_7_FREQ_MASK 0xff
+
+#define C0_3_PSTATE_VALID_SHIFT 8
+#define C0_3_PSTATE_CURR_SHIFT 4
+#define C0_3_PSTATE_NEW_SHIFT 0
+
+#define PSTATE_CHANGED_EVTEN_SHIFT 0
+
+#define PSTATE_CHANGED_IRQEN_SHIFT 0
+
+#define PSTATE_CHANGED_SHIFT 0
+
+/* some constant values for clock divider calculation */
+#define CPU_DIV_FREQ_MAX 500
+#define CPU_DBG_FREQ_MAX 375
+#define CPU_ATB_FREQ_MAX 500
+
+#define PMIC_LOW_VOLT 0x30
+#define PMIC_HIGH_VOLT 0x28
+
+#define CPUEMA_HIGH 0x2
+#define CPUEMA_MID 0x4
+#define CPUEMA_LOW 0x7
+
+#define L2EMA_HIGH 0x1
+#define L2EMA_MID 0x3
+#define L2EMA_LOW 0x4
+
+#define DIV_TAB_MAX 2
+/* frequency unit is 20MHZ */
+#define FREQ_UNIT 20
+#define MAX_VOLTAGE 1550000 /* In microvolt */
+#define VOLTAGE_STEP 12500 /* In microvolt */
+
+#define CPUFREQ_NAME "exynos5440_dvfs"
+#define DEF_TRANS_LATENCY 100000
+
+enum cpufreq_level_index {
+ L0, L1, L2, L3, L4,
+ L5, L6, L7, L8, L9,
+};
+#define CPUFREQ_LEVEL_END (L7 + 1)
+
+struct exynos_dvfs_data {
+ void __iomem *base;
+ struct resource *mem;
+ int irq;
+ struct clk *cpu_clk;
+ unsigned int cur_frequency;
+ unsigned int latency;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int freq_count;
+ struct device *dev;
+ bool dvfs_enabled;
+ struct work_struct irq_work;
+};
+
+static struct exynos_dvfs_data *dvfs_info;
+static DEFINE_MUTEX(cpufreq_lock);
+static struct cpufreq_freqs freqs;
+
+static int init_div_table(void)
+{
+ struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
+ unsigned int tmp, clk_div, ema_div, freq, volt_id;
+ int i = 0;
+ struct opp *opp;
+
+ rcu_read_lock();
+ for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
+
+ opp = opp_find_freq_exact(dvfs_info->dev,
+ freq_tbl[i].frequency * 1000, true);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(dvfs_info->dev,
+ "failed to find valid OPP for %u KHZ\n",
+ freq_tbl[i].frequency);
+ return PTR_ERR(opp);
+ }
+
+ freq = freq_tbl[i].frequency / 1000; /* In MHZ */
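+		/* pack the CPU, ATB and CS clock divider fields derived from the target frequency */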
+ clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
+ << P0_7_CPUCLKDEV_SHIFT;
+ clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
+ << P0_7_ATBCLKDEV_SHIFT;
+ clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
+ << P0_7_CSCLKDEV_SHIFT;
+
+ /* Calculate EMA */
+ volt_id = opp_get_voltage(opp);
+ volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
+ if (volt_id < PMIC_HIGH_VOLT) {
+ ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
+ (L2EMA_HIGH << P0_7_L2EMA_SHIFT);
+ } else if (volt_id > PMIC_LOW_VOLT) {
+ ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
+ (L2EMA_LOW << P0_7_L2EMA_SHIFT);
+ } else {
+ ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
+ (L2EMA_MID << P0_7_L2EMA_SHIFT);
+ }
+
+ tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
+ | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
+
+ __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
+ }
+
+ rcu_read_unlock();
+ return 0;
+}
+
+static void exynos_enable_dvfs(void)
+{
+ unsigned int tmp, i, cpu;
+ struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+ /* Disable DVFS */
+ __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);
+
+ /* Enable PSTATE Change Event */
+ tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
+ tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
+ __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
+
+ /* Enable PSTATE Change IRQ */
+ tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
+ tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
+ __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
+
+ /* Set initial performance index */
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+ if (freq_table[i].frequency == dvfs_info->cur_frequency)
+ break;
+
+ if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
+ dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
+ /* Assign the highest frequency */
+ i = 0;
+ dvfs_info->cur_frequency = freq_table[i].frequency;
+ }
+
+ dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
+ dvfs_info->cur_frequency);
+
+ for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
+ tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
+ tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
+ tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
+ __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
+ }
+
+ /* Enable DVFS */
+ __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
+ dvfs_info->base + XMU_DVFS_CTRL);
+}
+
+static int exynos_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy,
+ dvfs_info->freq_table);
+}
+
+static unsigned int exynos_getspeed(unsigned int cpu)
+{
+ return dvfs_info->cur_frequency;
+}
+
+static int exynos_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int index, tmp;
+ int ret = 0, i;
+ struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+
+ mutex_lock(&cpufreq_lock);
+
+ ret = cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index);
+ if (ret)
+ goto out;
+
+ freqs.old = dvfs_info->cur_frequency;
+ freqs.new = freq_table[index].frequency;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ /* Set the target frequency in all C0_3_PSTATE register */
+ for_each_cpu(i, policy->cpus) {
+ tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
+ tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
+ tmp |= (index << C0_3_PSTATE_NEW_SHIFT);
+
+ __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
+ }
+out:
+ mutex_unlock(&cpufreq_lock);
+ return ret;
+}
+
+static void exynos_cpufreq_work(struct work_struct *work)
+{
+ unsigned int cur_pstate, index;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
+ struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+
+ /* Ensure we can access cpufreq structures */
+ if (unlikely(dvfs_info->dvfs_enabled == false))
+ goto skip_work;
+
+ mutex_lock(&cpufreq_lock);
+ freqs.old = dvfs_info->cur_frequency;
+
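+	/* read back which P-state the controller actually selected */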
+ cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
+ if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
+ index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
+ else
+ index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;
+
+ if (likely(index < dvfs_info->freq_count)) {
+ freqs.new = freq_table[index].frequency;
+ dvfs_info->cur_frequency = freqs.new;
+ } else {
+ dev_crit(dvfs_info->dev, "New frequency out of range\n");
+ freqs.new = dvfs_info->cur_frequency;
+ }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ cpufreq_cpu_put(policy);
+ mutex_unlock(&cpufreq_lock);
+skip_work:
+ enable_irq(dvfs_info->irq);
+}
+
+static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
+ if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
+ __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
+ disable_irq_nosync(irq);
+ schedule_work(&dvfs_info->irq_work);
+ }
+ return IRQ_HANDLED;
+}
+
+static void exynos_sort_descend_freq_table(void)
+{
+ struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
+ int i = 0, index;
+ unsigned int tmp_freq;
+ /*
+ * Exynos5440 clock controller state logic expects the cpufreq table to
+ * be in descending order. But the OPP library constructs the table in
+ * ascending order. So to make the table descending we just need to
+ * swap the i element with the N - i element.
+ */
+ for (i = 0; i < dvfs_info->freq_count / 2; i++) {
+ index = dvfs_info->freq_count - i - 1;
+ tmp_freq = freq_tbl[i].frequency;
+ freq_tbl[i].frequency = freq_tbl[index].frequency;
+ freq_tbl[index].frequency = tmp_freq;
+ }
+}
+
+static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
+ if (ret) {
+ dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
+ return ret;
+ }
+
+ policy->cur = dvfs_info->cur_frequency;
+ policy->cpuinfo.transition_latency = dvfs_info->latency;
+ cpumask_setall(policy->cpus);
+
+ cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
+
+ return 0;
+}
+
+static struct cpufreq_driver exynos_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = exynos_verify_speed,
+ .target = exynos_target,
+ .get = exynos_getspeed,
+ .init = exynos_cpufreq_cpu_init,
+ .name = CPUFREQ_NAME,
+};
+
+static const struct of_device_id exynos_cpufreq_match[] = {
+ {
+ .compatible = "samsung,exynos5440-cpufreq",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
+
+static int exynos_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+ struct device_node *np;
+ struct resource res;
+
+ np = pdev->dev.of_node;
+ if (!np)
+ return -ENODEV;
+
+ dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
+ if (!dvfs_info) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ dvfs_info->dev = &pdev->dev;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ goto err_put_node;
+
+ dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
+ if (IS_ERR(dvfs_info->base)) {
+ ret = PTR_ERR(dvfs_info->base);
+ goto err_put_node;
+ }
+
+ dvfs_info->irq = irq_of_parse_and_map(np, 0);
+ if (!dvfs_info->irq) {
+ dev_err(dvfs_info->dev, "No cpufreq irq found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ ret = of_init_opp_table(dvfs_info->dev);
+ if (ret) {
+ dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
+ goto err_put_node;
+ }
+
+ ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ if (ret) {
+ dev_err(dvfs_info->dev,
+ "failed to init cpufreq table: %d\n", ret);
+ goto err_put_node;
+ }
+ dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
+ exynos_sort_descend_freq_table();
+
+ if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
+ dvfs_info->latency = DEF_TRANS_LATENCY;
+
+ dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
+ if (IS_ERR(dvfs_info->cpu_clk)) {
+ dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
+ ret = PTR_ERR(dvfs_info->cpu_clk);
+ goto err_free_table;
+ }
+
+ dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
+ if (!dvfs_info->cur_frequency) {
+ dev_err(dvfs_info->dev, "Failed to get clock rate\n");
+ ret = -EINVAL;
+ goto err_free_table;
+ }
+ dvfs_info->cur_frequency /= 1000;
+
+ INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
+ ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
+ exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
+ CPUFREQ_NAME, dvfs_info);
+ if (ret) {
+ dev_err(dvfs_info->dev, "Failed to register IRQ\n");
+ goto err_free_table;
+ }
+
+ ret = init_div_table();
+ if (ret) {
+ dev_err(dvfs_info->dev, "Failed to initialise div table\n");
+ goto err_free_table;
+ }
+
+ exynos_enable_dvfs();
+ ret = cpufreq_register_driver(&exynos_driver);
+ if (ret) {
+ dev_err(dvfs_info->dev,
+ "%s: failed to register cpufreq driver\n", __func__);
+ goto err_free_table;
+ }
+
+ of_node_put(np);
+ dvfs_info->dvfs_enabled = true;
+ return 0;
+
+err_free_table:
+ opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+err_put_node:
+ of_node_put(np);
+ dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
+ return ret;
+}
+
+static int exynos_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&exynos_driver);
+ opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ return 0;
+}
+
+static struct platform_driver exynos_cpufreq_platdrv = {
+ .driver = {
+ .name = "exynos5440-cpufreq",
+ .owner = THIS_MODULE,
+ .of_match_table = exynos_cpufreq_match,
+ },
+ .probe = exynos_cpufreq_probe,
+ .remove = exynos_cpufreq_remove,
+};
+module_platform_driver(exynos_cpufreq_platdrv);
+
+MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
+MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 456bee058fe6..3dfc99b9ca86 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -251,14 +251,13 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
* set cpu speed in khz.
**/
-static void gx_set_cpuspeed(unsigned int khz)
+static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
{
u8 suscfg, pmer1;
unsigned int new_khz;
unsigned long flags;
struct cpufreq_freqs freqs;
- freqs.cpu = 0;
freqs.old = gx_get_cpuspeed(0);
new_khz = gx_validate_speed(khz, &gx_params->on_duration,
@@ -266,11 +265,9 @@ static void gx_set_cpuspeed(unsigned int khz)
freqs.new = new_khz;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
local_irq_save(flags);
-
-
if (new_khz != stock_freq) {
/* if new khz == 100% of CPU speed, it is special case */
switch (gx_params->cs55x0->device) {
@@ -317,7 +314,7 @@ static void gx_set_cpuspeed(unsigned int khz)
gx_params->pci_suscfg = suscfg;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
gx_params->on_duration * 32, gx_params->off_duration * 32);
@@ -397,7 +394,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
}
- gx_set_cpuspeed(tmp_freq);
+ gx_set_cpuspeed(policy, tmp_freq);
return 0;
}
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
new file mode 100644
index 000000000000..c0075dbaa633
--- /dev/null
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -0,0 +1,438 @@
+/*
+ * This file provides the ACPI based P-state support. This
+ * module works with generic cpufreq infrastructure. Most of
+ * the code is based on i386 version
+ * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
+ *
+ * Copyright (C) 2005 Intel Corp
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pal.h>
+
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+
+MODULE_AUTHOR("Venkatesh Pallipadi");
+MODULE_DESCRIPTION("ACPI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+
+struct cpufreq_acpi_io {
+ struct acpi_processor_performance acpi_data;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int resume;
+};
+
+static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
+
+static struct cpufreq_driver acpi_cpufreq_driver;
+
+
+static int
+processor_set_pstate (
+ u32 value)
+{
+ s64 retval;
+
+ pr_debug("processor_set_pstate\n");
+
+ retval = ia64_pal_set_pstate((u64)value);
+
+ if (retval) {
+ pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
+ value, retval);
+ return -ENODEV;
+ }
+ return (int)retval;
+}
+
+
+static int
+processor_get_pstate (
+ u32 *value)
+{
+ u64 pstate_index = 0;
+ s64 retval;
+
+ pr_debug("processor_get_pstate\n");
+
+ retval = ia64_pal_get_pstate(&pstate_index,
+ PAL_GET_PSTATE_TYPE_INSTANT);
+ *value = (u32) pstate_index;
+
+ if (retval)
+ pr_debug("Failed to get current freq with "
+ "error 0x%lx, idx 0x%x\n", retval, *value);
+
+ return (int)retval;
+}
+
+
+/* To be used only after data->acpi_data is initialized */
+static unsigned
+extract_clock (
+ struct cpufreq_acpi_io *data,
+ unsigned value,
+ unsigned int cpu)
+{
+ unsigned long i;
+
+ pr_debug("extract_clock\n");
+
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ if (value == data->acpi_data.states[i].status)
+ return data->acpi_data.states[i].core_frequency;
+ }
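+	/* no exact status match: fall back to the last table entry */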
+ return data->acpi_data.states[i-1].core_frequency;
+}
+
+
+static unsigned int
+processor_get_freq (
+ struct cpufreq_acpi_io *data,
+ unsigned int cpu)
+{
+ int ret = 0;
+ u32 value = 0;
+ cpumask_t saved_mask;
+ unsigned long clock_freq;
+
+ pr_debug("processor_get_freq\n");
+
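+	/* run the PAL query on the target CPU by temporarily binding this task to it */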
+ saved_mask = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ if (smp_processor_id() != cpu)
+ goto migrate_end;
+
+ /* processor_get_pstate gets the instantaneous frequency */
+ ret = processor_get_pstate(&value);
+
+ if (ret) {
+ set_cpus_allowed_ptr(current, &saved_mask);
+ printk(KERN_WARNING "get performance failed with error %d\n",
+ ret);
+ ret = 0;
+ goto migrate_end;
+ }
+ clock_freq = extract_clock(data, value, cpu);
+ ret = (clock_freq*1000);
+
+migrate_end:
+ set_cpus_allowed_ptr(current, &saved_mask);
+ return ret;
+}
+
+
+static int
+processor_set_freq (
+ struct cpufreq_acpi_io *data,
+ struct cpufreq_policy *policy,
+ int state)
+{
+ int ret = 0;
+ u32 value = 0;
+ struct cpufreq_freqs cpufreq_freqs;
+ cpumask_t saved_mask;
+ int retval;
+
+ pr_debug("processor_set_freq\n");
+
+ saved_mask = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
+ if (smp_processor_id() != policy->cpu) {
+ retval = -EAGAIN;
+ goto migrate_end;
+ }
+
+ if (state == data->acpi_data.state) {
+ if (unlikely(data->resume)) {
+ pr_debug("Called after resume, resetting to P%d\n", state);
+ data->resume = 0;
+ } else {
+ pr_debug("Already at target state (P%d)\n", state);
+ retval = 0;
+ goto migrate_end;
+ }
+ }
+
+ pr_debug("Transitioning from P%d to P%d\n",
+ data->acpi_data.state, state);
+
+ /* cpufreq frequency struct */
+ cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
+ cpufreq_freqs.new = data->freq_table[state].frequency;
+
+ /* notify cpufreq */
+ cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);
+
+ /*
+ * First we write the target state's 'control' value to the
+ * control_register.
+ */
+
+ value = (u32) data->acpi_data.states[state].control;
+
+ pr_debug("Transitioning to state: 0x%08x\n", value);
+
+ ret = processor_set_pstate(value);
+ if (ret) {
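+		/* transition failed: complete the announced change, then notify a change back to the old frequency */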
+ unsigned int tmp = cpufreq_freqs.new;
+ cpufreq_notify_transition(policy, &cpufreq_freqs,
+ CPUFREQ_POSTCHANGE);
+ cpufreq_freqs.new = cpufreq_freqs.old;
+ cpufreq_freqs.old = tmp;
+ cpufreq_notify_transition(policy, &cpufreq_freqs,
+ CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &cpufreq_freqs,
+ CPUFREQ_POSTCHANGE);
+ printk(KERN_WARNING "Transition failed with error %d\n", ret);
+ retval = -ENODEV;
+ goto migrate_end;
+ }
+
+ cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);
+
+ data->acpi_data.state = state;
+
+ retval = 0;
+
+migrate_end:
+ set_cpus_allowed_ptr(current, &saved_mask);
+ return (retval);
+}
+
+
+static unsigned int
+acpi_cpufreq_get (
+ unsigned int cpu)
+{
+ struct cpufreq_acpi_io *data = acpi_io_data[cpu];
+
+ pr_debug("acpi_cpufreq_get\n");
+
+ return processor_get_freq(data, cpu);
+}
+
+
+static int
+acpi_cpufreq_target (
+ struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+ unsigned int next_state = 0;
+ unsigned int result = 0;
+
+ pr_debug("acpi_cpufreq_setpolicy\n");
+
+ result = cpufreq_frequency_table_target(policy,
+ data->freq_table, target_freq, relation, &next_state);
+ if (result)
+ return (result);
+
+ result = processor_set_freq(data, policy, next_state);
+
+ return (result);
+}
+
+
+static int
+acpi_cpufreq_verify (
+ struct cpufreq_policy *policy)
+{
+ unsigned int result = 0;
+ struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+
+ pr_debug("acpi_cpufreq_verify\n");
+
+ result = cpufreq_frequency_table_verify(policy,
+ data->freq_table);
+
+ return (result);
+}
+
+
+static int
+acpi_cpufreq_cpu_init (
+ struct cpufreq_policy *policy)
+{
+ unsigned int i;
+ unsigned int cpu = policy->cpu;
+ struct cpufreq_acpi_io *data;
+ unsigned int result = 0;
+
+ pr_debug("acpi_cpufreq_cpu_init\n");
+
+ data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+ if (!data)
+ return (-ENOMEM);
+
+ acpi_io_data[cpu] = data;
+
+ result = acpi_processor_register_performance(&data->acpi_data, cpu);
+
+ if (result)
+ goto err_free;
+
+ /* capability check */
+ if (data->acpi_data.state_count <= 1) {
+ pr_debug("No P-States\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ if ((data->acpi_data.control_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (data->acpi_data.status_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ pr_debug("Unsupported address space [%d, %d]\n",
+ (u32) (data->acpi_data.control_register.space_id),
+ (u32) (data->acpi_data.status_register.space_id));
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ /* alloc freq_table */
+ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+ (data->acpi_data.state_count + 1),
+ GFP_KERNEL);
+ if (!data->freq_table) {
+ result = -ENOMEM;
+ goto err_unreg;
+ }
+
+ /* detect transition latency */
+ policy->cpuinfo.transition_latency = 0;
+ for (i=0; i<data->acpi_data.state_count; i++) {
+ if ((data->acpi_data.states[i].transition_latency * 1000) >
+ policy->cpuinfo.transition_latency) {
+ policy->cpuinfo.transition_latency =
+ data->acpi_data.states[i].transition_latency * 1000;
+ }
+ }
+ policy->cur = processor_get_freq(data, policy->cpu);
+
+ /* table init */
+ for (i = 0; i <= data->acpi_data.state_count; i++)
+ {
+ data->freq_table[i].index = i;
+ if (i < data->acpi_data.state_count) {
+ data->freq_table[i].frequency =
+ data->acpi_data.states[i].core_frequency * 1000;
+ } else {
+ data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+ }
+ }
+
+ result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ if (result) {
+ goto err_freqfree;
+ }
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
+ "activated.\n", cpu);
+
+ for (i = 0; i < data->acpi_data.state_count; i++)
+ pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
+ (i == data->acpi_data.state?'*':' '), i,
+ (u32) data->acpi_data.states[i].core_frequency,
+ (u32) data->acpi_data.states[i].power,
+ (u32) data->acpi_data.states[i].transition_latency,
+ (u32) data->acpi_data.states[i].bus_master_latency,
+ (u32) data->acpi_data.states[i].status,
+ (u32) data->acpi_data.states[i].control);
+
+ cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
+
+ /* the first call to ->target() should result in us actually
+ * writing something to the appropriate registers. */
+ data->resume = 1;
+
+ return (result);
+
+ err_freqfree:
+ kfree(data->freq_table);
+ err_unreg:
+ acpi_processor_unregister_performance(&data->acpi_data, cpu);
+ err_free:
+ kfree(data);
+ acpi_io_data[cpu] = NULL;
+
+ return (result);
+}
+
+
+static int
+acpi_cpufreq_cpu_exit (
+ struct cpufreq_policy *policy)
+{
+ struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+
+ pr_debug("acpi_cpufreq_cpu_exit\n");
+
+ if (data) {
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ acpi_io_data[policy->cpu] = NULL;
+ acpi_processor_unregister_performance(&data->acpi_data,
+ policy->cpu);
+ kfree(data);
+ }
+
+ return (0);
+}
+
+
+static struct freq_attr* acpi_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+
+static struct cpufreq_driver acpi_cpufreq_driver = {
+ .verify = acpi_cpufreq_verify,
+ .target = acpi_cpufreq_target,
+ .get = acpi_cpufreq_get,
+ .init = acpi_cpufreq_cpu_init,
+ .exit = acpi_cpufreq_cpu_exit,
+ .name = "acpi-cpufreq",
+ .owner = THIS_MODULE,
+ .attr = acpi_cpufreq_attr,
+};
+
+
+static int __init
+acpi_cpufreq_init (void)
+{
+ pr_debug("acpi_cpufreq_init\n");
+
+ return cpufreq_register_driver(&acpi_cpufreq_driver);
+}
+
+
+static void __exit
+acpi_cpufreq_exit (void)
+{
+ pr_debug("acpi_cpufreq_exit\n");
+
+ cpufreq_unregister_driver(&acpi_cpufreq_driver);
+ return;
+}
+
+
+late_initcall(acpi_cpufreq_init);
+module_exit(acpi_cpufreq_exit);
+
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 54e336de373b..b78bc35973ba 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -50,7 +50,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
struct opp *opp;
unsigned long freq_hz, volt, volt_old;
- unsigned int index, cpu;
+ unsigned int index;
int ret;
ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
@@ -68,10 +68,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new)
return 0;
- for_each_online_cpu(cpu) {
- freqs.cpu = cpu;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
rcu_read_lock();
opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
@@ -166,10 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
}
}
- for_each_online_cpu(cpu) {
- freqs.cpu = cpu;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
new file mode 100644
index 000000000000..f7c99df0880b
--- /dev/null
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * CPU support functions
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/platform.h>
+#include <asm/mach-types.h>
+#include <asm/hardware/icst.h>
+
+static struct cpufreq_driver integrator_driver;
+
+#define CM_ID __io_address(INTEGRATOR_HDR_ID)
+#define CM_OSC __io_address(INTEGRATOR_HDR_OSC)
+#define CM_STAT __io_address(INTEGRATOR_HDR_STAT)
+#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK)
+
+static const struct icst_params lclk_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
+ .vd_min = 8,
+ .vd_max = 132,
+ .rd_min = 24,
+ .rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
+static const struct icst_params cclk_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
+ .vd_min = 12,
+ .vd_max = 160,
+ .rd_min = 24,
+ .rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
+/*
+ * Validate the speed policy.
+ */
+static int integrator_verify_policy(struct cpufreq_policy *policy)
+{
+ struct icst_vco vco;
+
+ cpufreq_verify_within_limits(policy,
+ policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
+ policy->max = icst_hz(&cclk_params, vco) / 1000;
+
+ vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
+ policy->min = icst_hz(&cclk_params, vco) / 1000;
+
+ cpufreq_verify_within_limits(policy,
+ policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ return 0;
+}
+
+
+static int integrator_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ cpumask_t cpus_allowed;
+ int cpu = policy->cpu;
+ struct icst_vco vco;
+ struct cpufreq_freqs freqs;
+ u_int cm_osc;
+
+ /*
+ * Save this threads cpus_allowed mask.
+ */
+ cpus_allowed = current->cpus_allowed;
+
+ /*
+ * Bind to the specified CPU. When this call returns,
+ * we should be running on the right CPU.
+ */
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ BUG_ON(cpu != smp_processor_id());
+
+ /* get current setting */
+ cm_osc = __raw_readl(CM_OSC);
+
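+	/*
+	 * Decode the current VCO setting: bits 7:0 of CM_OSC hold the V
+	 * parameter and bits 10:8 the output divider select (fixed to 1 on
+	 * the CIntegrator); the reference divider r is hard-wired to 22 here.
+	 */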
+ if (machine_is_integrator()) {
+ vco.s = (cm_osc >> 8) & 7;
+ } else if (machine_is_cintegrator()) {
+ vco.s = 1;
+ }
+ vco.v = cm_osc & 255;
+ vco.r = 22;
+ freqs.old = icst_hz(&cclk_params, vco) / 1000;
+
+ /* icst_hz_to_vco rounds down -- so we need the next
+ * larger freq in case of CPUFREQ_RELATION_L.
+ */
+ if (relation == CPUFREQ_RELATION_L)
+ target_freq += 999;
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+ vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
+ freqs.new = icst_hz(&cclk_params, vco) / 1000;
+
+ if (freqs.old == freqs.new) {
+ set_cpus_allowed(current, cpus_allowed);
+ return 0;
+ }
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ cm_osc = __raw_readl(CM_OSC);
+
+ if (machine_is_integrator()) {
+ cm_osc &= 0xfffff800;
+ cm_osc |= vco.s << 8;
+ } else if (machine_is_cintegrator()) {
+ cm_osc &= 0xffffff00;
+ }
+ cm_osc |= vco.v;
+
+ __raw_writel(0xa05f, CM_LOCK);
+ __raw_writel(cm_osc, CM_OSC);
+ __raw_writel(0, CM_LOCK);
+
+ /*
+ * Restore the CPUs allowed mask.
+ */
+ set_cpus_allowed(current, cpus_allowed);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static unsigned int integrator_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned int current_freq;
+ u_int cm_osc;
+ struct icst_vco vco;
+
+ cpus_allowed = current->cpus_allowed;
+
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ BUG_ON(cpu != smp_processor_id());
+
+ /* detect memory etc. */
+ cm_osc = __raw_readl(CM_OSC);
+
+ if (machine_is_integrator()) {
+ vco.s = (cm_osc >> 8) & 7;
+ } else {
+ vco.s = 1;
+ }
+ vco.v = cm_osc & 255;
+ vco.r = 22;
+
+ current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
+
+ set_cpus_allowed(current, cpus_allowed);
+
+ return current_freq;
+}
+
+static int integrator_cpufreq_init(struct cpufreq_policy *policy)
+{
+
+ /* set default policy and cpuinfo */
+ policy->cpuinfo.max_freq = 160000;
+ policy->cpuinfo.min_freq = 12000;
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
+ policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
+
+ return 0;
+}
+
+static struct cpufreq_driver integrator_driver = {
+ .verify = integrator_verify_policy,
+ .target = integrator_set_target,
+ .get = integrator_get,
+ .init = integrator_cpufreq_init,
+ .name = "integrator",
+};
+
+static int __init integrator_cpu_init(void)
+{
+ return cpufreq_register_driver(&integrator_driver);
+}
+
+static void __exit integrator_cpu_exit(void)
+{
+ cpufreq_unregister_driver(&integrator_driver);
+}
+
+MODULE_AUTHOR ("Russell M. King");
+MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
+MODULE_LICENSE ("GPL");
+
+module_init(integrator_cpu_init);
+module_exit(integrator_cpu_exit);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6133ef5cf671..cc3a8e6c92be 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1,5 +1,5 @@
/*
- * cpufreq_snb.c: Native P state management for Intel processors
+ * intel_pstate.c: Native P state management for Intel processors
*
* (C) Copyright 2012 Intel Corporation
* Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
@@ -657,30 +657,27 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
- int min, max;
cpu = all_cpu_data[policy->cpu];
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- intel_pstate_get_min_max(cpu, &min, &max);
-
- limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
- limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
- limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-
- limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
- limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
- limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
-
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits.min_perf_pct = 100;
limits.min_perf = int_tofp(1);
limits.max_perf_pct = 100;
limits.max_perf = int_tofp(1);
limits.no_turbo = 0;
+ return 0;
}
+ limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+ limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+ limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+ limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+ limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
return 0;
}
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 0e83e3c24f5b..d36ea8dc96eb 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -55,7 +55,8 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
return kirkwood_freq_table[0].frequency;
}
-static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
+static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
+ unsigned int index)
{
struct cpufreq_freqs freqs;
unsigned int state = kirkwood_freq_table[index].index;
@@ -63,9 +64,8 @@ static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
freqs.new = kirkwood_freq_table[index].frequency;
- freqs.cpu = 0; /* Kirkwood is UP */
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
kirkwood_freq_table[index].frequency);
@@ -99,7 +99,7 @@ static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
local_irq_enable();
}
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
};
static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
@@ -117,7 +117,7 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
target_freq, relation, &index))
return -EINVAL;
- kirkwood_cpufreq_set_cpu_state(index);
+ kirkwood_cpufreq_set_cpu_state(policy, index);
return 0;
}
@@ -175,11 +175,9 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Cannot get memory resource\n");
return -ENODEV;
}
- priv.base = devm_request_and_ioremap(&pdev->dev, res);
- if (!priv.base) {
- dev_err(&pdev->dev, "Cannot ioremap\n");
- return -EADDRNOTAVAIL;
- }
+ priv.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv.base))
+ return PTR_ERR(priv.base);
np = of_find_node_by_path("/cpus/cpu@0");
if (!np)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 1180d536d1eb..b448638e34de 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -242,7 +242,8 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
* Sets a new clock ratio.
*/
-static void longhaul_setstate(unsigned int table_index)
+static void longhaul_setstate(struct cpufreq_policy *policy,
+ unsigned int table_index)
{
unsigned int mults_index;
int speed, mult;
@@ -267,9 +268,8 @@ static void longhaul_setstate(unsigned int table_index)
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
- freqs.cpu = 0; /* longhaul.c is UP only driver */
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
@@ -386,7 +386,7 @@ retry_loop:
}
}
/* Report true CPU frequency */
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
if (!bm_timeout)
printk(KERN_INFO PFX "Warning: Timeout while waiting for "
@@ -648,7 +648,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
return 0;
if (!can_scale_voltage)
- longhaul_setstate(table_index);
+ longhaul_setstate(policy, table_index);
else {
/* On test system voltage transitions exceeding single
* step up or down were turning motherboard off. Both
@@ -663,7 +663,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
while (i != table_index) {
vid = (longhaul_table[i].index >> 8) & 0x1f;
if (vid != current_vid) {
- longhaul_setstate(i);
+ longhaul_setstate(policy, i);
current_vid = vid;
msleep(200);
}
@@ -672,7 +672,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
else
i--;
}
- longhaul_setstate(table_index);
+ longhaul_setstate(policy, table_index);
}
longhaul_index = table_index;
return 0;
@@ -998,15 +998,17 @@ static int __init longhaul_init(void)
static void __exit longhaul_exit(void)
{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
- longhaul_setstate(i);
+ longhaul_setstate(policy, i);
break;
}
}
+ cpufreq_cpu_put(policy);
cpufreq_unregister_driver(&longhaul_driver);
kfree(longhaul_table);
}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
new file mode 100644
index 000000000000..84889573b566
--- /dev/null
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -0,0 +1,248 @@
+/*
+ * Cpufreq driver for the loongson-2 processors
+ *
+ * The 2E revision of the Loongson processor does not support this feature.
+ *
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
+ * Author: Yanhua, yanh@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/sched.h> /* set_cpus_allowed() */
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <asm/clock.h>
+
+#include <asm/mach-loongson/loongson.h>
+
+static uint nowait;
+
+static struct clk *cpuclk;
+
+static void (*saved_cpu_wait) (void);
+
+static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data);
+
+static struct notifier_block loongson2_cpufreq_notifier_block = {
+ .notifier_call = loongson2_cpu_freq_notifier
+};
+
+static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ current_cpu_data.udelay_val = loops_per_jiffy;
+
+ return 0;
+}
+
+static unsigned int loongson2_cpufreq_get(unsigned int cpu)
+{
+ return clk_get_rate(cpuclk);
+}
+
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned int newstate = 0;
+ cpumask_t cpus_allowed;
+ struct cpufreq_freqs freqs;
+ unsigned int freq;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ if (cpufreq_frequency_table_target
+ (policy, &loongson2_clockmod_table[0], target_freq, relation,
+ &newstate))
+ return -EINVAL;
+
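+	/* The clockmod index selects a fraction of the base clock in eighths:
+	 * freq (kHz) = (cpu_clock_freq / 1000) * index / 8. */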
+ freq =
+ ((cpu_clock_freq / 1000) *
+ loongson2_clockmod_table[newstate].index) / 8;
+ if (freq < policy->min || freq > policy->max)
+ return -EINVAL;
+
+ pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
+
+ freqs.old = loongson2_cpufreq_get(cpu);
+ freqs.new = freq;
+ freqs.flags = 0;
+
+ if (freqs.new == freqs.old)
+ return 0;
+
+ /* notifiers */
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ /* setting the cpu frequency */
+ clk_set_rate(cpuclk, freq);
+
+ /* notifiers */
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ pr_debug("cpufreq: set frequency %u kHz\n", freq);
+
+ return 0;
+}
+
+static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int i;
+ unsigned long rate;
+ int ret;
+
+ cpuclk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(cpuclk)) {
+ printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
+ return PTR_ERR(cpuclk);
+ }
+
+ rate = cpu_clock_freq / 1000;
+ if (!rate) {
+ clk_put(cpuclk);
+ return -EINVAL;
+ }
+ ret = clk_set_rate(cpuclk, rate);
+ if (ret) {
+ clk_put(cpuclk);
+ return ret;
+ }
+
+ /* clock table init */
+ for (i = 2;
+ (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
+ i++)
+ loongson2_clockmod_table[i].frequency = (rate * i) / 8;
+
+ policy->cur = loongson2_cpufreq_get(policy->cpu);
+
+ cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
+ policy->cpu);
+
+ return cpufreq_frequency_table_cpuinfo(policy,
+ &loongson2_clockmod_table[0]);
+}
+
+static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy,
+ &loongson2_clockmod_table[0]);
+}
+
+static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ clk_put(cpuclk);
+ return 0;
+}
+
+static struct freq_attr *loongson2_table_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver loongson2_cpufreq_driver = {
+ .owner = THIS_MODULE,
+ .name = "loongson2",
+ .init = loongson2_cpufreq_cpu_init,
+ .verify = loongson2_cpufreq_verify,
+ .target = loongson2_cpufreq_target,
+ .get = loongson2_cpufreq_get,
+ .exit = loongson2_cpufreq_exit,
+ .attr = loongson2_table_attr,
+};
+
+static struct platform_device_id platform_device_ids[] = {
+ {
+ .name = "loongson2_cpufreq",
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(platform, platform_device_ids);
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "loongson2_cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .id_table = platform_device_ids,
+};
+
+/*
+ * This is the simple version of the Loongson-2 wait. Maybe we need to do
+ * this in an interrupt-disabled context.
+ */
+
+static DEFINE_SPINLOCK(loongson2_wait_lock);
+
+static void loongson2_cpu_wait(void)
+{
+ unsigned long flags;
+ u32 cpu_freq;
+
+ spin_lock_irqsave(&loongson2_wait_lock, flags);
+ cpu_freq = LOONGSON_CHIPCFG0;
+ LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
+ LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
+ spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+}
+
+static int __init cpufreq_init(void)
+{
+ int ret;
+
+ /* Register platform stuff */
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ return ret;
+
+ pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
+
+ cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
+
+ if (!ret && !nowait) {
+ saved_cpu_wait = cpu_wait;
+ cpu_wait = loongson2_cpu_wait;
+ }
+
+ return ret;
+}
+
+static void __exit cpufreq_exit(void)
+{
+ if (!nowait && saved_cpu_wait)
+ cpu_wait = saved_cpu_wait;
+ cpufreq_unregister_driver(&loongson2_cpufreq_driver);
+ cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ platform_driver_unregister(&platform_driver);
+}
+
+module_init(cpufreq_init);
+module_exit(cpufreq_exit);
+
+module_param(nowait, uint, 0644);
+MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
+
+MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
+MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index d4c4989823dc..cdd62915efaf 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -158,11 +158,10 @@ static int maple_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
freqs.new = maple_cpu_freqs[newstate].frequency;
- freqs.cpu = 0;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
rc = maple_scom_switch_freq(newstate);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&maple_switch_mutex);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 9128c07bafba..0279d18a57f9 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -25,6 +25,7 @@
#include <linux/opp.h>
#include <linux/cpu.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <asm/smp_plat.h>
@@ -88,16 +89,12 @@ static int omap_target(struct cpufreq_policy *policy,
}
freqs.old = omap_getspeed(policy->cpu);
- freqs.cpu = policy->cpu;
if (freqs.old == freqs.new && policy->cur == freqs.new)
return ret;
/* notifiers */
- for_each_cpu(i, policy->cpus) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
freq = freqs.new * 1000;
ret = clk_round_rate(mpu_clk, freq);
@@ -157,10 +154,7 @@ static int omap_target(struct cpufreq_policy *policy,
done:
/* notifiers */
- for_each_cpu(i, policy->cpus) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}
@@ -184,7 +178,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
goto fail_ck;
}
- policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
+ policy->cur = omap_getspeed(policy->cpu);
if (!freq_table)
result = opp_init_cpufreq_table(mpu_dev, &freq_table);
@@ -203,8 +197,6 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
policy->cur = omap_getspeed(policy->cpu);
/*
@@ -252,7 +244,7 @@ static struct cpufreq_driver omap_driver = {
.attr = omap_cpufreq_attr,
};
-static int __init omap_cpufreq_init(void)
+static int omap_cpufreq_probe(struct platform_device *pdev)
{
mpu_dev = get_cpu_device(0);
if (!mpu_dev) {
@@ -280,12 +272,20 @@ static int __init omap_cpufreq_init(void)
return cpufreq_register_driver(&omap_driver);
}
-static void __exit omap_cpufreq_exit(void)
+static int omap_cpufreq_remove(struct platform_device *pdev)
{
- cpufreq_unregister_driver(&omap_driver);
+ return cpufreq_unregister_driver(&omap_driver);
}
+static struct platform_driver omap_cpufreq_platdrv = {
+ .driver = {
+ .name = "omap-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = omap_cpufreq_probe,
+ .remove = omap_cpufreq_remove,
+};
+module_platform_driver(omap_cpufreq_platdrv);
+
MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
MODULE_LICENSE("GPL");
-module_init(omap_cpufreq_init);
-module_exit(omap_cpufreq_exit);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 827629c9aad7..421ef37d0bb3 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -58,8 +58,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
u32 l, h;
- if (!cpu_online(cpu) ||
- (newstate > DC_DISABLE) || (newstate == DC_RESV))
+ if ((newstate > DC_DISABLE) || (newstate == DC_RESV))
return -EINVAL;
rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
@@ -125,10 +124,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
return 0;
/* notifiers */
- for_each_cpu(i, policy->cpus) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* run on each logical CPU,
* see section 13.15.3 of IA32 Intel Architecture Software
@@ -138,10 +134,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
/* notifiers */
- for_each_cpu(i, policy->cpus) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 503996a94a6a..0de00081a81e 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -215,8 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
(pcch_virt_addr + pcc_cpu_data->input_offset));
freqs.new = target_freq;
- freqs.cpu = cpu;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
input_buffer = 0x1 | (((target_freq * 100)
/ (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -237,7 +236,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
}
iowrite16(0, &pcch_hdr->status);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
spin_unlock(&pcc_lock);
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index af23e0b9ec92..ea0222a45b7b 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -68,7 +68,8 @@ static int powernow_k6_get_cpu_multiplier(void)
*
* Tries to change the PowerNow! multiplier
*/
-static void powernow_k6_set_state(unsigned int best_i)
+static void powernow_k6_set_state(struct cpufreq_policy *policy,
+ unsigned int best_i)
{
unsigned long outvalue = 0, invalue = 0;
unsigned long msrval;
@@ -81,9 +82,8 @@ static void powernow_k6_set_state(unsigned int best_i)
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
freqs.new = busfreq * clock_ratio[best_i].index;
- freqs.cpu = 0; /* powernow-k6.c is UP only driver */
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* we now need to transform best_i to the BVC format, see AMD#23446 */
@@ -98,7 +98,7 @@ static void powernow_k6_set_state(unsigned int best_i)
msrval = POWERNOW_IOPORT + 0x0;
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return;
}
@@ -136,7 +136,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
target_freq, relation, &newstate))
return -EINVAL;
- powernow_k6_set_state(newstate);
+ powernow_k6_set_state(policy, newstate);
return 0;
}
@@ -182,7 +182,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
unsigned int i;
for (i = 0; i < 8; i++) {
if (i == max_multiplier)
- powernow_k6_set_state(i);
+ powernow_k6_set_state(policy, i);
}
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 334cc2f1e9f1..53888dacbe58 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -248,7 +248,7 @@ static void change_VID(int vid)
}
-static void change_speed(unsigned int index)
+static void change_speed(struct cpufreq_policy *policy, unsigned int index)
{
u8 fid, vid;
struct cpufreq_freqs freqs;
@@ -263,15 +263,13 @@ static void change_speed(unsigned int index)
fid = powernow_table[index].index & 0xFF;
vid = (powernow_table[index].index & 0xFF00) >> 8;
- freqs.cpu = 0;
-
rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
freqs.old = fsb * fid_codes[cfid] / 10;
freqs.new = powernow_table[index].frequency;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Now do the magic poking into the MSRs. */
@@ -292,7 +290,7 @@ static void change_speed(unsigned int index)
if (have_a0 == 1)
local_irq_enable();
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
@@ -546,7 +544,7 @@ static int powernow_target(struct cpufreq_policy *policy,
relation, &newstate))
return -EINVAL;
- change_speed(newstate);
+ change_speed(policy, newstate);
return 0;
}
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index d13a13678b5f..b828efe4b2f8 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -928,9 +928,10 @@ static int get_transition_latency(struct powernow_k8_data *data)
static int transition_frequency_fidvid(struct powernow_k8_data *data,
unsigned int index)
{
+ struct cpufreq_policy *policy;
u32 fid = 0;
u32 vid = 0;
- int res, i;
+ int res;
struct cpufreq_freqs freqs;
pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
@@ -959,10 +960,10 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- for_each_cpu(i, data->available_cores) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- }
+ policy = cpufreq_cpu_get(smp_processor_id());
+ cpufreq_cpu_put(policy);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
res = transition_fid_vid(data, fid, vid);
if (res)
@@ -970,10 +971,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.new = find_khz_freq_from_fid(data->currfid);
- for_each_cpu(i, data->available_cores) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return res;
}
@@ -1104,9 +1102,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
struct init_on_cpu init_on_cpu;
int rc;
- if (!cpu_online(pol->cpu))
- return -ENODEV;
-
smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
if (rc)
return -ENODEV;
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
new file mode 100644
index 000000000000..e577a1dbbfcd
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -0,0 +1,209 @@
+/*
+ * cpufreq driver for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/cell-regs.h>
+
+#include "ppc_cbe_cpufreq.h"
+
+static DEFINE_MUTEX(cbe_switch_mutex);
+
+
+/* the CBE supports an 8 step frequency scaling */
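+/* each entry holds { divider, frequency }; the frequency column is filled in
+ * at init time as max_freq / divider (see cbe_cpufreq_cpu_init()) */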
+static struct cpufreq_frequency_table cbe_freqs[] = {
+ {1, 0},
+ {2, 0},
+ {3, 0},
+ {4, 0},
+ {5, 0},
+ {6, 0},
+ {8, 0},
+ {10, 0},
+ {0, CPUFREQ_TABLE_END},
+};
+
+/*
+ * hardware specific functions
+ */
+
+static int set_pmode(unsigned int cpu, unsigned int slow_mode)
+{
+ int rc;
+
+ if (cbe_cpufreq_has_pmi)
+ rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
+ else
+ rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
+
+ pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
+
+ return rc;
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ const u32 *max_freqp;
+ u32 max_freq;
+ int i, cur_pmode;
+ struct device_node *cpu;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
+
+ if (!cpu)
+ return -ENODEV;
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+ /*
+ * Let's check we can actually get to the CELL regs
+ */
+ if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
+ !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
+ pr_info("invalid CBE regs pointers for cpufreq\n");
+ return -EINVAL;
+ }
+
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+
+ of_node_put(cpu);
+
+ if (!max_freqp)
+ return -EINVAL;
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
+
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+ /* initialize frequency table */
+ for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
+ cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index;
+ pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
+ }
+
+ /* if DEBUG is enabled set_pmode() measures the latency
+ * of a transition */
+ policy->cpuinfo.transition_latency = 25000;
+
+ cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
+ pr_debug("current pmode is at %d\n",cur_pmode);
+
+ policy->cur = cbe_freqs[cur_pmode].frequency;
+
+#ifdef CONFIG_SMP
+ cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
+#endif
+
+ cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
+
+	/* this ensures that policy->cpuinfo.min_freq
+	 * and policy->cpuinfo.max_freq are set correctly */
+ return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int rc;
+ struct cpufreq_freqs freqs;
+ unsigned int cbe_pmode_new;
+
+ cpufreq_frequency_table_target(policy,
+ cbe_freqs,
+ target_freq,
+ relation,
+ &cbe_pmode_new);
+
+ freqs.old = policy->cur;
+ freqs.new = cbe_freqs[cbe_pmode_new].frequency;
+
+ mutex_lock(&cbe_switch_mutex);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ pr_debug("setting frequency for cpu %d to %d kHz, " \
+ "1/%d of max frequency\n",
+ policy->cpu,
+ cbe_freqs[cbe_pmode_new].frequency,
+ cbe_freqs[cbe_pmode_new].index);
+
+ rc = set_pmode(policy->cpu, cbe_pmode_new);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ mutex_unlock(&cbe_switch_mutex);
+
+ return rc;
+}
+
+static struct cpufreq_driver cbe_cpufreq_driver = {
+ .verify = cbe_cpufreq_verify,
+ .target = cbe_cpufreq_target,
+ .init = cbe_cpufreq_cpu_init,
+ .exit = cbe_cpufreq_cpu_exit,
+ .name = "cbe-cpufreq",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_CONST_LOOPS,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init cbe_cpufreq_init(void)
+{
+ if (!machine_is(cell))
+ return -ENODEV;
+
+ return cpufreq_register_driver(&cbe_cpufreq_driver);
+}
+
+static void __exit cbe_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&cbe_cpufreq_driver);
+}
+
+module_init(cbe_cpufreq_init);
+module_exit(cbe_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
new file mode 100644
index 000000000000..b4c00a5a6a59
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -0,0 +1,24 @@
+/*
+ * ppc_cbe_cpufreq.h
+ *
+ * This file contains the definitions used by the cbe_cpufreq driver.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/types.h>
+
+int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
+int cbe_cpufreq_get_pmode(int cpu);
+
+int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
+
+#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
+extern bool cbe_cpufreq_has_pmi;
+#else
+#define cbe_cpufreq_has_pmi (0)
+#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
new file mode 100644
index 000000000000..84d2f2cf5ba7
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
@@ -0,0 +1,115 @@
+/*
+ * pervasive backend for the cbe_cpufreq driver
+ *
+ * This driver makes use of the pervasive unit to
+ * engage the desired frequency.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <asm/machdep.h>
+#include <asm/hw_irq.h>
+#include <asm/cell-regs.h>
+
+#include "ppc_cbe_cpufreq.h"
+
+/* per-pmode values written to the MIC slow_fast_timer registers */
+static u64 MIC_Slow_Fast_Timer_table[] = {
+ [0 ... 7] = 0x007fc00000000000ull,
+};
+
+/* per-pmode values written to the MIC slow_next_timer registers */
+static u64 MIC_Slow_Next_Timer_table[] = {
+ 0x0000240000000000ull,
+ 0x0000268000000000ull,
+ 0x000029C000000000ull,
+ 0x00002D0000000000ull,
+ 0x0000300000000000ull,
+ 0x0000334000000000ull,
+ 0x000039C000000000ull,
+ 0x00003FC000000000ull,
+};
+
+
+int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
+{
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+ unsigned long flags;
+ u64 value;
+#ifdef DEBUG
+ long time;
+#endif
+
+ local_irq_save(flags);
+
+ mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+#ifdef DEBUG
+ time = jiffies;
+#endif
+
+ out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
+
+ out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
+
+ value = in_be64(&pmd_regs->pmcr);
+	/* clear the current pmode bits */
+	value &= 0xFFFFFFFFFFFFFFF8ull;
+	/* set the new pmode */
+ value |= pmode;
+
+ out_be64(&pmd_regs->pmcr, value);
+
+#ifdef DEBUG
+ /* wait until new pmode appears in status register */
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ while (value != pmode) {
+ cpu_relax();
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ }
+
+ time = jiffies - time;
+ time = jiffies_to_msecs(time);
+ pr_debug("had to wait %lu ms for a transition using " \
+ "pervasive unit\n", time);
+#endif
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+int cbe_cpufreq_get_pmode(int cpu)
+{
+ int ret;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+ ret = in_be64(&pmd_regs->pmsr) & 0x07;
+
+ return ret;
+}
+
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
new file mode 100644
index 000000000000..d29e8da396a0
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -0,0 +1,156 @@
+/*
+ * pmi backend for the cbe_cpufreq driver
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/pmi.h>
+#include <asm/cell-regs.h>
+
+#ifdef DEBUG
+#include <asm/time.h>
+#endif
+
+#include "ppc_cbe_cpufreq.h"
+
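+/* latest slow-mode limit reported per CBE node via PMI; pmi_notifier() uses
+ * it to cap the policy's maximum frequency (0 means no limit) */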
+static u8 pmi_slow_mode_limit[MAX_CBE];
+
+bool cbe_cpufreq_has_pmi = false;
+EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
+
+/*
+ * hardware specific functions
+ */
+
+int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
+{
+ int ret;
+ pmi_message_t pmi_msg;
+#ifdef DEBUG
+ long time;
+#endif
+ pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
+ pmi_msg.data1 = cbe_cpu_to_node(cpu);
+ pmi_msg.data2 = pmode;
+
+#ifdef DEBUG
+ time = jiffies;
+#endif
+ pmi_send_message(pmi_msg);
+
+#ifdef DEBUG
+ time = jiffies - time;
+ time = jiffies_to_msecs(time);
+ pr_debug("had to wait %lu ms for a transition using " \
+ "PMI\n", time);
+#endif
+ ret = pmi_msg.data2;
+ pr_debug("PMI returned slow mode %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
+
+
+static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
+{
+ u8 node, slow_mode;
+
+ BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
+
+ node = pmi_msg.data1;
+ slow_mode = pmi_msg.data2;
+
+ pmi_slow_mode_limit[node] = slow_mode;
+
+ pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
+}
+
+static int pmi_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ struct cpufreq_frequency_table *cbe_freqs;
+ u8 node;
+
+ /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
+	 * and CPUFREQ_NOTIFY policy events?
+ */
+ if (event == CPUFREQ_START)
+ return 0;
+
+ cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
+ node = cbe_cpu_to_node(policy->cpu);
+
+ pr_debug("got notified, event=%lu, node=%u\n", event, node);
+
+ if (pmi_slow_mode_limit[node] != 0) {
+ pr_debug("limiting node %d to slow mode %d\n",
+ node, pmi_slow_mode_limit[node]);
+
+		cpufreq_verify_within_limits(policy, 0,
+			cbe_freqs[pmi_slow_mode_limit[node]].frequency);
+ }
+
+ return 0;
+}
+
+static struct notifier_block pmi_notifier_block = {
+ .notifier_call = pmi_notifier,
+};
+
+static struct pmi_handler cbe_pmi_handler = {
+ .type = PMI_TYPE_FREQ_CHANGE,
+ .handle_pmi_message = cbe_cpufreq_handle_pmi,
+};
+
+
+
+static int __init cbe_cpufreq_pmi_init(void)
+{
+ cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
+
+ if (!cbe_cpufreq_has_pmi)
+ return -ENODEV;
+
+ cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+
+ return 0;
+}
+
+static void __exit cbe_cpufreq_pmi_exit(void)
+{
+ cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+ pmi_unregister_handler(&cbe_pmi_handler);
+}
+
+module_init(cbe_cpufreq_pmi_init);
+module_exit(cbe_cpufreq_pmi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
new file mode 100644
index 000000000000..9e5bc8e388a0
--- /dev/null
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (C) 2002,2003 Intrinsyc Software
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * History:
+ * 31-Jul-2002 : Initial version [FB]
+ * 29-Jan-2003 : added PXA255 support [FB]
+ * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.)
+ *
+ * Note:
+ * This driver may change the memory bus clock rate, but will not do any
+ * platform specific access timing changes... for example if you have flash
+ * memory connected to CS0, you will need to register a platform specific
+ * notifier which will adjust the memory access strobes to maintain a
+ * minimum strobe width.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+
+#include <mach/pxa2xx-regs.h>
+#include <mach/smemc.h>
+
+#ifdef DEBUG
+static unsigned int freq_debug;
+module_param(freq_debug, uint, 0);
+MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
+#else
+#define freq_debug 0
+#endif
+
+static struct regulator *vcc_core;
+
+static unsigned int pxa27x_maxfreq;
+module_param(pxa27x_maxfreq, uint, 0);
+MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz "
+ "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
+
+typedef struct {
+ unsigned int khz;
+ unsigned int membus;
+ unsigned int cccr;
+ unsigned int div2;
+ unsigned int cclkcfg;
+ int vmin;
+ int vmax;
+} pxa_freqs_t;
+
+/* Define the refresh period in mSec for the SDRAM and the number of rows */
+#define SDRAM_TREF 64 /* standard 64ms SDRAM */
+static unsigned int sdram_rows;
+
+#define CCLKCFG_TURBO 0x1
+#define CCLKCFG_FCS 0x2
+#define CCLKCFG_HALFTURBO 0x4
+#define CCLKCFG_FASTBUS 0x8
+#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
+#define MDREFR_DRI_MASK 0xFFF
+
+#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
+#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
+
+/*
+ * PXA255 definitions
+ */
+/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
+#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS
+
+static pxa_freqs_t pxa255_run_freqs[] =
+{
+ /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
+ { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
+ {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */
+ {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */
+ {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */
+ {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */
+ {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */
+};
+
+/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
+static pxa_freqs_t pxa255_turbo_freqs[] =
+{
+ /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
+ { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
+ {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */
+ {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */
+ {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */
+ {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */
+};
+
+#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
+#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs)
+
+static struct cpufreq_frequency_table
+ pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1];
+static struct cpufreq_frequency_table
+ pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1];
+
+static unsigned int pxa255_turbo_table;
+module_param(pxa255_turbo_table, uint, 0);
+MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
+
+/*
+ * PXA270 definitions
+ *
+ * For the PXA27x:
+ * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
+ *
+ * A = 0 => memory controller clock from table 3-7,
+ * A = 1 => memory controller clock = system bus clock
+ * Run mode frequency = 13 MHz * L
+ * Turbo mode frequency = 13 MHz * L * N
+ * System bus frequency = 13 MHz * L / (B + 1)
+ *
+ * In CCCR:
+ * A = 1
+ * L = 16 oscillator to run mode ratio
+ * 2N = 6 2 * (turbo mode to run mode ratio)
+ *
+ * In CCLKCFG:
+ * B = 1 Fast bus mode
+ * HT = 0 Half-Turbo mode
+ * T = 1 Turbo mode
+ *
+ * For now, just support some of the combinations in table 3-7 of
+ * PXA27x Processor Family Developer's Manual to simplify frequency
+ * change sequences.
+ */
+#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
+#define CCLKCFG2(B, HT, T) \
+ (CCLKCFG_FCS | \
+ ((B) ? CCLKCFG_FASTBUS : 0) | \
+ ((HT) ? CCLKCFG_HALFTURBO : 0) | \
+ ((T) ? CCLKCFG_TURBO : 0))
+
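+/*
+ * Example: the 624 MHz entry below uses PXA27x_CCCR(1, 16, 6), i.e. A=1,
+ * L=16, 2N=6 (N=3): run mode = 13 MHz * 16 = 208 MHz, turbo mode =
+ * 208 MHz * 3 = 624 MHz, with the memory controller clock taken from the
+ * system bus (A=1).
+ */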
+static pxa_freqs_t pxa27x_freqs[] = {
+ {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
+ {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
+ {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
+ {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
+ {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
+ {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 },
+ {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 }
+};
+
+#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
+static struct cpufreq_frequency_table
+ pxa27x_freq_table[NUM_PXA27x_FREQS+1];
+
+extern unsigned get_clk_frequency_khz(int info);
+
+#ifdef CONFIG_REGULATOR
+
+static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
+{
+ int ret = 0;
+ int vmin, vmax;
+
+ if (!cpu_is_pxa27x())
+ return 0;
+
+ vmin = pxa_freq->vmin;
+ vmax = pxa_freq->vmax;
+ if ((vmin == -1) || (vmax == -1))
+ return 0;
+
+ ret = regulator_set_voltage(vcc_core, vmin, vmax);
+ if (ret)
+ pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
+ vmin, vmax);
+ return ret;
+}
+
+static __init void pxa_cpufreq_init_voltages(void)
+{
+ vcc_core = regulator_get(NULL, "vcc_core");
+ if (IS_ERR(vcc_core)) {
+ pr_info("cpufreq: Didn't find vcc_core regulator\n");
+ vcc_core = NULL;
+ } else {
+ pr_info("cpufreq: Found vcc_core regulator\n");
+ }
+}
+#else
+static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
+{
+ return 0;
+}
+
+static __init void pxa_cpufreq_init_voltages(void) { }
+#endif
+
+static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
+ pxa_freqs_t **pxa_freqs)
+{
+ if (cpu_is_pxa25x()) {
+ if (!pxa255_turbo_table) {
+ *pxa_freqs = pxa255_run_freqs;
+ *freq_table = pxa255_run_freq_table;
+ } else {
+ *pxa_freqs = pxa255_turbo_freqs;
+ *freq_table = pxa255_turbo_freq_table;
+ }
+ } else if (cpu_is_pxa27x()) {
+ *pxa_freqs = pxa27x_freqs;
+ *freq_table = pxa27x_freq_table;
+ } else {
+ BUG();
+ }
+}
+
+static void pxa27x_guess_max_freq(void)
+{
+ if (!pxa27x_maxfreq) {
+ pxa27x_maxfreq = 416000;
+ printk(KERN_INFO "PXA CPU 27x max frequency not defined "
+ "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
+ pxa27x_maxfreq);
+ } else {
+ pxa27x_maxfreq *= 1000;
+ }
+}
+
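+/*
+ * Derive the number of SDRAM rows from MDCNFG: the DRAC fields give the row
+ * address width above 11 bits, so rows = 1 << (11 + max(drac0, drac2)).
+ */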
+static void init_sdram_rows(void)
+{
+ uint32_t mdcnfg = __raw_readl(MDCNFG);
+ unsigned int drac2 = 0, drac0 = 0;
+
+ if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
+ drac2 = MDCNFG_DRAC2(mdcnfg);
+
+ if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
+ drac0 = MDCNFG_DRAC0(mdcnfg);
+
+ sdram_rows = 1 << (11 + max(drac0, drac2));
+}
+
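+/*
+ * Compute the MDREFR DRI field for a memory clock given in kHz: freq * 64 ms
+ * is the number of memory clocks per refresh period, divided by the number
+ * of rows and then by 32 (the field counts in units of 32 clocks); the
+ * PXA27x variant subtracts 31 clocks before the division.
+ */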
+static u32 mdrefr_dri(unsigned int freq)
+{
+ u32 interval = freq * SDRAM_TREF / sdram_rows;
+
+ return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
+}
+
+/* find a valid frequency point */
+static int pxa_verify_policy(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pxa_freqs_table;
+ pxa_freqs_t *pxa_freqs;
+ int ret;
+
+ find_freq_tables(&pxa_freqs_table, &pxa_freqs);
+ ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
+
+ if (freq_debug)
+ pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
+ policy->min, policy->max);
+
+ return ret;
+}
+
+static unsigned int pxa_cpufreq_get(unsigned int cpu)
+{
+ return get_clk_frequency_khz(0);
+}
+
+static int pxa_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_frequency_table *pxa_freqs_table;
+ pxa_freqs_t *pxa_freq_settings;
+ struct cpufreq_freqs freqs;
+ unsigned int idx;
+ unsigned long flags;
+ unsigned int new_freq_cpu, new_freq_mem;
+ unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
+ int ret = 0;
+
+ /* Get the current policy */
+ find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
+
+ /* Lookup the next frequency */
+ if (cpufreq_frequency_table_target(policy, pxa_freqs_table,
+ target_freq, relation, &idx)) {
+ return -EINVAL;
+ }
+
+ new_freq_cpu = pxa_freq_settings[idx].khz;
+ new_freq_mem = pxa_freq_settings[idx].membus;
+ freqs.old = policy->cur;
+ freqs.new = new_freq_cpu;
+
+ if (freq_debug)
+ pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
+ freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
+ (new_freq_mem / 2000) : (new_freq_mem / 1000));
+
+ if (vcc_core && freqs.new > freqs.old)
+ ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
+ if (ret)
+ return ret;
+ /*
+ * Tell everyone what we're about to do...
+ * you should add a notify client with any platform specific
+ * Vcc changing capability
+ */
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ /* Calculate the next MDREFR. If we're slowing down the SDRAM clock
+ * we need to preset the smaller DRI before the change. If we're
+ * speeding up we need to set the larger DRI value after the change.
+ */
+ preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR);
+ if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) {
+ preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
+ preset_mdrefr |= mdrefr_dri(new_freq_mem);
+ }
+ postset_mdrefr =
+ (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem);
+
+ /* If we're dividing the memory clock by two for the SDRAM clock, this
+ * must be set prior to the change. Clearing the divide must be done
+ * after the change.
+ */
+ if (pxa_freq_settings[idx].div2) {
+ preset_mdrefr |= MDREFR_DB2_MASK;
+ postset_mdrefr |= MDREFR_DB2_MASK;
+ } else {
+ postset_mdrefr &= ~MDREFR_DB2_MASK;
+ }
+
+ local_irq_save(flags);
+
+	/* Set the new CCCR and prepare CCLKCFG */
+ CCCR = pxa_freq_settings[idx].cccr;
+ cclkcfg = pxa_freq_settings[idx].cclkcfg;
+
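+	/*
+	 * The MDREFR preset, cp14 CCLKCFG write (frequency change) and MDREFR
+	 * postset below are grouped in a 32-byte aligned block (".align 5"
+	 * plus the forward/back branch) so the critical instructions are
+	 * already fetched before the SDRAM timing is switched.
+	 */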
+ asm volatile(" \n\
+ ldr r4, [%1] /* load MDREFR */ \n\
+ b 2f \n\
+ .align 5 \n\
+1: \n\
+ str %3, [%1] /* preset the MDREFR */ \n\
+ mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\
+ str %4, [%1] /* postset the MDREFR */ \n\
+ \n\
+ b 3f \n\
+2: b 1b \n\
+3: nop \n\
+ "
+ : "=&r" (unused)
+ : "r" (MDREFR), "r" (cclkcfg),
+ "r" (preset_mdrefr), "r" (postset_mdrefr)
+ : "r4", "r5");
+ local_irq_restore(flags);
+
+ /*
+ * Tell everyone what we've just done...
+ * you should add a notify client with any platform specific
+ * SDRAM refresh timer adjustments
+ */
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ /*
+	 * Even if the voltage setting fails we do not report it, since the
+	 * frequency change itself succeeded. A failed voltage reduction is not
+	 * a critical failure; only power savings suffer from it.
+	 *
+	 * Note: if the voltage change fails and an error code is returned, a
+	 * bug is triggered (it looks like a deadlock). Should anybody find out
+	 * where, the "return 0" below should become "return ret".
+ */
+ if (vcc_core && freqs.new < freqs.old)
+ ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
+
+ return 0;
+}
+
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int i;
+ unsigned int freq;
+ struct cpufreq_frequency_table *pxa255_freq_table;
+ pxa_freqs_t *pxa255_freqs;
+
+ /* try to guess pxa27x cpu */
+ if (cpu_is_pxa27x())
+ pxa27x_guess_max_freq();
+
+ pxa_cpufreq_init_voltages();
+
+ init_sdram_rows();
+
+ /* set default policy and cpuinfo */
+ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
+ policy->cur = get_clk_frequency_khz(0); /* current freq */
+ policy->min = policy->max = policy->cur;
+
+	/* Generate the pxa25x run-mode cpufreq_frequency_table */
+ for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
+ pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
+ pxa255_run_freq_table[i].index = i;
+ }
+ pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+	/* Generate the pxa25x turbo-mode cpufreq_frequency_table */
+ for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
+ pxa255_turbo_freq_table[i].frequency =
+ pxa255_turbo_freqs[i].khz;
+ pxa255_turbo_freq_table[i].index = i;
+ }
+ pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ pxa255_turbo_table = !!pxa255_turbo_table;
+
+ /* Generate the pxa27x cpufreq_frequency_table struct */
+ for (i = 0; i < NUM_PXA27x_FREQS; i++) {
+ freq = pxa27x_freqs[i].khz;
+ if (freq > pxa27x_maxfreq)
+ break;
+ pxa27x_freq_table[i].frequency = freq;
+ pxa27x_freq_table[i].index = i;
+ }
+ pxa27x_freq_table[i].index = i;
+ pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ /*
+ * Set the policy's minimum and maximum frequencies from the tables
+	 * just constructed. This sets cpuinfo.min_freq, cpuinfo.max_freq,
+	 * policy->min and policy->max.
+ */
+ if (cpu_is_pxa25x()) {
+ find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
+ pr_info("PXA255 cpufreq using %s frequency table\n",
+ pxa255_turbo_table ? "turbo" : "run");
+ cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
+ }
+ else if (cpu_is_pxa27x())
+ cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
+
+ printk(KERN_INFO "PXA CPU frequency change support initialized\n");
+
+ return 0;
+}
+
+static struct cpufreq_driver pxa_cpufreq_driver = {
+ .verify = pxa_verify_policy,
+ .target = pxa_set_target,
+ .init = pxa_cpufreq_init,
+ .get = pxa_cpufreq_get,
+ .name = "PXA2xx",
+};
+
+static int __init pxa_cpu_init(void)
+{
+ int ret = -ENODEV;
+ if (cpu_is_pxa25x() || cpu_is_pxa27x())
+ ret = cpufreq_register_driver(&pxa_cpufreq_driver);
+ return ret;
+}
+
+static void __exit pxa_cpu_exit(void)
+{
+ cpufreq_unregister_driver(&pxa_cpufreq_driver);
+}
+
+
+MODULE_AUTHOR("Intrinsyc Software Inc.");
+MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture");
+MODULE_LICENSE("GPL");
+module_init(pxa_cpu_init);
+module_exit(pxa_cpu_exit);
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
new file mode 100644
index 000000000000..15d60f857ad5
--- /dev/null
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2008 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <mach/generic.h>
+#include <mach/pxa3xx-regs.h>
+
+#define HSS_104M (0)
+#define HSS_156M (1)
+#define HSS_208M (2)
+#define HSS_312M (3)
+
+#define SMCFS_78M (0)
+#define SMCFS_104M (2)
+#define SMCFS_208M (5)
+
+#define SFLFS_104M (0)
+#define SFLFS_156M (1)
+#define SFLFS_208M (2)
+#define SFLFS_312M (3)
+
+#define XSPCLK_156M (0)
+#define XSPCLK_NONE (3)
+
+#define DMCFS_26M (0)
+#define DMCFS_260M (3)
+
+struct pxa3xx_freq_info {
+ unsigned int cpufreq_mhz;
+ unsigned int core_xl : 5;
+ unsigned int core_xn : 3;
+ unsigned int hss : 2;
+ unsigned int dmcfs : 2;
+ unsigned int smcfs : 3;
+ unsigned int sflfs : 2;
+ unsigned int df_clkdiv : 3;
+
+ int vcc_core; /* in mV */
+ int vcc_sram; /* in mV */
+};
+
+#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \
+{ \
+ .cpufreq_mhz = cpufreq, \
+ .core_xl = _xl, \
+ .core_xn = _xn, \
+ .hss = HSS_##_hss##M, \
+ .dmcfs = DMCFS_##_dmc##M, \
+ .smcfs = SMCFS_##_smc##M, \
+ .sflfs = SFLFS_##_sfl##M, \
+ .df_clkdiv = _dfi, \
+ .vcc_core = vcore, \
+ .vcc_sram = vsram, \
+}
+
+static struct pxa3xx_freq_info pxa300_freqs[] = {
+ /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
+ OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
+ OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
+ OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
+ OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
+};
+
+static struct pxa3xx_freq_info pxa320_freqs[] = {
+ /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
+ OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
+ OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
+ OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
+ OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
+ OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */
+};
+
+static unsigned int pxa3xx_freqs_num;
+static struct pxa3xx_freq_info *pxa3xx_freqs;
+static struct cpufreq_frequency_table *pxa3xx_freqs_table;
+
+static int setup_freqs_table(struct cpufreq_policy *policy,
+ struct pxa3xx_freq_info *freqs, int num)
+{
+ struct cpufreq_frequency_table *table;
+ int i;
+
+ table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL);
+ if (table == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ table[i].index = i;
+ table[i].frequency = freqs[i].cpufreq_mhz * 1000;
+ }
+ table[num].index = i;
+ table[num].frequency = CPUFREQ_TABLE_END;
+
+ pxa3xx_freqs = freqs;
+ pxa3xx_freqs_num = num;
+ pxa3xx_freqs_table = table;
+
+ return cpufreq_frequency_table_cpuinfo(policy, table);
+}
+
+static void __update_core_freq(struct pxa3xx_freq_info *info)
+{
+ uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK;
+ uint32_t accr = ACCR;
+ uint32_t xclkcfg;
+
+ accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK);
+ accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
+
+ /* No clock until core PLL is re-locked */
+ accr |= ACCR_XSPCLK(XSPCLK_NONE);
+
+ xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */
+
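+ /*
+ * The mcr below writes the XScale CLKCFG register (cp14, c6), which is
+ * what actually starts the frequency change: 0x2 requests a frequency
+ * change, 0x3 additionally sets the turbo bit (core_xn == 2).
+ */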
+ ACCR = accr;
+ __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
+
+ while ((ACSR & mask) != (accr & mask))
+ cpu_relax();
+}
+
+static void __update_bus_freq(struct pxa3xx_freq_info *info)
+{
+ uint32_t mask;
+ uint32_t accr = ACCR;
+
+ mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
+ ACCR_DMCFS_MASK;
+
+ accr &= ~mask;
+ accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
+ ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
+
+ ACCR = accr;
+
+ while ((ACSR & mask) != (accr & mask))
+ cpu_relax();
+}
+
+static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
+}
+
+static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
+{
+ return pxa3xx_get_clk_frequency_khz(0);
+}
+
+static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct pxa3xx_freq_info *next;
+ struct cpufreq_freqs freqs;
+ unsigned long flags;
+ int idx;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ /* Lookup the next frequency */
+ if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table,
+ target_freq, relation, &idx))
+ return -EINVAL;
+
+ next = &pxa3xx_freqs[idx];
+
+ freqs.old = policy->cur;
+ freqs.new = next->cpufreq_mhz * 1000;
+
+ pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
+ freqs.old / 1000, freqs.new / 1000,
+ (freqs.old == freqs.new) ? " (skipped)" : "");
+
+ if (freqs.old == target_freq)
+ return 0;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ local_irq_save(flags);
+ __update_core_freq(next);
+ __update_bus_freq(next);
+ local_irq_restore(flags);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret = -EINVAL;
+
+ /* set default policy and cpuinfo */
+ policy->cpuinfo.min_freq = 104000;
+ policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
+ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
+ policy->max = pxa3xx_get_clk_frequency_khz(0);
+ policy->cur = policy->min = policy->max;
+
+ if (cpu_is_pxa300() || cpu_is_pxa310())
+ ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs));
+
+ if (cpu_is_pxa320())
+ ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs));
+
+ if (ret) {
+ pr_err("failed to setup frequency table\n");
+ return ret;
+ }
+
+ pr_info("CPUFREQ support for PXA3xx initialized\n");
+ return 0;
+}
+
+static struct cpufreq_driver pxa3xx_cpufreq_driver = {
+ .verify = pxa3xx_cpufreq_verify,
+ .target = pxa3xx_cpufreq_set,
+ .init = pxa3xx_cpufreq_init,
+ .get = pxa3xx_cpufreq_get,
+ .name = "pxa3xx-cpufreq",
+};
+
+static int __init cpufreq_init(void)
+{
+ if (cpu_is_pxa3xx())
+ return cpufreq_register_driver(&pxa3xx_cpufreq_driver);
+
+ return 0;
+}
+module_init(cpufreq_init);
+
+static void __exit cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&pxa3xx_cpufreq_driver);
+}
+module_exit(cpufreq_exit);
+
+MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index bcc053bc02c4..4f1881eee3f1 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -256,7 +256,6 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
goto out;
}
- freqs.cpu = 0;
freqs.flags = 0;
freqs.old = s3c_freq->is_dvs ? FREQ_DVS
: clk_get_rate(s3c_freq->armclk) / 1000;
@@ -274,7 +273,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
if (!to_dvs && freqs.old == freqs.new)
goto out;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (to_dvs) {
pr_debug("cpufreq: enter dvs\n");
@@ -287,7 +286,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new);
}
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
out:
mutex_unlock(&cpufreq_lock);
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 6f9490b3c356..27cacb524796 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -84,7 +84,6 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
if (ret != 0)
return ret;
- freqs.cpu = 0;
freqs.old = clk_get_rate(armclk) / 1000;
freqs.new = s3c64xx_freq_table[i].frequency;
freqs.flags = 0;
@@ -95,7 +94,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
#ifdef CONFIG_REGULATOR
if (vddarm && freqs.new > freqs.old) {
@@ -117,7 +116,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
goto err;
}
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
#ifdef CONFIG_REGULATOR
if (vddarm && freqs.new < freqs.old) {
@@ -141,7 +140,7 @@ err_clk:
if (clk_set_rate(armclk, freqs.old * 1000) < 0)
pr_err("Failed to restore original clock rate\n");
err:
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index a484aaea9809..5c7757073793 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -229,7 +229,6 @@ static int s5pv210_target(struct cpufreq_policy *policy,
}
freqs.new = s5pv210_freq_table[index].frequency;
- freqs.cpu = 0;
if (freqs.new == freqs.old)
goto exit;
@@ -256,7 +255,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
goto exit;
}
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Check if there need to change PLL */
if ((index == L0) || (priv_index == L0))
@@ -468,7 +467,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
}
}
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
if (freqs.new < freqs.old) {
regulator_set_voltage(int_regulator,
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
new file mode 100644
index 000000000000..cff18e87ca58
--- /dev/null
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -0,0 +1,247 @@
+/*
+ * cpu-sa1100.c: clock scaling for the SA1100
+ *
+ * Copyright (C) 2000 2001, The Delft University of Technology
+ *
+ * Authors:
+ * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version
+ * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
+ * - major rewrite for linux-2.3.99
+ * - rewritten for the more generic power management scheme in
+ * linux-2.4.5-rmk1
+ *
+ * This software has been developed while working on the LART
+ * computing board (http://www.lartmaker.nl/), which is
+ * sponsored by the Mobile Multi-media Communications
+ * (http://www.mobimedia.org/) and Ubiquitous Communications
+ * (http://www.ubicom.tudelft.nl/) projects.
+ *
+ * The authors can be reached at:
+ *
+ * Erik Mouw
+ * Information and Communication Theory Group
+ * Faculty of Information Technology and Systems
+ * Delft University of Technology
+ * P.O. Box 5031
+ * 2600 GA Delft
+ * The Netherlands
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Theory of operations
+ * ====================
+ *
+ * Clock scaling can be used to lower the power consumption of the CPU
+ * core. This will give you a somewhat longer running time.
+ *
+ * The SA-1100 has a single register to change the core clock speed:
+ *
+ * PPCR 0x90020014 PLL config
+ *
+ * However, the DRAM timings are closely related to the core clock
+ * speed, so we need to change these, too. The used registers are:
+ *
+ * MDCNFG 0xA0000000 DRAM config
+ * MDCAS0 0xA0000004 Access waveform
+ * MDCAS1 0xA0000008 Access waveform
+ * MDCAS2 0xA000000C Access waveform
+ *
+ * Care must be taken to change the DRAM parameters the correct way,
+ * because otherwise the DRAM becomes unusable and the kernel will
+ * crash.
+ *
+ * The simple solution to avoid a kernel crash is to put the actual
+ * clock change in ROM and jump to that code from the kernel. The main
+ * disadvantage is that the ROM has to be modified, which is not
+ * possible on all SA-1100 platforms. Another disadvantage is that
+ * jumping to ROM makes clock switching unnecessarily complicated.
+ *
+ * The idea behind this driver is that the memory configuration can be
+ * changed while running from DRAM (even with interrupts turned on!)
+ * as long as all re-configuration steps yield a valid DRAM
+ * configuration. The advantages are clear: it will run on all SA-1100
+ * platforms, and the code is very simple.
+ *
+ * If you really want to understand what is going on in
+ * sa1100_update_dram_timings(), you'll have to read sections 8.2,
+ * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor
+ * Developers Manual" (available for free from Intel).
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+
+#include <mach/generic.h>
+#include <mach/hardware.h>
+
+struct sa1100_dram_regs {
+ int speed;
+ u32 mdcnfg;
+ u32 mdcas0;
+ u32 mdcas1;
+ u32 mdcas2;
+};
+
+
+static struct cpufreq_driver sa1100_driver;
+
+static struct sa1100_dram_regs sa1100_dram_settings[] = {
+ /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */
+ { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */
+ { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */
+ { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */
+ {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */
+ {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */
+ {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */
+ {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */
+ {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */
+ {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */
+ {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */
+ {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */
+ {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */
+ {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */
+ {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */
+ {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */
+ {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */
+ { 0, 0, 0, 0, 0 } /* last entry */
+};
+
+static void sa1100_update_dram_timings(int current_speed, int new_speed)
+{
+ struct sa1100_dram_regs *settings = sa1100_dram_settings;
+
+ /* find speed */
+ while (settings->speed != 0) {
+ if (new_speed == settings->speed)
+ break;
+
+ settings++;
+ }
+
+ if (settings->speed == 0) {
+ panic("%s: couldn't find dram setting for speed %d\n",
+ __func__, new_speed);
+ }
+
+ /* No risk, no fun: run with interrupts on! */
+ if (new_speed > current_speed) {
+ /* We're going FASTER, so first relax the memory
+ * timings before changing the core frequency
+ */
+
+ /* Half the memory access clock */
+ MDCNFG |= MDCNFG_CDB2;
+
+ /* The order of these statements IS important, keep 8
+ * pulses!!
+ */
+ MDCAS2 = settings->mdcas2;
+ MDCAS1 = settings->mdcas1;
+ MDCAS0 = settings->mdcas0;
+ MDCNFG = settings->mdcnfg;
+ } else {
+ /* We're going SLOWER: first decrease the core
+ * frequency and then tighten the memory settings.
+ */
+
+ /* Half the memory access clock */
+ MDCNFG |= MDCNFG_CDB2;
+
+ /* The order of these statements IS important, keep 8
+ * pulses!!
+ */
+ MDCAS0 = settings->mdcas0;
+ MDCAS1 = settings->mdcas1;
+ MDCAS2 = settings->mdcas2;
+ MDCNFG = settings->mdcnfg;
+ }
+}
+
+static int sa1100_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int cur = sa11x0_getspeed(0);
+ unsigned int new_ppcr;
+ struct cpufreq_freqs freqs;
+
+ new_ppcr = sa11x0_freq_to_ppcr(target_freq);
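+ /*
+ * Adjust the chosen PPCR setting for the requested relation: for
+ * CPUFREQ_RELATION_L, step down one setting if it would exceed
+ * policy->max; for CPUFREQ_RELATION_H, step down if it overshoots the
+ * target and the next lower setting still satisfies policy->min.
+ */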
+ switch (relation) {
+ case CPUFREQ_RELATION_L:
+ if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
+ new_ppcr--;
+ break;
+ case CPUFREQ_RELATION_H:
+ if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
+ (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
+ new_ppcr--;
+ break;
+ }
+
+ freqs.old = cur;
+ freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ if (freqs.new > cur)
+ sa1100_update_dram_timings(cur, freqs.new);
+
+ PPCR = new_ppcr;
+
+ if (freqs.new < cur)
+ sa1100_update_dram_timings(cur, freqs.new);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+ policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
+ policy->cpuinfo.min_freq = 59000;
+ policy->cpuinfo.max_freq = 287000;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ return 0;
+}
+
+static struct cpufreq_driver sa1100_driver __refdata = {
+ .flags = CPUFREQ_STICKY,
+ .verify = sa11x0_verify_speed,
+ .target = sa1100_target,
+ .get = sa11x0_getspeed,
+ .init = sa1100_cpu_init,
+ .name = "sa1100",
+};
+
+static int __init sa1100_dram_init(void)
+{
+ if (cpu_is_sa1100())
+ return cpufreq_register_driver(&sa1100_driver);
+ else
+ return -ENODEV;
+}
+
+arch_initcall(sa1100_dram_init);
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
new file mode 100644
index 000000000000..39c90b6f4286
--- /dev/null
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -0,0 +1,406 @@
+/*
+ * linux/arch/arm/mach-sa1100/cpu-sa1110.c
+ *
+ * Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Note: there are two errata that apply to the SA1110 here:
+ * 7 - SDRAM auto-power-up failure (rev A0)
+ * 13 - Corruption of internal register reads/writes following
+ * SDRAM reads (rev A0, B0, B1)
+ *
+ * We ignore rev. A0 and B0 devices; I don't think they're worth supporting.
+ *
+ * The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type
+ */
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#include <asm/cputype.h>
+#include <asm/mach-types.h>
+
+#include <mach/generic.h>
+#include <mach/hardware.h>
+
+#undef DEBUG
+
+struct sdram_params {
+ const char name[20];
+ u_char rows; /* bits */
+ u_char cas_latency; /* cycles */
+ u_char tck; /* clock cycle time (ns) */
+ u_char trcd; /* activate to r/w (ns) */
+ u_char trp; /* precharge to activate (ns) */
+ u_char twr; /* write recovery time (ns) */
+ u_short refresh; /* refresh time for array (us) */
+};
+
+struct sdram_info {
+ u_int mdcnfg;
+ u_int mdrefr;
+ u_int mdcas[3];
+};
+
+static struct sdram_params sdram_tbl[] __initdata = {
+ { /* Toshiba TC59SM716 CL2 */
+ .name = "TC59SM716-CL2",
+ .rows = 12,
+ .tck = 10,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 10,
+ .refresh = 64000,
+ .cas_latency = 2,
+ }, { /* Toshiba TC59SM716 CL3 */
+ .name = "TC59SM716-CL3",
+ .rows = 12,
+ .tck = 8,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 8,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Samsung K4S641632D TC75 */
+ .name = "K4S641632D",
+ .rows = 14,
+ .tck = 9,
+ .trcd = 27,
+ .trp = 20,
+ .twr = 9,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Samsung K4S281632B-1H */
+ .name = "K4S281632B-1H",
+ .rows = 12,
+ .tck = 10,
+ .trp = 20,
+ .twr = 10,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Samsung KM416S4030CT */
+ .name = "KM416S4030CT",
+ .rows = 13,
+ .tck = 8,
+ .trcd = 24, /* 3 CLKs */
+ .trp = 24, /* 3 CLKs */
+ .twr = 16, /* Trdl: 2 CLKs */
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Winbond W982516AH75L CL3 */
+ .name = "W982516AH75L",
+ .rows = 16,
+ .tck = 8,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 8,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Micron MT48LC8M16A2TG-75 */
+ .name = "MT48LC8M16A2TG-75",
+ .rows = 12,
+ .tck = 8,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 8,
+ .refresh = 64000,
+ .cas_latency = 3,
+ },
+};
+
+static struct sdram_params sdram_params;
+
+/*
+ * Given a period in ns and frequency in khz, calculate the number of
+ * cycles of frequency in period. Note that we round up to the next
+ * cycle, even if we are only slightly over.
+ */
+static inline u_int ns_to_cycles(u_int ns, u_int khz)
+{
+ return (ns * khz + 999999) / 1000000;
+}
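+
+/*
+ * Example: with tck = 20 ns and a 103200 kHz memory clock,
+ * ns_to_cycles(20, 103200) = (20 * 103200 + 999999) / 1000000 = 3,
+ * i.e. 2.064 cycles rounded up to 3.
+ */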
+
+/*
+ * Create the MDCAS register bit pattern.
+ */
+static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)
+{
+ u_int shift;
+
+ rcd = 2 * rcd - 1;
+ shift = delayed + 1 + rcd;
+
+ mdcas[0] = (1 << rcd) - 1;
+ mdcas[0] |= 0x55555555 << shift;
+ mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1);
+}
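+
+/*
+ * In other words, for an rcd of N cycles: mdcas[0] keeps its low (2*N - 1)
+ * bits set (covering the RAS-to-CAS delay) and then starts an alternating
+ * 0/1 pattern (0x55555555) at bit (delayed + 2*N); mdcas[1] and mdcas[2]
+ * continue that pattern, phase-corrected by (shift & 1).
+ */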
+
+static void
+sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz,
+ struct sdram_params *sdram)
+{
+ u_int mem_khz, sd_khz, trp, twr;
+
+ mem_khz = cpu_khz / 2;
+ sd_khz = mem_khz;
+
+ /*
+ * If SDCLK would invalidate the SDRAM timings,
+ * run SDCLK at half speed.
+ *
+ * CPU steppings prior to B2 must either run the memory at
+ * half speed or use delayed read latching (errata 13).
+ */
+ if ((ns_to_cycles(sdram->tck, sd_khz) > 1) ||
+ (CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000))
+ sd_khz /= 2;
+
+ sd->mdcnfg = MDCNFG & 0x007f007f;
+
+ twr = ns_to_cycles(sdram->twr, mem_khz);
+
+ /* trp should always be >1 */
+ trp = ns_to_cycles(sdram->trp, mem_khz) - 1;
+ if (trp < 1)
+ trp = 1;
+
+ sd->mdcnfg |= trp << 8;
+ sd->mdcnfg |= trp << 24;
+ sd->mdcnfg |= sdram->cas_latency << 12;
+ sd->mdcnfg |= sdram->cas_latency << 28;
+ sd->mdcnfg |= twr << 14;
+ sd->mdcnfg |= twr << 30;
+
+ sd->mdrefr = MDREFR & 0xffbffff0;
+ sd->mdrefr |= 7;
+
+ if (sd_khz != mem_khz)
+ sd->mdrefr |= MDREFR_K1DB2;
+
+ /* initial number of '1's in MDCAS + 1 */
+ set_mdcas(sd->mdcas, sd_khz >= 62000,
+ ns_to_cycles(sdram->trcd, mem_khz));
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n",
+ sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1],
+ sd->mdcas[2]);
+#endif
+}
+
+/*
+ * Set the SDRAM refresh rate.
+ */
+static inline void sdram_set_refresh(u_int dri)
+{
+ MDREFR = (MDREFR & 0xffff000f) | (dri << 4);
+ (void) MDREFR;
+}
+
+/*
+ * Update the refresh period. We do this such that we always refresh
+ * the SDRAMs within their permissible period. The refresh period is
+ * always a multiple of the memory clock (fixed at cpu_clock / 2).
+ *
+ * FIXME: we don't currently take account of burst accesses here,
+ * but neither does Intel's DM nor Angel.
+ */
+static void
+sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
+{
+ u_int ns_row = (sdram->refresh * 1000) >> sdram->rows;
+ u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32;
+
+#ifdef DEBUG
+ mdelay(250);
+ printk(KERN_DEBUG "new dri value = %d\n", dri);
+#endif
+
+ sdram_set_refresh(dri);
+}
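+
+/*
+ * Example: for a 64000 us, 12 row-bit part at a 206400 kHz core clock,
+ * ns_row = (64000 * 1000) >> 12 = 15625 and
+ * dri = ns_to_cycles(15625, 103200) / 32 = 1613 / 32 = 50.
+ */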
+
+/*
+ * Ok, set the CPU frequency.
+ */
+static int sa1110_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct sdram_params *sdram = &sdram_params;
+ struct cpufreq_freqs freqs;
+ struct sdram_info sd;
+ unsigned long flags;
+ unsigned int ppcr, unused;
+
+ switch (relation) {
+ case CPUFREQ_RELATION_L:
+ ppcr = sa11x0_freq_to_ppcr(target_freq);
+ if (sa11x0_ppcr_to_freq(ppcr) > policy->max)
+ ppcr--;
+ break;
+ case CPUFREQ_RELATION_H:
+ ppcr = sa11x0_freq_to_ppcr(target_freq);
+ if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) &&
+ (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
+ ppcr--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ freqs.old = sa11x0_getspeed(0);
+ freqs.new = sa11x0_ppcr_to_freq(ppcr);
+
+ sdram_calculate_timing(&sd, freqs.new, sdram);
+
+#if 0
+ /*
+ * These values are wrong according to the SA1110 documentation
+ * and errata, but they seem to work. Need to get a storage
+ * scope on to the SDRAM signals to work out why.
+ */
+ if (policy->max < 147500) {
+ sd.mdrefr |= MDREFR_K1DB2;
+ sd.mdcas[0] = 0xaaaaaa7f;
+ } else {
+ sd.mdrefr &= ~MDREFR_K1DB2;
+ sd.mdcas[0] = 0xaaaaaa9f;
+ }
+ sd.mdcas[1] = 0xaaaaaaaa;
+ sd.mdcas[2] = 0xaaaaaaaa;
+#endif
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ /*
+ * The clock could be going away for some time. Set the SDRAMs
+ * to refresh rapidly (every 64 memory clock cycles). To get
+ * through the whole array, we need to wait 262144 mclk cycles.
+ * We wait 20ms to be safe.
+ */
+ sdram_set_refresh(2);
+ if (!irqs_disabled())
+ msleep(20);
+ else
+ mdelay(20);
+
+ /*
+ * Reprogram the DRAM timings with interrupts disabled, and
+ * ensure that we are doing this within a complete cache line.
+ * This means that we won't access SDRAM for the duration of
+ * the programming.
+ */
+ local_irq_save(flags);
+ asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
+ udelay(10);
+ __asm__ __volatile__("\n\
+ b 2f \n\
+ .align 5 \n\
+1: str %3, [%1, #0] @ MDCNFG \n\
+ str %4, [%1, #28] @ MDREFR \n\
+ str %5, [%1, #4] @ MDCAS0 \n\
+ str %6, [%1, #8] @ MDCAS1 \n\
+ str %7, [%1, #12] @ MDCAS2 \n\
+ str %8, [%2, #0] @ PPCR \n\
+ ldr %0, [%1, #0] \n\
+ b 3f \n\
+2: b 1b \n\
+3: nop \n\
+ nop"
+ : "=&r" (unused)
+ : "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg),
+ "r" (sd.mdrefr), "r" (sd.mdcas[0]),
+ "r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr));
+ local_irq_restore(flags);
+
+ /*
+ * Now, return the SDRAM refresh back to normal.
+ */
+ sdram_update_refresh(freqs.new, sdram);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+ policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
+ policy->cpuinfo.min_freq = 59000;
+ policy->cpuinfo.max_freq = 287000;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ return 0;
+}
+
+/* sa1110_driver needs __refdata because it must remain valid after the
+ * __init code that registers it with cpufreq_register_driver() is freed. */
+static struct cpufreq_driver sa1110_driver __refdata = {
+ .flags = CPUFREQ_STICKY,
+ .verify = sa11x0_verify_speed,
+ .target = sa1110_target,
+ .get = sa11x0_getspeed,
+ .init = sa1110_cpu_init,
+ .name = "sa1110",
+};
+
+static struct sdram_params *sa1110_find_sdram(const char *name)
+{
+ struct sdram_params *sdram;
+
+ for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl);
+ sdram++)
+ if (strcmp(name, sdram->name) == 0)
+ return sdram;
+
+ return NULL;
+}
+
+static char sdram_name[16];
+
+static int __init sa1110_clk_init(void)
+{
+ struct sdram_params *sdram;
+ const char *name = sdram_name;
+
+ if (!cpu_is_sa1110())
+ return -ENODEV;
+
+ if (!name[0]) {
+ if (machine_is_assabet())
+ name = "TC59SM716-CL3";
+ if (machine_is_pt_system3())
+ name = "K4S641632D";
+ if (machine_is_h3100())
+ name = "KM416S4030CT";
+ if (machine_is_jornada720())
+ name = "K4S281632B-1H";
+ if (machine_is_nanoengine())
+ name = "MT48LC8M16A2TG-75";
+ }
+
+ sdram = sa1110_find_sdram(name);
+ if (sdram) {
+ printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d"
+ " twr: %d refresh: %d cas_latency: %d\n",
+ sdram->tck, sdram->trcd, sdram->trp,
+ sdram->twr, sdram->refresh, sdram->cas_latency);
+
+ memcpy(&sdram_params, sdram, sizeof(sdram_params));
+
+ return cpufreq_register_driver(&sa1110_driver);
+ }
+
+ return 0;
+}
+
+module_param_string(sdram, sdram_name, sizeof(sdram_name), 0);
+arch_initcall(sa1110_clk_init);
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index e42e073cd9b8..f740b134d27b 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -53,7 +53,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
}
}
-static void sc520_freq_set_cpu_state(unsigned int state)
+static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
+ unsigned int state)
{
struct cpufreq_freqs freqs;
@@ -61,9 +62,8 @@ static void sc520_freq_set_cpu_state(unsigned int state)
freqs.old = sc520_freq_get_cpu_frequency(0);
freqs.new = sc520_freq_table[state].frequency;
- freqs.cpu = 0; /* AMD Elan is UP */
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
pr_debug("attempting to set frequency to %i kHz\n",
sc520_freq_table[state].frequency);
@@ -75,7 +75,7 @@ static void sc520_freq_set_cpu_state(unsigned int state)
local_irq_enable();
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
};
static int sc520_freq_verify(struct cpufreq_policy *policy)
@@ -93,7 +93,7 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
target_freq, relation, &newstate))
return -EINVAL;
- sc520_freq_set_cpu_state(newstate);
+ sc520_freq_set_cpu_state(policy, newstate);
return 0;
}
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
new file mode 100644
index 000000000000..73adb64651e8
--- /dev/null
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -0,0 +1,189 @@
+/*
+ * cpufreq driver for the SuperH processors.
+ *
+ * Copyright (C) 2002 - 2012 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown
+ *
+ * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
+ *
+ * Copyright (C) 2004-2007 Atmel Corporation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "cpufreq: " fmt
+
+#include <linux/types.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/sched.h> /* set_cpus_allowed() */
+#include <linux/clk.h>
+#include <linux/percpu.h>
+#include <linux/sh_clk.h>
+
+static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+
+static unsigned int sh_cpufreq_get(unsigned int cpu)
+{
+ return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
+}
+
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int sh_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+ cpumask_t cpus_allowed;
+ struct cpufreq_freqs freqs;
+ struct device *dev;
+ long freq;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ BUG_ON(smp_processor_id() != cpu);
+
+ dev = get_cpu_device(cpu);
+
+ /* Convert target_freq from kHz to Hz */
+ freq = clk_round_rate(cpuclk, target_freq * 1000);
+
+ if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
+ return -EINVAL;
+
+ dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
+
+ freqs.old = sh_cpufreq_get(cpu);
+ freqs.new = (freq + 500) / 1000;
+ freqs.flags = 0;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+ clk_set_rate(cpuclk, freq);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ dev_dbg(dev, "set frequency %lu Hz\n", freq);
+
+ return 0;
+}
+
+static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
+ struct cpufreq_frequency_table *freq_table;
+
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table)
+ return cpufreq_frequency_table_verify(policy, freq_table);
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ return 0;
+}
+
+static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+ struct cpufreq_frequency_table *freq_table;
+ struct device *dev;
+
+ dev = get_cpu_device(cpu);
+
+ cpuclk = clk_get(dev, "cpu_clk");
+ if (IS_ERR(cpuclk)) {
+ dev_err(dev, "couldn't get CPU clk\n");
+ return PTR_ERR(cpuclk);
+ }
+
+ policy->cur = sh_cpufreq_get(cpu);
+
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table) {
+ int result;
+
+ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (!result)
+ cpufreq_frequency_table_get_attr(freq_table, cpu);
+ } else {
+ dev_notice(dev, "no frequency table found, falling back "
+ "to rate rounding.\n");
+
+ policy->min = policy->cpuinfo.min_freq =
+ (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->max = policy->cpuinfo.max_freq =
+ (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+ }
+
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+ dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
+ "Maximum %u.%03u MHz.\n",
+ policy->min / 1000, policy->min % 1000,
+ policy->max / 1000, policy->max % 1000);
+
+ return 0;
+}
+
+static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+
+ cpufreq_frequency_table_put_attr(cpu);
+ clk_put(cpuclk);
+
+ return 0;
+}
+
+static struct freq_attr *sh_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver sh_cpufreq_driver = {
+ .owner = THIS_MODULE,
+ .name = "sh",
+ .get = sh_cpufreq_get,
+ .target = sh_cpufreq_target,
+ .verify = sh_cpufreq_verify,
+ .init = sh_cpufreq_cpu_init,
+ .exit = sh_cpufreq_cpu_exit,
+ .attr = sh_freq_attr,
+};
+
+static int __init sh_cpufreq_module_init(void)
+{
+ pr_notice("SuperH CPU frequency driver.\n");
+ return cpufreq_register_driver(&sh_cpufreq_driver);
+}
+
+static void __exit sh_cpufreq_module_exit(void)
+{
+ cpufreq_unregister_driver(&sh_cpufreq_driver);
+}
+
+module_init(sh_cpufreq_module_init);
+module_exit(sh_cpufreq_module_exit);
+
+MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
+MODULE_DESCRIPTION("cpufreq driver for SuperH");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
new file mode 100644
index 000000000000..306ae462bba6
--- /dev/null
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -0,0 +1,408 @@
+/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/asi.h>
+#include <asm/timer.h>
+
+static struct cpufreq_driver *cpufreq_us2e_driver;
+
+struct us2e_freq_percpu_info {
+ struct cpufreq_frequency_table table[6];
+};
+
+/* Indexed by cpu number. */
+static struct us2e_freq_percpu_info *us2e_freq_table;
+
+#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
+#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
+
+/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
+ * in the ESTAR mode control register.
+ */
+#define ESTAR_MODE_DIV_1 0x0000000000000000UL
+#define ESTAR_MODE_DIV_2 0x0000000000000001UL
+#define ESTAR_MODE_DIV_4 0x0000000000000003UL
+#define ESTAR_MODE_DIV_6 0x0000000000000002UL
+#define ESTAR_MODE_DIV_8 0x0000000000000004UL
+#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
+
+#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
+#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
+#define MCTRL0_REFR_COUNT_SHIFT 8
+#define MCTRL0_REFR_INTERVAL 7800
+#define MCTRL0_REFR_CLKS_P_CNT 64
+
+static unsigned long read_hbreg(unsigned long addr)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=&r" (ret)
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+ return ret;
+}
+
+static void write_hbreg(unsigned long addr, unsigned long val)
+{
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
+ : "memory");
+ if (addr == HBIRD_ESTAR_MODE_ADDR) {
+ /* Need to wait 16 clock cycles for the PLL to lock. */
+ udelay(1);
+ }
+}
+
+static void self_refresh_ctl(int enable)
+{
+ unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+
+ if (enable)
+ mctrl |= MCTRL0_SREFRESH_ENAB;
+ else
+ mctrl &= ~MCTRL0_SREFRESH_ENAB;
+ write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
+ (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+}
+
+static void frob_mem_refresh(int cpu_slowing_down,
+ unsigned long clock_tick,
+ unsigned long old_divisor, unsigned long divisor)
+{
+ unsigned long old_refr_count, refr_count, mctrl;
+
+ refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
+ refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
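+ /*
+ * i.e. the MCTRL0_REFR_INTERVAL (7800, presumably ns) expressed in
+ * divided CPU clocks, in units of MCTRL0_REFR_CLKS_P_CNT (64) clocks;
+ * clock_tick is in Hz at this point, hence the 10^9 in the divisor.
+ */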
+
+ mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+ old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
+ >> MCTRL0_REFR_COUNT_SHIFT;
+
+ mctrl &= ~MCTRL0_REFR_COUNT_MASK;
+ mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
+ write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
+ mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+
+ if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
+ unsigned long usecs;
+
+ /* We have to wait for both refresh counts (old
+ * and new) to go to zero.
+ */
+ usecs = (MCTRL0_REFR_CLKS_P_CNT *
+ (refr_count + old_refr_count) *
+ 1000000UL *
+ old_divisor) / clock_tick;
+ udelay(usecs + 1UL);
+ }
+}
+
+static void us2e_transition(unsigned long estar, unsigned long new_bits,
+ unsigned long clock_tick,
+ unsigned long old_divisor, unsigned long divisor)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ estar &= ~ESTAR_MODE_DIV_MASK;
+
+ /* This is based upon the state transition diagram in the IIe manual. */
+ if (old_divisor == 2 && divisor == 1) {
+ self_refresh_ctl(0);
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+ } else if (old_divisor == 1 && divisor == 2) {
+ frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ self_refresh_ctl(1);
+ } else if (old_divisor == 1 && divisor > 2) {
+ us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+ 1, 2);
+ us2e_transition(estar, new_bits, clock_tick,
+ 2, divisor);
+ } else if (old_divisor > 2 && divisor == 1) {
+ us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+ old_divisor, 2);
+ us2e_transition(estar, new_bits, clock_tick,
+ 2, divisor);
+ } else if (old_divisor < divisor) {
+ frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ } else if (old_divisor > divisor) {
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+ } else {
+ BUG();
+ }
+
+ local_irq_restore(flags);
+}
+
+static unsigned long index_to_estar_mode(unsigned int index)
+{
+ switch (index) {
+ case 0:
+ return ESTAR_MODE_DIV_1;
+
+ case 1:
+ return ESTAR_MODE_DIV_2;
+
+ case 2:
+ return ESTAR_MODE_DIV_4;
+
+ case 3:
+ return ESTAR_MODE_DIV_6;
+
+ case 4:
+ return ESTAR_MODE_DIV_8;
+
+ default:
+ BUG();
+ }
+}
+
+static unsigned long index_to_divisor(unsigned int index)
+{
+ switch (index) {
+ case 0:
+ return 1;
+
+ case 1:
+ return 2;
+
+ case 2:
+ return 4;
+
+ case 3:
+ return 6;
+
+ case 4:
+ return 8;
+
+ default:
+ BUG();
+ }
+}
+
+static unsigned long estar_to_divisor(unsigned long estar)
+{
+ unsigned long ret;
+
+ switch (estar & ESTAR_MODE_DIV_MASK) {
+ case ESTAR_MODE_DIV_1:
+ ret = 1;
+ break;
+ case ESTAR_MODE_DIV_2:
+ ret = 2;
+ break;
+ case ESTAR_MODE_DIV_4:
+ ret = 4;
+ break;
+ case ESTAR_MODE_DIV_6:
+ ret = 6;
+ break;
+ case ESTAR_MODE_DIV_8:
+ ret = 8;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+static unsigned int us2e_freq_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned long clock_tick, estar;
+
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ return clock_tick / estar_to_divisor(estar);
+}
+
+static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long new_bits, new_freq;
+ unsigned long clock_tick, divisor, old_divisor, estar;
+ cpumask_t cpus_allowed;
+ struct cpufreq_freqs freqs;
+
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ new_bits = index_to_estar_mode(index);
+ divisor = index_to_divisor(index);
+ new_freq /= divisor;
+
+ estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+ old_divisor = estar_to_divisor(estar);
+
+ freqs.old = clock_tick / old_divisor;
+ freqs.new = new_freq;
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ if (old_divisor != divisor)
+ us2e_transition(estar, new_bits, clock_tick * 1000,
+ old_divisor, divisor);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+}
+
+static int us2e_freq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int new_index = 0;
+
+ if (cpufreq_frequency_table_target(policy,
+ &us2e_freq_table[policy->cpu].table[0],
+ target_freq, relation, &new_index))
+ return -EINVAL;
+
+ us2e_set_cpu_divider_index(policy, new_index);
+
+ return 0;
+}
+
+static int us2e_freq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy,
+ &us2e_freq_table[policy->cpu].table[0]);
+}
+
+static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ struct cpufreq_frequency_table *table =
+ &us2e_freq_table[cpu].table[0];
+
+ table[0].index = 0;
+ table[0].frequency = clock_tick / 1;
+ table[1].index = 1;
+ table[1].frequency = clock_tick / 2;
+ table[2].index = 2;
+ table[2].frequency = clock_tick / 4;
+ table[3].index = 3;
+ table[3].frequency = clock_tick / 6;
+ table[4].index = 4;
+ table[4].frequency = clock_tick / 8;
+ table[5].index = 5;
+ table[5].frequency = CPUFREQ_TABLE_END;
+
+ policy->cpuinfo.transition_latency = 0;
+ policy->cur = clock_tick;
+
+ return cpufreq_frequency_table_cpuinfo(policy, table);
+}
+
+static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ if (cpufreq_us2e_driver)
+ us2e_set_cpu_divider_index(policy, 0);
+
+ return 0;
+}
+
+static int __init us2e_freq_init(void)
+{
+ unsigned long manuf, impl, ver;
+ int ret;
+
+ if (tlb_type != spitfire)
+ return -ENODEV;
+
+ __asm__("rdpr %%ver, %0" : "=r" (ver));
+ manuf = ((ver >> 48) & 0xffff);
+ impl = ((ver >> 32) & 0xffff);
+
+ if (manuf == 0x17 && impl == 0x13) {
+ struct cpufreq_driver *driver;
+
+ ret = -ENOMEM;
+ driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+ if (!driver)
+ goto err_out;
+
+ us2e_freq_table = kzalloc(
+ (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
+ GFP_KERNEL);
+ if (!us2e_freq_table)
+ goto err_out;
+
+ driver->init = us2e_freq_cpu_init;
+ driver->verify = us2e_freq_verify;
+ driver->target = us2e_freq_target;
+ driver->get = us2e_freq_get;
+ driver->exit = us2e_freq_cpu_exit;
+ driver->owner = THIS_MODULE;
+ strcpy(driver->name, "UltraSPARC-IIe");
+
+ cpufreq_us2e_driver = driver;
+ ret = cpufreq_register_driver(driver);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ if (driver) {
+ kfree(driver);
+ cpufreq_us2e_driver = NULL;
+ }
+ kfree(us2e_freq_table);
+ us2e_freq_table = NULL;
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static void __exit us2e_freq_exit(void)
+{
+ if (cpufreq_us2e_driver) {
+ cpufreq_unregister_driver(cpufreq_us2e_driver);
+ kfree(cpufreq_us2e_driver);
+ cpufreq_us2e_driver = NULL;
+ kfree(us2e_freq_table);
+ us2e_freq_table = NULL;
+ }
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
+MODULE_LICENSE("GPL");
+
+module_init(us2e_freq_init);
+module_exit(us2e_freq_exit);
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
new file mode 100644
index 000000000000..c71ee142347a
--- /dev/null
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -0,0 +1,269 @@
+/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <asm/head.h>
+#include <asm/timer.h>
+
+static struct cpufreq_driver *cpufreq_us3_driver;
+
+struct us3_freq_percpu_info {
+ struct cpufreq_frequency_table table[4];
+};
+
+/* Indexed by cpu number. */
+static struct us3_freq_percpu_info *us3_freq_table;
+
+/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
+ * in the Safari config register.
+ */
+#define SAFARI_CFG_DIV_1 0x0000000000000000UL
+#define SAFARI_CFG_DIV_2 0x0000000040000000UL
+#define SAFARI_CFG_DIV_32 0x0000000080000000UL
+#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
+
+static unsigned long read_safari_cfg(void)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("ldxa [%%g0] %1, %0"
+ : "=&r" (ret)
+ : "i" (ASI_SAFARI_CONFIG));
+ return ret;
+}
+
+static void write_safari_cfg(unsigned long val)
+{
+ __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (val), "i" (ASI_SAFARI_CONFIG)
+ : "memory");
+}
+
+static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
+{
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ unsigned long ret;
+
+ switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
+ case SAFARI_CFG_DIV_1:
+ ret = clock_tick / 1;
+ break;
+ case SAFARI_CFG_DIV_2:
+ ret = clock_tick / 2;
+ break;
+ case SAFARI_CFG_DIV_32:
+ ret = clock_tick / 32;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+static unsigned int us3_freq_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned long reg;
+ unsigned int ret;
+
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ reg = read_safari_cfg();
+ ret = get_current_freq(cpu, reg);
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ return ret;
+}
+
+static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long new_bits, new_freq, reg;
+ cpumask_t cpus_allowed;
+ struct cpufreq_freqs freqs;
+
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ new_freq = sparc64_get_clock_tick(cpu) / 1000;
+ switch (index) {
+ case 0:
+ new_bits = SAFARI_CFG_DIV_1;
+ new_freq /= 1;
+ break;
+ case 1:
+ new_bits = SAFARI_CFG_DIV_2;
+ new_freq /= 2;
+ break;
+ case 2:
+ new_bits = SAFARI_CFG_DIV_32;
+ new_freq /= 32;
+ break;
+
+ default:
+ BUG();
+ }
+
+ reg = read_safari_cfg();
+
+ freqs.old = get_current_freq(cpu, reg);
+ freqs.new = new_freq;
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ reg &= ~SAFARI_CFG_DIV_MASK;
+ reg |= new_bits;
+ write_safari_cfg(reg);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+}
+
+static int us3_freq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int new_index = 0;
+
+ if (cpufreq_frequency_table_target(policy,
+ &us3_freq_table[policy->cpu].table[0],
+ target_freq,
+ relation,
+ &new_index))
+ return -EINVAL;
+
+ us3_set_cpu_divider_index(policy, new_index);
+
+ return 0;
+}
+
+static int us3_freq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy,
+ &us3_freq_table[policy->cpu].table[0]);
+}
+
+static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ struct cpufreq_frequency_table *table =
+ &us3_freq_table[cpu].table[0];
+
+ table[0].index = 0;
+ table[0].frequency = clock_tick / 1;
+ table[1].index = 1;
+ table[1].frequency = clock_tick / 2;
+ table[2].index = 2;
+ table[2].frequency = clock_tick / 32;
+ table[3].index = 0;
+ table[3].frequency = CPUFREQ_TABLE_END;
+
+ policy->cpuinfo.transition_latency = 0;
+ policy->cur = clock_tick;
+
+ return cpufreq_frequency_table_cpuinfo(policy, table);
+}
+
+static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ if (cpufreq_us3_driver)
+ us3_set_cpu_divider_index(policy, 0);
+
+ return 0;
+}
+
+static int __init us3_freq_init(void)
+{
+ unsigned long manuf, impl, ver;
+ int ret;
+
+ if (tlb_type != cheetah && tlb_type != cheetah_plus)
+ return -ENODEV;
+
+ __asm__("rdpr %%ver, %0" : "=r" (ver));
+ manuf = ((ver >> 48) & 0xffff);
+ impl = ((ver >> 32) & 0xffff);
+
+ if (manuf == CHEETAH_MANUF &&
+ (impl == CHEETAH_IMPL ||
+ impl == CHEETAH_PLUS_IMPL ||
+ impl == JAGUAR_IMPL ||
+ impl == PANTHER_IMPL)) {
+ struct cpufreq_driver *driver;
+
+ ret = -ENOMEM;
+ driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+ if (!driver)
+ goto err_out;
+
+ us3_freq_table = kzalloc(
+ (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
+ GFP_KERNEL);
+ if (!us3_freq_table)
+ goto err_out;
+
+ driver->init = us3_freq_cpu_init;
+ driver->verify = us3_freq_verify;
+ driver->target = us3_freq_target;
+ driver->get = us3_freq_get;
+ driver->exit = us3_freq_cpu_exit;
+ driver->owner = THIS_MODULE;
+ strcpy(driver->name, "UltraSPARC-III");
+
+ cpufreq_us3_driver = driver;
+ ret = cpufreq_register_driver(driver);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ if (driver) {
+ kfree(driver);
+ cpufreq_us3_driver = NULL;
+ }
+ kfree(us3_freq_table);
+ us3_freq_table = NULL;
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static void __exit us3_freq_exit(void)
+{
+ if (cpufreq_us3_driver) {
+ cpufreq_unregister_driver(cpufreq_us3_driver);
+ kfree(cpufreq_us3_driver);
+ cpufreq_us3_driver = NULL;
+ kfree(us3_freq_table);
+ us3_freq_table = NULL;
+ }
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
+MODULE_LICENSE("GPL");
+
+module_init(us3_freq_init);
+module_exit(us3_freq_exit);
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 7e4d77327957..156829f4576d 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -121,7 +121,6 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
target_freq, relation, &index))
return -EINVAL;
- freqs.cpu = policy->cpu;
freqs.old = spear_cpufreq_get(0);
newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
@@ -158,8 +157,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = newfreq / 1000;
freqs.new /= mult;
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
@@ -172,8 +170,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
}
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 3a953d519f46..618e6f417b1c 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -457,7 +457,7 @@ static int centrino_target (struct cpufreq_policy *policy,
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
struct cpufreq_freqs freqs;
int retval = 0;
- unsigned int j, k, first_cpu, tmp;
+ unsigned int j, first_cpu, tmp;
cpumask_var_t covered_cpus;
if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
@@ -481,10 +481,6 @@ static int centrino_target (struct cpufreq_policy *policy,
for_each_cpu(j, policy->cpus) {
int good_cpu;
- /* cpufreq holds the hotplug lock, so we are safe here */
- if (!cpu_online(j))
- continue;
-
/*
* Support for SMP systems.
* Make sure we are running on CPU that wants to change freq
@@ -522,13 +518,8 @@ static int centrino_target (struct cpufreq_policy *policy,
pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
target_freq, freqs.old, freqs.new, msr);
- for_each_cpu(k, policy->cpus) {
- if (!cpu_online(k))
- continue;
- freqs.cpu = k;
- cpufreq_notify_transition(&freqs,
+ cpufreq_notify_transition(policy, &freqs,
CPUFREQ_PRECHANGE);
- }
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
@@ -544,12 +535,7 @@ static int centrino_target (struct cpufreq_policy *policy,
cpumask_set_cpu(j, covered_cpus);
}
- for_each_cpu(k, policy->cpus) {
- if (!cpu_online(k))
- continue;
- freqs.cpu = k;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
if (unlikely(retval)) {
/*
@@ -565,12 +551,8 @@ static int centrino_target (struct cpufreq_policy *policy,
tmp = freqs.new;
freqs.new = freqs.old;
freqs.old = tmp;
- for_each_cpu(j, policy->cpus) {
- if (!cpu_online(j))
- continue;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
retval = 0;
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index e29b59aa68a8..e2e5aa971452 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -263,7 +263,6 @@ static int speedstep_target(struct cpufreq_policy *policy,
{
unsigned int newstate = 0, policy_cpu;
struct cpufreq_freqs freqs;
- int i;
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
target_freq, relation, &newstate))
@@ -272,7 +271,6 @@ static int speedstep_target(struct cpufreq_policy *policy,
policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
freqs.old = speedstep_get(policy_cpu);
freqs.new = speedstep_freqs[newstate].frequency;
- freqs.cpu = policy->cpu;
pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
@@ -280,18 +278,12 @@ static int speedstep_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new)
return 0;
- for_each_cpu(i, policy->cpus) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
true);
- for_each_cpu(i, policy->cpus) {
- freqs.cpu = i;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- }
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 6a457fcaaad5..f5a6b70ee6c0 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -252,14 +252,13 @@ static int speedstep_target(struct cpufreq_policy *policy,
freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
freqs.new = speedstep_freqs[newstate].frequency;
- freqs.cpu = 0; /* speedstep.c is UP only driver */
if (freqs.old == freqs.new)
return 0;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
speedstep_set_state(newstate);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
new file mode 100644
index 000000000000..c74c0e130ef4
--- /dev/null
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+
+/* Frequency table index must be sequential starting at 0 */
+static struct cpufreq_frequency_table freq_table[] = {
+ { 0, 216000 },
+ { 1, 312000 },
+ { 2, 456000 },
+ { 3, 608000 },
+ { 4, 760000 },
+ { 5, 816000 },
+ { 6, 912000 },
+ { 7, 1000000 },
+ { 8, CPUFREQ_TABLE_END },
+};
+
+#define NUM_CPUS 2
+
+static struct clk *cpu_clk;
+static struct clk *pll_x_clk;
+static struct clk *pll_p_clk;
+static struct clk *emc_clk;
+
+static unsigned long target_cpu_speed[NUM_CPUS];
+static DEFINE_MUTEX(tegra_cpu_lock);
+static bool is_suspended;
+
+static int tegra_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int tegra_getspeed(unsigned int cpu)
+{
+ unsigned long rate;
+
+ if (cpu >= NUM_CPUS)
+ return 0;
+
+ rate = clk_get_rate(cpu_clk) / 1000;
+ return rate;
+}
+
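+/*
+ * Changing the CPU frequency means reprogramming PLL_X, so the CPU is
+ * first re-parented to PLL_P, PLL_X is set to the new rate (unless the
+ * target equals the PLL_P rate), and the CPU is then switched back.
+ */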
+static int tegra_cpu_clk_set_rate(unsigned long rate)
+{
+ int ret;
+
+ /*
+ * Take an extra reference to the main pll so it doesn't turn
+ * off when we move the cpu off of it
+ */
+ clk_prepare_enable(pll_x_clk);
+
+ ret = clk_set_parent(cpu_clk, pll_p_clk);
+ if (ret) {
+ pr_err("Failed to switch cpu to clock pll_p\n");
+ goto out;
+ }
+
+ if (rate == clk_get_rate(pll_p_clk))
+ goto out;
+
+ ret = clk_set_rate(pll_x_clk, rate);
+ if (ret) {
+ pr_err("Failed to change pll_x to %lu\n", rate);
+ goto out;
+ }
+
+ ret = clk_set_parent(cpu_clk, pll_x_clk);
+ if (ret) {
+ pr_err("Failed to switch cpu to clock pll_x\n");
+ goto out;
+ }
+
+out:
+ clk_disable_unprepare(pll_x_clk);
+ return ret;
+}
+
+static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
+ unsigned long rate)
+{
+ int ret = 0;
+ struct cpufreq_freqs freqs;
+
+ freqs.old = tegra_getspeed(0);
+ freqs.new = rate;
+
+ if (freqs.old == freqs.new)
+ return ret;
+
+ /*
+ * Vote on the memory bus frequency based on the cpu frequency.
+ * This sets the minimum frequency; the display or AVP may request higher.
+ */
+ if (rate >= 816000)
+ clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
+ else if (rate >= 456000)
+ clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150 MHz */
+ else
+ clk_set_rate(emc_clk, 100000000); /* emc 50 MHz */
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+#ifdef CONFIG_CPU_FREQ_DEBUG
+ printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
+ freqs.old, freqs.new);
+#endif
+
+ ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
+ if (ret) {
+ pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
+ freqs.new);
+ return ret;
+ }
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static unsigned long tegra_cpu_highest_speed(void)
+{
+ unsigned long rate = 0;
+ int i;
+
+ for_each_online_cpu(i)
+ rate = max(rate, target_cpu_speed[i]);
+ return rate;
+}
+
+static int tegra_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int idx;
+ unsigned int freq;
+ int ret = 0;
+
+ mutex_lock(&tegra_cpu_lock);
+
+ if (is_suspended) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &idx);
+
+ freq = freq_table[idx].frequency;
+
+ target_cpu_speed[policy->cpu] = freq;
+
+ ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed());
+
+out:
+ mutex_unlock(&tegra_cpu_lock);
+ return ret;
+}
+
+static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
+ void *dummy)
+{
+ mutex_lock(&tegra_cpu_lock);
+ if (event == PM_SUSPEND_PREPARE) {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+ is_suspended = true;
+ pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
+ freq_table[0].frequency);
+ tegra_update_cpu_speed(policy, freq_table[0].frequency);
+ cpufreq_cpu_put(policy);
+ } else if (event == PM_POST_SUSPEND) {
+ is_suspended = false;
+ }
+ mutex_unlock(&tegra_cpu_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block tegra_cpu_pm_notifier = {
+ .notifier_call = tegra_pm_notify,
+};
+
+static int tegra_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu >= NUM_CPUS)
+ return -EINVAL;
+
+ clk_prepare_enable(emc_clk);
+ clk_prepare_enable(cpu_clk);
+
+ cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+ policy->cur = tegra_getspeed(policy->cpu);
+ target_cpu_speed[policy->cpu] = policy->cur;
+
+ /* FIXME: what's the actual transition time? */
+ policy->cpuinfo.transition_latency = 300 * 1000;
+
+ cpumask_copy(policy->cpus, cpu_possible_mask);
+
+ if (policy->cpu == 0)
+ register_pm_notifier(&tegra_cpu_pm_notifier);
+
+ return 0;
+}
+
+static int tegra_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ clk_disable_unprepare(emc_clk);
+ return 0;
+}
+
+static struct freq_attr *tegra_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver tegra_cpufreq_driver = {
+ .verify = tegra_verify_speed,
+ .target = tegra_target,
+ .get = tegra_getspeed,
+ .init = tegra_cpu_init,
+ .exit = tegra_cpu_exit,
+ .name = "tegra",
+ .attr = tegra_cpufreq_attr,
+};
+
+static int __init tegra_cpufreq_init(void)
+{
+ cpu_clk = clk_get_sys(NULL, "cpu");
+ if (IS_ERR(cpu_clk))
+ return PTR_ERR(cpu_clk);
+
+ pll_x_clk = clk_get_sys(NULL, "pll_x");
+ if (IS_ERR(pll_x_clk))
+ return PTR_ERR(pll_x_clk);
+
+ pll_p_clk = clk_get_sys(NULL, "pll_p_cclk");
+ if (IS_ERR(pll_p_clk))
+ return PTR_ERR(pll_p_clk);
+
+ emc_clk = clk_get_sys("cpu", "emc");
+ if (IS_ERR(emc_clk)) {
+ clk_put(cpu_clk);
+ return PTR_ERR(emc_clk);
+ }
+
+ return cpufreq_register_driver(&tegra_cpufreq_driver);
+}
+
+static void __exit tegra_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&tegra_cpufreq_driver);
+ clk_put(emc_clk);
+ clk_put(cpu_clk);
+}
+
+
+MODULE_AUTHOR("Colin Cross <ccross@android.com>");
+MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
+MODULE_LICENSE("GPL");
+module_init(tegra_cpufreq_init);
+module_exit(tegra_cpufreq_exit);
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
new file mode 100644
index 000000000000..12fc904d7dab
--- /dev/null
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -0,0 +1,92 @@
+/*
+ * clock scaling for the UniCore-II
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+
+#include <mach/hardware.h>
+
+static struct cpufreq_driver ucv2_driver;
+
+/* make sure that only the "userspace" governor is run
+ * -- anything else wouldn't make sense on this platform, anyway.
+ */
+int ucv2_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu)
+ return -EINVAL;
+
+ cpufreq_verify_within_limits(policy,
+ policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+
+ return 0;
+}
+
+static unsigned int ucv2_getspeed(unsigned int cpu)
+{
+ struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+
+ if (cpu)
+ return 0;
+ return clk_get_rate(mclk)/1000;
+}
+
+static int ucv2_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int cur = ucv2_getspeed(0);
+ struct cpufreq_freqs freqs;
+ struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ if (!clk_set_rate(mclk, target_freq * 1000)) {
+ freqs.old = cur;
+ freqs.new = target_freq;
+ }
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+ policy->cur = ucv2_getspeed(0);
+ policy->min = policy->cpuinfo.min_freq = 250000;
+ policy->max = policy->cpuinfo.max_freq = 1000000;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ return 0;
+}
+
+static struct cpufreq_driver ucv2_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = ucv2_verify_speed,
+ .target = ucv2_target,
+ .get = ucv2_getspeed,
+ .init = ucv2_cpu_init,
+ .name = "UniCore-II",
+};
+
+static int __init ucv2_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&ucv2_driver);
+}
+
+arch_initcall(ucv2_cpufreq_init);
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 071e2c3eec4f..c4cc27e5c8a5 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -39,10 +39,4 @@ config CPU_IDLE_CALXEDA
help
Select this to enable cpuidle on Calxeda processors.
-config CPU_IDLE_KIRKWOOD
- bool "CPU Idle Driver for Kirkwood processors"
- depends on ARCH_KIRKWOOD
- help
- Select this to enable cpuidle on Kirkwood processors.
-
endif
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 24c6e7d945ed..0d8bd55e776f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -6,4 +6,4 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
-obj-$(CONFIG_CPU_IDLE_KIRKWOOD) += cpuidle-kirkwood.o
+obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index e1aab38c5a8d..223379169cb0 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -1,7 +1,7 @@
/*
* Copyright 2012 Calxeda, Inc.
*
- * Based on arch/arm/plat-mxc/cpuidle.c:
+ * Based on arch/arm/plat-mxc/cpuidle.c: #v3.7
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2012 Linaro Ltd.
*
@@ -16,6 +16,8 @@
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Maintainer: Rob Herring <rob.herring@calxeda.com>
*/
#include <linux/cpuidle.h>
@@ -35,8 +37,6 @@
extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
extern void *scu_base_addr;
-static struct cpuidle_device __percpu *calxeda_idle_cpuidle_devices;
-
static inline unsigned int get_auxcr(void)
{
unsigned int val;
@@ -85,22 +85,8 @@ static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
return index;
}
-static void calxeda_idle_cpuidle_devices_uninit(void)
-{
- int i;
- struct cpuidle_device *dev;
-
- for_each_possible_cpu(i) {
- dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
- }
-
- free_percpu(calxeda_idle_cpuidle_devices);
-}
-
static struct cpuidle_driver calxeda_idle_driver = {
.name = "calxeda_idle",
- .en_core_tk_irqen = 1,
.states = {
ARM_CPUIDLE_WFI_STATE,
{
@@ -118,44 +104,9 @@ static struct cpuidle_driver calxeda_idle_driver = {
static int __init calxeda_cpuidle_init(void)
{
- int cpu_id;
- int ret;
- struct cpuidle_device *dev;
- struct cpuidle_driver *drv = &calxeda_idle_driver;
-
if (!of_machine_is_compatible("calxeda,highbank"))
return -ENODEV;
- ret = cpuidle_register_driver(drv);
- if (ret)
- return ret;
-
- calxeda_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (calxeda_idle_cpuidle_devices == NULL) {
- ret = -ENOMEM;
- goto unregister_drv;
- }
-
- /* initialize state data for each cpuidle_device */
- for_each_possible_cpu(cpu_id) {
- dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, cpu_id);
- dev->cpu = cpu_id;
- dev->state_count = drv->state_count;
-
- ret = cpuidle_register_device(dev);
- if (ret) {
- pr_err("Failed to register cpu %u, error: %d\n",
- cpu_id, ret);
- goto uninit;
- }
- }
-
- return 0;
-
-uninit:
- calxeda_idle_cpuidle_devices_uninit();
-unregister_drv:
- cpuidle_unregister_driver(drv);
- return ret;
+ return cpuidle_register(&calxeda_idle_driver, NULL);
}
module_init(calxeda_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 670aa1e55cd6..521b0a7fdd89 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -1,6 +1,4 @@
/*
- * arch/arm/mach-kirkwood/cpuidle.c
- *
* CPU idle Marvell Kirkwood SoCs
*
* This file is licensed under the terms of the GNU General Public
@@ -11,6 +9,9 @@
* to implement two idle states -
* #1 wait-for-interrupt
* #2 wait-for-interrupt and DDR self refresh
+ *
+ * Maintainer: Jason Cooper <jason@lakedaemon.net>
+ * Maintainer: Andrew Lunn <andrew@lunn.ch>
*/
#include <linux/kernel.h>
@@ -41,7 +42,6 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev,
static struct cpuidle_driver kirkwood_idle_driver = {
.name = "kirkwood_idle",
.owner = THIS_MODULE,
- .en_core_tk_irqen = 1,
.states[0] = ARM_CPUIDLE_WFI_STATE,
.states[1] = {
.enter = kirkwood_enter_idle,
@@ -53,9 +53,6 @@ static struct cpuidle_driver kirkwood_idle_driver = {
},
.state_count = KIRKWOOD_MAX_STATES,
};
-static struct cpuidle_device *device;
-
-static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
/* Initialize CPU idle by registering the idle states */
static int kirkwood_cpuidle_probe(struct platform_device *pdev)
@@ -66,26 +63,16 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
if (res == NULL)
return -EINVAL;
- ddr_operation_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!ddr_operation_base)
- return -EADDRNOTAVAIL;
+ ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddr_operation_base))
+ return PTR_ERR(ddr_operation_base);
- device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
- device->state_count = KIRKWOOD_MAX_STATES;
-
- cpuidle_register_driver(&kirkwood_idle_driver);
- if (cpuidle_register_device(device)) {
- pr_err("kirkwood_init_cpuidle: Failed registering\n");
- return -EIO;
- }
- return 0;
+ return cpuidle_register(&kirkwood_idle_driver, NULL);
}
int kirkwood_cpuidle_remove(struct platform_device *pdev)
{
- cpuidle_unregister_device(device);
- cpuidle_unregister_driver(&kirkwood_idle_driver);
-
+ cpuidle_unregister(&kirkwood_idle_driver);
return 0;
}
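
The kirkwood conversion also swaps devm_request_and_ioremap() for devm_ioremap_resource(), which reports failure with an ERR_PTR() instead of NULL. A hedged sketch of that probe-time pattern (foo_probe() and its device are hypothetical; the helpers are the regular platform/devres ones):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() validates res itself and returns an ERR_PTR */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... use base ... */
	return 0;
}
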
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index eba69290e074..c3a93fece819 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -8,6 +8,7 @@
* This code is licenced under the GPL.
*/
+#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
@@ -23,6 +24,7 @@
#include "cpuidle.h"
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
+DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
@@ -42,24 +44,6 @@ void disable_cpuidle(void)
static int __cpuidle_register_device(struct cpuidle_device *dev);
-static inline int cpuidle_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- struct cpuidle_state *target_state = &drv->states[index];
- return target_state->enter(dev, drv, index);
-}
-
-static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
-}
-
-typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index);
-
-static cpuidle_enter_t cpuidle_enter_ops;
-
/**
* cpuidle_play_dead - cpu off-lining
*
@@ -89,11 +73,27 @@ int cpuidle_play_dead(void)
* @next_state: index into drv->states of the state to enter
*/
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
- int next_state)
+ int index)
{
int entered_state;
- entered_state = cpuidle_enter_ops(dev, drv, next_state);
+ struct cpuidle_state *target_state = &drv->states[index];
+ ktime_t time_start, time_end;
+ s64 diff;
+
+ time_start = ktime_get();
+
+ entered_state = target_state->enter(dev, drv, index);
+
+ time_end = ktime_get();
+
+ local_irq_enable();
+
+ diff = ktime_to_us(ktime_sub(time_end, time_start));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int) diff;
if (entered_state >= 0) {
/* Update cpuidle counters */
@@ -146,12 +146,20 @@ int cpuidle_idle_call(void)
trace_cpu_idle_rcuidle(next_state, dev->cpu);
+ if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+ &dev->cpu);
+
if (cpuidle_state_is_coupled(dev, drv, next_state))
entered_state = cpuidle_enter_state_coupled(dev, drv,
next_state);
else
entered_state = cpuidle_enter_state(dev, drv, next_state);
+ if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+ &dev->cpu);
+
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* give the governor an opportunity to reflect on the outcome */
@@ -222,37 +230,6 @@ void cpuidle_resume(void)
mutex_unlock(&cpuidle_lock);
}
-/**
- * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
- * @dev: pointer to a valid cpuidle_device object
- * @drv: pointer to a valid cpuidle_driver object
- * @index: index of the target cpuidle state.
- */
-int cpuidle_wrap_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index,
- int (*enter)(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index))
-{
- ktime_t time_start, time_end;
- s64 diff;
-
- time_start = ktime_get();
-
- index = enter(dev, drv, index);
-
- time_end = ktime_get();
-
- local_irq_enable();
-
- diff = ktime_to_us(ktime_sub(time_end, time_start));
- if (diff > INT_MAX)
- diff = INT_MAX;
-
- dev->last_residency = (int) diff;
-
- return index;
-}
-
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
@@ -324,9 +301,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
return ret;
}
- cpuidle_enter_ops = drv->en_core_tk_irqen ?
- cpuidle_enter_tk : cpuidle_enter;
-
poll_idle_init(drv);
ret = cpuidle_add_device_sysfs(dev);
@@ -480,6 +454,77 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
+/**
+ * cpuidle_unregister: unregister a driver and the devices. This function
+ * can be used only if the driver has been previously registered through
+ * the cpuidle_register function.
+ *
+ * @drv: a valid pointer to a struct cpuidle_driver
+ */
+void cpuidle_unregister(struct cpuidle_driver *drv)
+{
+ int cpu;
+ struct cpuidle_device *device;
+
+ for_each_possible_cpu(cpu) {
+ device = &per_cpu(cpuidle_dev, cpu);
+ cpuidle_unregister_device(device);
+ }
+
+ cpuidle_unregister_driver(drv);
+}
+EXPORT_SYMBOL_GPL(cpuidle_unregister);
+
+/**
+ * cpuidle_register: registers the driver and the cpu devices with the
+ * coupled_cpus passed as parameter. This function is used for all common
+ * initialization pattern there are in the arch specific drivers. The
+ * devices is globally defined in this file.
+ *
+ * @drv : a valid pointer to a struct cpuidle_driver
+ * @coupled_cpus: a cpumask for the coupled states
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+int cpuidle_register(struct cpuidle_driver *drv,
+ const struct cpumask *const coupled_cpus)
+{
+ int ret, cpu;
+ struct cpuidle_device *device;
+
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("failed to register cpuidle driver\n");
+ return ret;
+ }
+
+ for_each_possible_cpu(cpu) {
+ device = &per_cpu(cpuidle_dev, cpu);
+ device->cpu = cpu;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+ /*
+ * On ARM multiplatform kernels, the coupled idle states could
+ * be enabled in the kernel even if the cpuidle driver does not
+ * use them. Note, coupled_cpus is a struct copy.
+ */
+ if (coupled_cpus)
+ device->coupled_cpus = *coupled_cpus;
+#endif
+ ret = cpuidle_register_device(device);
+ if (!ret)
+ continue;
+
+ pr_err("Failed to register cpuidle device for cpu%d\n", cpu);
+
+ cpuidle_unregister(drv);
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpuidle_register);
+
#ifdef CONFIG_SMP
static void smp_callback(void *v)
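
The new cpuidle_register()/cpuidle_unregister() helpers are what let the calxeda and kirkwood drivers above drop their per-CPU device bookkeeping. A minimal sketch of the resulting init/exit pattern (the foo_* names are hypothetical; ARM_CPUIDLE_WFI_STATE is the ARM WFI state used by the drivers in this series):

static struct cpuidle_driver foo_idle_driver = {
	.name		= "foo_idle",
	.owner		= THIS_MODULE,
	.states		= { ARM_CPUIDLE_WFI_STATE },
	.state_count	= 1,
};

static int __init foo_cpuidle_init(void)
{
	/* registers the driver and one cpuidle device per possible cpu */
	return cpuidle_register(&foo_idle_driver, NULL);
}
module_init(foo_cpuidle_init);

static void __exit foo_cpuidle_exit(void)
{
	cpuidle_unregister(&foo_idle_driver);
}
module_exit(foo_cpuidle_exit);
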
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 422c7b69ba7c..8dfaaae94444 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -11,6 +11,8 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/clockchips.h>
#include "cpuidle.h"
@@ -19,9 +21,28 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
-static void __cpuidle_driver_init(struct cpuidle_driver *drv)
+static void cpuidle_setup_broadcast_timer(void *arg)
{
+ int cpu = smp_processor_id();
+ clockevents_notify((long)(arg), &cpu);
+}
+
+static void __cpuidle_driver_init(struct cpuidle_driver *drv, int cpu)
+{
+ int i;
+
drv->refcnt = 0;
+
+ for (i = drv->state_count - 1; i >= 0 ; i--) {
+
+ if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
+ continue;
+
+ drv->bctimer = 1;
+ on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
+ (void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
+ break;
+ }
}
static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
@@ -35,7 +56,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
if (__cpuidle_get_cpu_driver(cpu))
return -EBUSY;
- __cpuidle_driver_init(drv);
+ __cpuidle_driver_init(drv, cpu);
__cpuidle_set_cpu_driver(drv, cpu);
@@ -49,6 +70,12 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu)
if (!WARN_ON(drv->refcnt > 0))
__cpuidle_set_cpu_driver(NULL, cpu);
+
+ if (drv->bctimer) {
+ drv->bctimer = 0;
+ on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
+ (void *)CLOCK_EVT_NOTIFY_BROADCAST_OFF, 1);
+ }
}
#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
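
With en_core_tk_irqen gone, the core always performs the residency timekeeping, and states that stop the local timer now announce it with CPUIDLE_FLAG_TIMER_STOP so the driver.c and cpuidle_idle_call() changes above handle the broadcast-timer switch. A hedged sketch of a state declared with the new flag (the names, latencies and enter callback are made up for illustration):

static struct cpuidle_driver foo_idle_driver = {
	.name = "foo_idle",
	.owner = THIS_MODULE,
	.states[0] = {
		.enter			= foo_enter_deep,	/* hypothetical */
		.exit_latency		= 300,			/* us, made up */
		.target_residency	= 1000,			/* us, made up */
		.flags			= CPUIDLE_FLAG_TIME_VALID |
					  CPUIDLE_FLAG_TIMER_STOP,
		.name			= "DEEP",
		.desc			= "local timer stops in this state",
	},
	.state_count = 1,
};
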
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4d338740f2cb..a8117e614009 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -350,11 +350,11 @@ static void intel_didl_outputs(struct drm_device *dev)
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
- if (acpi_is_video_device(acpi_dev))
+ if (acpi_is_video_device(handle))
acpi_video_bus = acpi_dev;
else {
list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
- if (acpi_is_video_device(acpi_cdev)) {
+ if (acpi_is_video_device(acpi_cdev->handle)) {
acpi_video_bus = acpi_cdev;
break;
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1a38dd7dfe4e..0e8fab1913df 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -71,7 +71,6 @@
static struct cpuidle_driver intel_idle_driver = {
.name = "intel_idle",
.owner = THIS_MODULE,
- .en_core_tk_irqen = 1,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;
@@ -339,7 +338,6 @@ static int intel_idle(struct cpuidle_device *dev,
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
- stop_critical_timings();
if (!need_resched()) {
__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -348,8 +346,6 @@ static int intel_idle(struct cpuidle_device *dev,
__mwait(eax, ecx);
}
- start_critical_timings();
-
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 14d4dced1def..d544e3aaf761 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4121,7 +4121,7 @@ static int sony_pic_enable(struct acpi_device *device,
resource->res3.data.irq.sharable = ACPI_SHARED;
resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
-
+ resource->res4.length = sizeof(struct acpi_resource);
}
/* setup Type 2/3 resources */
else {
@@ -4140,6 +4140,7 @@ static int sony_pic_enable(struct acpi_device *device,
resource->res2.data.irq.sharable = ACPI_SHARED;
resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
+ resource->res3.length = sizeof(struct acpi_resource);
}
/* Attempt to set the resource */
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 918d5f044865..cf88f9b62445 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -379,10 +379,6 @@ static int __init isapnp_read_tag(unsigned char *type, unsigned short *size)
*type = (tag >> 3) & 0x0f;
*size = tag & 0x07;
}
-#if 0
- printk(KERN_DEBUG "tag = 0x%x, type = 0x%x, size = %i\n", tag, *type,
- *size);
-#endif
if (*type == 0xff && *size == 0xffff) /* probably invalid data */
return -1;
return 0;
@@ -813,13 +809,6 @@ static int __init isapnp_build_device_list(void)
if (!card)
continue;
-#if 0
- dev_info(&card->dev,
- "vendor: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- header[0], header[1], header[2], header[3], header[4],
- header[5], header[6], header[7], header[8]);
- dev_info(&card->dev, "checksum = %#x\n", checksum);
-#endif
INIT_LIST_HEAD(&card->devices);
card->serial =
(header[7] << 24) | (header[6] << 16) | (header[5] << 8) |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index b8f4ea7b27fc..9847ab163829 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -634,6 +634,7 @@ int pnpacpi_build_resource_template(struct pnp_dev *dev,
}
 /* resource now points to the end resource */
resource->type = ACPI_RESOURCE_TYPE_END_TAG;
+ resource->length = sizeof(struct acpi_resource);
return 0;
}
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 63ddb0173456..1c03ee822e50 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -185,10 +185,9 @@ static int pnp_devices_proc_show(struct seq_file *m, void *v)
if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
break;
- seq_printf(m, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
+ seq_printf(m, "%02x\t%08x\t%3phC\t%04x\n",
node->handle, node->eisa_id,
- node->type_code[0], node->type_code[1],
- node->type_code[2], node->flags);
+ node->type_code, node->flags);
if (nodenum <= thisnodenum) {
printk(KERN_ERR
"%s Node number 0x%x is out of sequence following node 0x%x. Aborting.\n",