-rw-r--r--  arch/arm/Kconfig                      |   1
-rw-r--r--  arch/arm/common/gic.c                 | 188
-rw-r--r--  arch/arm/include/asm/hardware/gic.h   |   8
-rw-r--r--  arch/arm/include/asm/mach/map.h       |   1
-rw-r--r--  arch/arm/include/asm/pgtable.h        |   3
-rw-r--r--  arch/arm/mm/mmu.c                     |   8
-rw-r--r--  arch/arm/vfp/vfpmodule.c              |  31
-rw-r--r--  include/linux/cpu_pm.h                | 109
-rw-r--r--  kernel/Makefile                       |   1
-rw-r--r--  kernel/cpu_pm.c                       | 233
-rw-r--r--  kernel/power/Kconfig                  |   4
11 files changed, 578 insertions, 9 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5a3a78633177..9c4caa633d04 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -29,6 +29,7 @@ config ARM
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select GENERIC_IRQ_SHOW
+ select CPU_PM if (SUSPEND || CPU_IDLE)
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 3227ca952a12..734db99eaee7 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/smp.h>
+#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
@@ -276,6 +277,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
if (gic_irqs > 1020)
gic_irqs = 1020;
+ gic->gic_irqs = gic_irqs;
+
/*
* Set all global interrupts to be level triggered, active low.
*/
@@ -343,6 +346,189 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(1, base + GIC_CPU_CTRL);
}
+#ifdef CONFIG_CPU_PM
+/*
+ * Saves the GIC distributor registers during suspend or idle. Must be called
+ * with interrupts disabled but before powering down the GIC. After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+static void gic_dist_save(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ void __iomem *dist_base;
+ int i;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data[gic_nr].dist_base;
+
+ if (!dist_base)
+ return;
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ gic_data[gic_nr].saved_spi_conf[i] =
+ readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ gic_data[gic_nr].saved_spi_target[i] =
+ readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ gic_data[gic_nr].saved_spi_enable[i] =
+ readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle. Must be called before enabling interrupts. If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+static void gic_dist_restore(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ unsigned int i;
+ void __iomem *dist_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data[gic_nr].dist_base;
+
+ if (!dist_base)
+ return;
+
+ writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+ dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(0xa0a0a0a0,
+ dist_base + GIC_DIST_PRI + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+ dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+ dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+}
+
+static void gic_cpu_save(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data[gic_nr].dist_base;
+ cpu_base = gic_data[gic_nr].cpu_base;
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+}
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data[gic_nr].dist_base;
+ cpu_base = gic_data[gic_nr].cpu_base;
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+ writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
+
+ writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
+ writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+}
+
+static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++) {
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ gic_cpu_save(i);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ gic_cpu_restore(i);
+ break;
+ case CPU_CLUSTER_PM_ENTER:
+ gic_dist_save(i);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ gic_dist_restore(i);
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+ .notifier_call = gic_notifier,
+};
+
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+ gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_enable);
+
+ gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_conf);
+
+ cpu_pm_register_notifier(&gic_notifier_block);
+}
+#else
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+}
+#endif
+
void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
void __iomem *dist_base, void __iomem *cpu_base)
{
@@ -358,8 +544,10 @@ void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
if (gic_nr == 0)
gic_cpu_base_addr = cpu_base;
+ gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic, irq_start);
gic_cpu_init(gic);
+ gic_pm_init(gic);
}
void __cpuinit gic_secondary_init(unsigned int gic_nr)
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 435d3f86c708..c5627057b1c7 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -46,6 +46,14 @@ struct gic_chip_data {
unsigned int irq_offset;
void __iomem *dist_base;
void __iomem *cpu_base;
+#ifdef CONFIG_CPU_PM
+ u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+ u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+ u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+ u32 __percpu *saved_ppi_enable;
+ u32 __percpu *saved_ppi_conf;
+#endif
+ unsigned int gic_irqs;
};
#endif
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index d2fedb5aeb1f..b36f3654bf54 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -29,6 +29,7 @@ struct map_desc {
#define MT_MEMORY_NONCACHED 11
#define MT_MEMORY_DTCM 12
#define MT_MEMORY_ITCM 13
+#define MT_MEMORY_SO 14
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5750704e0271..f1956b27ae5a 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -232,6 +232,9 @@ extern pgprot_t pgprot_kernel;
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+#define pgprot_stronglyordered(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
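For illustration only (not part of the patch): a driver that needs a strongly-ordered userspace mapping could use the new helper much like the existing pgprot_noncached()/pgprot_writecombine() helpers are used today. The example_mmap() name below is hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Hypothetical sketch: give userspace a strongly-ordered mapping from a
 * driver's mmap hook, analogous to the pgprot_noncached() pattern.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_stronglyordered(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}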
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 594d677b92c8..ea9c9f3e48bf 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -273,6 +273,14 @@ static struct mem_type mem_types[] = {
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_SO] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_MT_UNCACHED,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
+ PMD_SECT_UNCACHED | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
};
const struct mem_type *get_mem_type(unsigned int type)
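For context, a sketch of how a platform could consume the new MT_MEMORY_SO type from its static mapping table; the addresses and example_* names are placeholders and are not taken from this patch.

#include <asm/mach/map.h>
#include <asm/sizes.h>

/*
 * Hypothetical sketch: map a region of memory strongly-ordered, e.g. one
 * that is touched while caches are being turned off on the way into a low
 * power state.  Addresses below are examples only.
 */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xfe000000,			/* placeholder VA */
		.pfn		= __phys_to_pfn(0x80000000),	/* placeholder PA */
		.length		= SZ_1M,
		.type		= MT_MEMORY_SO,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}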
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 79bcb4316930..0cbd5a0a9332 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
@@ -68,7 +69,7 @@ static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
/*
* Force a reload of the VFP context from the thread structure. We do
* this by ensuring that access to the VFP hardware is disabled, and
- * clear last_VFP_context. Must be called from non-preemptible context.
+ * clear vfp_current_hw_state. Must be called from non-preemptible context.
*/
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
@@ -436,9 +437,7 @@ static void vfp_enable(void *unused)
set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
-#ifdef CONFIG_PM
-#include <linux/syscore_ops.h>
-
+#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
struct thread_info *ti = current_thread_info();
@@ -468,19 +467,33 @@ static void vfp_pm_resume(void)
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
-static struct syscore_ops vfp_pm_syscore_ops = {
- .suspend = vfp_pm_suspend,
- .resume = vfp_pm_resume,
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ vfp_pm_suspend();
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ vfp_pm_resume();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+ .notifier_call = vfp_cpu_pm_notifier,
};
static void vfp_pm_init(void)
{
- register_syscore_ops(&vfp_pm_syscore_ops);
+ cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}
#else
static inline void vfp_pm_init(void) { }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_CPU_PM */
/*
* Ensure that the VFP state stored in 'thread->vfpstate' is up to date
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
new file mode 100644
index 000000000000..455b233dd3b1
--- /dev/null
+++ b/include/linux/cpu_pm.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPU_PM_H
+#define _LINUX_CPU_PM_H
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+/*
+ * When a CPU goes to a low power state that turns off power to the CPU's
+ * power domain, the contents of some blocks (floating point coprocessors,
+ * interrupt controllers, caches, timers) in the same power domain can
+ * be lost. The cpu_pm notifiers provide a method for platform idle, suspend,
+ * and hotplug implementations to notify the drivers for these blocks that
+ * they may be reset.
+ *
+ * All cpu_pm notifications must be called with interrupts disabled.
+ *
+ * The notifications are split into two classes: CPU notifications and CPU
+ * cluster notifications.
+ *
+ * CPU notifications apply to a single CPU and must be called on the affected
+ * CPU. They are used to save per-cpu context for affected blocks.
+ *
+ * CPU cluster notifications apply to all CPUs in a single power domain. They
+ * are used to save any global context for affected blocks, and must be called
+ * after all the CPUs in the power domain have been notified of the low power
+ * state.
+ */
+
+/*
+ * Event codes passed as unsigned long val to notifier calls
+ */
+enum cpu_pm_event {
+ /* A single cpu is entering a low power state */
+ CPU_PM_ENTER,
+
+ /* A single cpu failed to enter a low power state */
+ CPU_PM_ENTER_FAILED,
+
+ /* A single cpu is exiting a low power state */
+ CPU_PM_EXIT,
+
+ /* A cpu power domain is entering a low power state */
+ CPU_CLUSTER_PM_ENTER,
+
+ /* A cpu power domain failed to enter a low power state */
+ CPU_CLUSTER_PM_ENTER_FAILED,
+
+ /* A cpu power domain is exiting a low power state */
+ CPU_CLUSTER_PM_EXIT,
+};
+
+#ifdef CONFIG_CPU_PM
+int cpu_pm_register_notifier(struct notifier_block *nb);
+int cpu_pm_unregister_notifier(struct notifier_block *nb);
+int cpu_pm_enter(void);
+int cpu_pm_exit(void);
+int cpu_cluster_pm_enter(void);
+int cpu_cluster_pm_exit(void);
+
+#else
+
+static inline int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int cpu_pm_enter(void)
+{
+ return 0;
+}
+
+static inline int cpu_pm_exit(void)
+{
+ return 0;
+}
+
+static inline int cpu_cluster_pm_enter(void)
+{
+ return 0;
+}
+
+static inline int cpu_cluster_pm_exit(void)
+{
+ return 0;
+}
+#endif
+#endif
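To summarise the intended use of this header, here is a hedged sketch of a driver-side notifier following the same pattern as the GIC and VFP changes above; the example_* names are hypothetical.

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical sketch: save/restore a block's context around low power. */
static int example_cpu_pm_notifier(struct notifier_block *self,
				   unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		/* save per-cpu context before power to this CPU may be cut */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore per-cpu context; also runs if entry was aborted */
		break;
	case CPU_CLUSTER_PM_ENTER:
		/* save context shared by all CPUs in the power domain */
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		/* restore the shared, domain-wide context */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notifier,
};

static int __init example_driver_init(void)
{
	return cpu_pm_register_notifier(&example_cpu_pm_nb);
}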
diff --git a/kernel/Makefile b/kernel/Makefile
index eca595e2fd52..988cb3da7031 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_PERF_EVENTS) += events/
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
new file mode 100644
index 000000000000..249152e15308
--- /dev/null
+++ b/kernel/cpu_pm.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu_pm.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+
+static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+{
+ int ret;
+
+ ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+ nr_to_call, nr_calls);
+
+ return notifier_to_errno(ret);
+}
+
+/**
+ * cpu_pm_register_notifier - register a driver with cpu_pm
+ * @nb: notifier block to register
+ *
+ * Add a driver to a list of drivers that are notified about
+ * CPU and CPU cluster low power entry and exit.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_register.
+ */
+int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+/**
+ * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
+ * @nb: notifier block to be unregistered
+ *
+ * Remove a driver from the CPU PM notifier list.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_unregister.
+ */
+int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
+/**
+ * cpu_pm_enter - CPU low power entry notifier
+ *
+ * Notifies listeners that a single CPU is entering a low power state that may
+ * cause some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected CPU with interrupts disabled.  The platform
+ * is responsible for ensuring that cpu_pm_enter is not called twice on the
+ * same CPU before cpu_pm_exit is called.  Notified drivers can include the
+ * VFP co-processor, the interrupt controller and its PM extensions, and
+ * local CPU timers; their context save/restore must not be interrupted,
+ * which is why this must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+ * Inform the (nr_calls - 1) listeners that were notified earlier
+ * that CPU PM entry has failed, so they can undo any preparation.
+ */
+ cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_enter);
+
+/**
+ * cpu_pm_exit - CPU low power exit notifier
+ *
+ * Notifies listeners that a single CPU is exiting a low power state that may
+ * have caused some blocks in the same power domain as the cpu to reset.
+ *
+ * Notified drivers can include the VFP co-processor, the interrupt controller
+ * and its PM extensions, and local CPU timers; their context save/restore
+ * must not be interrupted, so this must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+/**
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
+ *
+ * Notifies listeners that all cpus in a power domain are entering a low power
+ * state that may cause some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_pm_enter has been called on all cpus in the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers; their context
+ * save/restore must not be interrupted, so this must be called with
+ * interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+ * Inform the (nr_calls - 1) listeners that were notified earlier
+ * that CPU cluster PM entry has failed, so they can undo any preparation.
+ */
+ cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+
+/**
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
+ *
+ * Notifies listeners that all cpus in a power domain are exiting from a
+ * low power state that may have caused some blocks in the same power domain
+ * to reset.
+ *
+ * Must be called after cpu_cluster_pm_enter has been called for the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers; their context
+ * save/restore must not be interrupted, so this must be called with
+ * interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
+
+#ifdef CONFIG_PM
+static int cpu_pm_suspend(void)
+{
+ int ret;
+
+ ret = cpu_pm_enter();
+ if (ret)
+ return ret;
+
+ ret = cpu_cluster_pm_enter();
+ return ret;
+}
+
+static void cpu_pm_resume(void)
+{
+ cpu_cluster_pm_exit();
+ cpu_pm_exit();
+}
+
+static struct syscore_ops cpu_pm_syscore_ops = {
+ .suspend = cpu_pm_suspend,
+ .resume = cpu_pm_resume,
+};
+
+static int cpu_pm_init(void)
+{
+ register_syscore_ops(&cpu_pm_syscore_ops);
+ return 0;
+}
+core_initcall(cpu_pm_init);
+#endif
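On the caller side, a platform's idle or suspend path is expected to wrap its low-power entry as sketched below, mirroring the ordering used by cpu_pm_suspend()/cpu_pm_resume() above. This is a minimal sketch with placeholder names (example_*), assuming interrupts are already disabled as the notifiers require.

#include <linux/cpu_pm.h>
#include <linux/types.h>
#include <asm/proc-fns.h>

static void example_platform_power_down(void)
{
	cpu_do_idle();			/* placeholder for the real power-down */
}

/*
 * Hypothetical platform idle sketch: per-CPU notification first, cluster
 * notification only on the last CPU down, and the mirror-image calls on
 * the way back out.
 */
static int example_enter_lowpower(bool last_cpu_in_cluster)
{
	int ret;

	ret = cpu_pm_enter();			/* per-CPU context may be lost */
	if (ret)
		return ret;

	if (last_cpu_in_cluster) {
		ret = cpu_cluster_pm_enter();	/* domain context may be lost */
		if (ret)
			goto out_cpu_pm;
	}

	example_platform_power_down();

	if (last_cpu_in_cluster)
		cpu_cluster_pm_exit();		/* restore domain-wide state first */

out_cpu_pm:
	cpu_pm_exit();				/* then restore per-CPU state */
	return ret;
}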
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 3744c594b19b..80a85971cf64 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -235,3 +235,7 @@ config PM_GENERIC_DOMAINS
config PM_GENERIC_DOMAINS_RUNTIME
def_bool y
depends on PM_RUNTIME && PM_GENERIC_DOMAINS
+
+config CPU_PM
+ bool
+ depends on SUSPEND || CPU_IDLE