From 194fe6f28e2819d3f50fbed24c3b72f21501dbfa Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 8 Jan 2015 06:29:04 +0000 Subject: drivers: cpuidle: Don't initialize big.LITTLE driver if MCPM is unavailable If big.LITTLE driver is initialized even when MCPM is unavailable, we get the below warning the first time cpu tries to enter deeper C-states. ------------[ cut here ]------------ WARNING: CPU: 4 PID: 0 at kernel/arch/arm/common/mcpm_entry.c:130 mcpm_cpu_suspend+0x6d/0x74() Modules linked in: CPU: 4 PID: 0 Comm: swapper/4 Not tainted 3.19.0-rc3-00007-gaf5a2cb1ad5c-dirty #11 Hardware name: ARM-Versatile Express [] (unwind_backtrace) from [] (show_stack+0x11/0x14) [] (show_stack) from [] (dump_stack+0x6d/0x78) [] (dump_stack) from [] (warn_slowpath_common+0x69/0x90) [] (warn_slowpath_common) from [] (warn_slowpath_null+0x17/0x1c) [] (warn_slowpath_null) from [] (mcpm_cpu_suspend+0x6d/0x74) [] (mcpm_cpu_suspend) from [] (bl_powerdown_finisher+0x21/0x24) [] (bl_powerdown_finisher) from [] (cpu_suspend_abort+0x1/0x14) [] (cpu_suspend_abort) from [<00000000>] ( (null)) ---[ end trace d098e3fd00000008 ]--- This patch fixes the issue by checking for the availability of MCPM before initializing the big.LITTLE cpuidle driver Signed-off-by: Sudeep Holla Acked-by: Lorenzo Pieralisi Cc: Lorenzo Pieralisi Cc: Daniel Lezcano Cc: "Rafael J. Wysocki" Signed-off-by: Daniel Lezcano --- drivers/cpuidle/cpuidle-big_little.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/cpuidle') diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index e3e225fe6b45..40c34faffe59 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c @@ -182,6 +182,10 @@ static int __init bl_idle_init(void) */ if (!of_match_node(compatible_machine_match, root)) return -ENODEV; + + if (!mcpm_is_available()) + return -EUNATCH; + /* * For now the differentiation between little and big cores * is based on the part number. A7 cores are considered little -- cgit v1.2.3 From af3cfdbf56b91785650f54e7c9a899d814b4b9fb Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Mon, 26 Jan 2015 18:33:44 +0000 Subject: arm64: kernel: remove ARM64_CPU_SUSPEND config option ARM64_CPU_SUSPEND config option was introduced to make code providing context save/restore selectable only on platforms requiring power management capabilities. Currently ARM64_CPU_SUSPEND depends on the PM_SLEEP config option which in turn is set by the SUSPEND config option. The introduction of CPU_IDLE for arm64 requires that code configured by ARM64_CPU_SUSPEND (context save/restore) should be compiled in in order to enable the CPU idle driver to rely on CPU operations carrying out context save/restore. The ARM64_CPUIDLE config option (ARM64 generic idle driver) is therefore forced to select ARM64_CPU_SUSPEND, even if there may be (ie PM_SLEEP) failed dependencies, which is not a clean way of handling the kernel configuration option. For these reasons, this patch removes the ARM64_CPU_SUSPEND config option and makes the context save/restore dependent on CPU_PM, which is selected whenever either SUSPEND or CPU_IDLE are configured, cleaning up dependencies in the process. This way, code previously configured through ARM64_CPU_SUSPEND is compiled in whenever a power management subsystem requires it to be present in the kernel (SUSPEND || CPU_IDLE), which is the behaviour expected on ARM64 kernels. 
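As an illustration of how the reshuffled options are consumed, this is roughly the shape of an ->enter callback in the generic arm64 idle driver this series touches: the driver itself only needs CPU_IDLE, while the context save/restore it ultimately calls into is compiled in through CPU_PM. A hedged sketch (hypothetical name, patterned on the arm_enter_idle_state() helper in drivers/cpuidle/cpuidle-arm64.c; the cpu_pm_enter()/cpu_pm_exit() notifier calls of the real driver are omitted for brevity):

#include <linux/cpuidle.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>

/* Sketch of a cpuidle ->enter hook on arm64, not code from this patch. */
static int example_enter_idle_state(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv, int idx)
{
	int ret;

	if (!idx) {
		cpu_do_idle();		/* state 0: plain WFI */
		return idx;
	}

	/*
	 * cpu_suspend() is the wrapper this patch moves into
	 * arch/arm64/kernel/cpuidle.c; it dispatches to the cpu_ops
	 * back-end (e.g. PSCI) and returns -EOPNOTSUPP if no suspend
	 * hook was registered.
	 */
	ret = cpu_suspend(idx);

	return ret ? -1 : idx;
}
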
The cpu_suspend and cpu_init_idle CPU operations are added only if CPU_IDLE is selected, since they are CPU_IDLE specific methods and should be grouped and defined accordingly. PSCI CPU operations are updated to reflect the introduced changes. Signed-off-by: Lorenzo Pieralisi Cc: Arnd Bergmann Cc: Will Deacon Cc: Krzysztof Kozlowski Cc: Daniel Lezcano Cc: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 3 --- arch/arm64/include/asm/cpu_ops.h | 8 ++++---- arch/arm64/include/asm/cpuidle.h | 6 ++++++ arch/arm64/include/asm/suspend.h | 2 -- arch/arm64/kernel/Makefile | 2 +- arch/arm64/kernel/asm-offsets.c | 2 +- arch/arm64/kernel/cpuidle.c | 20 ++++++++++++++++++++ arch/arm64/kernel/hw_breakpoint.c | 2 +- arch/arm64/kernel/psci.c | 2 -- arch/arm64/kernel/suspend.c | 21 --------------------- arch/arm64/mm/proc.S | 2 +- drivers/cpuidle/Kconfig.arm64 | 1 - drivers/cpuidle/cpuidle-arm64.c | 1 - 13 files changed, 34 insertions(+), 38 deletions(-) (limited to 'drivers/cpuidle') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 21a59bf37145..d3f7e4941231 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -642,9 +642,6 @@ source "kernel/power/Kconfig" config ARCH_SUSPEND_POSSIBLE def_bool y -config ARM64_CPU_SUSPEND - def_bool PM_SLEEP - endmenu menu "CPU Power Management" diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index 6f8e2ef9094a..da301ee7395c 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -28,8 +28,6 @@ struct device_node; * enable-method property. * @cpu_init: Reads any data necessary for a specific enable-method from the * devicetree, for a given cpu node and proposed logical id. - * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from - * devicetree, for a given cpu node and proposed logical id. * @cpu_prepare: Early one-time preparation step for a cpu. If there is a * mechanism for doing so, tests whether it is possible to boot * the given CPU. @@ -42,6 +40,8 @@ struct device_node; * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the * cpu being killed. * @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu. + * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from + * devicetree, for a given cpu node and proposed logical id. * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing * to wrong parameters or error conditions. Called from the * CPU being suspended. Must be called with IRQs disabled. 
@@ -49,7 +49,6 @@ struct device_node; struct cpu_operations { const char *name; int (*cpu_init)(struct device_node *, unsigned int); - int (*cpu_init_idle)(struct device_node *, unsigned int); int (*cpu_prepare)(unsigned int); int (*cpu_boot)(unsigned int); void (*cpu_postboot)(void); @@ -58,7 +57,8 @@ struct cpu_operations { void (*cpu_die)(unsigned int cpu); int (*cpu_kill)(unsigned int cpu); #endif -#ifdef CONFIG_ARM64_CPU_SUSPEND +#ifdef CONFIG_CPU_IDLE + int (*cpu_init_idle)(struct device_node *, unsigned int); int (*cpu_suspend)(unsigned long); #endif }; diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h index b52a9932e2b1..0710654631e7 100644 --- a/arch/arm64/include/asm/cpuidle.h +++ b/arch/arm64/include/asm/cpuidle.h @@ -3,11 +3,17 @@ #ifdef CONFIG_CPU_IDLE extern int cpu_init_idle(unsigned int cpu); +extern int cpu_suspend(unsigned long arg); #else static inline int cpu_init_idle(unsigned int cpu) { return -EOPNOTSUPP; } + +static inline int cpu_suspend(unsigned long arg) +{ + return -EOPNOTSUPP; +} #endif #endif diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index 456d67c1f0fa..003802f58963 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -23,6 +23,4 @@ struct sleep_save_sp { extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); extern void cpu_resume(void); -extern int cpu_suspend(unsigned long); - #endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 3b3b1cd3e7f0..bef04afd6031 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -27,7 +27,7 @@ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o -arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o +arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o arm64-obj-$(CONFIG_KGDB) += kgdb.o diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 9a9fce090d58..a2ae19403abb 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -152,7 +152,7 @@ int main(void) DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); #endif -#ifdef CONFIG_ARM64_CPU_SUSPEND +#ifdef CONFIG_CPU_PM DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 19d17f51db37..5c0896647fd1 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -29,3 +29,23 @@ int cpu_init_idle(unsigned int cpu) of_node_put(cpu_node); return ret; } + +/** + * cpu_suspend() - function to enter a low-power idle state + * @arg: argument to pass to CPU suspend operations + * + * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU + * operations back-end error code otherwise. + */ +int cpu_suspend(unsigned long arg) +{ + int cpu = smp_processor_id(); + + /* + * If cpu_ops have not been registered or suspend + * has not been initialized, cpu_suspend call fails early. 
+ */ + if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) + return -EOPNOTSUPP; + return cpu_ops[cpu]->cpu_suspend(arg); +} diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index df1cf15377b4..98bbe06e469c 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -894,7 +894,7 @@ static struct notifier_block hw_breakpoint_reset_nb = { .notifier_call = hw_breakpoint_reset_notify, }; -#ifdef CONFIG_ARM64_CPU_SUSPEND +#ifdef CONFIG_CPU_PM extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)); #else static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index f1dbca7d5c96..3425f311c49e 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -540,8 +540,6 @@ const struct cpu_operations cpu_psci_ops = { .name = "psci", #ifdef CONFIG_CPU_IDLE .cpu_init_idle = cpu_psci_cpu_init_idle, -#endif -#ifdef CONFIG_ARM64_CPU_SUSPEND .cpu_suspend = cpu_psci_cpu_suspend, #endif #ifdef CONFIG_SMP diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 2d6b6065fe7f..d7daf45ae7a2 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -1,7 +1,6 @@ #include #include #include -#include #include #include #include @@ -51,26 +50,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) hw_breakpoint_restore = hw_bp_restore; } -/** - * cpu_suspend() - function to enter a low-power state - * @arg: argument to pass to CPU suspend operations - * - * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU - * operations back-end error code otherwise. - */ -int cpu_suspend(unsigned long arg) -{ - int cpu = smp_processor_id(); - - /* - * If cpu_ops have not been registered or suspend - * has not been initialized, cpu_suspend call fails early. - */ - if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) - return -EOPNOTSUPP; - return cpu_ops[cpu]->cpu_suspend(arg); -} - /* * __cpu_suspend * diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index b98fc8ce6a16..28eebfb6af76 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -102,7 +102,7 @@ ENTRY(cpu_do_idle) ret ENDPROC(cpu_do_idle) -#ifdef CONFIG_ARM64_CPU_SUSPEND +#ifdef CONFIG_CPU_PM /** * cpu_do_suspend - save CPU registers context * diff --git a/drivers/cpuidle/Kconfig.arm64 b/drivers/cpuidle/Kconfig.arm64 index d0a08ed1b2ee..6effb3656735 100644 --- a/drivers/cpuidle/Kconfig.arm64 +++ b/drivers/cpuidle/Kconfig.arm64 @@ -4,7 +4,6 @@ config ARM64_CPUIDLE bool "Generic ARM64 CPU idle Driver" - select ARM64_CPU_SUSPEND select DT_IDLE_STATES help Select this to enable generic cpuidle driver for ARM64. diff --git a/drivers/cpuidle/cpuidle-arm64.c b/drivers/cpuidle/cpuidle-arm64.c index 80704b931ba4..39a2c62716c3 100644 --- a/drivers/cpuidle/cpuidle-arm64.c +++ b/drivers/cpuidle/cpuidle-arm64.c @@ -19,7 +19,6 @@ #include #include -#include #include "dt_idle_states.h" -- cgit v1.2.3 From 712eddf70225ab5ae65e946e22d2dfe6b93e8dd1 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sat, 24 Jan 2015 14:05:50 +0900 Subject: cpuidle: exynos: add coupled cpuidle support for exynos4210 The following patch adds coupled cpuidle support for Exynos4210 to an existing cpuidle-exynos driver. As a result it enables AFTR mode to be used by default on Exynos4210 without the need to hot unplug CPU1 first. 
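For background on the hot-unplug requirement: AFTR ("ARM Off Top Running") can only be entered with every core except CPU0 powered down, so the existing non-coupled idle path demotes to the safe WFI state whenever another CPU is online, roughly as in the sketch below (a paraphrase of the driver's pre-existing logic in drivers/cpuidle/cpuidle-exynos.c, not part of this patch):

/* Pre-existing, non-coupled low-power path (paraphrased). */
static int exynos_enter_lowpower(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	int new_index = index;

	/* AFTR can only be entered when cores other than CPU0 are offline */
	if (num_online_cpus() > 1)
		new_index = drv->safe_state_index;

	if (new_index == 0)
		return arm_cpuidle_simple_enter(dev, drv, new_index);

	exynos_enter_aftr();
	return new_index;
}

The coupled driver added below removes that demotion by letting CPU1 power itself down as part of the same idle transaction instead of requiring it to be unplugged up front.
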
The patch is heavily based on earlier cpuidle-exynos4210 driver from Daniel Lezcano: http://www.spinics.net/lists/linux-samsung-soc/msg28134.html Changes from Daniel's code include: - porting code to current kernels - fixing it to work on my setup (by using S5P_INFORM register instead of S5P_VA_SYSRAM one on Revison 1.1 and retrying poking CPU1 out of the BOOT ROM if necessary) - fixing rare lockup caused by waiting for CPU1 to get stuck in the BOOT ROM (CPU hotplug code in arch/arm/mach-exynos/platsmp.c doesn't require this and works fine) - moving Exynos specific code to arch/arm/mach-exynos/pm.c - using cpu_boot_reg_base() helper instead of BOOT_VECTOR macro - using exynos_cpu_*() helpers instead of accessing registers directly - using arch_send_wakeup_ipi_mask() instead of dsb_sev() (this matches CPU hotplug code in arch/arm/mach-exynos/platsmp.c) - integrating separate exynos4210-cpuidle driver into existing exynos-cpuidle one Cc: Colin Cross Cc: Kukjin Kim Cc: Krzysztof Kozlowski Cc: Tomasz Figa Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Daniel Lezcano Acked-by: Kyungmin Park Signed-off-by: Kukjin Kim --- arch/arm/mach-exynos/common.h | 4 + arch/arm/mach-exynos/exynos.c | 4 + arch/arm/mach-exynos/platsmp.c | 2 +- arch/arm/mach-exynos/pm.c | 122 +++++++++++++++++++++++++++ drivers/cpuidle/Kconfig.arm | 1 + drivers/cpuidle/cpuidle-exynos.c | 76 +++++++++++++++-- include/linux/platform_data/cpuidle-exynos.h | 20 +++++ 7 files changed, 223 insertions(+), 6 deletions(-) create mode 100644 include/linux/platform_data/cpuidle-exynos.h (limited to 'drivers/cpuidle') diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index 865f878063cc..f70eca7ee705 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h @@ -13,6 +13,7 @@ #define __ARCH_ARM_MACH_EXYNOS_COMMON_H #include +#include #define EXYNOS3250_SOC_ID 0xE3472000 #define EXYNOS3_SOC_MASK 0xFFFFF000 @@ -150,8 +151,11 @@ extern void exynos_pm_central_suspend(void); extern int exynos_pm_central_resume(void); extern void exynos_enter_aftr(void); +extern struct cpuidle_exynos_data cpuidle_coupled_exynos_data; + extern void s5p_init_cpu(void __iomem *cpuid_addr); extern unsigned int samsung_rev(void); +extern void __iomem *cpu_boot_reg_base(void); static inline void pmu_raw_writel(u32 val, u32 offset) { diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index c13d0837fa8c..509f2e5a2d70 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c @@ -246,6 +246,10 @@ static void __init exynos_dt_machine_init(void) if (!IS_ENABLED(CONFIG_SMP)) exynos_sysram_init(); +#ifdef CONFIG_ARM_EXYNOS_CPUIDLE + if (of_machine_is_compatible("samsung,exynos4210")) + exynos_cpuidle.dev.platform_data = &cpuidle_coupled_exynos_data; +#endif if (of_machine_is_compatible("samsung,exynos4210") || of_machine_is_compatible("samsung,exynos4212") || (of_machine_is_compatible("samsung,exynos4412") && diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 7a1ebfeeeeb8..3f32c47a6d74 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -194,7 +194,7 @@ int exynos_cluster_power_state(int cluster) S5P_CORE_LOCAL_PWR_EN); } -static inline void __iomem *cpu_boot_reg_base(void) +void __iomem *cpu_boot_reg_base(void) { if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1) return pmu_base_addr + S5P_INFORM5; diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c index 159eb4c9779e..1f9cbd434c60 100644 
--- a/arch/arm/mach-exynos/pm.c +++ b/arch/arm/mach-exynos/pm.c @@ -179,3 +179,125 @@ void exynos_enter_aftr(void) cpu_pm_exit(); } + +static atomic_t cpu1_wakeup = ATOMIC_INIT(0); + +static int exynos_cpu0_enter_aftr(void) +{ + int ret = -1; + + /* + * If the other cpu is powered on, we have to power it off, because + * the AFTR state won't work otherwise + */ + if (cpu_online(1)) { + /* + * We reach a sync point with the coupled idle state, we know + * the other cpu will power down itself or will abort the + * sequence, let's wait for one of these to happen + */ + while (exynos_cpu_power_state(1)) { + /* + * The other cpu may skip idle and boot back + * up again + */ + if (atomic_read(&cpu1_wakeup)) + goto abort; + + /* + * The other cpu may bounce through idle and + * boot back up again, getting stuck in the + * boot rom code + */ + if (__raw_readl(cpu_boot_reg_base()) == 0) + goto abort; + + cpu_relax(); + } + } + + exynos_enter_aftr(); + ret = 0; + +abort: + if (cpu_online(1)) { + /* + * Set the boot vector to something non-zero + */ + __raw_writel(virt_to_phys(exynos_cpu_resume), + cpu_boot_reg_base()); + dsb(); + + /* + * Turn on cpu1 and wait for it to be on + */ + exynos_cpu_power_up(1); + while (exynos_cpu_power_state(1) != S5P_CORE_LOCAL_PWR_EN) + cpu_relax(); + + while (!atomic_read(&cpu1_wakeup)) { + /* + * Poke cpu1 out of the boot rom + */ + __raw_writel(virt_to_phys(exynos_cpu_resume), + cpu_boot_reg_base()); + + arch_send_wakeup_ipi_mask(cpumask_of(1)); + } + } + + return ret; +} + +static int exynos_wfi_finisher(unsigned long flags) +{ + cpu_do_idle(); + + return -1; +} + +static int exynos_cpu1_powerdown(void) +{ + int ret = -1; + + /* + * Idle sequence for cpu1 + */ + if (cpu_pm_enter()) + goto cpu1_aborted; + + /* + * Turn off cpu 1 + */ + exynos_cpu_power_down(1); + + ret = cpu_suspend(0, exynos_wfi_finisher); + + cpu_pm_exit(); + +cpu1_aborted: + dsb(); + /* + * Notify cpu 0 that cpu 1 is awake + */ + atomic_set(&cpu1_wakeup, 1); + + return ret; +} + +static void exynos_pre_enter_aftr(void) +{ + __raw_writel(virt_to_phys(exynos_cpu_resume), cpu_boot_reg_base()); +} + +static void exynos_post_enter_aftr(void) +{ + atomic_set(&cpu1_wakeup, 0); +} + +struct cpuidle_exynos_data cpuidle_coupled_exynos_data = { + .cpu0_enter_aftr = exynos_cpu0_enter_aftr, + .cpu1_powerdown = exynos_cpu1_powerdown, + .pre_enter_aftr = exynos_pre_enter_aftr, + .post_enter_aftr = exynos_post_enter_aftr, +}; diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 8c16ab20fb15..8e07c9419153 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -55,6 +55,7 @@ config ARM_AT91_CPUIDLE config ARM_EXYNOS_CPUIDLE bool "Cpu Idle Driver for the Exynos processors" depends on ARCH_EXYNOS + select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP help Select this to enable cpuidle for Exynos processors diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c index 4003a3160865..26f5f29fdb03 100644 --- a/drivers/cpuidle/cpuidle-exynos.c +++ b/drivers/cpuidle/cpuidle-exynos.c @@ -1,8 +1,11 @@ -/* linux/arch/arm/mach-exynos/cpuidle.c - * - * Copyright (c) 2011 Samsung Electronics Co., Ltd. +/* + * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * + * Coupled cpuidle support based on the work of: + * Colin Cross + * Daniel Lezcano + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
@@ -13,13 +16,49 @@ #include #include #include +#include +#include #include #include #include +static atomic_t exynos_idle_barrier; + +static struct cpuidle_exynos_data *exynos_cpuidle_pdata; static void (*exynos_enter_aftr)(void); +static int exynos_enter_coupled_lowpower(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + int ret; + + exynos_cpuidle_pdata->pre_enter_aftr(); + + /* + * Waiting all cpus to reach this point at the same moment + */ + cpuidle_coupled_parallel_barrier(dev, &exynos_idle_barrier); + + /* + * Both cpus will reach this point at the same time + */ + ret = dev->cpu ? exynos_cpuidle_pdata->cpu1_powerdown() + : exynos_cpuidle_pdata->cpu0_enter_aftr(); + if (ret) + index = ret; + + /* + * Waiting all cpus to finish the power sequence before going further + */ + cpuidle_coupled_parallel_barrier(dev, &exynos_idle_barrier); + + exynos_cpuidle_pdata->post_enter_aftr(); + + return index; +} + static int exynos_enter_lowpower(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -55,13 +94,40 @@ static struct cpuidle_driver exynos_idle_driver = { .safe_state_index = 0, }; +static struct cpuidle_driver exynos_coupled_idle_driver = { + .name = "exynos_coupled_idle", + .owner = THIS_MODULE, + .states = { + [0] = ARM_CPUIDLE_WFI_STATE, + [1] = { + .enter = exynos_enter_coupled_lowpower, + .exit_latency = 5000, + .target_residency = 10000, + .flags = CPUIDLE_FLAG_COUPLED | + CPUIDLE_FLAG_TIMER_STOP, + .name = "C1", + .desc = "ARM power down", + }, + }, + .state_count = 2, + .safe_state_index = 0, +}; + static int exynos_cpuidle_probe(struct platform_device *pdev) { int ret; - exynos_enter_aftr = (void *)(pdev->dev.platform_data); + if (of_machine_is_compatible("samsung,exynos4210")) { + exynos_cpuidle_pdata = pdev->dev.platform_data; + + ret = cpuidle_register(&exynos_coupled_idle_driver, + cpu_possible_mask); + } else { + exynos_enter_aftr = (void *)(pdev->dev.platform_data); + + ret = cpuidle_register(&exynos_idle_driver, NULL); + } - ret = cpuidle_register(&exynos_idle_driver, NULL); if (ret) { dev_err(&pdev->dev, "failed to register cpuidle driver\n"); return ret; diff --git a/include/linux/platform_data/cpuidle-exynos.h b/include/linux/platform_data/cpuidle-exynos.h new file mode 100644 index 000000000000..bfa40e4c5d5f --- /dev/null +++ b/include/linux/platform_data/cpuidle-exynos.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __CPUIDLE_EXYNOS_H +#define __CPUIDLE_EXYNOS_H + +struct cpuidle_exynos_data { + int (*cpu0_enter_aftr)(void); + int (*cpu1_powerdown)(void); + void (*pre_enter_aftr)(void); + void (*post_enter_aftr)(void); +}; + +#endif -- cgit v1.2.3 From 3810631332465d967ba5e27ea2c7dff2c9afac6c Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 12 Feb 2015 23:33:15 +0100 Subject: PM / sleep: Re-implement suspend-to-idle handling In preparation for adding support for quiescing timers in the final stage of suspend-to-idle transitions, rework the freeze_enter() function making the system wait on a wakeup event, the freeze_wake() function terminating the suspend-to-idle loop and the mechanism by which deep idle states are entered during suspend-to-idle. First of all, introduce a simple state machine for suspend-to-idle and make the code in question use it. 
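Sketched as a comment, the state machine in question works as follows (a summary of the patch below, using its names):

/*
 * Suspend-to-idle state machine; all transitions are made under
 * suspend_freeze_lock:
 *
 *   FREEZE_STATE_NONE  --- freeze_enter() starts ---> FREEZE_STATE_ENTER
 *   FREEZE_STATE_ENTER --- freeze_wake() -----------> FREEZE_STATE_WAKE
 *   FREEZE_STATE_ENTER/WAKE --- freeze_enter() returns ---> FREEZE_STATE_NONE
 */
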
Second, prevent freeze_enter() from losing wakeup events due to race conditions and ensure that the number of online CPUs won't change while it is being executed. In addition to that, make it force all of the CPUs re-enter the idle loop in case they are in idle states already (so they can enter deeper idle states if possible). Next, drop cpuidle_use_deepest_state() and replace use_deepest_state checks in cpuidle_select() and cpuidle_reflect() with a single suspend-to-idle state check in cpuidle_idle_call(). Finally, introduce cpuidle_enter_freeze() that will simply find the deepest idle state available to the given CPU and enter it using cpuidle_enter(). Signed-off-by: Rafael J. Wysocki Acked-by: Peter Zijlstra (Intel) --- drivers/cpuidle/cpuidle.c | 49 +++++++++++++++++++++++++---------------------- include/linux/cpuidle.h | 4 ++-- include/linux/suspend.h | 16 ++++++++++++++++ kernel/power/suspend.c | 43 ++++++++++++++++++++++++++++++++++------- kernel/sched/idle.c | 16 ++++++++++++++++ 5 files changed, 96 insertions(+), 32 deletions(-) (limited to 'drivers/cpuidle') diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 125150dc6e81..23a8d6cc8d30 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "cpuidle.h" @@ -32,7 +33,6 @@ LIST_HEAD(cpuidle_detected_devices); static int enabled_devices; static int off __read_mostly; static int initialized __read_mostly; -static bool use_deepest_state __read_mostly; int cpuidle_disabled(void) { @@ -66,24 +66,9 @@ int cpuidle_play_dead(void) } /** - * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode. - * @enable: Whether enable or disable the feature. - * - * If the "deepest idle" mode is enabled, cpuidle will ignore the governor and - * always use the state with the greatest exit latency (out of the states that - * are not disabled). - * - * This function can only be called after cpuidle_pause() to avoid races. - */ -void cpuidle_use_deepest_state(bool enable) -{ - use_deepest_state = enable; -} - -/** - * cpuidle_find_deepest_state - Find the state of the greatest exit latency. - * @drv: cpuidle driver for a given CPU. - * @dev: cpuidle device for a given CPU. + * cpuidle_find_deepest_state - Find deepest state meeting specific conditions. + * @drv: cpuidle driver for the given CPU. + * @dev: cpuidle device for the given CPU. */ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev) @@ -104,6 +89,27 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, return ret; } +/** + * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. + * + * Find the deepest state available and enter it. + */ +void cpuidle_enter_freeze(void) +{ + struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + int index; + + index = cpuidle_find_deepest_state(drv, dev); + if (index >= 0) + cpuidle_enter(drv, dev, index); + else + arch_cpu_idle(); + + /* Interrupts are enabled again here. 
*/ + local_irq_disable(); +} + /** * cpuidle_enter_state - enter the state and update stats * @dev: cpuidle device for this cpu @@ -166,9 +172,6 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) if (!drv || !dev || !dev->enabled) return -EBUSY; - if (unlikely(use_deepest_state)) - return cpuidle_find_deepest_state(drv, dev); - return cpuidle_curr_governor->select(drv, dev); } @@ -200,7 +203,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, */ void cpuidle_reflect(struct cpuidle_device *dev, int index) { - if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state)) + if (cpuidle_curr_governor->reflect) cpuidle_curr_governor->reflect(dev, index); } diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index ab70f3bc44ad..f63aabf4ee90 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -141,7 +141,7 @@ extern void cpuidle_resume(void); extern int cpuidle_enable_device(struct cpuidle_device *dev); extern void cpuidle_disable_device(struct cpuidle_device *dev); extern int cpuidle_play_dead(void); -extern void cpuidle_use_deepest_state(bool enable); +extern void cpuidle_enter_freeze(void); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); #else @@ -174,7 +174,7 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev) {return -ENODEV; } static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } static inline int cpuidle_play_dead(void) {return -ENODEV; } -static inline void cpuidle_use_deepest_state(bool enable) {} +static inline void cpuidle_enter_freeze(void) { } static inline struct cpuidle_driver *cpuidle_get_cpu_driver( struct cpuidle_device *dev) {return NULL; } #endif diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3388c1b6f7d8..5efe743ce1e8 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -201,6 +201,21 @@ struct platform_freeze_ops { */ extern void suspend_set_ops(const struct platform_suspend_ops *ops); extern int suspend_valid_only_mem(suspend_state_t state); + +/* Suspend-to-idle state machnine. */ +enum freeze_state { + FREEZE_STATE_NONE, /* Not suspended/suspending. */ + FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */ + FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. 
*/ +}; + +extern enum freeze_state __read_mostly suspend_freeze_state; + +static inline bool idle_should_freeze(void) +{ + return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER); +} + extern void freeze_set_ops(const struct platform_freeze_ops *ops); extern void freeze_wake(void); @@ -228,6 +243,7 @@ extern int pm_suspend(suspend_state_t state); static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } +static inline bool idle_should_freeze(void) { return false; } static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} static inline void freeze_wake(void) {} #endif /* !CONFIG_SUSPEND */ diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index c347e3ce3a55..b7d6b3a721b1 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -37,7 +37,9 @@ const char *pm_states[PM_SUSPEND_MAX]; static const struct platform_suspend_ops *suspend_ops; static const struct platform_freeze_ops *freeze_ops; static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); -static bool suspend_freeze_wake; + +enum freeze_state __read_mostly suspend_freeze_state; +static DEFINE_SPINLOCK(suspend_freeze_lock); void freeze_set_ops(const struct platform_freeze_ops *ops) { @@ -48,22 +50,49 @@ void freeze_set_ops(const struct platform_freeze_ops *ops) static void freeze_begin(void) { - suspend_freeze_wake = false; + suspend_freeze_state = FREEZE_STATE_NONE; } static void freeze_enter(void) { - cpuidle_use_deepest_state(true); + spin_lock_irq(&suspend_freeze_lock); + if (pm_wakeup_pending()) + goto out; + + suspend_freeze_state = FREEZE_STATE_ENTER; + spin_unlock_irq(&suspend_freeze_lock); + + get_online_cpus(); cpuidle_resume(); - wait_event(suspend_freeze_wait_head, suspend_freeze_wake); + + /* Push all the CPUs into the idle loop. */ + wake_up_all_idle_cpus(); + pr_debug("PM: suspend-to-idle\n"); + /* Make the current CPU wait so it can enter the idle loop too. */ + wait_event(suspend_freeze_wait_head, + suspend_freeze_state == FREEZE_STATE_WAKE); + pr_debug("PM: resume from suspend-to-idle\n"); + cpuidle_pause(); - cpuidle_use_deepest_state(false); + put_online_cpus(); + + spin_lock_irq(&suspend_freeze_lock); + + out: + suspend_freeze_state = FREEZE_STATE_NONE; + spin_unlock_irq(&suspend_freeze_lock); } void freeze_wake(void) { - suspend_freeze_wake = true; - wake_up(&suspend_freeze_wait_head); + unsigned long flags; + + spin_lock_irqsave(&suspend_freeze_lock, flags); + if (suspend_freeze_state > FREEZE_STATE_NONE) { + suspend_freeze_state = FREEZE_STATE_WAKE; + wake_up(&suspend_freeze_wait_head); + } + spin_unlock_irqrestore(&suspend_freeze_lock, flags); } EXPORT_SYMBOL_GPL(freeze_wake); diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index aaf1c1d5cf5d..94b2d7b88a27 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -104,6 +105,21 @@ static void cpuidle_idle_call(void) */ rcu_idle_enter(); + /* + * Suspend-to-idle ("freeze") is a system state in which all user space + * has been frozen, all I/O devices have been suspended and the only + * activity happens here and in iterrupts (if any). In that case bypass + * the cpuidle governor and go stratight for the deepest idle state + * available. Possibly also suspend the local tick and the entire + * timekeeping to prevent timer interrupts from kicking us out of idle + * until a proper wakeup interrupt happens. 
+ */ + if (idle_should_freeze()) { + cpuidle_enter_freeze(); + local_irq_enable(); + goto exit_idle; + } + /* * Ask the cpuidle framework to choose a convenient idle state. * Fall back to the default arch idle method on errors. -- cgit v1.2.3 From 124cf9117c5f93cc5b324530b7e105b09c729d5d Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 13 Feb 2015 23:50:43 +0100 Subject: PM / sleep: Make it possible to quiesce timers during suspend-to-idle The efficiency of suspend-to-idle depends on being able to keep CPUs in the deepest available idle states for as much time as possible. Ideally, they should only be brought out of idle by system wakeup interrupts. However, timer interrupts occurring periodically prevent that from happening and it is not practical to chase all of the "misbehaving" timers in a whack-a-mole fashion. A much more effective approach is to suspend the local ticks for all CPUs and the entire timekeeping along the lines of what is done during full suspend, which also helps to keep suspend-to-idle and full suspend reasonably similar. The idea is to suspend the local tick on each CPU executing cpuidle_enter_freeze() and to make the last of them suspend the entire timekeeping. That should prevent timer interrupts from triggering until an IO interrupt wakes up one of the CPUs. It needs to be done with interrupts disabled on all of the CPUs, though, because otherwise the suspended clocksource might be accessed by an interrupt handler which might lead to fatal consequences. Unfortunately, the existing ->enter callbacks provided by cpuidle drivers generally cannot be used for implementing that, because some of them re-enable interrupts temporarily and some idle entry methods cause interrupts to be re-enabled automatically on exit. Also some of these callbacks manipulate local clock event devices of the CPUs which really shouldn't be done after suspending their ticks. To overcome that difficulty, introduce a new cpuidle state callback, ->enter_freeze, that will be guaranteed (1) to keep interrupts disabled all the time (and return with interrupts disabled) and (2) not to touch the CPU timer devices. Modify cpuidle_enter_freeze() to look for the deepest available idle state with ->enter_freeze present and to make the CPU execute that callback with suspended tick (and the last of the online CPUs to execute it with suspended timekeeping). Suggested-by: Thomas Gleixner Signed-off-by: Rafael J. Wysocki Acked-by: Peter Zijlstra (Intel) --- drivers/cpuidle/cpuidle.c | 49 +++++++++++++++++++++++++++++++++++++++++----- include/linux/cpuidle.h | 9 +++++++++ include/linux/tick.h | 6 +++++- kernel/time/tick-common.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++ kernel/time/timekeeping.c | 4 ++-- kernel/time/timekeeping.h | 2 ++ 6 files changed, 112 insertions(+), 8 deletions(-) (limited to 'drivers/cpuidle') diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 23a8d6cc8d30..4d534582514e 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include "cpuidle.h" @@ -69,18 +70,20 @@ int cpuidle_play_dead(void) * cpuidle_find_deepest_state - Find deepest state meeting specific conditions. * @drv: cpuidle driver for the given CPU. * @dev: cpuidle device for the given CPU. + * @freeze: Whether or not the state should be suitable for suspend-to-idle. 
*/ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, - struct cpuidle_device *dev) + struct cpuidle_device *dev, bool freeze) { unsigned int latency_req = 0; - int i, ret = CPUIDLE_DRIVER_STATE_START - 1; + int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1; for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { struct cpuidle_state *s = &drv->states[i]; struct cpuidle_state_usage *su = &dev->states_usage[i]; - if (s->disabled || su->disable || s->exit_latency <= latency_req) + if (s->disabled || su->disable || s->exit_latency <= latency_req + || (freeze && !s->enter_freeze)) continue; latency_req = s->exit_latency; @@ -89,10 +92,31 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, return ret; } +static void enter_freeze_proper(struct cpuidle_driver *drv, + struct cpuidle_device *dev, int index) +{ + tick_freeze(); + /* + * The state used here cannot be a "coupled" one, because the "coupled" + * cpuidle mechanism enables interrupts and doing that with timekeeping + * suspended is generally unsafe. + */ + drv->states[index].enter_freeze(dev, drv, index); + WARN_ON(!irqs_disabled()); + /* + * timekeeping_resume() that will be called by tick_unfreeze() for the + * last CPU executing it calls functions containing RCU read-side + * critical sections, so tell RCU about that. + */ + RCU_NONIDLE(tick_unfreeze()); +} + /** * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. * - * Find the deepest state available and enter it. + * If there are states with the ->enter_freeze callback, find the deepest of + * them and enter it with frozen tick. Otherwise, find the deepest state + * available and enter it normally. */ void cpuidle_enter_freeze(void) { @@ -100,7 +124,22 @@ void cpuidle_enter_freeze(void) struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); int index; - index = cpuidle_find_deepest_state(drv, dev); + /* + * Find the deepest state with ->enter_freeze present, which guarantees + * that interrupts won't be enabled when it exits and allows the tick to + * be frozen safely. + */ + index = cpuidle_find_deepest_state(drv, dev, true); + if (index >= 0) { + enter_freeze_proper(drv, dev, index); + return; + } + + /* + * It is not safe to freeze the tick, find the deepest state available + * at all and try to enter it normally. + */ + index = cpuidle_find_deepest_state(drv, dev, false); if (index >= 0) cpuidle_enter(drv, dev, index); else diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index f63aabf4ee90..f551a9299ac9 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -50,6 +50,15 @@ struct cpuidle_state { int index); int (*enter_dead) (struct cpuidle_device *dev, int index); + + /* + * CPUs execute ->enter_freeze with the local tick or entire timekeeping + * suspended, so it must not re-enable interrupts at any point (even + * temporarily) or attempt to change states of clock event devices. 
+ */ + void (*enter_freeze) (struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index); }; /* Idle State Flags */ diff --git a/include/linux/tick.h b/include/linux/tick.h index eda850ca757a..9c085dc12ae9 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -79,6 +79,9 @@ extern void __init tick_init(void); extern int tick_is_oneshot_available(void); extern struct tick_device *tick_get_device(int cpu); +extern void tick_freeze(void); +extern void tick_unfreeze(void); + # ifdef CONFIG_HIGH_RES_TIMERS extern int tick_init_highres(void); extern int tick_program_event(ktime_t expires, int force); @@ -119,6 +122,8 @@ static inline int tick_oneshot_mode_active(void) { return 0; } #else /* CONFIG_GENERIC_CLOCKEVENTS */ static inline void tick_init(void) { } +static inline void tick_freeze(void) { } +static inline void tick_unfreeze(void) { } static inline void tick_cancel_sched_timer(int cpu) { } static inline void tick_clock_notify(void) { } static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } @@ -226,5 +231,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk) __tick_nohz_task_switch(tsk); } - #endif diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 7efeedf53ebd..f7c515595b42 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -394,6 +394,56 @@ void tick_resume(void) } } +static DEFINE_RAW_SPINLOCK(tick_freeze_lock); +static unsigned int tick_freeze_depth; + +/** + * tick_freeze - Suspend the local tick and (possibly) timekeeping. + * + * Check if this is the last online CPU executing the function and if so, + * suspend timekeeping. Otherwise suspend the local tick. + * + * Call with interrupts disabled. Must be balanced with %tick_unfreeze(). + * Interrupts must not be enabled before the subsequent %tick_unfreeze(). + */ +void tick_freeze(void) +{ + raw_spin_lock(&tick_freeze_lock); + + tick_freeze_depth++; + if (tick_freeze_depth == num_online_cpus()) { + timekeeping_suspend(); + } else { + tick_suspend(); + tick_suspend_broadcast(); + } + + raw_spin_unlock(&tick_freeze_lock); +} + +/** + * tick_unfreeze - Resume the local tick and (possibly) timekeeping. + * + * Check if this is the first CPU executing the function and if so, resume + * timekeeping. Otherwise resume the local tick. + * + * Call with interrupts disabled. Must be balanced with %tick_freeze(). + * Interrupts must not be enabled after the preceding %tick_freeze(). + */ +void tick_unfreeze(void) +{ + raw_spin_lock(&tick_freeze_lock); + + if (tick_freeze_depth == num_online_cpus()) + timekeeping_resume(); + else + tick_resume(); + + tick_freeze_depth--; + + raw_spin_unlock(&tick_freeze_lock); +} + /** * tick_init - initialize the tick control */ diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index aef5dc722abf..91db94136c10 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1197,7 +1197,7 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta) * xtime/wall_to_monotonic/jiffies/etc are * still managed by arch specific suspend/resume code. 
*/ -static void timekeeping_resume(void) +void timekeeping_resume(void) { struct timekeeper *tk = &tk_core.timekeeper; struct clocksource *clock = tk->tkr.clock; @@ -1278,7 +1278,7 @@ static void timekeeping_resume(void) hrtimers_resume(); } -static int timekeeping_suspend(void) +int timekeeping_suspend(void) { struct timekeeper *tk = &tk_core.timekeeper; unsigned long flags; diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h index adc1fc98bde3..1d91416055d5 100644 --- a/kernel/time/timekeeping.h +++ b/kernel/time/timekeeping.h @@ -16,5 +16,7 @@ extern int timekeeping_inject_offset(struct timespec *ts); extern s32 timekeeping_get_tai_offset(void); extern void timekeeping_set_tai_offset(s32 tai_offset); extern void timekeeping_clocktai(struct timespec *ts); +extern int timekeeping_suspend(void); +extern void timekeeping_resume(void); #endif -- cgit v1.2.3 From 92c83ff5b42b109c94fdeee53cb31f674f776d75 Mon Sep 17 00:00:00 2001 From: Preeti U Murthy Date: Wed, 18 Feb 2015 06:34:17 +0100 Subject: cpuidle: powernv: Read target_residency value of idle states from DT if available The device tree now exposes the residency values for different idle states. Read these values instead of calculating residency from the latency values. The values exposed in the DT are validated for optimal power efficiency. However to maintain compatibility with the older firmware code which does not expose residency values, use default values as a fallback mechanism. While at it, use better APIs to parse the powermgmt device tree node. Signed-off-by: Preeti U Murthy Acked-by: Stewart Smith Acked-by: Michael Ellerman Signed-off-by: Rafael J. Wysocki --- drivers/cpuidle/cpuidle-powernv.c | 69 +++++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 29 deletions(-) (limited to 'drivers/cpuidle') diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index aedec0957934..30d42298a69f 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -159,69 +160,79 @@ static int powernv_add_idle_states(void) int nr_idle_states = 1; /* Snooze */ int dt_idle_states; const __be32 *idle_state_flags; - const __be32 *idle_state_latency; - u32 len_flags, flags, latency_ns; - int i; + u32 len_flags, flags; + u32 *latency_ns, *residency_ns; + int i, rc; /* Currently we have snooze statically defined */ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); if (!power_mgt) { pr_warn("opal: PowerMgmt Node not found\n"); - return nr_idle_states; + goto out; } - idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags); + idle_state_flags = of_get_property(power_mgt, + "ibm,cpu-idle-state-flags", &len_flags); if (!idle_state_flags) { - pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n"); - return nr_idle_states; + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n"); + goto out; } - idle_state_latency = of_get_property(power_mgt, - "ibm,cpu-idle-state-latencies-ns", NULL); - if (!idle_state_latency) { - pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n"); - return nr_idle_states; + dt_idle_states = len_flags / sizeof(u32); + + latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL); + rc = of_property_read_u32_array(power_mgt, + "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states); + if (rc) { + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n"); + goto 
out_free_latency; } - dt_idle_states = len_flags / sizeof(u32); + residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL); + rc = of_property_read_u32_array(power_mgt, + "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states); for (i = 0; i < dt_idle_states; i++) { flags = be32_to_cpu(idle_state_flags[i]); - /* Cpuidle accepts exit_latency in us and we estimate - * target residency to be 10x exit_latency + /* + * Cpuidle accepts exit_latency and target_residency in us. + * Use default target_residency values if f/w does not expose it. */ - latency_ns = be32_to_cpu(idle_state_latency[i]); if (flags & OPAL_PM_NAP_ENABLED) { /* Add NAP state */ strcpy(powernv_states[nr_idle_states].name, "Nap"); strcpy(powernv_states[nr_idle_states].desc, "Nap"); powernv_states[nr_idle_states].flags = 0; - powernv_states[nr_idle_states].exit_latency = - ((unsigned int)latency_ns) / 1000; - powernv_states[nr_idle_states].target_residency = - ((unsigned int)latency_ns / 100); + powernv_states[nr_idle_states].target_residency = 100; powernv_states[nr_idle_states].enter = &nap_loop; - nr_idle_states++; - } - - if (flags & OPAL_PM_SLEEP_ENABLED || + } else if (flags & OPAL_PM_SLEEP_ENABLED || flags & OPAL_PM_SLEEP_ENABLED_ER1) { /* Add FASTSLEEP state */ strcpy(powernv_states[nr_idle_states].name, "FastSleep"); strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP; - powernv_states[nr_idle_states].exit_latency = - ((unsigned int)latency_ns) / 1000; - powernv_states[nr_idle_states].target_residency = - ((unsigned int)latency_ns / 100); + powernv_states[nr_idle_states].target_residency = 300000; powernv_states[nr_idle_states].enter = &fastsleep_loop; - nr_idle_states++; } + + powernv_states[nr_idle_states].exit_latency = + ((unsigned int)latency_ns[i]) / 1000; + + if (!rc) { + powernv_states[nr_idle_states].target_residency = + ((unsigned int)residency_ns[i]) / 1000; + } + + nr_idle_states++; } + kfree(residency_ns); +out_free_latency: + kfree(latency_ns); +out: return nr_idle_states; } -- cgit v1.2.3 From 70734a786acfd1998e47d40df19cba5c29469bdf Mon Sep 17 00:00:00 2001 From: Preeti U Murthy Date: Wed, 18 Feb 2015 23:24:53 -0600 Subject: cpuidle: powernv: Avoid endianness conversions while parsing DT We currently read the information about idle states from the DT so as to populate the cpuidle table. Use those APIs to read from the DT that can avoid endianness conversions of the property values in the cpuidle driver. Signed-off-by: Preeti U Murthy Acked-by: Michael Ellerman Signed-off-by: Rafael J. 
Wysocki --- drivers/cpuidle/cpuidle-powernv.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) (limited to 'drivers/cpuidle') diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 30d42298a69f..59372077ec7c 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -159,9 +159,7 @@ static int powernv_add_idle_states(void) struct device_node *power_mgt; int nr_idle_states = 1; /* Snooze */ int dt_idle_states; - const __be32 *idle_state_flags; - u32 len_flags, flags; - u32 *latency_ns, *residency_ns; + u32 *latency_ns, *residency_ns, *flags; int i, rc; /* Currently we have snooze statically defined */ @@ -172,14 +170,19 @@ static int powernv_add_idle_states(void) goto out; } - idle_state_flags = of_get_property(power_mgt, - "ibm,cpu-idle-state-flags", &len_flags); - if (!idle_state_flags) { - pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n"); + /* Read values of any property to determine the num of idle states */ + dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags"); + if (dt_idle_states < 0) { + pr_warn("cpuidle-powernv: no idle states found in the DT\n"); goto out; } - dt_idle_states = len_flags / sizeof(u32); + flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL); + if (of_property_read_u32_array(power_mgt, + "ibm,cpu-idle-state-flags", flags, dt_idle_states)) { + pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n"); + goto out_free_flags; + } latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL); rc = of_property_read_u32_array(power_mgt, @@ -195,21 +198,19 @@ static int powernv_add_idle_states(void) for (i = 0; i < dt_idle_states; i++) { - flags = be32_to_cpu(idle_state_flags[i]); - /* * Cpuidle accepts exit_latency and target_residency in us. * Use default target_residency values if f/w does not expose it. */ - if (flags & OPAL_PM_NAP_ENABLED) { + if (flags[i] & OPAL_PM_NAP_ENABLED) { /* Add NAP state */ strcpy(powernv_states[nr_idle_states].name, "Nap"); strcpy(powernv_states[nr_idle_states].desc, "Nap"); powernv_states[nr_idle_states].flags = 0; powernv_states[nr_idle_states].target_residency = 100; powernv_states[nr_idle_states].enter = &nap_loop; - } else if (flags & OPAL_PM_SLEEP_ENABLED || - flags & OPAL_PM_SLEEP_ENABLED_ER1) { + } else if (flags[i] & OPAL_PM_SLEEP_ENABLED || + flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) { /* Add FASTSLEEP state */ strcpy(powernv_states[nr_idle_states].name, "FastSleep"); strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); @@ -232,6 +233,8 @@ static int powernv_add_idle_states(void) kfree(residency_ns); out_free_latency: kfree(latency_ns); +out_free_flags: + kfree(flags); out: return nr_idle_states; } -- cgit v1.2.3
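Taken together, the suspend-to-idle patches above define a contract that later cpuidle drivers implement. Below is a hedged sketch of a minimal driver advertising the new ->enter_freeze hook (hypothetical names throughout; cpu_do_idle() stands in for the platform's lowest-level idle primitive): the freeze variant must keep interrupts disabled the whole time and must not reprogram clock event devices, so a plain WFI-style entry qualifies.

#include <linux/module.h>
#include <linux/cpuidle.h>
#include <asm/proc-fns.h>

static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();
	return index;
}

/*
 * ->enter_freeze contract (see the hunk adding it to struct
 * cpuidle_state): runs with the tick, and possibly all of timekeeping,
 * suspended, so it must neither re-enable interrupts nor touch clock
 * event devices.
 */
static void example_enter_freeze(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.owner = THIS_MODULE,
	.states = {
		[0] = {
			.enter			= example_enter,
			.enter_freeze		= example_enter_freeze,
			.exit_latency		= 1,
			.target_residency	= 1,
			.name			= "WFI",
			.desc			= "wait for interrupt",
		},
	},
	.state_count = 1,
};

Registration would then go through cpuidle_register(&example_idle_driver, NULL), as in the Exynos driver above; cpuidle_enter_freeze() picks this state automatically whenever the system is suspending to idle, because it is the deepest state with an ->enter_freeze callback.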