From 4881f603d7b82df2bc15efd2a272f973a3bf8df1 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Fri, 25 Apr 2014 08:44:59 +0800 Subject: PM / hibernate: use unsigned local variables in swsusp_show_speed() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit do_div() needs 'u64' type, or it reports warning. And negative number is meaningless for "speed", so change all signed to unsigned within swsusp_show_speed(). The related warning (with allmodconfig for unicore32): CC kernel/power/hibernate.o kernel/power/hibernate.c: In function ‘swsusp_show_speed’: kernel/power/hibernate.c:237: warning: comparison of distinct pointer types lacks a cast Signed-off-by: Chen Gang [rjw: Subject] Signed-off-by: Rafael J. Wysocki --- kernel/power/hibernate.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index f4f2073711d3..de4b989cc8fd 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -228,19 +228,23 @@ static void platform_recover(int platform_mode) void swsusp_show_speed(struct timeval *start, struct timeval *stop, unsigned nr_pages, char *msg) { - s64 elapsed_centisecs64; - int centisecs; - int k; - int kps; + u64 elapsed_centisecs64; + unsigned int centisecs; + unsigned int k; + unsigned int kps; elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start); + /* + * If "(s64)elapsed_centisecs64 < 0", it will print long elapsed time, + * it is obvious enough for what went wrong. + */ do_div(elapsed_centisecs64, NSEC_PER_SEC / 100); centisecs = elapsed_centisecs64; if (centisecs == 0) centisecs = 1; /* avoid div-by-zero */ k = nr_pages * (PAGE_SIZE / 1024); kps = (k * 100) / centisecs; - printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", + printk(KERN_INFO "PM: %s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n", msg, k, centisecs / 100, centisecs % 100, kps / 1000, (kps % 1000) / 10); -- cgit v1.2.3 From 52c324f8a87b336496d0f5e9d8dff1aa32bb08cd Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 1 May 2014 00:13:47 +0200 Subject: cpuidle: Combine cpuidle_enabled() with cpuidle_select() Since both cpuidle_enabled() and cpuidle_select() are only called by cpuidle_idle_call(), it is not really useful to keep them separate and combining them will help to avoid complicating cpuidle_idle_call() even further if governors are changed to return error codes sometimes. This code modification shouldn't lead to any functional changes. Signed-off-by: Rafael J. 
Wysocki --- drivers/cpuidle/cpuidle.c | 26 ++++++-------------------- include/linux/cpuidle.h | 5 ----- kernel/sched/idle.c | 20 +++++++------------- 3 files changed, 13 insertions(+), 38 deletions(-) diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 8236746e46bb..f38359f64cc6 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -64,26 +64,6 @@ int cpuidle_play_dead(void) return -ENODEV; } -/** - * cpuidle_enabled - check if the cpuidle framework is ready - * @dev: cpuidle device for this cpu - * @drv: cpuidle driver for this cpu - * - * Return 0 on success, otherwise: - * -NODEV : the cpuidle framework is not available - * -EBUSY : the cpuidle framework is not initialized - */ -int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev) -{ - if (off || !initialized) - return -ENODEV; - - if (!drv || !dev || !dev->enabled) - return -EBUSY; - - return 0; -} - /** * cpuidle_enter_state - enter the state and update stats * @dev: cpuidle device for this cpu @@ -138,6 +118,12 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, */ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) { + if (off || !initialized) + return -ENODEV; + + if (!drv || !dev || !dev->enabled) + return -EBUSY; + return cpuidle_curr_governor->select(drv, dev); } diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index b0238cba440b..a8d5bd391a26 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -120,8 +120,6 @@ struct cpuidle_driver { #ifdef CONFIG_CPU_IDLE extern void disable_cpuidle(void); -extern int cpuidle_enabled(struct cpuidle_driver *drv, - struct cpuidle_device *dev); extern int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_enter(struct cpuidle_driver *drv, @@ -149,9 +147,6 @@ extern int cpuidle_play_dead(void); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); #else static inline void disable_cpuidle(void) { } -static inline int cpuidle_enabled(struct cpuidle_driver *drv, - struct cpuidle_device *dev) -{return -ENODEV; } static inline int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return -ENODEV; } diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 8f4390a079c7..a8f12247ce7c 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -101,19 +101,13 @@ static int cpuidle_idle_call(void) rcu_idle_enter(); /* - * Check if the cpuidle framework is ready, otherwise fallback - * to the default arch specific idle method + * Ask the cpuidle framework to choose a convenient idle state. + * Fall back to the default arch specific idle method on errors. */ - ret = cpuidle_enabled(drv, dev); - - if (!ret) { - /* - * Ask the governor to choose an idle state it thinks - * it is convenient to go to. 
There is *always* a - * convenient idle state - */ - next_state = cpuidle_select(drv, dev); + next_state = cpuidle_select(drv, dev); + ret = next_state; + if (ret >= 0) { /* * The idle task must be scheduled, it is pointless to * go to idle, just update no idle residency and get @@ -140,7 +134,7 @@ static int cpuidle_idle_call(void) CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); - if (!ret) { + if (ret >= 0) { trace_cpu_idle_rcuidle(next_state, dev->cpu); /* @@ -175,7 +169,7 @@ static int cpuidle_idle_call(void) * We can't use the cpuidle framework, let's use the default * idle routine */ - if (ret) + if (ret < 0) arch_cpu_idle(); __current_set_polling(); -- cgit v1.2.3 From 3836785a1bdcd6706c68ad46bf53adc0b057b310 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 1 May 2014 00:14:04 +0200 Subject: cpuidle / menu: Return (-1) if there are no suitable states If there is a PM QoS latency limit and all of the sufficiently shallow C-states are disabled, the cpuidle menu governor returns 0 which on some systems is CPUIDLE_DRIVER_STATE_START and shouldn't be returned if that C-state has been disabled. Fix the issue by modifying the menu governor to return (-1) in such situations. Signed-off-by: Rafael J. Wysocki --- drivers/cpuidle/governors/menu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 71b523293354..3ca15a8cbaa8 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -296,7 +296,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) data->needs_update = 0; } - data->last_state_idx = 0; + data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1; /* Special case when user has set very strict latency requirement */ if (unlikely(latency_req == 0)) -- cgit v1.2.3 From 2c730785d9532d2a9c46e059bd6a6c9a764c539f Mon Sep 17 00:00:00 2001 From: Sebastian Capella Date: Mon, 21 Apr 2014 17:30:46 -0700 Subject: PM / hibernate: no kernel_power_off when pm_power_off NULL Reboot logic in kernel/reboot will avoid calling kernel_power_off when pm_power_off is null, and instead uses kernel_halt. Change hibernate's power_down to follow the behavior in the reboot call. Calling the notifier twice (once for SYS_POWER_OFF and again for SYS_HALT) causes a panic during hibernation on Kirkwood Openblocks A6 board. Signed-off-by: Sebastian Capella Reported-by: Ezequiel Garcia Reviewed-by: Pavel Machek Signed-off-by: Rafael J. Wysocki --- kernel/power/hibernate.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index de4b989cc8fd..1f08ac7f55d8 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -599,7 +599,8 @@ static void power_down(void) case HIBERNATION_PLATFORM: hibernation_platform_enter(); case HIBERNATION_SHUTDOWN: - kernel_power_off(); + if (pm_power_off) + kernel_power_off(); break; #ifdef CONFIG_SUSPEND case HIBERNATION_SUSPEND: @@ -627,7 +628,8 @@ static void power_down(void) * corruption after resume. */ printk(KERN_CRIT "PM: Please power down manually\n"); - while(1); + while (1) + cpu_relax(); } /** -- cgit v1.2.3 From bed4d597a0f99b380d24ab3a9da47b62cbf1ad0e Mon Sep 17 00:00:00 2001 From: Chander Kashyap Date: Tue, 22 Apr 2014 18:08:04 +0530 Subject: cpuidle / menu: move repeated correction factor check to init In menu_select function we check for correction factor every time. If it is zero we are initializing to unity. 
Hence move it to init function and initialise by unity, hence avoid repeated comparisons. Signed-off-by: Chander Kashyap Reviewed-by: Tuukka Tikkanen Signed-off-by: Rafael J. Wysocki --- drivers/cpuidle/governors/menu.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 3ca15a8cbaa8..c4f80c15a48d 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -310,13 +310,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) data->bucket = which_bucket(data->next_timer_us); - /* - * if the correction factor is 0 (eg first time init or cpu hotplug - * etc), we actually want to start out with a unity factor. - */ - if (data->correction_factor[data->bucket] == 0) - data->correction_factor[data->bucket] = RESOLUTION * DECAY; - /* * Force the result of multiplication to be 64 bits even if both * operands are 32 bits. @@ -466,9 +459,17 @@ static int menu_enable_device(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct menu_device *data = &per_cpu(menu_devices, dev->cpu); + int i; memset(data, 0, sizeof(struct menu_device)); + /* + * if the correction factor is 0 (eg first time init or cpu hotplug + * etc), we actually want to start out with a unity factor. + */ + for(i = 0; i < BUCKETS; i++) + data->correction_factor[i] = RESOLUTION * DECAY; + return 0; } -- cgit v1.2.3 From a6220fc19afc07fe77cfd16f5b8e568615517091 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 5 May 2014 00:51:54 +0200 Subject: PM / suspend: Always use deepest C-state in the "freeze" sleep state If freeze_enter() is called, we want to bypass the current cpuidle governor and always use the deepest available (that is, not disabled) C-state, because we want to save as much energy as reasonably possible then and runtime latency constraints don't matter at that point, since the system is in a sleep state anyway. Signed-off-by: Rafael J. Wysocki Tested-by: Aubrey Li --- drivers/cpuidle/cpuidle.c | 45 ++++++++++++++++++++++++++++++++++++++++++++- include/linux/cpuidle.h | 2 ++ kernel/power/suspend.c | 2 ++ 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index f38359f64cc6..cb7019977c50 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -32,6 +32,7 @@ LIST_HEAD(cpuidle_detected_devices); static int enabled_devices; static int off __read_mostly; static int initialized __read_mostly; +static bool use_deepest_state __read_mostly; int cpuidle_disabled(void) { @@ -64,6 +65,45 @@ int cpuidle_play_dead(void) return -ENODEV; } +/** + * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode. + * @enable: Whether enable or disable the feature. + * + * If the "deepest idle" mode is enabled, cpuidle will ignore the governor and + * always use the state with the greatest exit latency (out of the states that + * are not disabled). + * + * This function can only be called after cpuidle_pause() to avoid races. + */ +void cpuidle_use_deepest_state(bool enable) +{ + use_deepest_state = enable; +} + +/** + * cpuidle_find_deepest_state - Find the state of the greatest exit latency. + * @drv: cpuidle driver for a given CPU. + * @dev: cpuidle device for a given CPU. 
+ */ +static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + unsigned int latency_req = 0; + int i, ret = CPUIDLE_DRIVER_STATE_START - 1; + + for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { + struct cpuidle_state *s = &drv->states[i]; + struct cpuidle_state_usage *su = &dev->states_usage[i]; + + if (s->disabled || su->disable || s->exit_latency <= latency_req) + continue; + + latency_req = s->exit_latency; + ret = i; + } + return ret; +} + /** * cpuidle_enter_state - enter the state and update stats * @dev: cpuidle device for this cpu @@ -124,6 +164,9 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) if (!drv || !dev || !dev->enabled) return -EBUSY; + if (unlikely(use_deepest_state)) + return cpuidle_find_deepest_state(drv, dev); + return cpuidle_curr_governor->select(drv, dev); } @@ -155,7 +198,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, */ void cpuidle_reflect(struct cpuidle_device *dev, int index) { - if (cpuidle_curr_governor->reflect) + if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state)) cpuidle_curr_governor->reflect(dev, index); } diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index a8d5bd391a26..c51a436135c4 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -143,6 +143,7 @@ extern void cpuidle_resume(void); extern int cpuidle_enable_device(struct cpuidle_device *dev); extern void cpuidle_disable_device(struct cpuidle_device *dev); extern int cpuidle_play_dead(void); +extern void cpuidle_use_deepest_state(bool enable); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); #else @@ -175,6 +176,7 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev) {return -ENODEV; } static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } static inline int cpuidle_play_dead(void) {return -ENODEV; } +static inline void cpuidle_use_deepest_state(bool enable) {} static inline struct cpuidle_driver *cpuidle_get_cpu_driver( struct cpuidle_device *dev) {return NULL; } #endif diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8233cd4047d7..155721f7f909 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -54,9 +54,11 @@ static void freeze_begin(void) static void freeze_enter(void) { + cpuidle_use_deepest_state(true); cpuidle_resume(); wait_event(suspend_freeze_wait_head, suspend_freeze_wake); cpuidle_pause(); + cpuidle_use_deepest_state(false); } void freeze_wake(void) -- cgit v1.2.3 From 8a54cd5bd6ebf009b96ec79510b593f7ba5c0ff3 Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Tue, 6 May 2014 13:01:56 +0200 Subject: PM / hibernate: Documentation: Fix script for unswapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit System can have mmaped also character devices (e.g dri devices by X) or deleted files. Running cat on character devices is really bad idea (system can hang) so run cat only on regular files. Also mmaped files can have spaces in filenames. Signed-off-by: Pali Rohár [rjw: Subject] Signed-off-by: Rafael J. 
Wysocki --- Documentation/power/swsusp.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt index 079160e22bcc..f732a8321e8a 100644 --- a/Documentation/power/swsusp.txt +++ b/Documentation/power/swsusp.txt @@ -220,7 +220,10 @@ Q: After resuming, system is paging heavily, leading to very bad interactivity. A: Try running -cat `cat /proc/[0-9]*/maps | grep / | sed 's:.* /:/:' | sort -u` > /dev/null +cat /proc/[0-9]*/maps | grep / | sed 's:.* /:/:' | sort -u | while read file +do + test -f "$file" && cat "$file" > /dev/null +done after resume. swapoff -a; swapon -a may also be useful. -- cgit v1.2.3 From 317cf7e5e85e3ef9f23fc6dd8b2945ab4a258140 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Fri, 9 May 2014 23:32:08 +0200 Subject: PM / hibernate: convert simple_strtoul to kstrtoul Replace obsolete function. Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Rafael J. Wysocki --- kernel/power/hibernate.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 1f08ac7f55d8..2377ff72994c 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -1115,7 +1115,10 @@ static int __init resumewait_setup(char *str) static int __init resumedelay_setup(char *str) { - resume_delay = simple_strtoul(str, NULL, 0); + int rc = kstrtoul(str, 0, (unsigned long *)&resume_delay); + + if (rc) + return rc; return 1; } -- cgit v1.2.3 From fad16dd9c962229c5965ec6f5cd5f48180f94fd4 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 8 May 2014 23:22:15 +0200 Subject: ACPI / PM: Export acpi_target_system_state() to modules Export the acpi_target_system_state() function to modules so that modular drivers can use it to check what the target ACPI sleep state of the system is (that is needed for i915 mostly at this point). Signed-off-by: Rafael J. Wysocki --- drivers/acpi/sleep.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index c40fb2e81bbc..2281ca31c1bc 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -89,6 +89,7 @@ u32 acpi_target_system_state(void) { return acpi_target_sleep_state; } +EXPORT_SYMBOL_GPL(acpi_target_system_state); static bool pwr_btn_event_pending; -- cgit v1.2.3 From 1f0b63866fc1be700260547be8edf8e6f0af37f2 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 15 May 2014 23:29:57 +0200 Subject: ACPI / PM: Hold ACPI scan lock over the "freeze" sleep state The "freeze" sleep state suffers from the same issue that was addressed by commit ad07277e82de (ACPI / PM: Hold acpi_scan_lock over system PM transitions) for ACPI sleep states, that is, things break if ->remove() is called for devices whose system resume callbacks haven't been executed yet. It also can be addressed in the same way, by holding the ACPI scan lock over the "freeze" sleep state and PM transitions to and from that state, but ->begin() and ->end() platform operations for the "freeze" sleep state are needed for this purpose. This change has been tested on Acer Aspire S5 with Thunderbolt. Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/sleep.c | 18 ++++++++++++++++++ include/linux/suspend.h | 7 +++++++ kernel/power/suspend.c | 15 +++++++++++++++ 3 files changed, 40 insertions(+) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 2281ca31c1bc..c11e3795431b 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -612,6 +612,22 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = { .recover = acpi_pm_finish, }; +static int acpi_freeze_begin(void) +{ + acpi_scan_lock_acquire(); + return 0; +} + +static void acpi_freeze_end(void) +{ + acpi_scan_lock_release(); +} + +static const struct platform_freeze_ops acpi_freeze_ops = { + .begin = acpi_freeze_begin, + .end = acpi_freeze_end, +}; + static void acpi_sleep_suspend_setup(void) { int i; @@ -622,7 +638,9 @@ static void acpi_sleep_suspend_setup(void) suspend_set_ops(old_suspend_ordering ? &acpi_suspend_ops_old : &acpi_suspend_ops); + freeze_set_ops(&acpi_freeze_ops); } + #else /* !CONFIG_SUSPEND */ static inline void acpi_sleep_suspend_setup(void) {} #endif /* !CONFIG_SUSPEND */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index f73cabf59012..91d66fd8dce1 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -187,6 +187,11 @@ struct platform_suspend_ops { void (*recover)(void); }; +struct platform_freeze_ops { + int (*begin)(void); + void (*end)(void); +}; + #ifdef CONFIG_SUSPEND /** * suspend_set_ops - set platform dependent suspend operations @@ -194,6 +199,7 @@ struct platform_suspend_ops { */ extern void suspend_set_ops(const struct platform_suspend_ops *ops); extern int suspend_valid_only_mem(suspend_state_t state); +extern void freeze_set_ops(const struct platform_freeze_ops *ops); extern void freeze_wake(void); /** @@ -220,6 +226,7 @@ extern int pm_suspend(suspend_state_t state); static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } +static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} static inline void freeze_wake(void) {} #endif /* !CONFIG_SUSPEND */ diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8233cd4047d7..73a905f83972 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -38,6 +38,7 @@ const char *const pm_states[PM_SUSPEND_MAX] = { }; static const struct platform_suspend_ops *suspend_ops; +static const struct platform_freeze_ops *freeze_ops; static bool need_suspend_ops(suspend_state_t state) { @@ -47,6 +48,13 @@ static bool need_suspend_ops(suspend_state_t state) static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); static bool suspend_freeze_wake; +void freeze_set_ops(const struct platform_freeze_ops *ops) +{ + lock_system_sleep(); + freeze_ops = ops; + unlock_system_sleep(); +} + static void freeze_begin(void) { suspend_freeze_wake = false; @@ -269,6 +277,10 @@ int suspend_devices_and_enter(suspend_state_t state) error = suspend_ops->begin(state); if (error) goto Close; + } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) { + error = freeze_ops->begin(); + if (error) + goto Close; } suspend_console(); suspend_test_start(); @@ -294,6 +306,9 @@ int suspend_devices_and_enter(suspend_state_t state) Close: if (need_suspend_ops(state) && suspend_ops->end) suspend_ops->end(); + else if (state == PM_SUSPEND_FREEZE && freeze_ops->end) + freeze_ops->end(); + trace_machine_suspend(PWR_EVENT_EXIT); return error; -- cgit v1.2.3 From a3cffce4fbafac072660648e028cc9e629b5b3c8 Mon Sep 17 00:00:00 2001 From: Heikki 
Krogerus Date: Thu, 8 May 2014 14:59:04 +0300 Subject: ACPI / platform: add IDs for Broadcom Bluetooth and GPS chips These IDs are used on Baytrail boards such as Lenovo Miix 2 and Asus Transformer Book T100TA. On lenovo Miix 2 8", BCM4752 is called LNV4752. All the rest of the IDs are for Broadcom BCM43241 module with the ID referring to different revision number. Signed-off-by: Heikki Krogerus Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpi_platform.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index dbfe49e5fd63..c0a39417ebe4 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -32,6 +32,10 @@ static const struct acpi_device_id acpi_platform_device_ids[] = { { "ACPI0003" }, { "VPC2004" }, { "BCM4752" }, + { "LNV4752" }, + { "BCM2E1A" }, + { "BCM2E39" }, + { "BCM2E3D" }, /* Intel Smart Sound Technology */ { "INT33C8" }, -- cgit v1.2.3 From f6514be5fe7fe796041b673bad769510414ff2b9 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 May 2014 19:08:46 +0300 Subject: PM / hibernate: Fix memory corruption in resumedelay_setup() In the original code "resume_delay" is an int so on 64 bits, the call to kstrtoul() will cause memory corruption. We may as well fix a style issue here as well and make "resume_delay" unsigned int, since that's what we pass to ssleep(). Fixes: 317cf7e5e85e (PM / hibernate: convert simple_strtoul to kstrtoul) Signed-off-by: Dan Carpenter Acked-by: Pavel Machek Signed-off-by: Rafael J. Wysocki --- kernel/power/hibernate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 2377ff72994c..df88d55dc436 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -35,7 +35,7 @@ static int nocompress; static int noresume; static int resume_wait; -static int resume_delay; +static unsigned int resume_delay; static char resume_file[256] = CONFIG_PM_STD_PARTITION; dev_t swsusp_resume_device; sector_t swsusp_resume_block; @@ -1115,7 +1115,7 @@ static int __init resumewait_setup(char *str) static int __init resumedelay_setup(char *str) { - int rc = kstrtoul(str, 0, (unsigned long *)&resume_delay); + int rc = kstrtouint(str, 0, &resume_delay); if (rc) return rc; -- cgit v1.2.3 From aae4518b3124b29f8dc81c829c704fd2df72e98b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 16 May 2014 02:46:50 +0200 Subject: PM / sleep: Mechanism to avoid resuming runtime-suspended devices unnecessarily Currently, some subsystems (e.g. PCI and the ACPI PM domain) have to resume all runtime-suspended devices during system suspend, mostly because those devices may need to be reprogrammed due to different wakeup settings for system sleep and for runtime PM. For some devices, though, it's OK to remain in runtime suspend throughout a complete system suspend/resume cycle (if the device was in runtime suspend at the start of the cycle). We would like to do this whenever possible, to avoid the overhead of extra power-up and power-down events. However, problems may arise because the device's descendants may require it to be at full power at various points during the cycle. Therefore the most straightforward way to do this safely is if the device and all its descendants can remain runtime suspended until the complete stage of system resume. To this end, introduce a new device PM flag, power.direct_complete and modify the PM core to use that flag as follows. 
If the ->prepare() callback of a device returns a positive number, the PM core will regard that as an indication that it may leave the device runtime-suspended. It will then check if the system power transition in progress is a suspend (and not hibernation in particular) and if the device is, indeed, runtime-suspended. In that case, the PM core will set the device's power.direct_complete flag. Otherwise it will clear power.direct_complete for the device and it also will later clear it for the device's parent (if there's one). Next, the PM core will not invoke the ->suspend() ->suspend_late(), ->suspend_irq(), ->resume_irq(), ->resume_early(), or ->resume() callbacks for all devices having power.direct_complete set. It will invoke their ->complete() callbacks, however, and those callbacks are then responsible for resuming the devices as appropriate, if necessary. For example, in some cases they may need to queue up runtime resume requests for the devices using pm_request_resume(). Changelog partly based on an Alan Stern's description of the idea (http://marc.info/?l=linux-pm&m=139940466625569&w=2). Signed-off-by: Rafael J. Wysocki Acked-by: Alan Stern --- drivers/base/power/main.c | 66 +++++++++++++++++++++++++++++++++++----------- include/linux/pm.h | 36 +++++++++++++++++++------ include/linux/pm_runtime.h | 6 +++++ 3 files changed, 85 insertions(+), 23 deletions(-) diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 86d5e4fb5b98..343ffad59377 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -479,7 +479,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn TRACE_DEVICE(dev); TRACE_RESUME(0); - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) goto Out; if (!dev->power.is_noirq_suspended) @@ -605,7 +605,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn TRACE_DEVICE(dev); TRACE_RESUME(0); - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) goto Out; if (!dev->power.is_late_suspended) @@ -735,6 +735,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (dev->power.direct_complete) { + /* Match the pm_runtime_disable() in __device_suspend(). 
*/ + pm_runtime_enable(dev); + goto Complete; + } + dpm_wait(dev->parent, async); dpm_watchdog_set(&wd, dev); device_lock(dev); @@ -1007,7 +1013,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a goto Complete; } - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) goto Complete; dpm_wait_for_children(dev, async); @@ -1146,7 +1152,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as goto Complete; } - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) goto Complete; dpm_wait_for_children(dev, async); @@ -1332,6 +1338,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (dev->power.direct_complete) { + if (pm_runtime_status_suspended(dev)) { + pm_runtime_disable(dev); + if (pm_runtime_suspended_if_enabled(dev)) + goto Complete; + + pm_runtime_enable(dev); + } + dev->power.direct_complete = false; + } + dpm_watchdog_set(&wd, dev); device_lock(dev); @@ -1382,10 +1399,19 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) End: if (!error) { + struct device *parent = dev->parent; + dev->power.is_suspended = true; - if (dev->power.wakeup_path - && dev->parent && !dev->parent->power.ignore_children) - dev->parent->power.wakeup_path = true; + if (parent) { + spin_lock_irq(&parent->power.lock); + + dev->parent->power.direct_complete = false; + if (dev->power.wakeup_path + && !dev->parent->power.ignore_children) + dev->parent->power.wakeup_path = true; + + spin_unlock_irq(&parent->power.lock); + } } device_unlock(dev); @@ -1487,7 +1513,7 @@ static int device_prepare(struct device *dev, pm_message_t state) { int (*callback)(struct device *) = NULL; char *info = NULL; - int error = 0; + int ret = 0; if (dev->power.syscore) return 0; @@ -1523,17 +1549,27 @@ static int device_prepare(struct device *dev, pm_message_t state) callback = dev->driver->pm->prepare; } - if (callback) { - error = callback(dev); - suspend_report_result(callback, error); - } + if (callback) + ret = callback(dev); device_unlock(dev); - if (error) + if (ret < 0) { + suspend_report_result(callback, ret); pm_runtime_put(dev); - - return error; + return ret; + } + /* + * A positive return value from ->prepare() means "this device appears + * to be runtime-suspended and its state is fine, so if it really is + * runtime-suspended, you can leave it in that state provided that you + * will do the same thing with all of its descendants". This only + * applies to suspend transitions, however. + */ + spin_lock_irq(&dev->power.lock); + dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; + spin_unlock_irq(&dev->power.lock); + return 0; } /** diff --git a/include/linux/pm.h b/include/linux/pm.h index d915d0345fa1..72c0fe098a27 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -93,13 +93,23 @@ typedef struct pm_message { * been registered) to recover from the race condition. * This method is executed for all kinds of suspend transitions and is * followed by one of the suspend callbacks: @suspend(), @freeze(), or - * @poweroff(). The PM core executes subsystem-level @prepare() for all - * devices before starting to invoke suspend callbacks for any of them, so - * generally devices may be assumed to be functional or to respond to - * runtime resume requests while @prepare() is being executed. 
However, - * device drivers may NOT assume anything about the availability of user - * space at that time and it is NOT valid to request firmware from within - * @prepare() (it's too late to do that). It also is NOT valid to allocate + * @poweroff(). If the transition is a suspend to memory or standby (that + * is, not related to hibernation), the return value of @prepare() may be + * used to indicate to the PM core to leave the device in runtime suspend + * if applicable. Namely, if @prepare() returns a positive number, the PM + * core will understand that as a declaration that the device appears to be + * runtime-suspended and it may be left in that state during the entire + * transition and during the subsequent resume if all of its descendants + * are left in runtime suspend too. If that happens, @complete() will be + * executed directly after @prepare() and it must ensure the proper + * functioning of the device after the system resume. + * The PM core executes subsystem-level @prepare() for all devices before + * starting to invoke suspend callbacks for any of them, so generally + * devices may be assumed to be functional or to respond to runtime resume + * requests while @prepare() is being executed. However, device drivers + * may NOT assume anything about the availability of user space at that + * time and it is NOT valid to request firmware from within @prepare() + * (it's too late to do that). It also is NOT valid to allocate * substantial amounts of memory from @prepare() in the GFP_KERNEL mode. * [To work around these limitations, drivers may register suspend and * hibernation notifiers to be executed before the freezing of tasks.] @@ -112,7 +122,16 @@ typedef struct pm_message { * of the other devices that the PM core has unsuccessfully attempted to * suspend earlier). * The PM core executes subsystem-level @complete() after it has executed - * the appropriate resume callbacks for all devices. + * the appropriate resume callbacks for all devices. If the corresponding + * @prepare() at the beginning of the suspend transition returned a + * positive number and the device was left in runtime suspend (without + * executing any suspend and resume callbacks for it), @complete() will be + * the only callback executed for the device during resume. In that case, + * @complete() must be prepared to do whatever is necessary to ensure the + * proper functioning of the device after the system resume. To this end, + * @complete() can check the power.direct_complete flag of the device to + * learn whether (unset) or not (set) the previous suspend and resume + * callbacks have been executed for it. * * @suspend: Executed before putting the system into a sleep state in which the * contents of main memory are preserved. 
The exact action to perform @@ -546,6 +565,7 @@ struct dev_pm_info { bool is_late_suspended:1; bool ignore_children:1; bool early_init:1; /* Owned by the PM core */ + bool direct_complete:1; /* Owned by the PM core */ spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 2a5897a4afbc..43fd6716f662 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -101,6 +101,11 @@ static inline bool pm_runtime_status_suspended(struct device *dev) return dev->power.runtime_status == RPM_SUSPENDED; } +static inline bool pm_runtime_suspended_if_enabled(struct device *dev) +{ + return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1; +} + static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; @@ -150,6 +155,7 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {} static inline bool pm_runtime_suspended(struct device *dev) { return false; } static inline bool pm_runtime_active(struct device *dev) { return true; } static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; } static inline bool pm_runtime_enabled(struct device *dev) { return false; } static inline void pm_runtime_no_callbacks(struct device *dev) {} -- cgit v1.2.3 From f71495f3f0c5f0801823d1235b271a4a415d3df8 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 16 May 2014 02:47:37 +0200 Subject: PM / sleep: Update device PM documentation to cover direct_complete Update the device PM documentation in devices.txt and runtime_pm.txt to reflect the changes in the system suspend and resume handling related to the introduction of the new power.direct_complete flag. Signed-off-by: Rafael J. Wysocki Acked-by: Alan Stern --- Documentation/power/devices.txt | 34 ++++++++++++++++++++++++++++++---- Documentation/power/runtime_pm.txt | 17 +++++++++++++++++ 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt index 47d46dff70f7..d172bce0fd49 100644 --- a/Documentation/power/devices.txt +++ b/Documentation/power/devices.txt @@ -2,6 +2,7 @@ Device Power Management Copyright (c) 2010-2011 Rafael J. Wysocki , Novell Inc. Copyright (c) 2010 Alan Stern +Copyright (c) 2014 Intel Corp., Rafael J. Wysocki Most of the code in Linux is device drivers, so most of the Linux power @@ -326,6 +327,20 @@ the phases are: driver in some way for the upcoming system power transition, but it should not put the device into a low-power state. + For devices supporting runtime power management, the return value of the + prepare callback can be used to indicate to the PM core that it may + safely leave the device in runtime suspend (if runtime-suspended + already), provided that all of the device's descendants are also left in + runtime suspend. Namely, if the prepare callback returns a positive + number and that happens for all of the descendants of the device too, + and all of them (including the device itself) are runtime-suspended, the + PM core will skip the suspend, suspend_late and suspend_noirq suspend + phases as well as the resume_noirq, resume_early and resume phases of + the following system resume for all of these devices. 
In that case, + the complete callback will be called directly after the prepare callback + and is entirely responsible for bringing the device back to the + functional state as appropriate. + 2. The suspend methods should quiesce the device to stop it from performing I/O. They also may save the device registers and put it into the appropriate low-power state, depending on the bus type the device is on, @@ -400,12 +415,23 @@ When resuming from freeze, standby or memory sleep, the phases are: the resume callbacks occur; it's not necessary to wait until the complete phase. + Moreover, if the preceding prepare callback returned a positive number, + the device may have been left in runtime suspend throughout the whole + system suspend and resume (the suspend, suspend_late, suspend_noirq + phases of system suspend and the resume_noirq, resume_early, resume + phases of system resume may have been skipped for it). In that case, + the complete callback is entirely responsible for bringing the device + back to the functional state after system suspend if necessary. [For + example, it may need to queue up a runtime resume request for the device + for this purpose.] To check if that is the case, the complete callback + can consult the device's power.direct_complete flag. Namely, if that + flag is set when the complete callback is being run, it has been called + directly after the preceding prepare and special action may be required + to make the device work correctly afterward. + At the end of these phases, drivers should be as functional as they were before suspending: I/O can be performed using DMA and IRQs, and the relevant clocks are -gated on. Even if the device was in a low-power state before the system sleep -because of runtime power management, afterwards it should be back in its -full-power state. There are multiple reasons why it's best to do this; they are -discussed in more detail in Documentation/power/runtime_pm.txt. +gated on. However, the details here may again be platform-specific. For example, some systems support multiple "run" states, and the mode in effect at diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 5f96daf8566a..e1bee8a4aaac 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -2,6 +2,7 @@ Runtime Power Management Framework for I/O Devices (C) 2009-2011 Rafael J. Wysocki , Novell Inc. (C) 2010 Alan Stern +(C) 2014 Intel Corp., Rafael J. Wysocki 1. Introduction @@ -444,6 +445,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: bool pm_runtime_status_suspended(struct device *dev); - return true if the device's runtime PM status is 'suspended' + bool pm_runtime_suspended_if_enabled(struct device *dev); + - return true if the device's runtime PM status is 'suspended' and its + 'power.disable_depth' field is equal to 1 + void pm_runtime_allow(struct device *dev); - set the power.runtime_auto flag for the device and decrease its usage counter (used by the /sys/devices/.../power/control interface to @@ -644,6 +649,18 @@ place (in particular, if the system is not waking up from hibernation), it may be more efficient to leave the devices that had been suspended before the system suspend began in the suspended state. +To this end, the PM core provides a mechanism allowing some coordination between +different levels of device hierarchy. 
Namely, if a system suspend .prepare() +callback returns a positive number for a device, that indicates to the PM core +that the device appears to be runtime-suspended and its state is fine, so it +may be left in runtime suspend provided that all of its descendants are also +left in runtime suspend. If that happens, the PM core will not execute any +system suspend and resume callbacks for all of those devices, except for the +complete callback, which is then entirely responsible for handling the device +as appropriate. This only applies to system suspend transitions that are not +related to hibernation (see Documentation/power/devices.txt for more +information). + The PM core does its best to reduce the probability of race conditions between the runtime PM and system suspend/resume (and hibernation) callbacks by carrying out the following operations: -- cgit v1.2.3 From f25c0ae2b4c41996c1a6b609132c1788a6eea080 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sat, 17 May 2014 00:18:13 +0200 Subject: ACPI / PM: Avoid resuming devices in ACPI PM domain during system suspend Rework the ACPI PM domain's PM callbacks to avoid resuming devices during system suspend (in order to modify their wakeup settings etc.) if that isn't necessary. Signed-off-by: Rafael J. Wysocki --- drivers/acpi/device_pm.c | 43 ++++++++++++++++++++++++++++++++++++------- drivers/acpi/scan.c | 4 ++++ include/acpi/acpi_bus.h | 3 ++- 3 files changed, 42 insertions(+), 8 deletions(-) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index d047739f3380..9e5fd9c440b7 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -900,17 +900,45 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early); */ int acpi_subsys_prepare(struct device *dev) { - /* - * Devices having power.ignore_children set may still be necessary for - * suspending their children in the next phase of device suspend. - */ - if (dev->power.ignore_children) - pm_runtime_resume(dev); + struct acpi_device *adev = ACPI_COMPANION(dev); + u32 sys_target; + int ret, state; + + ret = pm_generic_prepare(dev); + if (ret < 0) + return ret; + + if (!adev || !pm_runtime_suspended(dev) + || device_may_wakeup(dev) != !!adev->wakeup.prepare_count) + return 0; + + sys_target = acpi_target_system_state(); + if (sys_target == ACPI_STATE_S0) + return 1; - return pm_generic_prepare(dev); + if (adev->power.flags.dsw_present) + return 0; + + ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state); + return !ret && state == adev->power.state; } EXPORT_SYMBOL_GPL(acpi_subsys_prepare); +/** + * acpi_subsys_complete - Finalize device's resume during system resume. + * @dev: Device to handle. + */ +static void acpi_subsys_complete(struct device *dev) +{ + /* + * If the device had been runtime-suspended before the system went into + * the sleep state it is going out of and it has never been resumed till + * now, resume it in case the firmware powered it up. + */ + if (dev->power.direct_complete) + pm_request_resume(dev); +} + /** * acpi_subsys_suspend - Run the device driver's suspend callback. * @dev: Device to handle. 
@@ -979,6 +1007,7 @@ static struct dev_pm_domain acpi_general_pm_domain = { #endif #ifdef CONFIG_PM_SLEEP .prepare = acpi_subsys_prepare, + .complete = acpi_subsys_complete, .suspend = acpi_subsys_suspend, .suspend_late = acpi_subsys_suspend_late, .resume_early = acpi_subsys_resume_early, diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 7efe546a8c42..df6e4c924b35 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1551,9 +1551,13 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) */ if (acpi_has_method(device->handle, "_PSC")) device->power.flags.explicit_get = 1; + if (acpi_has_method(device->handle, "_IRC")) device->power.flags.inrush_current = 1; + if (acpi_has_method(device->handle, "_DSW")) + device->power.flags.dsw_present = 1; + /* * Enumerate supported power management states */ diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 84a2e29a2314..7417a16c8d86 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -261,7 +261,8 @@ struct acpi_device_power_flags { u32 inrush_current:1; /* Serialize Dx->D0 */ u32 power_removed:1; /* Optimize Dx->D0 */ u32 ignore_parent:1; /* Power is independent of parent power state */ - u32 reserved:27; + u32 dsw_present:1; /* _DSW present? */ + u32 reserved:26; }; struct acpi_device_power_state { -- cgit v1.2.3 From 4cf563c5d97c83d4b2fb3a778dd7d5e362cc3e34 Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Thu, 15 May 2014 16:40:23 +0300 Subject: ACPI / PM: Export rest of the subsys PM callbacks No reason for excluding the remaining ones. Signed-off-by: Heikki Krogerus [rjw: Rebased and exported the new acpi_subsys_complete() too.] Signed-off-by: Rafael J. Wysocki --- drivers/acpi/device_pm.c | 5 ++++- include/linux/acpi.h | 6 ++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 9e5fd9c440b7..49a51277f81d 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -928,7 +928,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_prepare); * acpi_subsys_complete - Finalize device's resume during system resume. * @dev: Device to handle. */ -static void acpi_subsys_complete(struct device *dev) +void acpi_subsys_complete(struct device *dev) { /* * If the device had been runtime-suspended before the system went into @@ -938,6 +938,7 @@ static void acpi_subsys_complete(struct device *dev) if (dev->power.direct_complete) pm_request_resume(dev); } +EXPORT_SYMBOL_GPL(acpi_subsys_complete); /** * acpi_subsys_suspend - Run the device driver's suspend callback. @@ -951,6 +952,7 @@ int acpi_subsys_suspend(struct device *dev) pm_runtime_resume(dev); return pm_generic_suspend(dev); } +EXPORT_SYMBOL_GPL(acpi_subsys_suspend); /** * acpi_subsys_suspend_late - Suspend device using ACPI. 
@@ -996,6 +998,7 @@ int acpi_subsys_freeze(struct device *dev) pm_runtime_resume(dev); return pm_generic_freeze(dev); } +EXPORT_SYMBOL_GPL(acpi_subsys_freeze); #endif /* CONFIG_PM_SLEEP */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7a8f2cd66c8b..4c007262e891 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -554,14 +554,20 @@ static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } int acpi_dev_suspend_late(struct device *dev); int acpi_dev_resume_early(struct device *dev); int acpi_subsys_prepare(struct device *dev); +void acpi_subsys_complete(struct device *dev); int acpi_subsys_suspend_late(struct device *dev); int acpi_subsys_resume_early(struct device *dev); +int acpi_subsys_suspend(struct device *dev); +int acpi_subsys_freeze(struct device *dev); #else static inline int acpi_dev_suspend_late(struct device *dev) { return 0; } static inline int acpi_dev_resume_early(struct device *dev) { return 0; } static inline int acpi_subsys_prepare(struct device *dev) { return 0; } +static inline void acpi_subsys_complete(struct device *dev) {} static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } +static inline int acpi_subsys_suspend(struct device *dev) { return 0; } +static inline int acpi_subsys_freeze(struct device *dev) { return 0; } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM) -- cgit v1.2.3 From e2d0e90fae82809667f1dcf4d0d9baa421691c7a Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Thu, 15 May 2014 16:40:25 +0300 Subject: clk: new basic clk type for fractional divider Fractional divider clocks are fairly common. This adds basic type for them. Signed-off-by: Heikki Krogerus Acked-by: Mike Turquette Signed-off-by: Rafael J. Wysocki --- drivers/clk/Makefile | 1 + drivers/clk/clk-fractional-divider.c | 135 +++++++++++++++++++++++++++++++++++ include/linux/clk-provider.h | 31 ++++++++ 3 files changed, 167 insertions(+) create mode 100644 drivers/clk/clk-fractional-divider.c diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 5f8a28735c96..0745059b1834 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o obj-$(CONFIG_COMMON_CLK) += clk-gate.o obj-$(CONFIG_COMMON_CLK) += clk-mux.o obj-$(CONFIG_COMMON_CLK) += clk-composite.o +obj-$(CONFIG_COMMON_CLK) += clk-fractional-divider.o # hardware specific clock types # please keep this section sorted lexicographically by file/directory path name diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c new file mode 100644 index 000000000000..ede685ca0d20 --- /dev/null +++ b/drivers/clk/clk-fractional-divider.c @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2014 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Adjustable fractional divider clock implementation. + * Output rate = (m / n) * parent_rate. 
+ */ + +#include +#include +#include +#include +#include + +#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw) + +static unsigned long clk_fd_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_fractional_divider *fd = to_clk_fd(hw); + unsigned long flags = 0; + u32 val, m, n; + u64 ret; + + if (fd->lock) + spin_lock_irqsave(fd->lock, flags); + + val = clk_readl(fd->reg); + + if (fd->lock) + spin_unlock_irqrestore(fd->lock, flags); + + m = (val & fd->mmask) >> fd->mshift; + n = (val & fd->nmask) >> fd->nshift; + + ret = parent_rate * m; + do_div(ret, n); + + return ret; +} + +static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_fractional_divider *fd = to_clk_fd(hw); + unsigned maxn = (fd->nmask >> fd->nshift) + 1; + unsigned div; + + if (!rate || rate >= *prate) + return *prate; + + div = gcd(*prate, rate); + + while ((*prate / div) > maxn) { + div <<= 1; + rate <<= 1; + } + + return rate; +} + +static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_fractional_divider *fd = to_clk_fd(hw); + unsigned long flags = 0; + unsigned long div; + unsigned n, m; + u32 val; + + div = gcd(parent_rate, rate); + m = rate / div; + n = parent_rate / div; + + if (fd->lock) + spin_lock_irqsave(fd->lock, flags); + + val = clk_readl(fd->reg); + val &= ~(fd->mmask | fd->nmask); + val |= (m << fd->mshift) | (n << fd->nshift); + clk_writel(val, fd->reg); + + if (fd->lock) + spin_unlock_irqrestore(fd->lock, flags); + + return 0; +} + +const struct clk_ops clk_fractional_divider_ops = { + .recalc_rate = clk_fd_recalc_rate, + .round_rate = clk_fd_round_rate, + .set_rate = clk_fd_set_rate, +}; +EXPORT_SYMBOL_GPL(clk_fractional_divider_ops); + +struct clk *clk_register_fractional_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, + u8 clk_divider_flags, spinlock_t *lock) +{ + struct clk_fractional_divider *fd; + struct clk_init_data init; + struct clk *clk; + + fd = kzalloc(sizeof(*fd), GFP_KERNEL); + if (!fd) { + dev_err(dev, "could not allocate fractional divider clk\n"); + return ERR_PTR(-ENOMEM); + } + + init.name = name; + init.ops = &clk_fractional_divider_ops; + init.flags = flags | CLK_IS_BASIC; + init.parent_names = parent_name ? &parent_name : NULL; + init.num_parents = parent_name ? 
1 : 0; + + fd->reg = reg; + fd->mshift = mshift; + fd->mmask = (BIT(mwidth) - 1) << mshift; + fd->nshift = nshift; + fd->nmask = (BIT(nwidth) - 1) << nshift; + fd->flags = clk_divider_flags; + fd->lock = lock; + fd->hw.init = &init; + + clk = clk_register(dev, &fd->hw); + if (IS_ERR(clk)) + kfree(fd); + + return clk; +} +EXPORT_SYMBOL_GPL(clk_register_fractional_divider); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 511917416fb0..fb4eca6907cd 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -413,6 +413,37 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); +/** + * struct clk_fractional_divider - adjustable fractional divider clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register containing the divider + * @mshift: shift to the numerator bit field + * @mwidth: width of the numerator bit field + * @nshift: shift to the denominator bit field + * @nwidth: width of the denominator bit field + * @lock: register lock + * + * Clock with adjustable fractional divider affecting its output frequency. + */ + +struct clk_fractional_divider { + struct clk_hw hw; + void __iomem *reg; + u8 mshift; + u32 mmask; + u8 nshift; + u32 nmask; + u8 flags; + spinlock_t *lock; +}; + +extern const struct clk_ops clk_fractional_divider_ops; +struct clk *clk_register_fractional_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, + u8 clk_divider_flags, spinlock_t *lock); + /*** * struct clk_composite - aggregate clock of mux, divider and gate clocks * -- cgit v1.2.3 From 8ce62f85a81f57e86bc120ab690facc612223188 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 25 May 2014 14:38:52 +0200 Subject: ACPI / platform / LPSS: Enable async suspend/resume of LPSS devices To seed up suspend and resume of devices included into Intel SoCs handled by the ACPI LPSS driver during system suspend, make acpi_lpss_create_device() call device_enable_async_suspend() for every device created by it. This requires acpi_create_platform_device() to be modified to return a pointer to struct platform_device instead of an int. As a result, acpi_create_platform_device() cannot be pointed to by the .attach pointer in platform_handler directly any more, so a simple wrapper around it is necessary for this purpose. That, in turn, allows the second unused argument of acpi_create_platform_device() to be dropped, which is an improvement. Tested-by: Heikki Krogerus Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpi_lpss.c | 17 +++++++++++------ drivers/acpi/acpi_platform.c | 25 ++++++++++++++----------- drivers/acpi/internal.h | 3 +-- 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 69e29f409d4c..0e9c0d38b85c 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -267,12 +267,14 @@ static int acpi_lpss_create_device(struct acpi_device *adev, struct lpss_private_data *pdata; struct resource_list_entry *rentry; struct list_head resource_list; + struct platform_device *pdev; int ret; dev_desc = (struct lpss_device_desc *)id->driver_data; - if (!dev_desc) - return acpi_create_platform_device(adev, id); - + if (!dev_desc) { + pdev = acpi_create_platform_device(adev); + return IS_ERR_OR_NULL(pdev) ? 
PTR_ERR(pdev) : 1; + } pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; @@ -322,10 +324,13 @@ static int acpi_lpss_create_device(struct acpi_device *adev, dev_desc->setup(pdata); adev->driver_data = pdata; - ret = acpi_create_platform_device(adev, id); - if (ret > 0) - return ret; + pdev = acpi_create_platform_device(adev); + if (!IS_ERR_OR_NULL(pdev)) { + device_enable_async_suspend(&pdev->dev); + return 1; + } + ret = PTR_ERR(pdev); adev->driver_data = NULL; err_out: diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index c0a39417ebe4..9f7bcfdf18ef 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -47,7 +47,6 @@ static const struct acpi_device_id acpi_platform_device_ids[] = { /** * acpi_create_platform_device - Create platform device for ACPI device node * @adev: ACPI device node to create a platform device for. - * @id: ACPI device ID used to match @adev. * * Check if the given @adev can be represented as a platform device and, if * that's the case, create and register a platform device, populate its common @@ -55,8 +54,7 @@ static const struct acpi_device_id acpi_platform_device_ids[] = { * * Name of the platform device will be the same as @adev's. */ -int acpi_create_platform_device(struct acpi_device *adev, - const struct acpi_device_id *id) +struct platform_device *acpi_create_platform_device(struct acpi_device *adev) { struct platform_device *pdev = NULL; struct acpi_device *acpi_parent; @@ -68,19 +66,19 @@ int acpi_create_platform_device(struct acpi_device *adev, /* If the ACPI node already has a physical device attached, skip it. */ if (adev->physical_node_count) - return 0; + return NULL; INIT_LIST_HEAD(&resource_list); count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); if (count < 0) { - return 0; + return NULL; } else if (count > 0) { resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL); if (!resources) { dev_err(&adev->dev, "No memory for resources\n"); acpi_dev_free_resource_list(&resource_list); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } count = 0; list_for_each_entry(rentry, &resource_list, node) @@ -117,22 +115,27 @@ int acpi_create_platform_device(struct acpi_device *adev, pdevinfo.num_res = count; pdevinfo.acpi_node.companion = adev; pdev = platform_device_register_full(&pdevinfo); - if (IS_ERR(pdev)) { + if (IS_ERR(pdev)) dev_err(&adev->dev, "platform device creation failed: %ld\n", PTR_ERR(pdev)); - pdev = NULL; - } else { + else dev_dbg(&adev->dev, "created platform device %s\n", dev_name(&pdev->dev)); - } kfree(resources); + return pdev; +} + +static int acpi_platform_attach(struct acpi_device *adev, + const struct acpi_device_id *id) +{ + acpi_create_platform_device(adev); return 1; } static struct acpi_scan_handler platform_handler = { .ids = acpi_platform_device_ids, - .attach = acpi_create_platform_device, + .attach = acpi_platform_attach, }; void __init acpi_platform_init(void) diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 957391306cbf..bb7de413d06d 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -180,8 +180,7 @@ static inline void suspend_nvs_restore(void) {} -------------------------------------------------------------------------- */ struct platform_device; -int acpi_create_platform_device(struct acpi_device *adev, - const struct acpi_device_id *id); +struct platform_device *acpi_create_platform_device(struct acpi_device *adev); /*-------------------------------------------------------------------------- 
Video -- cgit v1.2.3 From c78b0830667a7e7c1f0ca65b76b33166a84806b3 Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Fri, 23 May 2014 16:15:09 +0300 Subject: ACPI / LPSS: custom power domain for LPSS A power domain where we save the context of the additional LPSS registers. We need to do this, or all LPSS devices are left in the reset state when resuming from D3 on some Baytrails. The devices with the fractional clock divider also have zeros for the N and M values after resuming unless they are reset. Li Aubrey found the root cause of the issue. The idea of using a power domain for LPSS came from Mika Westerberg. Reported-by: Jin Yao Suggested-by: Li Aubrey Suggested-by: Mika Westerberg Tested-by: Mika Westerberg Signed-off-by: Heikki Krogerus [rjw: Added the .complete() callback to the PM domain, fixed build warning on 32-bit.] Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpi_lpss.c | 157 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 150 insertions(+), 7 deletions(-) diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 69e29f409d4c..a01d4d1343dd 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "internal.h" @@ -43,6 +44,8 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_TX_INT 0x20 #define LPSS_TX_INT_MASK BIT(1) +#define LPSS_PRV_REG_COUNT 9 + struct lpss_shared_clock { const char *name; unsigned long rate; @@ -58,6 +61,7 @@ struct lpss_device_desc { unsigned int prv_offset; size_t prv_size_override; bool clk_gate; + bool save_ctx; struct lpss_shared_clock *shared_clock; void (*setup)(struct lpss_private_data *pdata); }; @@ -72,6 +76,7 @@ struct lpss_private_data { resource_size_t mmio_size; struct clk *clk; const struct lpss_device_desc *dev_desc; + u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; }; static void lpss_uart_setup(struct lpss_private_data *pdata) @@ -116,6 +121,7 @@ static struct lpss_shared_clock pwm_clock = { static struct lpss_device_desc byt_pwm_dev_desc = { .clk_required = true, + .save_ctx = true, .shared_clock = &pwm_clock, }; @@ -128,6 +134,7 @@ static struct lpss_device_desc byt_uart_dev_desc = { .clk_required = true, .prv_offset = 0x800, .clk_gate = true, + .save_ctx = true, .shared_clock = &uart_clock, .setup = lpss_uart_setup, }; @@ -141,6 +148,7 @@ static struct lpss_device_desc byt_spi_dev_desc = { .clk_required = true, .prv_offset = 0x400, .clk_gate = true, + .save_ctx = true, .shared_clock = &spi_clock, }; @@ -156,6 +164,7 @@ static struct lpss_shared_clock i2c_clock = { static struct lpss_device_desc byt_i2c_dev_desc = { .clk_required = true, .prv_offset = 0x800, + .save_ctx = true, .shared_clock = &i2c_clock, }; @@ -449,6 +458,126 @@ static void acpi_lpss_set_ltr(struct device *dev, s32 val) } } +#ifdef CONFIG_PM +/** + * acpi_lpss_save_ctx() - Save the private registers of LPSS device + * @dev: LPSS device + * + * Most LPSS devices have private registers which may lose their context when + * the device is powered down. acpi_lpss_save_ctx() saves those registers into + * prv_reg_ctx array.
+ */ +static void acpi_lpss_save_ctx(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + unsigned int i; + + for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { + unsigned long offset = i * sizeof(u32); + + pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset); + dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n", + pdata->prv_reg_ctx[i], offset); + } +} + +/** + * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device + * @dev: LPSS device + * + * Restores the registers that were previously stored with acpi_lpss_save_ctx(). + */ +static void acpi_lpss_restore_ctx(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + unsigned int i; + + /* + * The following delay is needed or the subsequent write operations may + * fail. The LPSS devices are actually PCI devices and the PCI spec + * expects 10ms delay before the device can be accessed after D3 to D0 + * transition. + */ + msleep(10); + + for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { + unsigned long offset = i * sizeof(u32); + + __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset); + dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n", + pdata->prv_reg_ctx[i], offset); + } +} + +#ifdef CONFIG_PM_SLEEP +static int acpi_lpss_suspend_late(struct device *dev) +{ + int ret = pm_generic_suspend_late(dev); + + if (ret) + return ret; + + acpi_lpss_save_ctx(dev); + return acpi_dev_suspend_late(dev); +} + +static int acpi_lpss_restore_early(struct device *dev) +{ + int ret = acpi_dev_resume_early(dev); + + if (ret) + return ret; + + acpi_lpss_restore_ctx(dev); + return pm_generic_resume_early(dev); +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int acpi_lpss_runtime_suspend(struct device *dev) +{ + int ret = pm_generic_runtime_suspend(dev); + + if (ret) + return ret; + + acpi_lpss_save_ctx(dev); + return acpi_dev_runtime_suspend(dev); +} + +static int acpi_lpss_runtime_resume(struct device *dev) +{ + int ret = acpi_dev_runtime_resume(dev); + + if (ret) + return ret; + + acpi_lpss_restore_ctx(dev); + return pm_generic_runtime_resume(dev); +} +#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ + +static struct dev_pm_domain acpi_lpss_pm_domain = { + .ops = { +#ifdef CONFIG_PM_SLEEP + .suspend_late = acpi_lpss_suspend_late, + .restore_early = acpi_lpss_restore_early, + .prepare = acpi_subsys_prepare, + .complete = acpi_subsys_complete, + .suspend = acpi_subsys_suspend, + .resume_early = acpi_subsys_resume_early, + .freeze = acpi_subsys_freeze, + .poweroff = acpi_subsys_suspend, + .poweroff_late = acpi_subsys_suspend_late, +#endif +#ifdef CONFIG_PM_RUNTIME + .runtime_suspend = acpi_lpss_runtime_suspend, + .runtime_resume = acpi_lpss_runtime_resume, +#endif + }, +}; + static int acpi_lpss_platform_notify(struct notifier_block *nb, unsigned long action, void *data) { @@ -456,7 +585,6 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, struct lpss_private_data *pdata; struct acpi_device *adev; const struct acpi_device_id *id; - int ret = 0; id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev); if (!id || !id->driver_data) @@ -466,7 +594,7 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, return 0; pdata = acpi_driver_data(adev); - if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required) + if (!pdata || !pdata->mmio_base) return 0; if (pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) { @@ -474,12 +602,27 @@ static int acpi_lpss_platform_notify(struct 
notifier_block *nb, return 0; } - if (action == BUS_NOTIFY_ADD_DEVICE) - ret = sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group); - else if (action == BUS_NOTIFY_DEL_DEVICE) - sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); + switch (action) { + case BUS_NOTIFY_BOUND_DRIVER: + if (pdata->dev_desc->save_ctx) + pdev->dev.pm_domain = &acpi_lpss_pm_domain; + break; + case BUS_NOTIFY_UNBOUND_DRIVER: + if (pdata->dev_desc->save_ctx) + pdev->dev.pm_domain = NULL; + break; + case BUS_NOTIFY_ADD_DEVICE: + if (pdata->dev_desc->ltr_required) + return sysfs_create_group(&pdev->dev.kobj, + &lpss_attr_group); + case BUS_NOTIFY_DEL_DEVICE: + if (pdata->dev_desc->ltr_required) + sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); + default: + break; + } - return ret; + return 0; } static struct notifier_block acpi_lpss_nb = { -- cgit v1.2.3 From ed3a872e2ef62bde06e2f579d8d1458766ced078 Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Mon, 19 May 2014 14:42:07 +0300 Subject: ACPI / LPSS: support for fractional divider clock This creates a fractional divider type clock for the devices that have one. It is needed by the UART driver, as the clock rate must accommodate the requested baud rate. Signed-off-by: Heikki Krogerus Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpi_lpss.c | 75 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 52 insertions(+), 23 deletions(-) diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index a01d4d1343dd..d1c9b04e29a3 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -29,6 +29,7 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_LTR_SIZE 0x18 /* Offsets relative to LPSS_PRIVATE_OFFSET */ +#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16)) #define LPSS_GENERAL 0x08 #define LPSS_GENERAL_LTR_MODE_SW BIT(2) #define LPSS_GENERAL_UART_RTS_OVRD BIT(3) @@ -60,6 +61,7 @@ struct lpss_device_desc { bool ltr_required; unsigned int prv_offset; size_t prv_size_override; + bool clk_divider; bool clk_gate; bool save_ctx; struct lpss_shared_clock *shared_clock; @@ -94,6 +96,14 @@ static void lpss_uart_setup(struct lpss_private_data *pdata) } static struct lpss_device_desc lpt_dev_desc = { + .clk_required = true, + .prv_offset = 0x800, + .ltr_required = true, + .clk_divider = true, + .clk_gate = true, +}; + +static struct lpss_device_desc lpt_i2c_dev_desc = { .clk_required = true, .prv_offset = 0x800, .ltr_required = true, @@ -104,6 +114,7 @@ static struct lpss_device_desc lpt_uart_dev_desc = { .clk_required = true, .prv_offset = 0x800, .ltr_required = true, + .clk_divider = true, .clk_gate = true, .setup = lpss_uart_setup, }; @@ -125,31 +136,21 @@ static struct lpss_device_desc byt_pwm_dev_desc = { .shared_clock = &pwm_clock, }; -static struct lpss_shared_clock uart_clock = { - .name = "uart_clk", - .rate = 44236800, -}; - static struct lpss_device_desc byt_uart_dev_desc = { .clk_required = true, .prv_offset = 0x800, + .clk_divider = true, .clk_gate = true, .save_ctx = true, - .shared_clock = &uart_clock, .setup = lpss_uart_setup, }; -static struct lpss_shared_clock spi_clock = { - .name = "spi_clk", - .rate = 50000000, -}; - static struct lpss_device_desc byt_spi_dev_desc = { .clk_required = true, .prv_offset = 0x400, + .clk_divider = true, .clk_gate = true, .save_ctx = true, - .shared_clock = &spi_clock, }; static struct lpss_device_desc byt_sdio_dev_desc = { @@ -175,8 +176,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { /* Lynxpoint LPSS devices */ { "INT33C0", (unsigned long)&lpt_dev_desc }, { "INT33C1",
(unsigned long)&lpt_dev_desc }, - { "INT33C2", (unsigned long)&lpt_dev_desc }, - { "INT33C3", (unsigned long)&lpt_dev_desc }, + { "INT33C2", (unsigned long)&lpt_i2c_dev_desc }, + { "INT33C3", (unsigned long)&lpt_i2c_dev_desc }, { "INT33C4", (unsigned long)&lpt_uart_dev_desc }, { "INT33C5", (unsigned long)&lpt_uart_dev_desc }, { "INT33C6", (unsigned long)&lpt_sdio_dev_desc }, @@ -192,8 +193,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INT3430", (unsigned long)&lpt_dev_desc }, { "INT3431", (unsigned long)&lpt_dev_desc }, - { "INT3432", (unsigned long)&lpt_dev_desc }, - { "INT3433", (unsigned long)&lpt_dev_desc }, + { "INT3432", (unsigned long)&lpt_i2c_dev_desc }, + { "INT3433", (unsigned long)&lpt_i2c_dev_desc }, { "INT3434", (unsigned long)&lpt_uart_dev_desc }, { "INT3435", (unsigned long)&lpt_uart_dev_desc }, { "INT3436", (unsigned long)&lpt_sdio_dev_desc }, @@ -221,9 +222,11 @@ static int register_device_clock(struct acpi_device *adev, { const struct lpss_device_desc *dev_desc = pdata->dev_desc; struct lpss_shared_clock *shared_clock = dev_desc->shared_clock; + const char *devname = dev_name(&adev->dev); struct clk *clk = ERR_PTR(-ENODEV); struct lpss_clk_data *clk_data; - const char *parent; + const char *parent, *clk_name; + void __iomem *prv_base; if (!lpss_clk_dev) lpt_register_clock_device(); @@ -234,7 +237,7 @@ static int register_device_clock(struct acpi_device *adev, if (dev_desc->clkdev_name) { clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name, - dev_name(&adev->dev)); + devname); return 0; } @@ -243,6 +246,7 @@ static int register_device_clock(struct acpi_device *adev, return -ENODATA; parent = clk_data->name; + prv_base = pdata->mmio_base + dev_desc->prv_offset; if (shared_clock) { clk = shared_clock->clk; @@ -256,16 +260,41 @@ static int register_device_clock(struct acpi_device *adev, } if (dev_desc->clk_gate) { - clk = clk_register_gate(NULL, dev_name(&adev->dev), parent, 0, - pdata->mmio_base + dev_desc->prv_offset, - 0, 0, NULL); - pdata->clk = clk; + clk = clk_register_gate(NULL, devname, parent, 0, + prv_base, 0, 0, NULL); + parent = devname; + } + + if (dev_desc->clk_divider) { + /* Prevent division by zero */ + if (!readl(prv_base)) + writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base); + + clk_name = kasprintf(GFP_KERNEL, "%s-div", devname); + if (!clk_name) + return -ENOMEM; + clk = clk_register_fractional_divider(NULL, clk_name, parent, + 0, prv_base, + 1, 15, 16, 15, 0, NULL); + parent = clk_name; + + clk_name = kasprintf(GFP_KERNEL, "%s-update", devname); + if (!clk_name) { + kfree(parent); + return -ENOMEM; + } + clk = clk_register_gate(NULL, clk_name, parent, + CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, + prv_base, 31, 0, NULL); + kfree(parent); + kfree(clk_name); } if (IS_ERR(clk)) return PTR_ERR(clk); - clk_register_clkdev(clk, NULL, dev_name(&adev->dev)); + pdata->clk = clk; + clk_register_clkdev(clk, NULL, devname); return 0; } -- cgit v1.2.3
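
A note on the divider registered above: per the struct clk_fractional_divider comment added to clk-provider.h earlier in this series, the m field is the numerator and the n field is the denominator, so the fractional divider scales the parent rate by m/n. The standalone sketch below is only an illustration, not part of any patch here; the helper names and the example register value are made up. It decodes a raw divider register using the same field layout the LPSS patch passes to clk_register_fractional_divider() (mshift = 1, mwidth = 15, nshift = 16, nwidth = 15) and computes the resulting rate.

/*
 * Illustrative, user-space sketch only; it mimics how a fractional divider
 * derives its output rate from the numerator (m) and denominator (n) bit
 * fields of the divider register, using the Baytrail LPSS field layout
 * (mshift = 1, mwidth = 15, nshift = 16, nwidth = 15).
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(x)	(1u << (x))

struct frac_div_layout {
	unsigned int mshift, mwidth;	/* numerator bit field */
	unsigned int nshift, nwidth;	/* denominator bit field */
};

static uint64_t frac_div_rate(uint64_t parent_rate, uint32_t reg,
			      const struct frac_div_layout *l)
{
	uint32_t m = (reg >> l->mshift) & (BIT(l->mwidth) - 1);
	uint32_t n = (reg >> l->nshift) & (BIT(l->nwidth) - 1);

	if (!n)				/* an all-zero register would divide by zero */
		return 0;
	return parent_rate * m / n;	/* output rate = parent rate * m / n */
}

int main(void)
{
	const struct frac_div_layout byt = {
		.mshift = 1, .mwidth = 15,
		.nshift = 16, .nwidth = 15,
	};
	/* 100 MHz parent; hypothetical register value with m = 288, n = 625 */
	uint32_t reg = (288u << 1) | (625u << 16);

	printf("rate = %llu Hz\n",
	       (unsigned long long)frac_div_rate(100000000ULL, reg, &byt));
	return 0;
}

With this layout, the LPSS_CLK_DIVIDER_DEF_MASK value written by the "Prevent division by zero" check in the patch, BIT(1) | BIT(16), programs m = 1 and n = 1, i.e. a 1:1 ratio, so a divider register that comes up as all zeros is given a sane pass-through default.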