author    | Ulf Hansson <ulf.hansson@linaro.org> | 2019-10-10 13:01:48 +0300
committer | Ulf Hansson <ulf.hansson@linaro.org> | 2020-01-02 18:50:34 +0300
commit    | a0cf319460745a6102555ecf99994374704dcb4c (patch)
tree      | 056fc8bcdd9583429e3def6b93835fb292e24247 /drivers/cpuidle
parent    | 8554951a4dd3d3a1a0efeba8eb5543eee7533be4 (diff)
download  | linux-a0cf319460745a6102555ecf99994374704dcb4c.tar.xz
cpuidle: psci: Prepare to use OS initiated suspend mode via PM domains
The per-CPU variable psci_power_state contains an array of fixed values,
which reflect the corresponding arm,psci-suspend-param parsed from DT for
each of the available CPU idle states.
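
For reference, those fixed values are what the existing parsing step stores
per idle state; a condensed sketch of that step, based on
psci_dt_parse_state_node() as it already exists in this driver, looks roughly
like this:

static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
{
	/* Read the PSCI suspend parameter for this idle state from DT. */
	int err = of_property_read_u32(np, "arm,psci-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
		return err;
	}

	/* Reject values the PSCI layer considers malformed. */
	if (!psci_power_state_is_valid(*state)) {
		pr_warn("Invalid PSCI power state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}

Each per-CPU psci_states[] entry is then just the u32 read above, one per
idle state node.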
This isn't sufficient when using the hierarchical CPU topology in DT in
combination with having PSCI OS initiated (OSI) mode enabled. More
precisely, in OSI mode Linux is responsible for telling the PSCI FW what
idle state the cluster (a group of CPUs) should enter, while in PSCI
Platform Coordinated (PC) mode each CPU independently votes for an idle
state of the cluster.
For this reason, introduce a per-CPU variable called domain_state and
implement two helper functions to read/write its value. Then let the
domain_state take precedence over the regularly selected state when entering
an idle state.
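
The intent is that the PM domain (genpd) side writes the cluster's selected
idle state into domain_state before the last CPU in the domain enters idle.
As a rough illustration only (this callback and its wiring are not part of
this patch; the names and details below are assumptions about how a companion
change could use the new helpers), a genpd ->power_off() callback might do:

#include <linux/pm_domain.h>

/*
 * Hypothetical sketch: feed the domain's chosen idle state into the per-CPU
 * domain_state, so psci_enter_domain_idle_state() passes it to the FW
 * instead of the state picked for the CPU alone.
 */
static int psci_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state = state->data;

	/* No PSCI suspend parameter attached to this domain state. */
	if (!pd_state)
		return 0;

	psci_set_domain_state(*pd_state);
	return 0;
}

When no such state has been set, or after it has been cleared on the way out
of idle, psci_enter_domain_idle_state() simply falls back to the state
selected for the CPU itself.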
To avoid executing the above OSI-specific code in the ->enter() callback
while operating in the default PSCI Platform Coordinated mode, let's also
add a new enter function and use it for OSI.
Co-developed-by: Lina Iyer <lina.iyer@linaro.org>
Signed-off-by: Lina Iyer <lina.iyer@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Rafael J. Wysocki <rafael@kernel.org>
Diffstat (limited to 'drivers/cpuidle')
-rw-r--r-- | drivers/cpuidle/cpuidle-psci.c | 56
1 file changed, 50 insertions, 6 deletions
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 6a87848be3c3..9600fe674a89 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -29,14 +29,47 @@ struct psci_cpuidle_data {
 };
 
 static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
+static DEFINE_PER_CPU(u32, domain_state);
+
+static inline void psci_set_domain_state(u32 state)
+{
+	__this_cpu_write(domain_state, state);
+}
+
+static inline u32 psci_get_domain_state(void)
+{
+	return __this_cpu_read(domain_state);
+}
+
+static inline int psci_enter_state(int idx, u32 state)
+{
+	return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
+}
+
+static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
+					struct cpuidle_driver *drv, int idx)
+{
+	struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
+	u32 *states = data->psci_states;
+	u32 state = psci_get_domain_state();
+	int ret;
+
+	if (!state)
+		state = states[idx];
+
+	ret = psci_enter_state(idx, state);
+
+	/* Clear the domain state to start fresh when back from idle. */
+	psci_set_domain_state(0);
+	return ret;
+}
 
 static int psci_enter_idle_state(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv, int idx)
 {
 	u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);
 
-	return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
-					   idx, state[idx]);
+	return psci_enter_state(idx, state[idx]);
 }
 
 static struct cpuidle_driver psci_idle_driver __initdata = {
@@ -79,7 +112,8 @@ static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
 	return 0;
 }
 
-static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node,
+static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
+					struct device_node *cpu_node,
 					unsigned int state_count, int cpu)
 {
 	int i, ret = 0;
@@ -118,6 +152,15 @@ static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node,
 			ret = PTR_ERR(data->dev);
 			goto free_mem;
 		}
+
+		/*
+		 * Using the deepest state for the CPU to trigger a potential
+		 * selection of a shared state for the domain, assumes the
+		 * domain states are all deeper states.
+		 */
+		if (data->dev)
+			drv->states[state_count - 1].enter =
+				psci_enter_domain_idle_state;
 	}
 
 	/* Idle states parsed correctly, store them in the per-cpu struct. */
@@ -129,7 +172,8 @@ free_mem:
 	return ret;
 }
 
-static __init int psci_cpu_init_idle(unsigned int cpu, unsigned int state_count)
+static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
+				     unsigned int cpu, unsigned int state_count)
 {
 	struct device_node *cpu_node;
 	int ret;
@@ -145,7 +189,7 @@ static __init int psci_cpu_init_idle(unsigned int cpu, unsigned int state_count)
 	if (!cpu_node)
 		return -ENODEV;
 
-	ret = psci_dt_cpu_init_idle(cpu_node, state_count, cpu);
+	ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu);
 
 	of_node_put(cpu_node);
 
@@ -201,7 +245,7 @@ static int __init psci_idle_init_cpu(int cpu)
 	/*
 	 * Initialize PSCI idle states.
 	 */
-	ret = psci_cpu_init_idle(cpu, ret);
+	ret = psci_cpu_init_idle(drv, cpu, ret);
 	if (ret) {
 		pr_err("CPU %d failed to PSCI idle\n", cpu);
 		goto out_kfree_drv;