Diffstat (limited to 'drivers/cpuidle')
-rw-r--r--  drivers/cpuidle/cpuidle-psci-domain.c  |  14
-rw-r--r--  drivers/cpuidle/cpuidle-qcom-spm.c     |  11
-rw-r--r--  drivers/cpuidle/cpuidle-riscv-sbi.c    |  14
-rw-r--r--  drivers/cpuidle/cpuidle.c              |   8
-rw-r--r--  drivers/cpuidle/governors/menu.c       | 120
-rw-r--r--  drivers/cpuidle/sysfs.c                |  34
6 files changed, 96 insertions, 105 deletions
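
Most of the sysfs.c churn counted above is a mechanical switch from sprintf()/scnprintf() to sysfs_emit()/sysfs_emit_at(), which keep a show() callback's output inside the single PAGE_SIZE buffer that sysfs provides. The snippet below is only a stand-alone, user-space sketch of that bounded-append idea; emit_at() and the 4096-byte PAGE_SIZE are illustrative assumptions, not the kernel implementation.

/*
 * User-space sketch (not kernel code) of the bounded-append pattern that the
 * sysfs.c hunks below adopt: every write is clamped to the remaining room in
 * a page-sized buffer, so the caller cannot overrun it.
 */
#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096	/* assumed buffer size for the illustration */

static int emit_at(char *buf, int at, const char *fmt, ...)
{
	va_list args;
	int len;

	if (at < 0 || at >= PAGE_SIZE)
		return 0;

	va_start(args, fmt);
	/* vsnprintf() never writes past the space left in the page. */
	len = vsnprintf(buf + at, PAGE_SIZE - at, fmt, args);
	va_end(args);

	return len < PAGE_SIZE - at ? len : 0;
}

int main(void)
{
	char page[PAGE_SIZE];
	int n = 0;

	/* Same shape as show_available_governors(): append one name at a time. */
	n += emit_at(page, n, "%s ", "ladder");
	n += emit_at(page, n, "%s ", "menu");
	n += emit_at(page, n, "\n");

	fwrite(page, 1, n, stdout);
	return 0;
}

Returning 0 once nothing more fits plays the same role as the explicit length check that show_available_governors() keeps before each append.
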
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 2041f59116ce..37c41209eaf9 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -28,7 +28,6 @@ struct psci_pd_provider {
 };
 
 static LIST_HEAD(psci_pd_providers);
-static bool psci_pd_allow_domain_state;
 
 static int psci_pd_power_off(struct generic_pm_domain *pd)
 {
@@ -38,9 +37,6 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
 	if (!state->data)
 		return 0;
 
-	if (!psci_pd_allow_domain_state)
-		return -EBUSY;
-
 	/* OSI mode is enabled, set the corresponding domain state. */
 	pd_state = state->data;
 	psci_set_domain_state(pd, pd->state_idx, *pd_state);
@@ -126,15 +122,6 @@ static void psci_pd_remove(void)
 	}
 }
 
-static void psci_cpuidle_domain_sync_state(struct device *dev)
-{
-	/*
-	 * All devices have now been attached/probed to the PM domain topology,
-	 * hence it's fine to allow domain states to be picked.
-	 */
-	psci_pd_allow_domain_state = true;
-}
-
 static const struct of_device_id psci_of_match[] = {
 	{ .compatible = "arm,psci-1.0" },
 	{}
@@ -195,7 +182,6 @@ static struct platform_driver psci_cpuidle_domain_driver = {
 	.driver = {
 		.name = "psci-cpuidle-domain",
 		.of_match_table = psci_of_match,
-		.sync_state = psci_cpuidle_domain_sync_state,
 	},
 };
 
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 5f386761b156..7ab6f68b96a8 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -86,9 +86,9 @@ static const struct of_device_id qcom_idle_state_match[] = {
 
 static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
 {
-	struct platform_device *pdev = NULL;
+	struct platform_device *pdev;
 	struct device_node *cpu_node, *saw_node;
-	struct cpuidle_qcom_spm_data *data = NULL;
+	struct cpuidle_qcom_spm_data *data;
 	int ret;
 
 	cpu_node = of_cpu_device_node_get(cpu);
@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
 		return -ENODEV;
 
 	saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+	of_node_put(cpu_node);
 	if (!saw_node)
 		return -ENODEV;
 
 	pdev = of_find_device_by_node(saw_node);
 	of_node_put(saw_node);
-	of_node_put(cpu_node);
 	if (!pdev)
 		return -ENODEV;
 
 	data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
-	if (!data)
+	if (!data) {
+		put_device(&pdev->dev);
 		return -ENOMEM;
+	}
 
 	data->spm = dev_get_drvdata(&pdev->dev);
+	put_device(&pdev->dev);
 	if (!data->spm)
 		return -EINVAL;
 
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 0fe1ece9fbdc..a360bc4d20b7 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -44,7 +44,6 @@ static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
 static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
 static bool sbi_cpuidle_use_osi;
 static bool sbi_cpuidle_use_cpuhp;
-static bool sbi_cpuidle_pd_allow_domain_state;
 
 static inline void sbi_set_domain_state(u32 state)
 {
@@ -345,15 +344,6 @@ deinit:
 	return ret;
 }
 
-static void sbi_cpuidle_domain_sync_state(struct device *dev)
-{
-	/*
-	 * All devices have now been attached/probed to the PM domain
-	 * topology, hence it's fine to allow domain states to be picked.
-	 */
-	sbi_cpuidle_pd_allow_domain_state = true;
-}
-
 #ifdef CONFIG_DT_IDLE_GENPD
 
 static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
@@ -364,9 +354,6 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
 	if (!state->data)
 		return 0;
 
-	if (!sbi_cpuidle_pd_allow_domain_state)
-		return -EBUSY;
-
 	/* OSI mode is enabled, set the corresponding domain state. */
 	pd_state = state->data;
 	sbi_set_domain_state(*pd_state);
@@ -564,7 +551,6 @@ static struct platform_driver sbi_cpuidle_driver = {
 	.probe = sbi_cpuidle_probe,
 	.driver = {
 		.name = "sbi-cpuidle",
-		.sync_state = sbi_cpuidle_domain_sync_state,
 	},
 };
 
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0835da449db8..56132e843c99 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -635,8 +635,14 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
 static int __cpuidle_register_device(struct cpuidle_device *dev)
 {
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+	unsigned int cpu = dev->cpu;
 	int i, ret;
 
+	if (per_cpu(cpuidle_devices, cpu)) {
+		pr_info("CPU%d: cpuidle device already registered\n", cpu);
+		return -EEXIST;
+	}
+
 	if (!try_module_get(drv->owner))
 		return -EINVAL;
 
@@ -648,7 +654,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
 			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
 	}
 
-	per_cpu(cpuidle_devices, dev->cpu) = dev;
+	per_cpu(cpuidle_devices, cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);
 
 	ret = cpuidle_coupled_register_device(dev);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 52d5d26fc7c6..4d9aa5ce31f0 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns)
 
 static DEFINE_PER_CPU(struct menu_device, menu_devices);
 
+static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
+{
+	/* Update the repeating-pattern data. */
+	data->intervals[data->interval_ptr++] = interval_us;
+	if (data->interval_ptr >= INTERVALS)
+		data->interval_ptr = 0;
+}
+
 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
 
 /*
@@ -222,6 +230,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	if (data->needs_update) {
 		menu_update(drv, dev);
 		data->needs_update = 0;
+	} else if (!dev->last_residency_ns) {
+		/*
+		 * This happens when the driver rejects the previously selected
+		 * idle state and returns an error, so update the recent
+		 * intervals table to prevent invalid information from being
+		 * used going forward.
+		 */
+		menu_update_intervals(data, UINT_MAX);
 	}
 
 	/* Find the shortest expected idle interval. */
@@ -271,20 +287,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		return 0;
 	}
 
-	if (tick_nohz_tick_stopped()) {
-		/*
-		 * If the tick is already stopped, the cost of possible short
-		 * idle duration misprediction is much higher, because the CPU
-		 * may be stuck in a shallow idle state for a long time as a
-		 * result of it. In that case say we might mispredict and use
-		 * the known time till the closest timer event for the idle
-		 * state selection.
-		 */
-		if (predicted_ns < TICK_NSEC)
-			predicted_ns = data->next_timer_ns;
-	} else if (latency_req > predicted_ns) {
-		latency_req = predicted_ns;
-	}
+	/*
+	 * If the tick is already stopped, the cost of possible short idle
+	 * duration misprediction is much higher, because the CPU may be stuck
+	 * in a shallow idle state for a long time as a result of it. In that
+	 * case, say we might mispredict and use the known time till the closest
+	 * timer event for the idle state selection.
+	 */
+	if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)
+		predicted_ns = data->next_timer_ns;
 
 	/*
 	 * Find the idle state with the lowest power while satisfying
@@ -300,48 +311,50 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		if (idx == -1)
 			idx = i; /* first enabled state */
 
-		if (s->target_residency_ns > predicted_ns) {
-			/*
-			 * Use a physical idle state, not busy polling, unless
-			 * a timer is going to trigger soon enough.
-			 */
-			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
-			    s->exit_latency_ns <= latency_req &&
-			    s->target_residency_ns <= data->next_timer_ns) {
-				predicted_ns = s->target_residency_ns;
-				idx = i;
-				break;
-			}
-			if (predicted_ns < TICK_NSEC)
-				break;
-
-			if (!tick_nohz_tick_stopped()) {
-				/*
-				 * If the state selected so far is shallow,
-				 * waking up early won't hurt, so retain the
-				 * tick in that case and let the governor run
-				 * again in the next iteration of the loop.
-				 */
-				predicted_ns = drv->states[idx].target_residency_ns;
-				break;
-			}
+		if (s->exit_latency_ns > latency_req)
+			break;
 
-			/*
-			 * If the state selected so far is shallow and this
-			 * state's target residency matches the time till the
-			 * closest timer event, select this one to avoid getting
-			 * stuck in the shallow one for too long.
-			 */
-			if (drv->states[idx].target_residency_ns < TICK_NSEC &&
-			    s->target_residency_ns <= delta_tick)
-				idx = i;
+		if (s->target_residency_ns <= predicted_ns) {
+			idx = i;
+			continue;
+		}
 
-			return idx;
+		/*
+		 * Use a physical idle state, not busy polling, unless a timer
+		 * is going to trigger soon enough.
+		 */
+		if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+		    s->target_residency_ns <= data->next_timer_ns) {
+			predicted_ns = s->target_residency_ns;
+			idx = i;
+			break;
 		}
-		if (s->exit_latency_ns > latency_req)
+
+		if (predicted_ns < TICK_NSEC)
 			break;
 
-		idx = i;
+		if (!tick_nohz_tick_stopped()) {
+			/*
+			 * If the state selected so far is shallow, waking up
+			 * early won't hurt, so retain the tick in that case and
+			 * let the governor run again in the next iteration of
+			 * the idle loop.
+			 */
+			predicted_ns = drv->states[idx].target_residency_ns;
+			break;
+		}
+
+		/*
+		 * If the state selected so far is shallow and this state's
+		 * target residency matches the time till the closest timer
+		 * event, select this one to avoid getting stuck in the shallow
+		 * one for too long.
+		 */
+		if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+		    s->target_residency_ns <= delta_tick)
+			idx = i;
+
+		return idx;
 	}
 
 	if (idx == -1)
@@ -482,10 +495,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 
 	data->correction_factor[data->bucket] = new_factor;
 
-	/* update the repeating-pattern data */
-	data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
-	if (data->interval_ptr >= INTERVALS)
-		data->interval_ptr = 0;
+	menu_update_intervals(data, ktime_to_us(measured_ns));
 }
 
 /**
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index d6f5da61cb7d..61de64817604 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -27,14 +27,14 @@ static ssize_t show_available_governors(struct device *dev,
 
 	mutex_lock(&cpuidle_lock);
 	list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
-		if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
+		if (i >= (ssize_t)(PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
 			goto out;
 
-		i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name);
+		i += sysfs_emit_at(buf, i, "%.*s ", CPUIDLE_NAME_LEN, tmp->name);
 	}
 
 out:
-	i+= sprintf(&buf[i], "\n");
+	i += sysfs_emit_at(buf, i, "\n");
 	mutex_unlock(&cpuidle_lock);
 	return i;
 }
@@ -49,9 +49,9 @@ static ssize_t show_current_driver(struct device *dev,
 	spin_lock(&cpuidle_driver_lock);
 	drv = cpuidle_get_driver();
 	if (drv)
-		ret = sprintf(buf, "%s\n", drv->name);
+		ret = sysfs_emit(buf, "%s\n", drv->name);
 	else
-		ret = sprintf(buf, "none\n");
+		ret = sysfs_emit(buf, "none\n");
 	spin_unlock(&cpuidle_driver_lock);
 
 	return ret;
@@ -65,9 +65,9 @@ static ssize_t show_current_governor(struct device *dev,
 
 	mutex_lock(&cpuidle_lock);
 	if (cpuidle_curr_governor)
-		ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
+		ret = sysfs_emit(buf, "%s\n", cpuidle_curr_governor->name);
 	else
-		ret = sprintf(buf, "none\n");
+		ret = sysfs_emit(buf, "none\n");
 	mutex_unlock(&cpuidle_lock);
 
 	return ret;
@@ -230,7 +230,7 @@ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
 			struct cpuidle_state_usage *state_usage, char *buf) \
 { \
-	return sprintf(buf, "%u\n", state->_name);\
+	return sysfs_emit(buf, "%u\n", state->_name);\
 }
 
 #define define_show_state_ull_function(_name) \
@@ -238,7 +238,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
 				  struct cpuidle_state_usage *state_usage, \
 				  char *buf) \
 { \
-	return sprintf(buf, "%llu\n", state_usage->_name);\
+	return sysfs_emit(buf, "%llu\n", state_usage->_name);\
 }
 
 #define define_show_state_str_function(_name) \
@@ -247,8 +247,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
 				  char *buf) \
 { \
 	if (state->_name[0] == '\0')\
-		return sprintf(buf, "<null>\n");\
-	return sprintf(buf, "%s\n", state->_name);\
+		return sysfs_emit(buf, "<null>\n");\
+	return sysfs_emit(buf, "%s\n", state->_name);\
 }
 
 #define define_show_state_time_function(_name) \
@@ -256,7 +256,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
 				  struct cpuidle_state_usage *state_usage, \
 				  char *buf) \
 { \
-	return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+	return sysfs_emit(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
 }
 
 define_show_state_time_function(exit_latency)
@@ -273,14 +273,14 @@ static ssize_t show_state_time(struct cpuidle_state *state,
 			       struct cpuidle_state_usage *state_usage,
 			       char *buf)
 {
-	return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+	return sysfs_emit(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
 }
 
 static ssize_t show_state_disable(struct cpuidle_state *state,
 				  struct cpuidle_state_usage *state_usage,
 				  char *buf)
 {
-	return sprintf(buf, "%llu\n",
+	return sysfs_emit(buf, "%llu\n",
 		       state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
 }
 
@@ -310,7 +310,7 @@ static ssize_t show_state_default_status(struct cpuidle_state *state,
 					 struct cpuidle_state_usage *state_usage,
 					 char *buf)
 {
-	return sprintf(buf, "%s\n",
+	return sysfs_emit(buf, "%s\n",
 		       state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
 }
 
@@ -358,7 +358,7 @@ static ssize_t show_state_s2idle_##_name(struct cpuidle_state *state, \
 					 struct cpuidle_state_usage *state_usage, \
 					 char *buf) \
 { \
-	return sprintf(buf, "%llu\n", state_usage->s2idle_##_name);\
+	return sysfs_emit(buf, "%llu\n", state_usage->s2idle_##_name);\
 }
 
 define_show_state_s2idle_ull_function(usage);
@@ -550,7 +550,7 @@ static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
 	ssize_t ret;
 
 	spin_lock(&cpuidle_driver_lock);
-	ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+	ret = sysfs_emit(buf, "%s\n", drv ? drv->name : "none");
 	spin_unlock(&cpuidle_driver_lock);
 
 	return ret;
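
For context on the governors/menu.c hunks above: the new menu_update_intervals() helper is a plain ring-buffer update over the governor's recent-interval history, and menu_select() now records UINT_MAX through it when the driver rejected the previously selected state (last_residency_ns == 0), so that the bogus sample cannot masquerade as a repeating pattern. The following stand-alone user-space sketch only illustrates that wrap-around behaviour; it is not kernel code, and the history size of 8 mirrors the INTERVALS constant in menu.c.

/*
 * Stand-alone illustration (not kernel code) of the ring-buffer update done
 * by menu_update_intervals(): the last INTERVALS observed idle intervals are
 * kept in a circular array, and a UINT_MAX entry recorded after a rejected
 * idle state makes that sample useless for pattern detection.
 */
#include <limits.h>
#include <stdio.h>

#define INTERVALS 8	/* history size, mirroring the menu governor */

struct menu_history {
	unsigned int intervals[INTERVALS];
	int interval_ptr;
};

/* Same logic as menu_update_intervals(): store one sample, wrap at the end. */
static void update_intervals(struct menu_history *h, unsigned int interval_us)
{
	h->intervals[h->interval_ptr++] = interval_us;
	if (h->interval_ptr >= INTERVALS)
		h->interval_ptr = 0;
}

int main(void)
{
	struct menu_history h = { 0 };

	/* A repeating pattern of short idle intervals... */
	for (int i = 0; i < INTERVALS; i++)
		update_intervals(&h, 150);

	/* ...then one rejected state, recorded as UINT_MAX, poisons the set. */
	update_intervals(&h, UINT_MAX);

	for (int i = 0; i < INTERVALS; i++)
		printf("%u ", h.intervals[i]);
	printf("\n");

	return 0;
}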