-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c      |   2
-rw-r--r--  drivers/devfreq/devfreq.c              |   7
-rw-r--r--  drivers/devfreq/governor_passive.c     |  44
-rw-r--r--  drivers/devfreq/tegra30-devfreq.c      |   4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c  |   8
-rw-r--r--  drivers/opp/core.c                     | 798
-rw-r--r--  drivers/opp/of.c                       | 230
-rw-r--r--  drivers/opp/opp.h                      |  17
-rw-r--r--  include/linux/devfreq.h                |   2
-rw-r--r--  include/linux/pm_opp.h                 | 112
10 files changed, 885 insertions, 339 deletions
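
Before the per-file hunks, here is a minimal, illustrative sketch of the driver-side pattern this series converges on: callers that previously paired an OPP lookup with dev_pm_opp_set_bw() now hand the whole OPP to the new dev_pm_opp_set_opp() helper, which applies everything the OPP describes (clock, regulators, interconnect bandwidth and required OPPs) rather than just the bandwidth votes. The function and variable names below are hypothetical; only the dev_pm_opp_*() calls come from this patch, and the shape is modeled on the qcom-cpufreq-hw and tegra30-devfreq hunks that follow.

#include <linux/err.h>
#include <linux/pm_opp.h>

/* Hypothetical consumer callback; not part of the patch itself. */
static int example_set_target(struct device *dev, unsigned long target_hz)
{
        struct dev_pm_opp *opp;
        int ret;

        /* Round the request up to the nearest supported OPP. */
        opp = dev_pm_opp_find_freq_ceil(dev, &target_hz);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        /*
         * dev_pm_opp_set_opp() supersedes dev_pm_opp_set_bw(): it
         * programs the clock, regulators, bandwidth votes and required
         * OPPs described by the OPP, not only the interconnect path.
         */
        ret = dev_pm_opp_set_opp(dev, opp);
        dev_pm_opp_put(opp);

        return ret;
}

Passing a NULL OPP, as the a6xx_gmu_stop() hunk below now does, drops the configuration again (the core routes this through _disable_opp_table()).
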
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 31903d078ba6..356244510b18 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -54,7 +54,7 @@ static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy, if (IS_ERR(opp)) return PTR_ERR(opp); - ret = dev_pm_opp_set_bw(dev, opp); + ret = dev_pm_opp_set_opp(dev, opp); dev_pm_opp_put(opp); return ret; } diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 6aa10de792b3..cefe84a10824 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -757,6 +757,9 @@ static void devfreq_dev_release(struct device *dev) if (devfreq->profile->exit) devfreq->profile->exit(devfreq->dev.parent); + if (devfreq->opp_table) + dev_pm_opp_put_opp_table(devfreq->opp_table); + mutex_destroy(&devfreq->lock); kfree(devfreq); } @@ -844,6 +847,10 @@ struct devfreq *devfreq_add_device(struct device *dev, } devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); + devfreq->opp_table = dev_pm_opp_get_opp_table(dev); + if (IS_ERR(devfreq->opp_table)) + devfreq->opp_table = NULL; + atomic_set(&devfreq->suspend_count, 0); dev_set_name(&devfreq->dev, "%s", dev_name(dev)); diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c index 63332e4a65ae..b094132bd20b 100644 --- a/drivers/devfreq/governor_passive.c +++ b/drivers/devfreq/governor_passive.c @@ -19,18 +19,16 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq, = (struct devfreq_passive_data *)devfreq->data; struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent; unsigned long child_freq = ULONG_MAX; - struct dev_pm_opp *opp; - int i, count, ret = 0; + struct dev_pm_opp *opp, *p_opp; + int i, count; /* * If the devfreq device with passive governor has the specific method * to determine the next frequency, should use the get_target_freq() * of struct devfreq_passive_data. */ - if (p_data->get_target_freq) { - ret = p_data->get_target_freq(devfreq, freq); - goto out; - } + if (p_data->get_target_freq) + return p_data->get_target_freq(devfreq, freq); /* * If the parent and passive devfreq device uses the OPP table, @@ -56,26 +54,35 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq, * list of parent device. Because in this case, *freq is temporary * value which is decided by ondemand governor. */ - opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0); - if (IS_ERR(opp)) { - ret = PTR_ERR(opp); - goto out; - } + if (devfreq->opp_table && parent_devfreq->opp_table) { + p_opp = devfreq_recommended_opp(parent_devfreq->dev.parent, + freq, 0); + if (IS_ERR(p_opp)) + return PTR_ERR(p_opp); + + opp = dev_pm_opp_xlate_required_opp(parent_devfreq->opp_table, + devfreq->opp_table, p_opp); + dev_pm_opp_put(p_opp); - dev_pm_opp_put(opp); + if (IS_ERR(opp)) + return PTR_ERR(opp); + + *freq = dev_pm_opp_get_freq(opp); + dev_pm_opp_put(opp); + + return 0; + } /* - * Get the OPP table's index of decided freqeuncy by governor + * Get the OPP table's index of decided frequency by governor * of parent device. */ for (i = 0; i < parent_devfreq->profile->max_state; i++) if (parent_devfreq->profile->freq_table[i] == *freq) break; - if (i == parent_devfreq->profile->max_state) { - ret = -EINVAL; - goto out; - } + if (i == parent_devfreq->profile->max_state) + return -EINVAL; /* Get the suitable frequency by using index of parent device. 
*/ if (i < devfreq->profile->max_state) { @@ -88,8 +95,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq, /* Return the suitable frequency for passive device. */ *freq = child_freq; -out: - return ret; + return 0; } static int devfreq_passive_notifier_call(struct notifier_block *nb, diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c index 117cad7968ab..ce83f883ca65 100644 --- a/drivers/devfreq/tegra30-devfreq.c +++ b/drivers/devfreq/tegra30-devfreq.c @@ -647,7 +647,7 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq, return PTR_ERR(opp); } - ret = dev_pm_opp_set_bw(dev, opp); + ret = dev_pm_opp_set_opp(dev, opp); dev_pm_opp_put(opp); return ret; @@ -849,7 +849,7 @@ static int tegra_devfreq_probe(struct platform_device *pdev) return err; } - err = dev_pm_opp_of_add_table(&pdev->dev); + err = dev_pm_opp_of_add_table_noclk(&pdev->dev, 0); if (err) { dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err); goto put_hw; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index e6703ae98760..05e0ef58fe32 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -134,7 +134,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) if (!gmu->legacy) { a6xx_hfi_set_freq(gmu, perf_index); - dev_pm_opp_set_bw(&gpu->pdev->dev, opp); + dev_pm_opp_set_opp(&gpu->pdev->dev, opp); pm_runtime_put(gmu->dev); return; } @@ -158,7 +158,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) if (ret) dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); - dev_pm_opp_set_bw(&gpu->pdev->dev, opp); + dev_pm_opp_set_opp(&gpu->pdev->dev, opp); pm_runtime_put(gmu->dev); } @@ -866,7 +866,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) if (IS_ERR_OR_NULL(gpu_opp)) return; - dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp); + dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); dev_pm_opp_put(gpu_opp); } @@ -1072,7 +1072,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) a6xx_gmu_shutdown(gmu); /* Remove the bus vote */ - dev_pm_opp_set_bw(&gpu->pdev->dev, NULL); + dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); /* * Make sure the GX domain is off before turning off the GMU (CX) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 8c905aabacc0..c3f3d9249cc5 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -27,6 +27,10 @@ * various states of availability. */ LIST_HEAD(opp_tables); + +/* OPP tables with uninitialized required OPPs */ +LIST_HEAD(lazy_opp_tables); + /* Lock to allow exclusive modification to the device and opp lists */ DEFINE_MUTEX(opp_table_lock); /* Flag indicating that opp_tables list is being updated at the moment */ @@ -146,6 +150,32 @@ unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) EXPORT_SYMBOL_GPL(dev_pm_opp_get_level); /** + * dev_pm_opp_get_required_pstate() - Gets the required performance state + * corresponding to an available opp + * @opp: opp for which performance state has to be returned for + * @index: index of the required opp + * + * Return: performance state read from device tree corresponding to the + * required opp, else return 0. 
+ */ +unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, + unsigned int index) +{ + if (IS_ERR_OR_NULL(opp) || !opp->available || + index >= opp->opp_table->required_opp_count) { + pr_err("%s: Invalid parameters\n", __func__); + return 0; + } + + /* required-opps not fully initialized yet */ + if (lazy_linking_pending(opp->opp_table)) + return 0; + + return opp->required_opps[index]->pstate; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate); + +/** * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not * @opp: opp for which turbo mode is being verified * @@ -449,6 +479,55 @@ struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact); +/** + * dev_pm_opp_find_level_ceil() - search for an rounded up level + * @dev: device for which we do this operation + * @level: level to search for + * + * Return: Searches for rounded up match in the opp table and returns pointer + * to the matching opp if found, else returns ERR_PTR in case of error and + * should be handled using IS_ERR. Error return values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level) +{ + struct opp_table *opp_table; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + int r = PTR_ERR(opp_table); + + dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); + return ERR_PTR(r); + } + + mutex_lock(&opp_table->lock); + + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { + if (temp_opp->available && temp_opp->level >= *level) { + opp = temp_opp; + *level = opp->level; + + /* Increment the reference count of OPP */ + dev_pm_opp_get(opp); + break; + } + } + + mutex_unlock(&opp_table->lock); + dev_pm_opp_put_opp_table(opp_table); + + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil); + static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, unsigned long *freq) { @@ -655,6 +734,10 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk, { int ret; + /* We may reach here for devices which don't change frequency */ + if (IS_ERR(clk)) + return 0; + ret = clk_set_rate(clk, freq); if (ret) { dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, @@ -666,12 +749,12 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk, static int _generic_set_opp_regulator(struct opp_table *opp_table, struct device *dev, - unsigned long old_freq, + struct dev_pm_opp *opp, unsigned long freq, - struct dev_pm_opp_supply *old_supply, - struct dev_pm_opp_supply *new_supply) + int scaling_down) { struct regulator *reg = opp_table->regulators[0]; + struct dev_pm_opp *old_opp = opp_table->current_opp; int ret; /* This function only supports single regulator per device */ @@ -681,8 +764,8 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table, } /* Scaling up? Scale voltage before frequency */ - if (freq >= old_freq) { - ret = _set_opp_voltage(dev, reg, new_supply); + if (!scaling_down) { + ret = _set_opp_voltage(dev, reg, opp->supplies); if (ret) goto restore_voltage; } @@ -693,8 +776,8 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table, goto restore_voltage; /* Scaling down? 
Scale voltage after frequency */ - if (freq < old_freq) { - ret = _set_opp_voltage(dev, reg, new_supply); + if (scaling_down) { + ret = _set_opp_voltage(dev, reg, opp->supplies); if (ret) goto restore_freq; } @@ -712,19 +795,18 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table, return 0; restore_freq: - if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq)) + if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate)) dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", - __func__, old_freq); + __func__, old_opp->rate); restore_voltage: /* This shouldn't harm even if the voltages weren't updated earlier */ - if (old_supply) - _set_opp_voltage(dev, reg, old_supply); + _set_opp_voltage(dev, reg, old_opp->supplies); return ret; } static int _set_opp_bw(const struct opp_table *opp_table, - struct dev_pm_opp *opp, struct device *dev, bool remove) + struct dev_pm_opp *opp, struct device *dev) { u32 avg, peak; int i, ret; @@ -733,7 +815,7 @@ static int _set_opp_bw(const struct opp_table *opp_table, return 0; for (i = 0; i < opp_table->path_count; i++) { - if (remove) { + if (!opp) { avg = 0; peak = 0; } else { @@ -743,7 +825,7 @@ static int _set_opp_bw(const struct opp_table *opp_table, ret = icc_set_bw(opp_table->paths[i], avg, peak); if (ret) { dev_err(dev, "Failed to %s bandwidth[%d]: %d\n", - remove ? "remove" : "set", i, ret); + opp ? "set" : "remove", i, ret); return ret; } } @@ -752,29 +834,31 @@ static int _set_opp_bw(const struct opp_table *opp_table, } static int _set_opp_custom(const struct opp_table *opp_table, - struct device *dev, unsigned long old_freq, - unsigned long freq, - struct dev_pm_opp_supply *old_supply, - struct dev_pm_opp_supply *new_supply) + struct device *dev, struct dev_pm_opp *opp, + unsigned long freq) { - struct dev_pm_set_opp_data *data; + struct dev_pm_set_opp_data *data = opp_table->set_opp_data; + struct dev_pm_opp *old_opp = opp_table->current_opp; int size; - data = opp_table->set_opp_data; + /* + * We support this only if dev_pm_opp_set_regulators() was called + * earlier. 
+ */ + if (opp_table->sod_supplies) { + size = sizeof(*old_opp->supplies) * opp_table->regulator_count; + memcpy(data->old_opp.supplies, old_opp->supplies, size); + memcpy(data->new_opp.supplies, opp->supplies, size); + data->regulator_count = opp_table->regulator_count; + } else { + data->regulator_count = 0; + } + data->regulators = opp_table->regulators; - data->regulator_count = opp_table->regulator_count; data->clk = opp_table->clk; data->dev = dev; - - data->old_opp.rate = old_freq; - size = sizeof(*old_supply) * opp_table->regulator_count; - if (!old_supply) - memset(data->old_opp.supplies, 0, size); - else - memcpy(data->old_opp.supplies, old_supply, size); - + data->old_opp.rate = old_opp->rate; data->new_opp.rate = freq; - memcpy(data->new_opp.supplies, new_supply, size); return opp_table->set_opp(data); } @@ -809,6 +893,10 @@ static int _set_required_opps(struct device *dev, if (!required_opp_tables) return 0; + /* required-opps not fully initialized yet */ + if (lazy_linking_pending(opp_table)) + return -EBUSY; + /* Single genpd case */ if (!genpd_virt_devs) return _set_required_opp(dev, dev, opp, 0); @@ -841,38 +929,32 @@ static int _set_required_opps(struct device *dev, return ret; } -/** - * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp - * @dev: device for which we do this operation - * @opp: opp based on which the bandwidth levels are to be configured - * - * This configures the bandwidth to the levels specified by the OPP. However - * if the OPP specified is NULL the bandwidth levels are cleared out. - * - * Return: 0 on success or a negative error value. - */ -int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) +static void _find_current_opp(struct device *dev, struct opp_table *opp_table) { - struct opp_table *opp_table; - int ret; + struct dev_pm_opp *opp = ERR_PTR(-ENODEV); + unsigned long freq; - opp_table = _find_opp_table(dev); - if (IS_ERR(opp_table)) { - dev_err(dev, "%s: device opp table doesn't exist\n", __func__); - return PTR_ERR(opp_table); + if (!IS_ERR(opp_table->clk)) { + freq = clk_get_rate(opp_table->clk); + opp = _find_freq_ceil(opp_table, &freq); } - if (opp) - ret = _set_opp_bw(opp_table, opp, dev, false); - else - ret = _set_opp_bw(opp_table, NULL, dev, true); + /* + * Unable to find the current OPP ? Pick the first from the list since + * it is in ascending order, otherwise rest of the code will need to + * make special checks to validate current_opp. 
+ */ + if (IS_ERR(opp)) { + mutex_lock(&opp_table->lock); + opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node); + dev_pm_opp_get(opp); + mutex_unlock(&opp_table->lock); + } - dev_pm_opp_put_opp_table(opp_table); - return ret; + opp_table->current_opp = opp; } -EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw); -static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table) +static int _disable_opp_table(struct device *dev, struct opp_table *opp_table) { int ret; @@ -887,7 +969,7 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table) if (!_get_opp_count(opp_table)) return 0; - ret = _set_opp_bw(opp_table, NULL, dev, true); + ret = _set_opp_bw(opp_table, NULL, dev); if (ret) return ret; @@ -900,6 +982,89 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table) return ret; } +static int _set_opp(struct device *dev, struct opp_table *opp_table, + struct dev_pm_opp *opp, unsigned long freq) +{ + struct dev_pm_opp *old_opp; + int scaling_down, ret; + + if (unlikely(!opp)) + return _disable_opp_table(dev, opp_table); + + /* Find the currently set OPP if we don't know already */ + if (unlikely(!opp_table->current_opp)) + _find_current_opp(dev, opp_table); + + old_opp = opp_table->current_opp; + + /* Return early if nothing to do */ + if (opp_table->enabled && old_opp == opp) { + dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__); + return 0; + } + + dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n", + __func__, old_opp->rate, freq, old_opp->level, opp->level, + old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0, + opp->bandwidth ? opp->bandwidth[0].peak : 0); + + scaling_down = _opp_compare_key(old_opp, opp); + if (scaling_down == -1) + scaling_down = 0; + + /* Scaling up? Configure required OPPs before frequency */ + if (!scaling_down) { + ret = _set_required_opps(dev, opp_table, opp, true); + if (ret) { + dev_err(dev, "Failed to set required opps: %d\n", ret); + return ret; + } + + ret = _set_opp_bw(opp_table, opp, dev); + if (ret) { + dev_err(dev, "Failed to set bw: %d\n", ret); + return ret; + } + } + + if (opp_table->set_opp) { + ret = _set_opp_custom(opp_table, dev, opp, freq); + } else if (opp_table->regulators) { + ret = _generic_set_opp_regulator(opp_table, dev, opp, freq, + scaling_down); + } else { + /* Only frequency scaling */ + ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq); + } + + if (ret) + return ret; + + /* Scaling down? 
Configure required OPPs after frequency */ + if (scaling_down) { + ret = _set_opp_bw(opp_table, opp, dev); + if (ret) { + dev_err(dev, "Failed to set bw: %d\n", ret); + return ret; + } + + ret = _set_required_opps(dev, opp_table, opp, false); + if (ret) { + dev_err(dev, "Failed to set required opps: %d\n", ret); + return ret; + } + } + + opp_table->enabled = true; + dev_pm_opp_put(old_opp); + + /* Make sure current_opp doesn't get freed */ + dev_pm_opp_get(opp); + opp_table->current_opp = opp; + + return ret; +} + /** * dev_pm_opp_set_rate() - Configure new OPP based on frequency * @dev: device for which we do this operation @@ -914,118 +1079,85 @@ static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table) int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { struct opp_table *opp_table; - unsigned long freq, old_freq, temp_freq; - struct dev_pm_opp *old_opp, *opp; - struct clk *clk; + unsigned long freq = 0, temp_freq; + struct dev_pm_opp *opp = NULL; int ret; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { - dev_err(dev, "%s: device opp doesn't exist\n", __func__); + dev_err(dev, "%s: device's opp table doesn't exist\n", __func__); return PTR_ERR(opp_table); } - if (unlikely(!target_freq)) { - ret = _opp_set_rate_zero(dev, opp_table); - goto put_opp_table; - } - - clk = opp_table->clk; - if (IS_ERR(clk)) { - dev_err(dev, "%s: No clock available for the device\n", - __func__); - ret = PTR_ERR(clk); - goto put_opp_table; - } - - freq = clk_round_rate(clk, target_freq); - if ((long)freq <= 0) - freq = target_freq; - - old_freq = clk_get_rate(clk); - - /* Return early if nothing to do */ - if (opp_table->enabled && old_freq == freq) { - dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", - __func__, freq); - ret = 0; - goto put_opp_table; - } - - /* - * For IO devices which require an OPP on some platforms/SoCs - * while just needing to scale the clock on some others - * we look for empty OPP tables with just a clock handle and - * scale only the clk. This makes dev_pm_opp_set_rate() - * equivalent to a clk_set_rate() - */ - if (!_get_opp_count(opp_table)) { - ret = _generic_set_opp_clk_only(dev, clk, freq); - goto put_opp_table; - } + if (target_freq) { + /* + * For IO devices which require an OPP on some platforms/SoCs + * while just needing to scale the clock on some others + * we look for empty OPP tables with just a clock handle and + * scale only the clk. This makes dev_pm_opp_set_rate() + * equivalent to a clk_set_rate() + */ + if (!_get_opp_count(opp_table)) { + ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq); + goto put_opp_table; + } - temp_freq = old_freq; - old_opp = _find_freq_ceil(opp_table, &temp_freq); - if (IS_ERR(old_opp)) { - dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n", - __func__, old_freq, PTR_ERR(old_opp)); - } + freq = clk_round_rate(opp_table->clk, target_freq); + if ((long)freq <= 0) + freq = target_freq; - temp_freq = freq; - opp = _find_freq_ceil(opp_table, &temp_freq); - if (IS_ERR(opp)) { - ret = PTR_ERR(opp); - dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", - __func__, freq, ret); - goto put_old_opp; + /* + * The clock driver may support finer resolution of the + * frequencies than the OPP table, don't update the frequency we + * pass to clk_set_rate() here. 
+ */ + temp_freq = freq; + opp = _find_freq_ceil(opp_table, &temp_freq); + if (IS_ERR(opp)) { + ret = PTR_ERR(opp); + dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", + __func__, freq, ret); + goto put_opp_table; + } } - dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__, - old_freq, freq); + ret = _set_opp(dev, opp_table, opp, freq); - /* Scaling up? Configure required OPPs before frequency */ - if (freq >= old_freq) { - ret = _set_required_opps(dev, opp_table, opp, true); - if (ret) - goto put_opp; - } - - if (opp_table->set_opp) { - ret = _set_opp_custom(opp_table, dev, old_freq, freq, - IS_ERR(old_opp) ? NULL : old_opp->supplies, - opp->supplies); - } else if (opp_table->regulators) { - ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq, - IS_ERR(old_opp) ? NULL : old_opp->supplies, - opp->supplies); - } else { - /* Only frequency scaling */ - ret = _generic_set_opp_clk_only(dev, clk, freq); - } + if (target_freq) + dev_pm_opp_put(opp); +put_opp_table: + dev_pm_opp_put_opp_table(opp_table); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); - /* Scaling down? Configure required OPPs after frequency */ - if (!ret && freq < old_freq) { - ret = _set_required_opps(dev, opp_table, opp, false); - if (ret) - dev_err(dev, "Failed to set required opps: %d\n", ret); - } +/** + * dev_pm_opp_set_opp() - Configure device for OPP + * @dev: device for which we do this operation + * @opp: OPP to set to + * + * This configures the device based on the properties of the OPP passed to this + * routine. + * + * Return: 0 on success, a negative error number otherwise. + */ +int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) +{ + struct opp_table *opp_table; + int ret; - if (!ret) { - ret = _set_opp_bw(opp_table, opp, dev, false); - if (!ret) - opp_table->enabled = true; + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "%s: device opp doesn't exist\n", __func__); + return PTR_ERR(opp_table); } -put_opp: - dev_pm_opp_put(opp); -put_old_opp: - if (!IS_ERR(old_opp)) - dev_pm_opp_put(old_opp); -put_opp_table: + ret = _set_opp(dev, opp_table, opp, opp ? 
opp->rate : 0); dev_pm_opp_put_opp_table(opp_table); + return ret; } -EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); +EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp); /* OPP-dev Helpers */ static void _remove_opp_dev(struct opp_device *opp_dev, @@ -1075,6 +1207,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) mutex_init(&opp_table->lock); mutex_init(&opp_table->genpd_virt_dev_lock); INIT_LIST_HEAD(&opp_table->dev_list); + INIT_LIST_HEAD(&opp_table->lazy); /* Mark regulator count uninitialized */ opp_table->regulator_count = -1; @@ -1087,21 +1220,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) _of_init_opp_table(opp_table, dev, index); - /* Find clk for the device */ - opp_table->clk = clk_get(dev, NULL); - if (IS_ERR(opp_table->clk)) { - ret = PTR_ERR(opp_table->clk); - if (ret == -EPROBE_DEFER) - goto remove_opp_dev; - - dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret); - } - /* Find interconnect path(s) for the device */ ret = dev_pm_opp_of_find_icc_paths(dev, opp_table); if (ret) { if (ret == -EPROBE_DEFER) - goto put_clk; + goto remove_opp_dev; dev_warn(dev, "%s: Error finding interconnect paths: %d\n", __func__, ret); @@ -1113,9 +1236,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) return opp_table; -put_clk: - if (!IS_ERR(opp_table->clk)) - clk_put(opp_table->clk); remove_opp_dev: _remove_opp_dev(opp_dev, opp_table); err: @@ -1128,6 +1248,37 @@ void _get_opp_table_kref(struct opp_table *opp_table) kref_get(&opp_table->kref); } +static struct opp_table *_update_opp_table_clk(struct device *dev, + struct opp_table *opp_table, + bool getclk) +{ + int ret; + + /* + * Return early if we don't need to get clk or we have already tried it + * earlier. + */ + if (!getclk || IS_ERR(opp_table) || opp_table->clk) + return opp_table; + + /* Find clk for the device */ + opp_table->clk = clk_get(dev, NULL); + + ret = PTR_ERR_OR_ZERO(opp_table->clk); + if (!ret) + return opp_table; + + if (ret == -ENOENT) { + dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret); + return opp_table; + } + + dev_pm_opp_put_opp_table(opp_table); + dev_err_probe(dev, ret, "Couldn't find clock\n"); + + return ERR_PTR(ret); +} + /* * We need to make sure that the OPP table for a device doesn't get added twice, * if this routine gets called in parallel with the same device pointer. @@ -1143,7 +1294,8 @@ void _get_opp_table_kref(struct opp_table *opp_table) * uses the opp_tables_busy flag to indicate if another creator is in the middle * of adding an OPP table and others should wait for it to finish. 
*/ -struct opp_table *_add_opp_table_indexed(struct device *dev, int index) +struct opp_table *_add_opp_table_indexed(struct device *dev, int index, + bool getclk) { struct opp_table *opp_table; @@ -1190,12 +1342,12 @@ again: unlock: mutex_unlock(&opp_table_lock); - return opp_table; + return _update_opp_table_clk(dev, opp_table, getclk); } -struct opp_table *_add_opp_table(struct device *dev) +static struct opp_table *_add_opp_table(struct device *dev, bool getclk) { - return _add_opp_table_indexed(dev, 0); + return _add_opp_table_indexed(dev, 0, getclk); } struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) @@ -1214,6 +1366,9 @@ static void _opp_table_kref_release(struct kref *kref) list_del(&opp_table->node); mutex_unlock(&opp_table_lock); + if (opp_table->current_opp) + dev_pm_opp_put(opp_table->current_opp); + _of_clear_opp_table(opp_table); /* Release clk */ @@ -1508,6 +1663,21 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp, return 0; } +void _required_opps_available(struct dev_pm_opp *opp, int count) +{ + int i; + + for (i = 0; i < count; i++) { + if (opp->required_opps[i]->available) + continue; + + opp->available = false; + pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n", + __func__, opp->required_opps[i]->np, opp->rate); + return; + } +} + /* * Returns: * 0: On success. And appropriate error message for duplicate OPPs. @@ -1527,12 +1697,10 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, mutex_lock(&opp_table->lock); head = &opp_table->opp_list; - if (likely(!rate_not_available)) { - ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); - if (ret) { - mutex_unlock(&opp_table->lock); - return ret; - } + ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); + if (ret) { + mutex_unlock(&opp_table->lock); + return ret; } list_add(&new_opp->node, head); @@ -1549,6 +1717,12 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, __func__, new_opp->rate); } + /* required-opps not fully initialized yet */ + if (lazy_linking_pending(opp_table)) + return 0; + + _required_opps_available(new_opp, opp_table->required_opp_count); + return 0; } @@ -1631,7 +1805,7 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, { struct opp_table *opp_table; - opp_table = _add_opp_table(dev); + opp_table = _add_opp_table(dev, false); if (IS_ERR(opp_table)) return opp_table; @@ -1693,7 +1867,7 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) { struct opp_table *opp_table; - opp_table = _add_opp_table(dev); + opp_table = _add_opp_table(dev, false); if (IS_ERR(opp_table)) return opp_table; @@ -1737,38 +1911,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table) } EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); -static int _allocate_set_opp_data(struct opp_table *opp_table) -{ - struct dev_pm_set_opp_data *data; - int len, count = opp_table->regulator_count; - - if (WARN_ON(!opp_table->regulators)) - return -EINVAL; - - /* space for set_opp_data */ - len = sizeof(*data); - - /* space for old_opp.supplies and new_opp.supplies */ - len += 2 * sizeof(struct dev_pm_opp_supply) * count; - - data = kzalloc(len, GFP_KERNEL); - if (!data) - return -ENOMEM; - - data->old_opp.supplies = (void *)(data + 1); - data->new_opp.supplies = data->old_opp.supplies + count; - - opp_table->set_opp_data = data; - - return 0; -} - -static void _free_set_opp_data(struct opp_table *opp_table) -{ - kfree(opp_table->set_opp_data); - opp_table->set_opp_data = NULL; -} - /** * 
dev_pm_opp_set_regulators() - Set regulator names for the device * @dev: Device for which regulator name is being set. @@ -1785,11 +1927,12 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) { + struct dev_pm_opp_supply *supplies; struct opp_table *opp_table; struct regulator *reg; int ret, i; - opp_table = _add_opp_table(dev); + opp_table = _add_opp_table(dev, false); if (IS_ERR(opp_table)) return opp_table; @@ -1826,10 +1969,19 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, opp_table->regulator_count = count; - /* Allocate block only once to pass to set_opp() routines */ - ret = _allocate_set_opp_data(opp_table); - if (ret) + supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL); + if (!supplies) { + ret = -ENOMEM; goto free_regulators; + } + + mutex_lock(&opp_table->lock); + opp_table->sod_supplies = supplies; + if (opp_table->set_opp_data) { + opp_table->set_opp_data->old_opp.supplies = supplies; + opp_table->set_opp_data->new_opp.supplies = supplies + count; + } + mutex_unlock(&opp_table->lock); return opp_table; @@ -1872,7 +2024,15 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) for (i = opp_table->regulator_count - 1; i >= 0; i--) regulator_put(opp_table->regulators[i]); - _free_set_opp_data(opp_table); + mutex_lock(&opp_table->lock); + if (opp_table->set_opp_data) { + opp_table->set_opp_data->old_opp.supplies = NULL; + opp_table->set_opp_data->new_opp.supplies = NULL; + } + + kfree(opp_table->sod_supplies); + opp_table->sod_supplies = NULL; + mutex_unlock(&opp_table->lock); kfree(opp_table->regulators); opp_table->regulators = NULL; @@ -1900,7 +2060,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) struct opp_table *opp_table; int ret; - opp_table = _add_opp_table(dev); + opp_table = _add_opp_table(dev, false); if (IS_ERR(opp_table)) return opp_table; @@ -1910,9 +2070,11 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) goto err; } - /* Already have default clk set, free it */ - if (!IS_ERR(opp_table->clk)) - clk_put(opp_table->clk); + /* clk shouldn't be initialized at this point */ + if (WARN_ON(opp_table->clk)) { + ret = -EBUSY; + goto err; + } /* Find clk for the device */ opp_table->clk = clk_get(dev, name); @@ -1966,12 +2128,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)) { + struct dev_pm_set_opp_data *data; struct opp_table *opp_table; if (!set_opp) return ERR_PTR(-EINVAL); - opp_table = _add_opp_table(dev); + opp_table = _add_opp_table(dev, false); if (IS_ERR(opp_table)) return opp_table; @@ -1982,8 +2145,23 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, } /* Another CPU that shares the OPP table has set the helper ? 
*/ - if (!opp_table->set_opp) - opp_table->set_opp = set_opp; + if (opp_table->set_opp) + return opp_table; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + mutex_lock(&opp_table->lock); + opp_table->set_opp_data = data; + if (opp_table->sod_supplies) { + data->old_opp.supplies = opp_table->sod_supplies; + data->new_opp.supplies = opp_table->sod_supplies + + opp_table->regulator_count; + } + mutex_unlock(&opp_table->lock); + + opp_table->set_opp = set_opp; return opp_table; } @@ -2005,10 +2183,50 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) WARN_ON(!list_empty(&opp_table->opp_list)); opp_table->set_opp = NULL; + + mutex_lock(&opp_table->lock); + kfree(opp_table->set_opp_data); + opp_table->set_opp_data = NULL; + mutex_unlock(&opp_table->lock); + dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper); +static void devm_pm_opp_unregister_set_opp_helper(void *data) +{ + dev_pm_opp_unregister_set_opp_helper(data); +} + +/** + * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper + * @dev: Device for which the helper is getting registered. + * @set_opp: Custom set OPP helper. + * + * This is a resource-managed version of dev_pm_opp_register_set_opp_helper(). + * + * Return: pointer to 'struct opp_table' on success and errorno otherwise. + */ +struct opp_table * +devm_pm_opp_register_set_opp_helper(struct device *dev, + int (*set_opp)(struct dev_pm_set_opp_data *data)) +{ + struct opp_table *opp_table; + int err; + + opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp); + if (IS_ERR(opp_table)) + return opp_table; + + err = devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper, + opp_table); + if (err) + return ERR_PTR(err); + + return opp_table; +} +EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper); + static void _opp_detach_genpd(struct opp_table *opp_table) { int index; @@ -2058,7 +2276,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, int index = 0, ret = -EINVAL; const char **name = names; - opp_table = _add_opp_table(dev); + opp_table = _add_opp_table(dev, false); if (IS_ERR(opp_table)) return opp_table; @@ -2144,6 +2362,97 @@ void dev_pm_opp_detach_genpd(struct opp_table *opp_table) } EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd); +static void devm_pm_opp_detach_genpd(void *data) +{ + dev_pm_opp_detach_genpd(data); +} + +/** + * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual + * device pointer + * @dev: Consumer device for which the genpd is getting attached. + * @names: Null terminated array of pointers containing names of genpd to attach. + * @virt_devs: Pointer to return the array of virtual devices. + * + * This is a resource-managed version of dev_pm_opp_attach_genpd(). + * + * Return: pointer to 'struct opp_table' on success and errorno otherwise. + */ +struct opp_table * +devm_pm_opp_attach_genpd(struct device *dev, const char **names, + struct device ***virt_devs) +{ + struct opp_table *opp_table; + int err; + + opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs); + if (IS_ERR(opp_table)) + return opp_table; + + err = devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd, + opp_table); + if (err) + return ERR_PTR(err); + + return opp_table; +} +EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd); + +/** + * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP. + * @src_table: OPP table which has @dst_table as one of its required OPP table. 
+ * @dst_table: Required OPP table of the @src_table. + * @src_opp: OPP from the @src_table. + * + * This function returns the OPP (present in @dst_table) pointed out by the + * "required-opps" property of the @src_opp (present in @src_table). + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + * + * Return: pointer to 'struct dev_pm_opp' on success and errorno otherwise. + */ +struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, + struct opp_table *dst_table, + struct dev_pm_opp *src_opp) +{ + struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV); + int i; + + if (!src_table || !dst_table || !src_opp || + !src_table->required_opp_tables) + return ERR_PTR(-EINVAL); + + /* required-opps not fully initialized yet */ + if (lazy_linking_pending(src_table)) + return ERR_PTR(-EBUSY); + + for (i = 0; i < src_table->required_opp_count; i++) { + if (src_table->required_opp_tables[i] == dst_table) { + mutex_lock(&src_table->lock); + + list_for_each_entry(opp, &src_table->opp_list, node) { + if (opp == src_opp) { + dest_opp = opp->required_opps[i]; + dev_pm_opp_get(dest_opp); + break; + } + } + + mutex_unlock(&src_table->lock); + break; + } + } + + if (IS_ERR(dest_opp)) { + pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, + src_table, dst_table); + } + + return dest_opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp); + /** * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table. * @src_table: OPP table which has dst_table as one of its required OPP table. @@ -2172,9 +2481,13 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, * and so none of them have the "required-opps" property set. Return the * pstate of the src_table as it is in such cases. */ - if (!src_table->required_opp_count) + if (!src_table || !src_table->required_opp_count) return pstate; + /* required-opps not fully initialized yet */ + if (lazy_linking_pending(src_table)) + return -EBUSY; + for (i = 0; i < src_table->required_opp_count; i++) { if (src_table->required_opp_tables[i]->np == dst_table->np) break; @@ -2226,7 +2539,7 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) struct opp_table *opp_table; int ret; - opp_table = _add_opp_table(dev); + opp_table = _add_opp_table(dev, true); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); @@ -2504,3 +2817,44 @@ void dev_pm_opp_remove_table(struct device *dev) dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); + +/** + * dev_pm_opp_sync_regulators() - Sync state of voltage regulators + * @dev: device for which we do this operation + * + * Sync voltage state of the OPP table regulators. + * + * Return: 0 on success or a negative error value. 
+ */ +int dev_pm_opp_sync_regulators(struct device *dev) +{ + struct opp_table *opp_table; + struct regulator *reg; + int i, ret = 0; + + /* Device may not have OPP table */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) + return 0; + + /* Regulator may not be required for the device */ + if (unlikely(!opp_table->regulators)) + goto put_table; + + /* Nothing to sync if voltage wasn't changed */ + if (!opp_table->enabled) + goto put_table; + + for (i = 0; i < opp_table->regulator_count; i++) { + reg = opp_table->regulators[i]; + ret = regulator_sync_voltage(reg); + if (ret) + break; + } +put_table: + /* Drop reference taken by _find_opp_table() */ + dev_pm_opp_put_opp_table(opp_table); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 03cb387236c4..f480c10e6314 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -144,7 +144,7 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table) for (i = 0; i < opp_table->required_opp_count; i++) { if (IS_ERR_OR_NULL(required_opp_tables[i])) - break; + continue; dev_pm_opp_put_opp_table(required_opp_tables[i]); } @@ -153,6 +153,7 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table) opp_table->required_opp_count = 0; opp_table->required_opp_tables = NULL; + list_del(&opp_table->lazy); } /* @@ -165,6 +166,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, { struct opp_table **required_opp_tables; struct device_node *required_np, *np; + bool lazy = false; int count, i; /* Traversing the first OPP node is all we need */ @@ -195,8 +197,10 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, required_opp_tables[i] = _find_table_of_opp_np(required_np); of_node_put(required_np); - if (IS_ERR(required_opp_tables[i])) - goto free_required_tables; + if (IS_ERR(required_opp_tables[i])) { + lazy = true; + continue; + } /* * We only support genpd's OPPs in the "required-opps" for now, @@ -210,6 +214,10 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, } } + /* Let's do the linking later on */ + if (lazy) + list_add(&opp_table->lazy, &lazy_opp_tables); + goto put_np; free_required_tables: @@ -278,14 +286,14 @@ void _of_opp_free_required_opps(struct opp_table *opp_table, for (i = 0; i < opp_table->required_opp_count; i++) { if (!required_opps[i]) - break; + continue; /* Put the reference back */ dev_pm_opp_put(required_opps[i]); } - kfree(required_opps); opp->required_opps = NULL; + kfree(required_opps); } /* Populate all required OPPs which are part of "required-opps" list */ @@ -309,6 +317,10 @@ static int _of_opp_alloc_required_opps(struct opp_table *opp_table, for (i = 0; i < count; i++) { required_table = opp_table->required_opp_tables[i]; + /* Required table not added yet, we will link later */ + if (IS_ERR_OR_NULL(required_table)) + continue; + np = of_parse_required_opp(opp->np, i); if (unlikely(!np)) { ret = -ENODEV; @@ -334,6 +346,104 @@ free_required_opps: return ret; } +/* Link required OPPs for an individual OPP */ +static int lazy_link_required_opps(struct opp_table *opp_table, + struct opp_table *new_table, int index) +{ + struct device_node *required_np; + struct dev_pm_opp *opp; + + list_for_each_entry(opp, &opp_table->opp_list, node) { + required_np = of_parse_required_opp(opp->np, index); + if (unlikely(!required_np)) + return -ENODEV; + + opp->required_opps[index] = _find_opp_of_np(new_table, required_np); + of_node_put(required_np); + + if 
(!opp->required_opps[index]) { + pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", + __func__, opp->np, index); + return -ENODEV; + } + } + + return 0; +} + +/* Link required OPPs for all OPPs of the newly added OPP table */ +static void lazy_link_required_opp_table(struct opp_table *new_table) +{ + struct opp_table *opp_table, *temp, **required_opp_tables; + struct device_node *required_np, *opp_np, *required_table_np; + struct dev_pm_opp *opp; + int i, ret; + + /* + * We only support genpd's OPPs in the "required-opps" for now, + * as we don't know much about other cases. + */ + if (!new_table->is_genpd) + return; + + mutex_lock(&opp_table_lock); + + list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) { + bool lazy = false; + + /* opp_np can't be invalid here */ + opp_np = of_get_next_available_child(opp_table->np, NULL); + + for (i = 0; i < opp_table->required_opp_count; i++) { + required_opp_tables = opp_table->required_opp_tables; + + /* Required opp-table is already parsed */ + if (!IS_ERR(required_opp_tables[i])) + continue; + + /* required_np can't be invalid here */ + required_np = of_parse_required_opp(opp_np, i); + required_table_np = of_get_parent(required_np); + + of_node_put(required_table_np); + of_node_put(required_np); + + /* + * Newly added table isn't the required opp-table for + * opp_table. + */ + if (required_table_np != new_table->np) { + lazy = true; + continue; + } + + required_opp_tables[i] = new_table; + _get_opp_table_kref(new_table); + + /* Link OPPs now */ + ret = lazy_link_required_opps(opp_table, new_table, i); + if (ret) { + /* The OPPs will be marked unusable */ + lazy = false; + break; + } + } + + of_node_put(opp_np); + + /* All required opp-tables found, remove from lazy list */ + if (!lazy) { + list_del(&opp_table->lazy); + INIT_LIST_HEAD(&opp_table->lazy); + + list_for_each_entry(opp, &opp_table->opp_list, node) + _required_opps_available(opp, opp_table->required_opp_count); + } + } + + mutex_unlock(&opp_table_lock); +} + static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) { struct device_node *np, *opp_np; @@ -755,7 +865,6 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, struct device *dev, struct device_node *np) { struct dev_pm_opp *new_opp; - u64 rate = 0; u32 val; int ret; bool rate_not_available = false; @@ -772,7 +881,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, /* Check if the OPP supports hardware's hierarchy of versions or not */ if (!_opp_is_supported(dev, opp_table, np)) { - dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); + dev_dbg(dev, "OPP not supported by hardware: %lu\n", + new_opp->rate); goto free_opp; } @@ -822,10 +932,11 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) opp_table->clock_latency_ns_max = new_opp->clock_latency_ns; - pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", + pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n", __func__, new_opp->turbo, new_opp->rate, new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min, - new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns); + new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns, + new_opp->level); /* * Notify the changes in the availability of the operable @@ -888,6 +999,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) } } + 
lazy_link_required_opp_table(opp_table); + return 0; remove_static_opp: @@ -956,29 +1069,23 @@ remove_static_opp: return ret; } -/** - * dev_pm_opp_of_add_table() - Initialize opp table from device tree - * @dev: device pointer used to lookup OPP table. - * - * Register the initial OPP table with the OPP library for given device. - * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure - * -ENODEV when 'operating-points' property is not found or is invalid data - * in device node. - * -ENODATA when empty 'operating-points' property is found - * -EINVAL when invalid entries are found in opp-v2 table - */ -int dev_pm_opp_of_add_table(struct device *dev) +static int _of_add_table_indexed(struct device *dev, int index, bool getclk) { struct opp_table *opp_table; - int ret; + int ret, count; - opp_table = _add_opp_table_indexed(dev, 0); + if (index) { + /* + * If only one phandle is present, then the same OPP table + * applies for all index requests. + */ + count = of_count_phandle_with_args(dev->of_node, + "operating-points-v2", NULL); + if (count == 1) + index = 0; + } + + opp_table = _add_opp_table_indexed(dev, index, getclk); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); @@ -996,15 +1103,12 @@ int dev_pm_opp_of_add_table(struct device *dev) return ret; } -EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); /** - * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree + * dev_pm_opp_of_add_table() - Initialize opp table from device tree * @dev: device pointer used to lookup OPP table. - * @index: Index number. * - * Register the initial OPP table with the OPP library for given device only - * using the "operating-points-v2" property. + * Register the initial OPP table with the OPP library for given device. * * Return: * 0 On success OR @@ -1017,34 +1121,46 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); * -ENODATA when empty 'operating-points' property is found * -EINVAL when invalid entries are found in opp-v2 table */ -int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) +int dev_pm_opp_of_add_table(struct device *dev) { - struct opp_table *opp_table; - int ret, count; - - if (index) { - /* - * If only one phandle is present, then the same OPP table - * applies for all index requests. - */ - count = of_count_phandle_with_args(dev->of_node, - "operating-points-v2", NULL); - if (count == 1) - index = 0; - } - - opp_table = _add_opp_table_indexed(dev, index); - if (IS_ERR(opp_table)) - return PTR_ERR(opp_table); - - ret = _of_add_opp_table_v2(dev, opp_table); - if (ret) - dev_pm_opp_put_opp_table(opp_table); + return _of_add_table_indexed(dev, 0, true); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); - return ret; +/** + * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree + * @dev: device pointer used to lookup OPP table. + * @index: Index number. + * + * Register the initial OPP table with the OPP library for given device only + * using the "operating-points-v2" property. + * + * Return: Refer to dev_pm_opp_of_add_table() for return values. 
+ */ +int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) +{ + return _of_add_table_indexed(dev, index, true); } EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed); +/** + * dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device + * tree without getting clk for device. + * @dev: device pointer used to lookup OPP table. + * @index: Index number. + * + * Register the initial OPP table with the OPP library for given device only + * using the "operating-points-v2" property. Do not try to get the clk for the + * device. + * + * Return: Refer to dev_pm_opp_of_add_table() for return values. + */ +int dev_pm_opp_of_add_table_noclk(struct device *dev, int index) +{ + return _of_add_table_indexed(dev, index, false); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk); + /* CPU device specific helpers */ /** diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 4ced7ffa8158..9b9daf83b074 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -26,7 +26,7 @@ struct regulator; /* Lock to allow exclusive modification to the device and opp lists */ extern struct mutex opp_table_lock; -extern struct list_head opp_tables; +extern struct list_head opp_tables, lazy_opp_tables; /* * Internal data structure organization with the OPP layer library is as @@ -135,6 +135,7 @@ enum opp_table_access { * @clock_latency_ns_max: Max clock latency in nanoseconds. * @parsed_static_opps: Count of devices for which OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. + * @current_opp: Currently configured OPP for the table. * @suspend_opp: Pointer to OPP to be used during device suspend. * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. * @genpd_virt_devs: List of virtual devices for multiple genpd support. @@ -155,6 +156,7 @@ enum opp_table_access { * @genpd_performance_state: Device's power domain support performance state. * @is_genpd: Marks if the OPP table belongs to a genpd. * @set_opp: Platform specific set_opp callback + * @sod_supplies: Set opp data supplies * @set_opp_data: Data to be passed to set_opp callback * @dentry: debugfs dentry pointer of the real device directory (not links). * @dentry_name: Name of the real dentry. @@ -166,7 +168,7 @@ enum opp_table_access { * meant for book keeping and private to OPP library. 
*/ struct opp_table { - struct list_head node; + struct list_head node, lazy; struct blocking_notifier_head head; struct list_head dev_list; @@ -182,6 +184,7 @@ struct opp_table { unsigned int parsed_static_opps; enum opp_table_access shared_opp; + struct dev_pm_opp *current_opp; struct dev_pm_opp *suspend_opp; struct mutex genpd_virt_dev_lock; @@ -202,6 +205,7 @@ struct opp_table { bool is_genpd; int (*set_opp)(struct dev_pm_set_opp_data *data); + struct dev_pm_opp_supply *sod_supplies; struct dev_pm_set_opp_data *set_opp_data; #ifdef CONFIG_DEBUG_FS @@ -223,9 +227,14 @@ int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2); int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available); int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic); void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu); -struct opp_table *_add_opp_table(struct device *dev); -struct opp_table *_add_opp_table_indexed(struct device *dev, int index); +struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk); void _put_opp_list_kref(struct opp_table *opp_table); +void _required_opps_available(struct dev_pm_opp *opp, int count); + +static inline bool lazy_linking_pending(struct opp_table *opp_table) +{ + return unlikely(!list_empty(&opp_table->lazy)); +} #ifdef CONFIG_OF void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index); diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index b6d3bae1c74d..26ea0850be9b 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -137,6 +137,7 @@ struct devfreq_stats { * using devfreq. * @profile: device-specific devfreq profile * @governor: method how to choose frequency based on the usage. + * @opp_table: Reference to OPP table of dev.parent, if one exists. * @nb: notifier block used to notify devfreq object that it should * reevaluate operable frequencies. Devfreq users may use * devfreq.nb to the corresponding register notifier call chain. 
@@ -173,6 +174,7 @@ struct devfreq { struct device dev; struct devfreq_dev_profile *profile; const struct devfreq_governor *governor; + struct opp_table *opp_table; struct notifier_block nb; struct delayed_work work; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 1435c054016a..c0371efa4a0f 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -98,6 +98,9 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); +unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, + unsigned int index); + bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); int dev_pm_opp_get_opp_count(struct device *dev); @@ -111,6 +114,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, bool available); struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsigned int level); +struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level); struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); @@ -143,28 +148,32 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) void dev_pm_opp_put_prop_name(struct opp_table *opp_table); struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); void dev_pm_opp_put_regulators(struct opp_table *opp_table); -struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); +struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); +struct opp_table *devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); void dev_pm_opp_detach_genpd(struct opp_table *opp_table); +struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); +struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); -int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp); +int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); void dev_pm_opp_remove_table(struct device *dev); void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); +int dev_pm_opp_sync_regulators(struct device *dev); #else static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} @@ -184,6 +193,13 @@ static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) return 0; } +static inline 
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+                                            unsigned int index)
+{
+        return 0;
+}
+
 static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
 {
         return false;
@@ -217,31 +233,37 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
                                         unsigned long freq, bool available)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
                                         unsigned int level)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+                                        unsigned int *level)
+{
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                                         unsigned long *freq)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
                                         unsigned long u_volt)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                                         unsigned long *freq)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
@@ -249,7 +271,7 @@ static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
 static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
                                         unsigned long u_volt)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -280,19 +302,19 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
 
 static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
                                                             const u32 *versions,
                                                             unsigned int count)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
@@ -300,57 +322,76 @@ static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
 
 static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data))
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {}
 
+static inline struct opp_table *
+devm_pm_opp_register_set_opp_helper(struct device *dev,
+                                    int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+        return ERR_PTR(-EOPNOTSUPP);
+}
+
 static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
 
 static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
 
-static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name)
+static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
 
 static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs)
 {
-        return ERR_PTR(-ENOTSUPP);
+        return ERR_PTR(-EOPNOTSUPP);
 }
 
 static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {}
 
+static inline struct opp_table *devm_pm_opp_attach_genpd(struct device *dev,
+                                const char **names, struct device ***virt_devs)
+{
+        return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
+                                struct opp_table *dst_table, struct dev_pm_opp *src_opp)
+{
+        return ERR_PTR(-EOPNOTSUPP);
+}
+
 static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
-static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
+static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
 {
         return -EOPNOTSUPP;
 }
 
 static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
@@ -366,11 +407,17 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask
 {
 }
 
+static inline int dev_pm_opp_sync_regulators(struct device *dev)
+{
+        return -EOPNOTSUPP;
+}
+
 #endif /* CONFIG_PM_OPP */
 
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
 int dev_pm_opp_of_add_table(struct device *dev);
 int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
+int dev_pm_opp_of_add_table_noclk(struct device *dev, int index);
 void dev_pm_opp_of_remove_table(struct device *dev);
 int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
 void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
@@ -387,12 +434,17 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev)
 #else
 static inline int dev_pm_opp_of_add_table(struct device *dev)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
+}
+
+static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
+{
+        return -EOPNOTSUPP;
 }
 
 static inline void dev_pm_opp_of_remove_table(struct device *dev)
@@ -401,7 +453,7 @@ static inline void dev_pm_opp_of_remove_table(struct device *dev)
 
 static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
@@ -410,7 +462,7 @@ static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpum
 
 static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
@@ -426,7 +478,7 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
 
 static inline int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline void dev_pm_opp_of_unregister_em(struct device *dev)
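
Taken together, the new dev_pm_opp_xlate_required_opp() and the renamed dev_pm_opp_set_opp() let a consumer map an OPP chosen for one device onto the table of another device linked to it through required-opps, and then program the complete OPP (not just bandwidth, as dev_pm_opp_set_bw() did). A hedged sketch of a hypothetical consumer; the devices, the required-opps link and the opp_table pointers (obtained with dev_pm_opp_get_opp_table() at probe time) are assumptions for illustration:

    #include <linux/err.h>
    #include <linux/pm_opp.h>

    /* Illustrative only: follow a parent device's OPP on a linked child. */
    static int example_follow_parent_opp(struct device *parent, struct device *child,
                                         struct opp_table *parent_table,
                                         struct opp_table *child_table,
                                         unsigned long *freq)
    {
            struct dev_pm_opp *p_opp, *opp;
            int ret;

            p_opp = dev_pm_opp_find_freq_ceil(parent, freq);
            if (IS_ERR(p_opp))
                    return PTR_ERR(p_opp);

            /* Translate through the required-opps link to the child's table. */
            opp = dev_pm_opp_xlate_required_opp(parent_table, child_table, p_opp);
            dev_pm_opp_put(p_opp);
            if (IS_ERR(opp))
                    return PTR_ERR(opp);

            /* Programs the child's clock/regulators/bandwidth/genpd state. */
            ret = dev_pm_opp_set_opp(child, opp);
            dev_pm_opp_put(opp);

            return ret;
    }
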
@@ -435,12 +487,12 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev)
 {
 }
 
 static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 #endif
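
The devm_* variants and the clock/regulator additions visible in the stubs above are easiest to see together in a probe path. A minimal, hypothetical sketch under stated assumptions: the "perf" power-domain name, table index 0, passing NULL for virt_devs, and the omitted table cleanup are all illustrative choices, not taken from this patch:

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/pm_opp.h>

    /* Illustrative probe only; real drivers keep the table and clean it up. */
    static int example_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            const char *genpd_names[] = { "perf", NULL };
            struct opp_table *genpd_table;
            int ret;

            /* Managed attach: no dev_pm_opp_detach_genpd() needed on remove. */
            genpd_table = devm_pm_opp_attach_genpd(dev, genpd_names, NULL);
            if (IS_ERR(genpd_table))
                    return PTR_ERR(genpd_table);

            /* Parse the OPP table from DT without acquiring the device clock. */
            ret = dev_pm_opp_of_add_table_noclk(dev, 0);
            if (ret)
                    return ret;

            /* Re-sync the OPP table's regulators, e.g. after firmware hand-off. */
            return dev_pm_opp_sync_regulators(dev);
    }
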