Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/Kconfig.arm                     12
-rw-r--r--   drivers/cpufreq/Makefile                         1
-rw-r--r--   drivers/cpufreq/acpi-cpufreq.c                  14
-rw-r--r--   drivers/cpufreq/cpufreq-dt-platdev.c             4
-rw-r--r--   drivers/cpufreq/cpufreq-dt.c                     3
-rw-r--r--   drivers/cpufreq/cpufreq.c                       17
-rw-r--r--   drivers/cpufreq/cpufreq_governor_attr_set.c      2
-rw-r--r--   drivers/cpufreq/imx6q-cpufreq.c                  2
-rw-r--r--   drivers/cpufreq/intel_pstate.c                 246
-rw-r--r--   drivers/cpufreq/mediatek-cpufreq-hw.c          308
-rw-r--r--   drivers/cpufreq/mediatek-cpufreq.c               3
-rw-r--r--   drivers/cpufreq/omap-cpufreq.c                   2
-rw-r--r--   drivers/cpufreq/powernv-cpufreq.c               16
-rw-r--r--   drivers/cpufreq/qcom-cpufreq-hw.c              151
-rw-r--r--   drivers/cpufreq/scmi-cpufreq.c                  65
-rw-r--r--   drivers/cpufreq/scpi-cpufreq.c                   3
-rw-r--r--   drivers/cpufreq/sh-cpufreq.c                    11
-rw-r--r--   drivers/cpufreq/vexpress-spc-cpufreq.c          26
18 files changed, 641 insertions, 245 deletions
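
Editor's note: a recurring change across the drivers below is that energy-model registration moves out of each driver's ->init() (which used to call dev_pm_opp_of_register_em()) and into the new ->register_em() driver callback, which the core now invokes from cpufreq_online() after the policy is added to cpufreq_policy_list and before the cooling device is registered. The following is a minimal sketch of how a driver adopts the callback; the my_cpufreq_* names are hypothetical and used only for illustration, while the ->register_em member and the cpufreq_register_em_with_opp() helper are the ones introduced and used in the hunks below.

    /*
     * Sketch only: hypothetical "my_cpufreq" driver showing the new
     * ->register_em() hook.  OPP-based drivers in this series simply
     * point it at the generic cpufreq_register_em_with_opp() helper
     * instead of calling dev_pm_opp_of_register_em() from ->init().
     */
    #include <linux/cpufreq.h>
    #include <linux/pm_opp.h>

    static int my_cpufreq_init(struct cpufreq_policy *policy)
    {
            /* clock/OPP/frequency-table setup as before ... */
            /* ... but no dev_pm_opp_of_register_em() call here any more */
            return 0;
    }

    static struct cpufreq_driver my_cpufreq_driver = {
            .name           = "my-cpufreq",
            .init           = my_cpufreq_init,
            /*
             * Invoked by the core from cpufreq_online() once the policy is
             * set up, so the energy model is registered before the sched
             * domains are rebuilt and before the cooling device is created.
             */
            .register_em    = cpufreq_register_em_with_opp,
    };

Drivers whose energy data does not come from an OPP table (scmi-cpufreq, the new mediatek-cpufreq-hw) instead provide their own ->register_em() callback that calls em_dev_register_perf_domain() directly, as shown in their hunks below.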
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index a5c5f70acfc9..954749afb5fe 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -133,6 +133,18 @@ config ARM_MEDIATEK_CPUFREQ help This adds the CPUFreq driver support for MediaTek SoCs. +config ARM_MEDIATEK_CPUFREQ_HW + tristate "MediaTek CPUFreq HW driver" + depends on ARCH_MEDIATEK || COMPILE_TEST + default m + help + Support for the CPUFreq HW driver. + Some MediaTek chipsets have a HW engine to offload the steps + necessary for changing the frequency of the CPUs. Firmware loaded + in this engine exposes a programming interface to the OS. + The driver implements the cpufreq interface for this HW engine. + Say Y if you want to support CPUFreq HW. + config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" depends on ARCH_OMAP2PLUS diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 27d3bd7ea9d4..48ee5859030c 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o obj-$(CONFIG_ARM_IMX_CPUFREQ_DT) += imx-cpufreq-dt.o obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o +obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ_HW) += mediatek-cpufreq-hw.o obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index b49612895c78..28467d83c745 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -889,6 +889,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->fast_switch_possible = !acpi_pstate_strict && !(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY); + if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency) + pr_warn(FW_WARN "P-state 0 is not max freq\n"); + return result; err_unreg: @@ -918,16 +921,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) return 0; } -static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy) -{ - struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data, - policy->cpu); - unsigned int freq = policy->freq_table[0].frequency; - - if (perf->states[0].core_frequency * 1000 != freq) - pr_warn(FW_WARN "P-state 0 is not max freq\n"); -} - static int acpi_cpufreq_resume(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = policy->driver_data; @@ -955,7 +948,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = { .bios_limit = acpi_processor_get_bios_limit, .init = acpi_cpufreq_cpu_init, .exit = acpi_cpufreq_cpu_exit, - .ready = acpi_cpufreq_cpu_ready, .resume = acpi_cpufreq_resume, .name = "acpi-cpufreq", .attr = acpi_cpufreq_attr, diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 231e585f6ba2..ca1d103ec449 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -137,11 +137,15 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "qcom,apq8096", }, { .compatible = "qcom,msm8996", }, { .compatible = "qcom,qcs404", }, + { .compatible = "qcom,sa8155p" }, { .compatible = "qcom,sc7180", }, { .compatible = "qcom,sc7280", }, { .compatible = "qcom,sc8180x", }, { .compatible = "qcom,sdm845", }, + { .compatible = "qcom,sm6350", }, { .compatible = "qcom,sm8150", }, + { .compatible = "qcom,sm8250", }, + { .compatible = 
"qcom,sm8350", }, { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index ece52863ba62..8fcaba541539 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -143,8 +143,6 @@ static int cpufreq_init(struct cpufreq_policy *policy) cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; } - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); - return 0; out_clk_put: @@ -184,6 +182,7 @@ static struct cpufreq_driver dt_cpufreq_driver = { .exit = cpufreq_exit, .online = cpufreq_online, .offline = cpufreq_offline, + .register_em = cpufreq_register_em_with_opp, .name = "cpufreq-dt", .attr = cpufreq_dt_attr, .suspend = cpufreq_generic_suspend, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 06c526d66dd3..5782b15a8caa 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1491,6 +1491,19 @@ static int cpufreq_online(unsigned int cpu) write_lock_irqsave(&cpufreq_driver_lock, flags); list_add(&policy->policy_list, &cpufreq_policy_list); write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + /* + * Register with the energy model before + * sched_cpufreq_governor_change() is called, which will result + * in rebuilding of the sched domains, which should only be done + * once the energy model is properly initialized for the policy + * first. + * + * Also, this should be called before the policy is registered + * with cooling framework. + */ + if (cpufreq_driver->register_em) + cpufreq_driver->register_em(policy); } ret = cpufreq_init_policy(policy); @@ -1504,10 +1517,6 @@ static int cpufreq_online(unsigned int cpu) kobject_uevent(&policy->kobj, KOBJ_ADD); - /* Callback for handling stuff after policy is ready */ - if (cpufreq_driver->ready) - cpufreq_driver->ready(policy); - if (cpufreq_thermal_control_enabled(cpufreq_driver)) policy->cdev = of_cpufreq_cooling_register(policy); diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c index 66b05a326910..a6f365b9cc1a 100644 --- a/drivers/cpufreq/cpufreq_governor_attr_set.c +++ b/drivers/cpufreq/cpufreq_governor_attr_set.c @@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l if (count) return count; - kobject_put(&attr_set->kobj); mutex_destroy(&attr_set->update_lock); + kobject_put(&attr_set->kobj); return 0; } EXPORT_SYMBOL_GPL(gov_attr_set_put); diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 5bf5fc759881..90beb26ed34e 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -192,7 +192,6 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy) policy->clk = clks[ARM].clk; cpufreq_generic_init(policy, freq_table, transition_latency); policy->suspend_freq = max_freq; - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; } @@ -204,6 +203,7 @@ static struct cpufreq_driver imx6q_cpufreq_driver = { .target_index = imx6q_set_target, .get = cpufreq_generic_get, .init = imx6q_cpufreq_init, + .register_em = cpufreq_register_em_with_opp, .name = "imx6q-cpufreq", .attr = cpufreq_generic_attr, .suspend = cpufreq_generic_suspend, diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b4ffe6c8a0d0..8c176b7dae41 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -32,7 +32,6 @@ #include <asm/cpu_device_id.h> #include <asm/cpufeature.h> #include <asm/intel-family.h> -#include 
"../drivers/thermal/intel/thermal_interrupt.h" #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) @@ -220,7 +219,6 @@ struct global_params { * @sched_flags: Store scheduler flags for possible cross CPU update * @hwp_boost_min: Last HWP boosted min performance * @suspended: Whether or not the driver has been suspended. - * @hwp_notify_work: workqueue for HWP notifications. * * This structure stores per CPU instance data for all CPUs. */ @@ -259,7 +257,6 @@ struct cpudata { unsigned int sched_flags; u32 hwp_boost_min; bool suspended; - struct delayed_work hwp_notify_work; }; static struct cpudata **all_cpu_data; @@ -271,6 +268,7 @@ static struct cpudata **all_cpu_data; * @get_min: Callback to get minimum P state * @get_turbo: Callback to get turbo P state * @get_scaling: Callback to get frequency scaling factor + * @get_cpu_scaling: Get frequency scaling factor for a given cpu * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference * @get_val: Callback to convert P state to actual MSR write value * @get_vid: Callback to get VID data for Atom platforms @@ -284,6 +282,7 @@ struct pstate_funcs { int (*get_min)(void); int (*get_turbo)(void); int (*get_scaling)(void); + int (*get_cpu_scaling)(int cpu); int (*get_aperf_mperf_shift)(void); u64 (*get_val)(struct cpudata*, int pstate); void (*get_vid)(struct cpudata *); @@ -387,6 +386,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu) return cppc_perf.nominal_perf; } +static u32 intel_pstate_cppc_nominal(int cpu) +{ + u64 nominal_perf; + + if (cppc_get_nominal_perf(cpu, &nominal_perf)) + return 0; + + return nominal_perf; +} #else /* CONFIG_ACPI_CPPC_LIB */ static inline void intel_pstate_set_itmt_prio(int cpu) { @@ -473,20 +481,6 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) acpi_processor_unregister_performance(policy->cpu); } - -static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps) -{ - return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf; -} - -static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu, - struct cppc_perf_caps *caps) -{ - if (cppc_get_perf_caps(cpu->cpu, caps)) - return false; - - return caps->highest_perf && caps->lowest_perf <= caps->highest_perf; -} #else /* CONFIG_ACPI */ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) { @@ -509,15 +503,8 @@ static inline int intel_pstate_get_cppc_guaranteed(int cpu) } #endif /* CONFIG_ACPI_CPPC_LIB */ -static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu) -{ - pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu); - - cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling; -} - /** - * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels. + * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels. * @cpu: Target CPU. * * On hybrid processors, HWP may expose more performance levels than there are @@ -525,115 +512,46 @@ static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu) * scaling factor between HWP performance levels and CPU frequency will be less * than the scaling factor between P-state values and CPU frequency. * - * In that case, the scaling factor between HWP performance levels and CPU - * frequency needs to be determined which can be done with the help of the - * observation that certain HWP performance levels should correspond to certain - * P-states, like for example the HWP highest performance should correspond - * to the maximum turbo P-state of the CPU. 
+ * In that case, adjust the CPU parameters used in computations accordingly. */ -static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu) +static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) { int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; int perf_ctl_turbo = pstate_funcs.get_turbo(); int turbo_freq = perf_ctl_turbo * perf_ctl_scaling; - int perf_ctl_max = pstate_funcs.get_max(); - int max_freq = perf_ctl_max * perf_ctl_scaling; - int scaling = INT_MAX; - int freq; + int scaling = cpu->pstate.scaling; pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys); - pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max); + pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max()); pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo); pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling); - pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); - -#ifdef CONFIG_ACPI - if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) { - struct cppc_perf_caps caps; - - if (intel_pstate_cppc_perf_caps(cpu, &caps)) { - if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) { - pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu); - - /* - * If the CPPC nominal performance is valid, it - * can be assumed to correspond to cpu_khz. - */ - if (caps.nominal_perf == perf_ctl_max_phys) { - intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); - return; - } - scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf); - } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) { - pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu); - - /* - * If the CPPC guaranteed performance is valid, - * it can be assumed to correspond to max_freq. - */ - if (caps.guaranteed_perf == perf_ctl_max) { - intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); - return; - } - scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf); - } - } - } -#endif - /* - * If using the CPPC data to compute the HWP-to-frequency scaling factor - * doesn't work, use the HWP_CAP gauranteed perf for this purpose with - * the assumption that it corresponds to max_freq. - */ - if (scaling > perf_ctl_scaling) { - pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu); - - if (cpu->pstate.max_pstate == perf_ctl_max) { - intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); - return; - } - scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate); - if (scaling > perf_ctl_scaling) { - /* - * This should not happen, because it would mean that - * the number of HWP perf levels was less than the - * number of P-states, so use the PERF_CTL scaling in - * that case. - */ - pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu, - scaling); - - intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); - return; - } - } + pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling); /* - * If the product of the HWP performance scaling factor obtained above - * and the HWP_CAP highest performance is greater than the maximum turbo - * frequency corresponding to the pstate_funcs.get_turbo() return value, - * the scaling factor is too high, so recompute it so that the HWP_CAP - * highest performance corresponds to the maximum turbo frequency. 
+ * If the product of the HWP performance scaling factor and the HWP_CAP + * highest performance is greater than the maximum turbo frequency + * corresponding to the pstate_funcs.get_turbo() return value, the + * scaling factor is too high, so recompute it to make the HWP_CAP + * highest performance correspond to the maximum turbo frequency. */ if (turbo_freq < cpu->pstate.turbo_pstate * scaling) { - pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling); - cpu->pstate.turbo_freq = turbo_freq; scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate); - } - - cpu->pstate.scaling = scaling; + cpu->pstate.scaling = scaling; - pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling); + pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n", + cpu->cpu, scaling); + } cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling, perf_ctl_scaling); - freq = perf_ctl_max_phys * perf_ctl_scaling; - cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling); + cpu->pstate.max_pstate_physical = + DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling, + scaling); cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; /* @@ -1628,40 +1546,6 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) /************************** sysfs end ************************/ -static void intel_pstate_notify_work(struct work_struct *work) -{ - mutex_lock(&intel_pstate_driver_lock); - cpufreq_update_policy(smp_processor_id()); - wrmsrl(MSR_HWP_STATUS, 0); - mutex_unlock(&intel_pstate_driver_lock); -} - -void notify_hwp_interrupt(void) -{ - unsigned int this_cpu = smp_processor_id(); - struct cpudata *cpudata; - u64 value; - - if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) - return; - - rdmsrl(MSR_HWP_STATUS, value); - if (!(value & 0x01)) - return; - - cpudata = all_cpu_data[this_cpu]; - schedule_delayed_work_on(this_cpu, &cpudata->hwp_notify_work, msecs_to_jiffies(10)); -} - -static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) -{ - /* Enable HWP notification interrupt for guaranteed performance change */ - if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { - INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); - } -} - static void intel_pstate_hwp_enable(struct cpudata *cpudata) { /* First disable HWP notification interrupt as we don't process them */ @@ -1671,8 +1555,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); if (cpudata->epp_default == -EINVAL) cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); - - intel_pstate_enable_hwp_interrupt(cpudata); } static int atom_get_min_pstate(void) @@ -1900,6 +1782,38 @@ static int knl_get_turbo_pstate(void) return ret; } +#ifdef CONFIG_ACPI_CPPC_LIB +static u32 hybrid_ref_perf; + +static int hybrid_get_cpu_scaling(int cpu) +{ + return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf, + intel_pstate_cppc_nominal(cpu)); +} + +static void intel_pstate_cppc_set_cpu_scaling(void) +{ + u32 min_nominal_perf = U32_MAX; + int cpu; + + for_each_present_cpu(cpu) { + u32 nominal_perf = intel_pstate_cppc_nominal(cpu); + + if (nominal_perf && nominal_perf < min_nominal_perf) + min_nominal_perf = nominal_perf; + } + + if (min_nominal_perf < U32_MAX) { + hybrid_ref_perf = min_nominal_perf; + pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling; + } +} +#else +static inline void intel_pstate_cppc_set_cpu_scaling(void) +{ +} +#endif /* CONFIG_ACPI_CPPC_LIB */ + static 
void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) { trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); @@ -1928,10 +1842,8 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu) static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { - bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU); int perf_ctl_max_phys = pstate_funcs.get_max_physical(); - int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys : - pstate_funcs.get_scaling(); + int perf_ctl_scaling = pstate_funcs.get_scaling(); cpu->pstate.min_pstate = pstate_funcs.get_min(); cpu->pstate.max_pstate_physical = perf_ctl_max_phys; @@ -1940,10 +1852,13 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) if (hwp_active && !hwp_mode_bdw) { __intel_pstate_get_hwp_cap(cpu); - if (hybrid_cpu) - intel_pstate_hybrid_hwp_calibrate(cpu); - else + if (pstate_funcs.get_cpu_scaling) { + cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu); + if (cpu->pstate.scaling != perf_ctl_scaling) + intel_pstate_hybrid_hwp_adjust(cpu); + } else { cpu->pstate.scaling = perf_ctl_scaling; + } } else { cpu->pstate.scaling = perf_ctl_scaling; cpu->pstate.max_pstate = pstate_funcs.get_max(); @@ -3290,11 +3205,15 @@ static int __init intel_pstate_init(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return -ENODEV; - if (no_load) - return -ENODEV; - id = x86_match_cpu(hwp_support_ids); if (id) { + bool hwp_forced = intel_pstate_hwp_is_enabled(); + + if (hwp_forced) + pr_info("HWP enabled by BIOS\n"); + else if (no_load) + return -ENODEV; + copy_cpu_funcs(&core_funcs); /* * Avoid enabling HWP for processors without EPP support, @@ -3304,8 +3223,7 @@ static int __init intel_pstate_init(void) * If HWP is enabled already, though, there is no choice but to * deal with it. */ - if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || - intel_pstate_hwp_is_enabled()) { + if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { hwp_active++; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; @@ -3315,9 +3233,16 @@ static int __init intel_pstate_init(void) if (!default_driver) default_driver = &intel_pstate; + if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) + intel_pstate_cppc_set_cpu_scaling(); + goto hwp_cpu_matched; } + pr_info("HWP not enabled\n"); } else { + if (no_load) + return -ENODEV; + id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) { pr_info("CPU model not supported\n"); @@ -3396,10 +3321,9 @@ static int __init intel_pstate_setup(char *str) else if (!strcmp(str, "passive")) default_driver = &intel_cpufreq; - if (!strcmp(str, "no_hwp")) { - pr_info("HWP disabled\n"); + if (!strcmp(str, "no_hwp")) no_hwp = 1; - } + if (!strcmp(str, "force")) force_load = 1; if (!strcmp(str, "hwp_only")) diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c new file mode 100644 index 000000000000..0cf18dd46b92 --- /dev/null +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020 MediaTek Inc. 
+ */ + +#include <linux/bitfield.h> +#include <linux/cpufreq.h> +#include <linux/energy_model.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/slab.h> + +#define LUT_MAX_ENTRIES 32U +#define LUT_FREQ GENMASK(11, 0) +#define LUT_ROW_SIZE 0x4 +#define CPUFREQ_HW_STATUS BIT(0) +#define SVS_HW_STATUS BIT(1) +#define POLL_USEC 1000 +#define TIMEOUT_USEC 300000 + +enum { + REG_FREQ_LUT_TABLE, + REG_FREQ_ENABLE, + REG_FREQ_PERF_STATE, + REG_FREQ_HW_STATE, + REG_EM_POWER_TBL, + REG_FREQ_LATENCY, + + REG_ARRAY_SIZE, +}; + +struct mtk_cpufreq_data { + struct cpufreq_frequency_table *table; + void __iomem *reg_bases[REG_ARRAY_SIZE]; + int nr_opp; +}; + +static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = { + [REG_FREQ_LUT_TABLE] = 0x0, + [REG_FREQ_ENABLE] = 0x84, + [REG_FREQ_PERF_STATE] = 0x88, + [REG_FREQ_HW_STATE] = 0x8c, + [REG_EM_POWER_TBL] = 0x90, + [REG_FREQ_LATENCY] = 0x110, +}; + +static int __maybe_unused +mtk_cpufreq_get_cpu_power(unsigned long *mW, + unsigned long *KHz, struct device *cpu_dev) +{ + struct mtk_cpufreq_data *data; + struct cpufreq_policy *policy; + int i; + + policy = cpufreq_cpu_get_raw(cpu_dev->id); + if (!policy) + return 0; + + data = policy->driver_data; + + for (i = 0; i < data->nr_opp; i++) { + if (data->table[i].frequency < *KHz) + break; + } + i--; + + *KHz = data->table[i].frequency; + *mW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] + + i * LUT_ROW_SIZE) / 1000; + + return 0; +} + +static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy, + unsigned int index) +{ + struct mtk_cpufreq_data *data = policy->driver_data; + + writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]); + + return 0; +} + +static unsigned int mtk_cpufreq_hw_get(unsigned int cpu) +{ + struct mtk_cpufreq_data *data; + struct cpufreq_policy *policy; + unsigned int index; + + policy = cpufreq_cpu_get_raw(cpu); + if (!policy) + return 0; + + data = policy->driver_data; + + index = readl_relaxed(data->reg_bases[REG_FREQ_PERF_STATE]); + index = min(index, LUT_MAX_ENTRIES - 1); + + return data->table[index].frequency; +} + +static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct mtk_cpufreq_data *data = policy->driver_data; + unsigned int index; + + index = cpufreq_table_find_index_dl(policy, target_freq); + + writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]); + + return policy->freq_table[index].frequency; +} + +static int mtk_cpu_create_freq_table(struct platform_device *pdev, + struct mtk_cpufreq_data *data) +{ + struct device *dev = &pdev->dev; + u32 temp, i, freq, prev_freq = 0; + void __iomem *base_table; + + data->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1, + sizeof(*data->table), GFP_KERNEL); + if (!data->table) + return -ENOMEM; + + base_table = data->reg_bases[REG_FREQ_LUT_TABLE]; + + for (i = 0; i < LUT_MAX_ENTRIES; i++) { + temp = readl_relaxed(base_table + (i * LUT_ROW_SIZE)); + freq = FIELD_GET(LUT_FREQ, temp) * 1000; + + if (freq == prev_freq) + break; + + data->table[i].frequency = freq; + + dev_dbg(dev, "index=%d freq=%d\n", i, data->table[i].frequency); + + prev_freq = freq; + } + + data->table[i].frequency = CPUFREQ_TABLE_END; + data->nr_opp = i; + + return 0; +} + +static int mtk_cpu_resources_init(struct platform_device *pdev, + struct cpufreq_policy *policy, + const u16 *offsets) +{ + struct mtk_cpufreq_data *data; + struct device *dev = 
&pdev->dev; + void __iomem *base; + int ret, i; + int index; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains", + "#performance-domain-cells", + policy->cpus); + if (index < 0) + return index; + + base = devm_platform_ioremap_resource(pdev, index); + if (IS_ERR(base)) + return PTR_ERR(base); + + for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++) + data->reg_bases[i] = base + offsets[i]; + + ret = mtk_cpu_create_freq_table(pdev, data); + if (ret) { + dev_info(dev, "Domain-%d failed to create freq table\n", index); + return ret; + } + + policy->freq_table = data->table; + policy->driver_data = data; + + return 0; +} + +static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) +{ + struct platform_device *pdev = cpufreq_get_driver_data(); + int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS; + struct mtk_cpufreq_data *data; + unsigned int latency; + int ret; + + /* Get the bases of cpufreq for domains */ + ret = mtk_cpu_resources_init(pdev, policy, platform_get_drvdata(pdev)); + if (ret) { + dev_info(&pdev->dev, "CPUFreq resource init failed\n"); + return ret; + } + + data = policy->driver_data; + + latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000; + if (!latency) + latency = CPUFREQ_ETERNAL; + + policy->cpuinfo.transition_latency = latency; + policy->fast_switch_possible = true; + + /* HW should be in enabled state to proceed now */ + writel_relaxed(0x1, data->reg_bases[REG_FREQ_ENABLE]); + if (readl_poll_timeout(data->reg_bases[REG_FREQ_HW_STATE], sig, + (sig & pwr_hw) == pwr_hw, POLL_USEC, + TIMEOUT_USEC)) { + if (!(sig & CPUFREQ_HW_STATUS)) { + pr_info("cpufreq hardware of CPU%d is not enabled\n", + policy->cpu); + return -ENODEV; + } + + pr_info("SVS of CPU%d is not enabled\n", policy->cpu); + } + + return 0; +} + +static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) +{ + struct mtk_cpufreq_data *data = policy->driver_data; + + /* HW should be in paused state now */ + writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]); + + return 0; +} + +static void mtk_cpufreq_register_em(struct cpufreq_policy *policy) +{ + struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power); + struct mtk_cpufreq_data *data = policy->driver_data; + + em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp, + &em_cb, policy->cpus, true); +} + +static struct cpufreq_driver cpufreq_mtk_hw_driver = { + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_IS_COOLING_DEV, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = mtk_cpufreq_hw_target_index, + .get = mtk_cpufreq_hw_get, + .init = mtk_cpufreq_hw_cpu_init, + .exit = mtk_cpufreq_hw_cpu_exit, + .register_em = mtk_cpufreq_register_em, + .fast_switch = mtk_cpufreq_hw_fast_switch, + .name = "mtk-cpufreq-hw", + .attr = cpufreq_generic_attr, +}; + +static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev) +{ + const void *data; + int ret; + + data = of_device_get_match_data(&pdev->dev); + if (!data) + return -EINVAL; + + platform_set_drvdata(pdev, (void *) data); + cpufreq_mtk_hw_driver.driver_data = pdev; + + ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver); + if (ret) + dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n"); + + return ret; +} + +static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev) +{ + return cpufreq_unregister_driver(&cpufreq_mtk_hw_driver); +} + +static const struct 
of_device_id mtk_cpufreq_hw_match[] = { + { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets }, + {} +}; + +static struct platform_driver mtk_cpufreq_hw_driver = { + .probe = mtk_cpufreq_hw_driver_probe, + .remove = mtk_cpufreq_hw_driver_remove, + .driver = { + .name = "mtk-cpufreq-hw", + .of_match_table = mtk_cpufreq_hw_match, + }, +}; +module_platform_driver(mtk_cpufreq_hw_driver); + +MODULE_AUTHOR("Hector Yuan <hector.yuan@mediatek.com>"); +MODULE_DESCRIPTION("Mediatek cpufreq-hw driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 87019d5a9547..866163883b48 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -448,8 +448,6 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) policy->driver_data = info; policy->clk = info->cpu_clk; - dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus); - return 0; } @@ -471,6 +469,7 @@ static struct cpufreq_driver mtk_cpufreq_driver = { .get = cpufreq_generic_get, .init = mtk_cpufreq_init, .exit = mtk_cpufreq_exit, + .register_em = cpufreq_register_em_with_opp, .name = "mtk-cpufreq", .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index e035ee216b0f..1b50df06c6bc 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -131,7 +131,6 @@ static int omap_cpu_init(struct cpufreq_policy *policy) /* FIXME: what's the actual transition time? */ cpufreq_generic_init(policy, freq_table, 300 * 1000); - dev_pm_opp_of_register_em(mpu_dev, policy->cpus); return 0; } @@ -150,6 +149,7 @@ static struct cpufreq_driver omap_driver = { .get = cpufreq_generic_get, .init = omap_cpu_init, .exit = omap_cpu_exit, + .register_em = cpufreq_register_em_with_opp, .name = "omap", .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 23a06cba392c..5a2cf5f91ccb 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -36,6 +36,7 @@ #define MAX_PSTATE_SHIFT 32 #define LPSTATE_SHIFT 48 #define GPSTATE_SHIFT 56 +#define MAX_NR_CHIPS 32 #define MAX_RAMP_DOWN_TIME 5120 /* @@ -1046,12 +1047,20 @@ static int init_chip_info(void) unsigned int *chip; unsigned int cpu, i; unsigned int prev_chip_id = UINT_MAX; + cpumask_t *chip_cpu_mask; int ret = 0; chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; + /* Allocate a chip cpu mask large enough to fit mask for all chips */ + chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL); + if (!chip_cpu_mask) { + ret = -ENOMEM; + goto free_and_return; + } + for_each_possible_cpu(cpu) { unsigned int id = cpu_to_chip_id(cpu); @@ -1059,22 +1068,25 @@ static int init_chip_info(void) prev_chip_id = id; chip[nr_chips++] = id; } + cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]); } chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL); if (!chips) { ret = -ENOMEM; - goto free_and_return; + goto out_free_chip_cpu_mask; } for (i = 0; i < nr_chips; i++) { chips[i].id = chip[i]; - cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i])); + cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]); INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn); for_each_cpu(cpu, &chips[i].mask) per_cpu(chip_info, cpu) = &chips[i]; } +out_free_chip_cpu_mask: + kfree(chip_cpu_mask); free_and_return: kfree(chip); return ret; diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c 
b/drivers/cpufreq/qcom-cpufreq-hw.c index f86859bf76f1..a2be0df7e174 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -7,12 +7,14 @@ #include <linux/cpufreq.h> #include <linux/init.h> #include <linux/interconnect.h> +#include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/pm_opp.h> #include <linux/slab.h> +#include <linux/spinlock.h> #define LUT_MAX_ENTRIES 40U #define LUT_SRC GENMASK(31, 30) @@ -22,10 +24,13 @@ #define CLK_HW_DIV 2 #define LUT_TURBO_IND 1 +#define HZ_PER_KHZ 1000 + struct qcom_cpufreq_soc_data { u32 reg_enable; u32 reg_freq_lut; u32 reg_volt_lut; + u32 reg_current_vote; u32 reg_perf_state; u8 lut_row_size; }; @@ -34,6 +39,16 @@ struct qcom_cpufreq_data { void __iomem *base; struct resource *res; const struct qcom_cpufreq_soc_data *soc_data; + + /* + * Mutex to synchronize between de-init sequence and re-starting LMh + * polling/interrupts + */ + struct mutex throttle_lock; + int throttle_irq; + bool cancel_throttle; + struct delayed_work throttle_work; + struct cpufreq_policy *policy; }; static unsigned long cpu_hw_rate, xo_rate; @@ -251,10 +266,92 @@ static void qcom_get_related_cpus(int index, struct cpumask *m) } } +static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) +{ + unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote); + + return (val & 0x3FF) * 19200; +} + +static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) +{ + unsigned long max_capacity, capacity, freq_hz, throttled_freq; + struct cpufreq_policy *policy = data->policy; + int cpu = cpumask_first(policy->cpus); + struct device *dev = get_cpu_device(cpu); + struct dev_pm_opp *opp; + unsigned int freq; + + /* + * Get the h/w throttled frequency, normalize it using the + * registered opp table and use it to calculate thermal pressure. + */ + freq = qcom_lmh_get_throttle_freq(data); + freq_hz = freq * HZ_PER_KHZ; + + opp = dev_pm_opp_find_freq_floor(dev, &freq_hz); + if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE) + dev_pm_opp_find_freq_ceil(dev, &freq_hz); + + throttled_freq = freq_hz / HZ_PER_KHZ; + + /* Update thermal pressure */ + + max_capacity = arch_scale_cpu_capacity(cpu); + capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq); + + /* Don't pass boost capacity to scheduler */ + if (capacity > max_capacity) + capacity = max_capacity; + + arch_set_thermal_pressure(policy->cpus, max_capacity - capacity); + + /* + * In the unlikely case policy is unregistered do not enable + * polling or h/w interrupt + */ + mutex_lock(&data->throttle_lock); + if (data->cancel_throttle) + goto out; + + /* + * If h/w throttled frequency is higher than what cpufreq has requested + * for, then stop polling and switch back to interrupt mechanism. 
+ */ + if (throttled_freq >= qcom_cpufreq_hw_get(cpu)) + enable_irq(data->throttle_irq); + else + mod_delayed_work(system_highpri_wq, &data->throttle_work, + msecs_to_jiffies(10)); + +out: + mutex_unlock(&data->throttle_lock); +} + +static void qcom_lmh_dcvs_poll(struct work_struct *work) +{ + struct qcom_cpufreq_data *data; + + data = container_of(work, struct qcom_cpufreq_data, throttle_work.work); + qcom_lmh_dcvs_notify(data); +} + +static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data) +{ + struct qcom_cpufreq_data *c_data = data; + + /* Disable interrupt and enable polling */ + disable_irq_nosync(c_data->throttle_irq); + qcom_lmh_dcvs_notify(c_data); + + return 0; +} + static const struct qcom_cpufreq_soc_data qcom_soc_data = { .reg_enable = 0x0, .reg_freq_lut = 0x110, .reg_volt_lut = 0x114, + .reg_current_vote = 0x704, .reg_perf_state = 0x920, .lut_row_size = 32, }; @@ -274,6 +371,51 @@ static const struct of_device_id qcom_cpufreq_hw_match[] = { }; MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match); +static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index) +{ + struct qcom_cpufreq_data *data = policy->driver_data; + struct platform_device *pdev = cpufreq_get_driver_data(); + char irq_name[15]; + int ret; + + /* + * Look for LMh interrupt. If no interrupt line is specified / + * if there is an error, allow cpufreq to be enabled as usual. + */ + data->throttle_irq = platform_get_irq(pdev, index); + if (data->throttle_irq <= 0) + return data->throttle_irq == -EPROBE_DEFER ? -EPROBE_DEFER : 0; + + data->cancel_throttle = false; + data->policy = policy; + + mutex_init(&data->throttle_lock); + INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll); + + snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", policy->cpu); + ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq, + IRQF_ONESHOT, irq_name, data); + if (ret) { + dev_err(&pdev->dev, "Error registering %s: %d\n", irq_name, ret); + return 0; + } + + return 0; +} + +static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data) +{ + if (data->throttle_irq <= 0) + return; + + mutex_lock(&data->throttle_lock); + data->cancel_throttle = true; + mutex_unlock(&data->throttle_lock); + + cancel_delayed_work_sync(&data->throttle_work); + free_irq(data->throttle_irq, data); +} + static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) { struct platform_device *pdev = cpufreq_get_driver_data(); @@ -348,6 +490,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) } policy->driver_data = data; + policy->dvfs_possible_from_any_cpu = true; ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy); if (ret) { @@ -362,14 +505,16 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) goto error; } - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); - if (policy_has_boost_freq(policy)) { ret = cpufreq_enable_boost_support(); if (ret) dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); } + ret = qcom_cpufreq_hw_lmh_init(policy, index); + if (ret) + goto error; + return 0; error: kfree(data); @@ -389,6 +534,7 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) dev_pm_opp_remove_all_dynamic(cpu_dev); dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); + qcom_cpufreq_hw_lmh_exit(data); kfree(policy->freq_table); kfree(data); iounmap(base); @@ -412,6 +558,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = { .get = qcom_cpufreq_hw_get, .init = qcom_cpufreq_hw_cpu_init, .exit = qcom_cpufreq_hw_cpu_exit, + .register_em = 
cpufreq_register_em_with_opp, .fast_switch = qcom_cpufreq_hw_fast_switch, .name = "qcom-cpufreq-hw", .attr = qcom_cpufreq_hw_attr, diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 75f818d04b48..1e0cd4d165f0 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -22,7 +22,9 @@ struct scmi_data { int domain_id; + int nr_opp; struct device *cpu_dev; + cpumask_var_t opp_shared_cpus; }; static struct scmi_protocol_handle *ph; @@ -123,9 +125,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) struct device *cpu_dev; struct scmi_data *priv; struct cpufreq_frequency_table *freq_table; - struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); - cpumask_var_t opp_shared_cpus; - bool power_scale_mw; cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -133,9 +132,15 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) return -ENODEV; } - if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL)) + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) return -ENOMEM; + if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) { + ret = -ENOMEM; + goto out_free_priv; + } + /* Obtain CPUs that share SCMI performance controls */ ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus); if (ret) { @@ -148,14 +153,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) * The OPP 'sharing cpus' info may come from DT through an empty opp * table and opp-shared. */ - ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus); - if (ret || !cpumask_weight(opp_shared_cpus)) { + ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus); + if (ret || !cpumask_weight(priv->opp_shared_cpus)) { /* * Either opp-table is not set or no opp-shared was found. * Use the CPU mask from SCMI to designate CPUs sharing an OPP * table. 
*/ - cpumask_copy(opp_shared_cpus, policy->cpus); + cpumask_copy(priv->opp_shared_cpus, policy->cpus); } /* @@ -180,7 +185,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) goto out_free_opp; } - ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus); + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus); if (ret) { dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", __func__, ret); @@ -188,21 +193,13 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) goto out_free_opp; } - power_scale_mw = perf_ops->power_scale_mw_get(ph); - em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, - opp_shared_cpus, power_scale_mw); - } - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto out_free_opp; + priv->nr_opp = nr_opp; } ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto out_free_priv; + goto out_free_opp; } priv->cpu_dev = cpu_dev; @@ -223,17 +220,16 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) policy->fast_switch_possible = perf_ops->fast_switch_possible(ph, cpu_dev); - free_cpumask_var(opp_shared_cpus); return 0; -out_free_priv: - kfree(priv); - out_free_opp: dev_pm_opp_remove_all_dynamic(cpu_dev); out_free_cpumask: - free_cpumask_var(opp_shared_cpus); + free_cpumask_var(priv->opp_shared_cpus); + +out_free_priv: + kfree(priv); return ret; } @@ -244,11 +240,33 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); dev_pm_opp_remove_all_dynamic(priv->cpu_dev); + free_cpumask_var(priv->opp_shared_cpus); kfree(priv); return 0; } +static void scmi_cpufreq_register_em(struct cpufreq_policy *policy) +{ + struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); + bool power_scale_mw = perf_ops->power_scale_mw_get(ph); + struct scmi_data *priv = policy->driver_data; + + /* + * This callback will be called for each policy, but we don't need to + * register with EM every time. Despite not being part of the same + * policy, some CPUs may still share their perf-domains, and a CPU from + * another policy may already have registered with EM on behalf of CPUs + * of this policy. 
+ */ + if (!priv->nr_opp) + return; + + em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp, + &em_cb, priv->opp_shared_cpus, + power_scale_mw); +} + static struct cpufreq_driver scmi_cpufreq_driver = { .name = "scmi", .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | @@ -261,6 +279,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = { .get = scmi_cpufreq_get_rate, .init = scmi_cpufreq_init, .exit = scmi_cpufreq_exit, + .register_em = scmi_cpufreq_register_em, }; static int scmi_cpufreq_probe(struct scmi_device *sdev) diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index d6a698a1b5d1..bda3e7d42964 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -163,8 +163,6 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) policy->fast_switch_possible = false; - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); - return 0; out_free_cpufreq_table: @@ -200,6 +198,7 @@ static struct cpufreq_driver scpi_cpufreq_driver = { .init = scpi_cpufreq_init, .exit = scpi_cpufreq_exit, .target_index = scpi_cpufreq_set_target, + .register_em = cpufreq_register_em_with_opp, }; static int scpi_cpufreq_probe(struct platform_device *pdev) diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 1a251e635ebd..b8704232c27b 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -145,16 +145,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) return 0; } -static void sh_cpufreq_cpu_ready(struct cpufreq_policy *policy) -{ - struct device *dev = get_cpu_device(policy->cpu); - - dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, " - "Maximum %u.%03u MHz.\n", - policy->min / 1000, policy->min % 1000, - policy->max / 1000, policy->max % 1000); -} - static struct cpufreq_driver sh_cpufreq_driver = { .name = "sh", .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, @@ -163,7 +153,6 @@ static struct cpufreq_driver sh_cpufreq_driver = { .verify = sh_cpufreq_verify, .init = sh_cpufreq_cpu_init, .exit = sh_cpufreq_cpu_exit, - .ready = sh_cpufreq_cpu_ready, .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 51dfa9ae6cf5..d295f405c4bb 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -15,7 +15,6 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/cpumask.h> -#include <linux/cpu_cooling.h> #include <linux/device.h> #include <linux/module.h> #include <linux/mutex.h> @@ -47,7 +46,6 @@ static bool bL_switching_enabled; #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? 
freq >> 1 : freq) -static struct thermal_cooling_device *cdev[MAX_CLUSTERS]; static struct clk *clk[MAX_CLUSTERS]; static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1]; static atomic_t cluster_usage[MAX_CLUSTERS + 1]; @@ -442,8 +440,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) policy->freq_table = freq_table[cur_cluster]; policy->cpuinfo.transition_latency = 1000000; /* 1 ms */ - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); - if (is_bL_switching_enabled()) per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); @@ -455,12 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev; - int cur_cluster = cpu_to_cluster(policy->cpu); - - if (cur_cluster < MAX_CLUSTERS) { - cpufreq_cooling_unregister(cdev[cur_cluster]); - cdev[cur_cluster] = NULL; - } cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -473,17 +463,6 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) return 0; } -static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy) -{ - int cur_cluster = cpu_to_cluster(policy->cpu); - - /* Do not register a cpu_cooling device if we are in IKS mode */ - if (cur_cluster >= MAX_CLUSTERS) - return; - - cdev[cur_cluster] = of_cpufreq_cooling_register(policy); -} - static struct cpufreq_driver ve_spc_cpufreq_driver = { .name = "vexpress-spc", .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | @@ -493,7 +472,7 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = { .get = ve_spc_cpufreq_get_rate, .init = ve_spc_cpufreq_init, .exit = ve_spc_cpufreq_exit, - .ready = ve_spc_cpufreq_ready, + .register_em = cpufreq_register_em_with_opp, .attr = cpufreq_generic_attr, }; @@ -553,6 +532,9 @@ static int ve_spc_cpufreq_probe(struct platform_device *pdev) for (i = 0; i < MAX_CLUSTERS; i++) mutex_init(&cluster_lock[i]); + if (!is_bL_switching_enabled()) + ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV; + ret = cpufreq_register_driver(&ve_spc_cpufreq_driver); if (ret) { pr_info("%s: Failed registering platform driver: %s, err: %d\n", |