78 files changed, 1075 insertions, 652 deletions
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt deleted file mode 100644 index 1d7e49167666..000000000000 --- a/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt +++ /dev/null @@ -1,61 +0,0 @@ -Generic cpufreq driver - -It is a generic DT based cpufreq driver for frequency management. It supports -both uniprocessor (UP) and symmetric multiprocessor (SMP) systems which share -clock and voltage across all CPUs. - -Both required and optional properties listed below must be defined -under node /cpus/cpu@0. - -Required properties: -- None - -Optional properties: -- operating-points: Refer to Documentation/devicetree/bindings/opp/opp-v1.yaml for - details. OPPs *must* be supplied either via DT, i.e. this property, or - populated at runtime. -- clock-latency: Specify the possible maximum transition latency for clock, - in unit of nanoseconds. -- voltage-tolerance: Specify the CPU voltage tolerance in percentage. -- #cooling-cells: - Please refer to - Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml. - -Examples: - -cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - compatible = "arm,cortex-a9"; - reg = <0>; - next-level-cache = <&L2>; - operating-points = < - /* kHz uV */ - 792000 1100000 - 396000 950000 - 198000 850000 - >; - clock-latency = <61036>; /* two CLK32 periods */ - #cooling-cells = <2>; - }; - - cpu@1 { - compatible = "arm,cortex-a9"; - reg = <1>; - next-level-cache = <&L2>; - }; - - cpu@2 { - compatible = "arm,cortex-a9"; - reg = <2>; - next-level-cache = <&L2>; - }; - - cpu@3 { - compatible = "arm,cortex-a9"; - reg = <3>; - next-level-cache = <&L2>; - }; -}; diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml index e0242bed3342..2d42fc3d8ef8 100644 --- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml @@ -22,6 +22,7 @@ properties: items: - enum: - qcom,qcm2290-cpufreq-hw + - qcom,qcs615-cpufreq-hw - qcom,sc7180-cpufreq-hw - qcom,sc8180x-cpufreq-hw - qcom,sdm670-cpufreq-hw @@ -132,6 +133,7 @@ allOf: compatible: contains: enum: + - qcom,qcs615-cpufreq-hw - qcom,qdu1000-cpufreq-epss - qcom,sa8255p-cpufreq-epss - qcom,sa8775p-cpufreq-epss diff --git a/Documentation/devicetree/bindings/cpufreq/mediatek,mt8196-cpufreq-hw.yaml b/Documentation/devicetree/bindings/cpufreq/mediatek,mt8196-cpufreq-hw.yaml new file mode 100644 index 000000000000..5f3c7db3f3aa --- /dev/null +++ b/Documentation/devicetree/bindings/cpufreq/mediatek,mt8196-cpufreq-hw.yaml @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/cpufreq/mediatek,mt8196-cpufreq-hw.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek Hybrid CPUFreq for MT8196/MT6991 series SoCs + +maintainers: + - Nicolas Frattaroli <nicolas.frattaroli@collabora.com> + +description: + MT8196 uses CPUFreq management hardware that supports dynamic voltage + frequency scaling (dvfs), and can support several performance domains. 
+ +properties: + compatible: + const: mediatek,mt8196-cpufreq-hw + + reg: + items: + - description: FDVFS control register region + - description: OPP tables and control for performance domain 0 + - description: OPP tables and control for performance domain 1 + - description: OPP tables and control for performance domain 2 + + "#performance-domain-cells": + const: 1 + +required: + - compatible + - reg + - "#performance-domain-cells" + +additionalProperties: false + +examples: + - | + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a720"; + enable-method = "psci"; + performance-domains = <&performance 0>; + reg = <0x000>; + }; + + /* ... */ + + cpu6: cpu@600 { + device_type = "cpu"; + compatible = "arm,cortex-x4"; + enable-method = "psci"; + performance-domains = <&performance 1>; + reg = <0x600>; + }; + + cpu7: cpu@700 { + device_type = "cpu"; + compatible = "arm,cortex-x925"; + enable-method = "psci"; + performance-domains = <&performance 2>; + reg = <0x700>; + }; + }; + + /* ... */ + + soc { + #address-cells = <2>; + #size-cells = <2>; + + performance: performance-controller@c2c2034 { + compatible = "mediatek,mt8196-cpufreq-hw"; + reg = <0 0xc220400 0 0x20>, <0 0xc2c0f20 0 0x120>, + <0 0xc2c1040 0 0x120>, <0 0xc2c1160 0 0x120>; + #performance-domain-cells = <1>; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 520fb4e379a3..9cd8106dcac2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6349,6 +6349,12 @@ F: kernel/sched/cpufreq*.c F: rust/kernel/cpufreq.rs F: tools/testing/selftests/cpufreq/ +CPU FREQUENCY DRIVERS - VIRTUAL MACHINE CPUFREQ +M: Saravana Kannan <saravanak@google.com> +L: linux-pm@vger.kernel.org +S: Maintained +F: drivers/cpufreq/virtual-cpufreq.c + CPU HOTPLUG M: Thomas Gleixner <tglx@linutronix.de> M: Peter Zijlstra <peterz@infradead.org> diff --git a/arch/arm64/boot/dts/ti/k3-am62p5.dtsi b/arch/arm64/boot/dts/ti/k3-am62p5.dtsi index 202378d9d5cf..8982a7b9f1a6 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p5.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62p5.dtsi @@ -135,7 +135,7 @@ opp-1000000000 { opp-hz = /bits/ 64 <1000000000>; - opp-supported-hw = <0x01 0x0006>; + opp-supported-hw = <0x01 0x0007>; clock-latency-ns = <6000000>; }; diff --git a/drivers/base/base.h b/drivers/base/base.h index 123031a757d9..700aecd22fd3 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -251,6 +251,14 @@ void device_links_unbind_consumers(struct device *dev); void fw_devlink_drivers_done(void); void fw_devlink_probing_done(void); +#define dev_for_each_link_to_supplier(__link, __dev) \ + list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \ + device_links_read_lock_held()) + +#define dev_for_each_link_to_consumer(__link, __dev) \ + list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \ + device_links_read_lock_held()) + /* device pm support */ void device_pm_move_to_tail(struct device *dev); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 2ea6e05e6ec9..b9a34c3425ec 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -40,10 +40,6 @@ typedef int (*pm_callback_t)(struct device *); -#define list_for_each_entry_rcu_locked(pos, head, member) \ - list_for_each_entry_rcu(pos, head, member, \ - device_links_read_lock_held()) - /* * The entries in the dpm_list list are in a depth first order, simply * because children are guaranteed to be discovered after parents, and @@ -281,7 +277,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool 
async) * callbacks freeing the link objects for the links in the list we're * walking. */ - list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) + dev_for_each_link_to_supplier(link, dev) if (READ_ONCE(link->status) != DL_STATE_DORMANT) dpm_wait(link->supplier, async); @@ -338,7 +334,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async) * continue instead of trying to continue in parallel with its * unregistration). */ - list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node) + dev_for_each_link_to_consumer(link, dev) if (READ_ONCE(link->status) != DL_STATE_DORMANT) dpm_wait(link->consumer, async); @@ -675,7 +671,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func) idx = device_links_read_lock(); /* Start processing the device's "async" consumers. */ - list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node) + dev_for_each_link_to_consumer(link, dev) if (READ_ONCE(link->status) != DL_STATE_DORMANT) dpm_async_with_cleanup(link->consumer, func); @@ -724,8 +720,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy if (dev->power.syscore || dev->power.direct_complete) goto Out; - if (!dev->power.is_noirq_suspended) + if (!dev->power.is_noirq_suspended) { + /* + * This means that system suspend has been aborted in the noirq + * phase before invoking the noirq suspend callback for the + * device, so if device_suspend_late() has left it in suspend, + * device_resume_early() should leave it in suspend either in + * case the early resume of it depends on the noirq resume that + * has not run. + */ + if (dev_pm_skip_suspend(dev)) + dev->power.must_resume = false; + goto Out; + } if (!dpm_wait_for_superior(dev, async)) goto Out; @@ -1330,7 +1338,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func) idx = device_links_read_lock(); /* Start processing the device's "async" suppliers. 
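/*
 * A minimal sketch of how the new dev_for_each_link_to_supplier() /
 * dev_for_each_link_to_consumer() helpers from drivers/base/base.h are
 * used: they bundle the SRCU-aware list walk with the
 * device_links_read_lock_held() lockdep condition that every caller in
 * main.c used to re-derive. The body mirrors dpm_wait_for_suppliers()
 * above; the function name is illustrative.
 */
static void example_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();		/* SRCU read-side section */

	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}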
*/ - list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) + dev_for_each_link_to_supplier(link, dev) if (READ_ONCE(link->status) != DL_STATE_DORMANT) dpm_async_with_cleanup(link->supplier, func); @@ -1384,7 +1392,7 @@ static void dpm_superior_set_must_resume(struct device *dev) idx = device_links_read_lock(); - list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) + dev_for_each_link_to_supplier(link, dev) link->supplier->power.must_resume = true; device_links_read_unlock(idx); @@ -1813,7 +1821,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev) idx = device_links_read_lock(); - list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { + dev_for_each_link_to_supplier(link, dev) { spin_lock_irq(&link->supplier->power.lock); link->supplier->power.direct_complete = false; spin_unlock_irq(&link->supplier->power.lock); @@ -2065,7 +2073,7 @@ static bool device_prepare_smart_suspend(struct device *dev) idx = device_links_read_lock(); - list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { + dev_for_each_link_to_supplier(link, dev) { if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) continue; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 3e84dc4122de..7420b9851fe0 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1903,8 +1903,7 @@ void pm_runtime_get_suppliers(struct device *dev) idx = device_links_read_lock(); - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, - device_links_read_lock_held()) + dev_for_each_link_to_supplier(link, dev) if (device_link_test(link, DL_FLAG_PM_RUNTIME)) { link->supplier_preactivated = true; pm_runtime_get_sync(link->supplier); diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 4f7f9201598d..083d8369a591 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -318,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask) return cmd.val; } -/* Called via smp_call_function_many(), on the target CPUs */ static void do_drv_write(void *_cmd) { struct drv_cmd *cmd = _cmd; @@ -335,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data, .val = val, .func.write = data->cpu_freq_write, }; - int this_cpu; - this_cpu = get_cpu(); - if (cpumask_test_cpu(this_cpu, mask)) - do_drv_write(&cmd); - - smp_call_function_many(mask, do_drv_write, &cmd, 1); - put_cpu(); + on_each_cpu_mask(mask, do_drv_write, &cmd, true); } static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data) diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c index 4fe39eadd163..b6b1cdc4d11d 100644 --- a/drivers/cpufreq/airoha-cpufreq.c +++ b/drivers/cpufreq/airoha-cpufreq.c @@ -107,6 +107,7 @@ static struct platform_driver airoha_cpufreq_driver = { }; static const struct of_device_id airoha_cpufreq_match_list[] __initconst = { + { .compatible = "airoha,an7583" }, { .compatible = "airoha,en7581" }, {}, }; diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index f28a4435fba7..0efe403a5980 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -265,7 +265,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base, */ target_vm = avs_map[l0_vdd_min] - 100; - target_vm = target_vm > MIN_VOLT_MV ? 
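/*
 * Sketch of the helper adopted by acpi-cpufreq above: on_each_cpu_mask()
 * runs the callback on every CPU in the mask -- including the local CPU,
 * if set, with preemption handled internally -- replacing the open-coded
 * get_cpu()/cpumask_test_cpu()/smp_call_function_many()/put_cpu()
 * sequence. The payload struct and names here are hypothetical.
 */
struct wr_cmd {
	u64 val;
};

static void wr_on_cpu(void *info)
{
	struct wr_cmd *cmd = info;

	/* the per-CPU register/MSR write would go here, using cmd->val */
}

static void wr_on_mask(const struct cpumask *mask, u64 val)
{
	struct wr_cmd cmd = { .val = val };

	/* final argument true: wait until all CPUs have run the callback */
	on_each_cpu_mask(mask, wr_on_cpu, &cmd, true);
}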
target_vm : MIN_VOLT_MV; + target_vm = max(target_vm, MIN_VOLT_MV); dvfs->avs[1] = armada_37xx_avs_val_match(target_vm); /* @@ -273,7 +273,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base, * be larger than 1000mv */ target_vm = avs_map[l0_vdd_min] - 150; - target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV; + target_vm = max(target_vm, MIN_VOLT_MV); dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm); /* diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 5940d262374f..71450cca8e9f 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -480,7 +480,7 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv) static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); struct private_data *priv; if (!policy) @@ -488,8 +488,6 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) priv = policy->driver_data; - cpufreq_cpu_put(policy); - return brcm_avs_get_frequency(priv->base); } diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 4a17162a392d..12de0ac7bbaf 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -50,8 +50,7 @@ struct cppc_freq_invariance { static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv); static struct kthread_worker *kworker_fie; -static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data, - struct cppc_perf_fb_ctrs *fb_ctrs_t0, +static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0, struct cppc_perf_fb_ctrs *fb_ctrs_t1); /** @@ -87,8 +86,7 @@ static void cppc_scale_freq_workfn(struct kthread_work *work) return; } - perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs, - &fb_ctrs); + perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs); if (!perf) return; @@ -684,8 +682,7 @@ static inline u64 get_delta(u64 t1, u64 t0) return (u32)t1 - (u32)t0; } -static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data, - struct cppc_perf_fb_ctrs *fb_ctrs_t0, +static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0, struct cppc_perf_fb_ctrs *fb_ctrs_t1) { u64 delta_reference, delta_delivered; @@ -725,8 +722,8 @@ static int cppc_get_perf_ctrs_sample(int cpu, static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) { + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0}; - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); struct cppc_cpudata *cpu_data; u64 delivered_perf; int ret; @@ -736,8 +733,6 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) cpu_data = policy->driver_data; - cpufreq_cpu_put(policy); - ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1); if (ret) { if (ret == -EFAULT) @@ -747,8 +742,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) return 0; } - delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0, - &fb_ctrs_t1); + delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1); if (!delivered_perf) goto out_invalid_counters; diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 015dd393eaba..cd1816a12bb9 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -103,6 +103,7 @@ static const struct of_device_id 
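/*
 * The conversions above all use the scope-based cleanup idiom from
 * linux/cleanup.h. cpufreq.h defines the cleanup class roughly as
 *
 *	DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *,
 *		    if (_T) cpufreq_cpu_put(_T))
 *
 * so a pointer declared with __free(put_cpufreq_policy) drops its policy
 * reference automatically on every return path, and the explicit
 * cpufreq_cpu_put() calls -- with their easy-to-miss early-return leaks --
 * disappear. Minimal sketch:
 */
static unsigned int example_get_cur(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
					cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	return policy->cur;	/* reference dropped when policy leaves scope */
}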
allowlist[] __initconst = { * platforms using "operating-points-v2" property. */ static const struct of_device_id blocklist[] __initconst = { + { .compatible = "airoha,an7583", }, { .compatible = "airoha,en7581", }, { .compatible = "allwinner,sun50i-a100" }, @@ -188,9 +189,11 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "ti,omap3", }, { .compatible = "ti,am625", }, { .compatible = "ti,am62a7", }, + { .compatible = "ti,am62d2", }, { .compatible = "ti,am62p5", }, { .compatible = "qcom,ipq5332", }, + { .compatible = "qcom,ipq5424", }, { .compatible = "qcom,ipq6018", }, { .compatible = "qcom,ipq8064", }, { .compatible = "qcom,ipq8074", }, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fc7eace8b65b..852e024facc3 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -664,10 +664,10 @@ unlock: static unsigned int cpufreq_parse_policy(char *str_governor) { - if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) + if (!strncasecmp(str_governor, "performance", strlen("performance"))) return CPUFREQ_POLICY_PERFORMANCE; - if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) + if (!strncasecmp(str_governor, "powersave", strlen("powersave"))) return CPUFREQ_POLICY_POWERSAVE; return CPUFREQ_POLICY_UNKNOWN; @@ -914,7 +914,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, const char *buf, size_t count) { unsigned int freq = 0; - unsigned int ret; + int ret; if (!policy->governor || !policy->governor->store_setspeed) return -EINVAL; @@ -1121,7 +1121,8 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) if (has_target()) { /* Update policy governor to the one used before hotplug. */ - gov = get_governor(policy->last_governor); + if (policy->last_governor[0] != '\0') + gov = get_governor(policy->last_governor); if (gov) { pr_debug("Restoring governor %s for cpu %d\n", gov->name, policy->cpu); @@ -1844,7 +1845,6 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b */ unsigned int cpufreq_quick_get(unsigned int cpu) { - struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL; unsigned long flags; read_lock_irqsave(&cpufreq_driver_lock, flags); @@ -1859,7 +1859,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu) read_unlock_irqrestore(&cpufreq_driver_lock, flags); - policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (policy) return policy->cur; @@ -1875,9 +1875,7 @@ EXPORT_SYMBOL(cpufreq_quick_get); */ unsigned int cpufreq_quick_get_max(unsigned int cpu) { - struct cpufreq_policy *policy __free(put_cpufreq_policy); - - policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (policy) return policy->max; @@ -1893,9 +1891,7 @@ EXPORT_SYMBOL(cpufreq_quick_get_max); */ __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) { - struct cpufreq_policy *policy __free(put_cpufreq_policy); - - policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (policy) return policy->cpuinfo.max_freq; @@ -1919,9 +1915,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy) */ unsigned int cpufreq_get(unsigned int cpu) { - struct cpufreq_policy *policy __free(put_cpufreq_policy); - - policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (!policy) return 0; @@ -2750,9 +2744,7 @@ static void 
cpufreq_policy_refresh(struct cpufreq_policy *policy) */ void cpufreq_update_policy(unsigned int cpu) { - struct cpufreq_policy *policy __free(put_cpufreq_policy); - - policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (!policy) return; @@ -2769,9 +2761,7 @@ EXPORT_SYMBOL(cpufreq_update_policy); */ void cpufreq_update_limits(unsigned int cpu) { - struct cpufreq_policy *policy __free(put_cpufreq_policy); - - policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (!policy) return; @@ -2792,7 +2782,7 @@ int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state) if (!policy->freq_table) return -ENXIO; - ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table); + ret = cpufreq_frequency_table_cpuinfo(policy); if (ret) { pr_err("%s: Policy frequency update failed\n", __func__); return ret; @@ -2921,10 +2911,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) return -EPROBE_DEFER; if (!driver_data || !driver_data->verify || !driver_data->init || - !(driver_data->setpolicy || driver_data->target_index || - driver_data->target) || - (driver_data->setpolicy && (driver_data->target_index || - driver_data->target)) || + (driver_data->target_index && driver_data->target) || + (!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) || (!driver_data->get_intermediate != !driver_data->target_intermediate) || (!driver_data->online != !driver_data->offline) || (driver_data->adjust_perf && !driver_data->fast_switch)) @@ -2953,6 +2941,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) goto err_null_driver; } + /* + * Mark support for the scheduler's frequency invariance engine for + * drivers that implement target(), target_index() or fast_switch(). + */ + if (!cpufreq_driver->setpolicy) { + static_branch_enable_cpuslocked(&cpufreq_freq_invariance); + pr_debug("cpufreq: supports frequency invariance\n"); + } + ret = subsys_interface_register(&cpufreq_interface); if (ret) goto err_boost_unreg; @@ -2974,21 +2971,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) hp_online = ret; ret = 0; - /* - * Mark support for the scheduler's frequency invariance engine for - * drivers that implement target(), target_index() or fast_switch(). 
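/*
 * The reworked check in cpufreq_register_driver() above boils down to
 * "exactly one scaling interface": a driver must supply either ->setpolicy
 * or one (never both) of ->target()/->target_index(). Worked truth table
 * for the rejection predicate, with sp = setpolicy, t = target,
 * ti = target_index:
 *
 *	sp   t/ti    (ti && t) || (!!sp == (ti || t))    rejected
 *	0    none    0 || (0 == 0) = 1                   yes
 *	0    one     0 || (0 == 1) = 0                   no
 *	0    both    1 || ...      = 1                   yes
 *	1    none    0 || (1 == 0) = 0                   no
 *	1    any     ...  (1 == 1) = 1                   yes
 */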
- */ - if (!cpufreq_driver->setpolicy) { - static_branch_enable_cpuslocked(&cpufreq_freq_invariance); - pr_debug("supports frequency invariance"); - } - pr_debug("driver %s up and running\n", driver_data->name); goto out; err_if_unreg: subsys_interface_unregister(&cpufreq_interface); err_boost_unreg: + if (!cpufreq_driver->setpolicy) + static_branch_disable_cpuslocked(&cpufreq_freq_invariance); remove_boost_sysfs_file(); err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); @@ -3056,9 +3046,7 @@ static int __init cpufreq_core_init(void) static bool cpufreq_policy_is_good_for_eas(unsigned int cpu) { - struct cpufreq_policy *policy __free(put_cpufreq_policy); - - policy = cpufreq_cpu_get(cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (!policy) { pr_debug("cpufreq policy not set for CPU: %d\n", cpu); return false; diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 56500b25d77c..cce6a8d113e1 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -152,9 +152,9 @@ static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set, struct dbs_data *dbs_data = to_dbs_data(attr_set); unsigned int input; int ret; - ret = sscanf(buf, "%u", &input); + ret = kstrtouint(buf, 0, &input); - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + if (ret || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; dbs_data->sampling_down_factor = input; @@ -168,9 +168,9 @@ static ssize_t up_threshold_store(struct gov_attr_set *attr_set, struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input; int ret; - ret = sscanf(buf, "%u", &input); + ret = kstrtouint(buf, 0, &input); - if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold) + if (ret || input > 100 || input <= cs_tuners->down_threshold) return -EINVAL; dbs_data->up_threshold = input; @@ -184,10 +184,10 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set, struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input; int ret; - ret = sscanf(buf, "%u", &input); + ret = kstrtouint(buf, 0, &input); /* cannot be lower than 1 otherwise freq will not fall */ - if (ret != 1 || input < 1 || input >= dbs_data->up_threshold) + if (ret || input < 1 || input >= dbs_data->up_threshold) return -EINVAL; cs_tuners->down_threshold = input; @@ -201,9 +201,9 @@ static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set, unsigned int input; int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; + ret = kstrtouint(buf, 0, &input); + if (ret) + return ret; if (input > 1) input = 1; @@ -226,10 +226,10 @@ static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf, struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input; int ret; - ret = sscanf(buf, "%u", &input); + ret = kstrtouint(buf, 0, &input); - if (ret != 1) - return -EINVAL; + if (ret) + return ret; if (input > 100) input = 100; diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 0e65d37c9231..a6ecc203f7b7 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -30,29 +30,6 @@ static struct od_ops od_ops; static unsigned int default_powersave_bias; /* - * Not all CPUs want IO time to be accounted as busy; this depends on how - * efficient idling at a higher frequency/voltage is. 
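/*
 * Pattern behind the governor store() conversions above: unlike
 * sscanf("%u"), which silently accepts trailing garbage such as "10abc",
 * kstrtouint() parses the whole string (allowing one trailing newline),
 * rejects overflow, and returns an error code that can be propagated
 * as-is. Illustrative handler mirroring sampling_down_factor_store():
 */
static ssize_t example_factor_store(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = kstrtouint(buf, 0, &input);	/* base 0: decimal, hex, octal */
	if (ret)
		return ret;

	if (input < 1 || input > MAX_SAMPLING_DOWN_FACTOR)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;
	return count;
}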
- * Pavel Machek says this is not so for various generations of AMD and old - * Intel systems. - * Mike Chan (android.com) claims this is also not true for ARM. - * Because of this, whitelist specific known (series) of CPUs by default, and - * leave all others up to the user. - */ -static int should_io_be_busy(void) -{ -#if defined(CONFIG_X86) - /* - * For Intel, Core 2 (model 15) and later have an efficient idle. - */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 15) - return 1; -#endif - return 0; -} - -/* * Find right freq to be set now with powersave_bias on. * Returns the freq_hi to be used right now and will set freq_hi_delay_us, * freq_lo, and freq_lo_delay_us in percpu area for averaging freqs. @@ -377,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data) dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; dbs_data->ignore_nice_load = 0; tuners->powersave_bias = default_powersave_bias; - dbs_data->io_is_busy = should_io_be_busy(); + dbs_data->io_is_busy = od_should_io_be_busy(); dbs_data->tuners = tuners; return 0; diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h index 1af8e5c4b86f..2ca8f1aaf2e3 100644 --- a/drivers/cpufreq/cpufreq_ondemand.h +++ b/drivers/cpufreq/cpufreq_ondemand.h @@ -24,3 +24,26 @@ static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol struct od_dbs_tuners { unsigned int powersave_bias; }; + +#ifdef CONFIG_X86 +#include <asm/cpu_device_id.h> + +/* + * Not all CPUs want IO time to be accounted as busy; this depends on + * how efficient idling at a higher frequency/voltage is. + * + * Pavel Machek says this is not so for various generations of AMD and + * old Intel systems. Mike Chan (android.com) claims this is also not + * true for ARM. + * + * Because of this, select a known series of Intel CPUs (Family 6 and + * later) by default, and leave all others up to the user. 
+ */ +static inline bool od_should_io_be_busy(void) +{ + return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO); +} +#else +static inline bool od_should_io_be_busy(void) { return false; } +#endif diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 35de513af6c9..7f251daf03ce 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -28,22 +28,21 @@ static bool policy_has_boost_freq(struct cpufreq_policy *policy) return false; } -int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table) +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy) { - struct cpufreq_frequency_table *pos; + struct cpufreq_frequency_table *pos, *table = policy->freq_table; unsigned int min_freq = ~0; unsigned int max_freq = 0; - unsigned int freq; + unsigned int freq, i; - cpufreq_for_each_valid_entry(pos, table) { + cpufreq_for_each_valid_entry_idx(pos, table, i) { freq = pos->frequency; if ((!cpufreq_boost_enabled() || !policy->boost_enabled) && (pos->flags & CPUFREQ_BOOST_FREQ)) continue; - pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq); + pr_debug("table entry %u: %u kHz\n", i, freq); if (freq < min_freq) min_freq = freq; if (freq > max_freq) @@ -65,10 +64,9 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, return 0; } -int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, - struct cpufreq_frequency_table *table) +int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy) { - struct cpufreq_frequency_table *pos; + struct cpufreq_frequency_table *pos, *table = policy->freq_table; unsigned int freq, prev_smaller = 0; bool found = false; @@ -110,7 +108,7 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy) if (!policy->freq_table) return -ENODEV; - return cpufreq_frequency_table_verify(policy, policy->freq_table); + return cpufreq_frequency_table_verify(policy); } EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); @@ -128,7 +126,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, }; struct cpufreq_frequency_table *pos; struct cpufreq_frequency_table *table = policy->freq_table; - unsigned int freq, diff, i = 0; + unsigned int freq, diff, i; int index; pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", @@ -354,7 +352,7 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy) return 0; } - ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table); + ret = cpufreq_frequency_table_cpuinfo(policy); if (ret) return ret; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 0d5d283a5429..38897bb14a2c 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -620,24 +620,9 @@ static int min_perf_pct_min(void) (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0; } -static s16 intel_pstate_get_epb(struct cpudata *cpu_data) -{ - u64 epb; - int ret; - - if (!boot_cpu_has(X86_FEATURE_EPB)) - return -ENXIO; - - ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); - if (ret) - return (s16)ret; - - return (s16)(epb & 0x0f); -} - static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) { - s16 epp; + s16 epp = -EOPNOTSUPP; if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { /* @@ -651,34 +636,13 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) return epp; } epp = (hwp_req_data >> 24) & 0xff; - } else { - /* When there 
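/*
 * Background for od_should_io_be_busy() above: the x86_vfm field packs
 * Vendor, Family and Model into a single integer (VFM_MAKE() in
 * asm/cpu_device_id.h), with the vendor in the highest bits, so one ">="
 * compares whole generations within a vendor. Conceptually (illustrative
 * packing only, not the exact kernel macro):
 *
 *	vfm = (vendor << 16) | (family << 8) | model;
 *
 * INTEL_PENTIUM_PRO encodes Intel family 6, model 1, so the new test
 * covers every family-6-or-later Intel part -- a deliberately wider net
 * than the old "family 6 && model >= 15" check it replaces.
 */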
is no EPP present, HWP uses EPB settings */ - epp = intel_pstate_get_epb(cpu_data); } return epp; } -static int intel_pstate_set_epb(int cpu, s16 pref) -{ - u64 epb; - int ret; - - if (!boot_cpu_has(X86_FEATURE_EPB)) - return -ENXIO; - - ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); - if (ret) - return ret; - - epb = (epb & ~0x0f) | pref; - wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); - - return 0; -} - /* - * EPP/EPB display strings corresponding to EPP index in the + * EPP display strings corresponding to EPP index in the * energy_perf_strings[] * index String *------------------------------------- @@ -782,7 +746,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, u32 raw_epp) { int epp = -EINVAL; - int ret; + int ret = -EOPNOTSUPP; if (!pref_index) epp = cpu_data->epp_default; @@ -802,10 +766,6 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, return -EBUSY; ret = intel_pstate_set_epp(cpu_data, epp); - } else { - if (epp == -EINVAL) - epp = (pref_index - 1) << 2; - ret = intel_pstate_set_epb(cpu_data->cpu, epp); } return ret; @@ -937,11 +897,19 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) cpufreq_freq_attr_ro(base_frequency); +enum hwp_cpufreq_attr_index { + HWP_BASE_FREQUENCY_INDEX = 0, + HWP_PERFORMANCE_PREFERENCE_INDEX, + HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX, + HWP_CPUFREQ_ATTR_COUNT, +}; + static struct freq_attr *hwp_cpufreq_attrs[] = { - &energy_performance_preference, - &energy_performance_available_preferences, - &base_frequency, - NULL, + [HWP_BASE_FREQUENCY_INDEX] = &base_frequency, + [HWP_PERFORMANCE_PREFERENCE_INDEX] = &energy_performance_preference, + [HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = + &energy_performance_available_preferences, + [HWP_CPUFREQ_ATTR_COUNT] = NULL, }; static bool no_cas __ro_after_init; @@ -1337,9 +1305,8 @@ static void intel_pstate_hwp_set(unsigned int cpu) if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { value &= ~GENMASK_ULL(31, 24); value |= (u64)epp << 24; - } else { - intel_pstate_set_epb(cpu, epp); } + skip_epp: WRITE_ONCE(cpu_data->hwp_req_cached, value); wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value); @@ -1411,6 +1378,9 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu) #define POWER_CTL_EE_ENABLE 1 #define POWER_CTL_EE_DISABLE 2 +/* Enable bit for Dynamic Efficiency Control (DEC) */ +#define POWER_CTL_DEC_ENABLE 27 + static int power_ctl_ee_state; static void set_power_ctl_ee_state(bool input) @@ -1502,9 +1472,7 @@ static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy, static bool intel_pstate_update_max_freq(struct cpudata *cpudata) { - struct cpufreq_policy *policy __free(put_cpufreq_policy); - - policy = cpufreq_cpu_get(cpudata->cpu); + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu); if (!policy) return false; @@ -1695,41 +1663,40 @@ unlock_driver: return count; } -static void update_qos_request(enum freq_qos_req_type type) +static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type) { + struct cpudata *cpudata = all_cpu_data[cpu]; + unsigned int freq = cpudata->pstate.turbo_freq; struct freq_qos_request *req; - struct cpufreq_policy *policy; - int i; - - for_each_possible_cpu(i) { - struct cpudata *cpu = all_cpu_data[i]; - unsigned int freq, perf_pct; - policy = cpufreq_cpu_get(i); - if (!policy) - continue; + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); + if (!policy) + return; - req = policy->driver_data; - 
cpufreq_cpu_put(policy); + req = policy->driver_data; + if (!req) + return; - if (!req) - continue; + if (hwp_active) + intel_pstate_get_hwp_cap(cpudata); - if (hwp_active) - intel_pstate_get_hwp_cap(cpu); + if (type == FREQ_QOS_MIN) { + freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100); + } else { + req++; + freq = (freq * global.max_perf_pct) / 100; + } - if (type == FREQ_QOS_MIN) { - perf_pct = global.min_perf_pct; - } else { - req++; - perf_pct = global.max_perf_pct; - } + if (freq_qos_update_request(req, freq) < 0) + pr_warn("Failed to update freq constraint: CPU%d\n", cpu); +} - freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100); +static void update_qos_requests(enum freq_qos_req_type type) +{ + int i; - if (freq_qos_update_request(req, freq) < 0) - pr_warn("Failed to update freq constraint: CPU%d\n", i); - } + for_each_possible_cpu(i) + update_cpu_qos_request(i, type); } static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, @@ -1758,7 +1725,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, if (intel_pstate_driver == &intel_pstate) intel_pstate_update_policies(); else - update_qos_request(FREQ_QOS_MAX); + update_qos_requests(FREQ_QOS_MAX); mutex_unlock(&intel_pstate_driver_lock); @@ -1792,7 +1759,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, if (intel_pstate_driver == &intel_pstate) intel_pstate_update_policies(); else - update_qos_request(FREQ_QOS_MIN); + update_qos_requests(FREQ_QOS_MIN); mutex_unlock(&intel_pstate_driver_lock); @@ -2575,7 +2542,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) * that sample.time will always be reset before setting the utilization * update hook and make the caller skip the sample then. */ - if (cpu->last_sample_time) { + if (likely(cpu->last_sample_time)) { intel_pstate_calc_avg_perf(cpu); return true; } @@ -3802,6 +3769,26 @@ static const struct x86_cpu_id intel_hybrid_scaling_factor[] = { {} }; +static bool hwp_check_epp(void) +{ + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) + return true; + + /* Without EPP support, don't expose EPP-related sysfs attributes. */ + hwp_cpufreq_attrs[HWP_PERFORMANCE_PREFERENCE_INDEX] = NULL; + hwp_cpufreq_attrs[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = NULL; + + return false; +} + +static bool hwp_check_dec(void) +{ + u64 power_ctl; + + rdmsrq(MSR_IA32_POWER_CTL, power_ctl); + return !!(power_ctl & BIT(POWER_CTL_DEC_ENABLE)); +} + static int __init intel_pstate_init(void) { static struct cpudata **_all_cpu_data; @@ -3822,23 +3809,32 @@ static int __init intel_pstate_init(void) id = x86_match_cpu(hwp_support_ids); if (id) { - hwp_forced = intel_pstate_hwp_is_enabled(); + bool epp_present = hwp_check_epp(); - if (hwp_forced) + /* + * If HWP is enabled already, there is no choice but to deal + * with it. + */ + hwp_forced = intel_pstate_hwp_is_enabled(); + if (hwp_forced) { pr_info("HWP enabled by BIOS\n"); - else if (no_load) + no_hwp = 0; + } else if (no_load) { return -ENODEV; + } else if (!epp_present && !hwp_check_dec()) { + /* + * Avoid enabling HWP for processors without EPP support + * unless the Dynamic Efficiency Control (DEC) enable + * bit (MSR_IA32_POWER_CTL, bit 27) is set because that + * means incomplete HWP implementation which is a corner + * case and supporting it is generally problematic. 
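/*
 * Why the hwp_cpufreq_attrs[] reordering earlier in this patch matters:
 * freq_attr tables are NULL-terminated, so iteration stops at the first
 * NULL slot. With base_frequency moved to index 0, hwp_check_epp() can
 * blank the two EPP entries behind it and effectively truncate the table
 * on CPUs without EPP. A standalone userspace-style demonstration of the
 * truncation effect (strings stand in for the freq_attr pointers):
 */
#include <stdio.h>

static const char *attrs[] = {
	"base_frequency",				/* index 0: always kept */
	"energy_performance_preference",		/* NULLed without EPP */
	"energy_performance_available_preferences",	/* NULLed without EPP */
	NULL,
};

int main(void)
{
	/* simulate hwp_check_epp() on a CPU lacking X86_FEATURE_HWP_EPP */
	attrs[1] = NULL;
	attrs[2] = NULL;

	for (const char **a = attrs; *a; a++)	/* stops at the first NULL */
		printf("%s\n", *a);		/* prints only base_frequency */
	return 0;
}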
+ */ + no_hwp = 1; + } copy_cpu_funcs(&core_funcs); - /* - * Avoid enabling HWP for processors without EPP support, - * because that means incomplete HWP implementation which is a - * corner case and supporting it is generally problematic. - * - * If HWP is enabled already, though, there is no choice but to - * deal with it. - */ - if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { + + if (!no_hwp) { hwp_active = true; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index ba0e08c8486a..49e76b44468a 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -953,6 +953,9 @@ static void __exit longhaul_exit(void) struct cpufreq_policy *policy = cpufreq_cpu_get(0); int i; + if (unlikely(!policy)) + return; + for (i = 0; i < numscales; i++) { if (mults[i] == maxmult) { struct cpufreq_freqs freqs; diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index 74f1b4c796e4..fce5aa5ceea0 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -24,6 +24,8 @@ #define POLL_USEC 1000 #define TIMEOUT_USEC 300000 +#define FDVFS_FDIV_HZ (26 * 1000) + enum { REG_FREQ_LUT_TABLE, REG_FREQ_ENABLE, @@ -35,7 +37,14 @@ enum { REG_ARRAY_SIZE, }; -struct mtk_cpufreq_data { +struct mtk_cpufreq_priv { + struct device *dev; + const struct mtk_cpufreq_variant *variant; + void __iomem *fdvfs; +}; + +struct mtk_cpufreq_domain { + struct mtk_cpufreq_priv *parent; struct cpufreq_frequency_table *table; void __iomem *reg_bases[REG_ARRAY_SIZE]; struct resource *res; @@ -43,20 +52,51 @@ struct mtk_cpufreq_data { int nr_opp; }; -static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = { - [REG_FREQ_LUT_TABLE] = 0x0, - [REG_FREQ_ENABLE] = 0x84, - [REG_FREQ_PERF_STATE] = 0x88, - [REG_FREQ_HW_STATE] = 0x8c, - [REG_EM_POWER_TBL] = 0x90, - [REG_FREQ_LATENCY] = 0x110, +struct mtk_cpufreq_variant { + int (*init)(struct mtk_cpufreq_priv *priv); + const u16 reg_offsets[REG_ARRAY_SIZE]; + const bool is_hybrid_dvfs; +}; + +static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = { + .reg_offsets = { + [REG_FREQ_LUT_TABLE] = 0x0, + [REG_FREQ_ENABLE] = 0x84, + [REG_FREQ_PERF_STATE] = 0x88, + [REG_FREQ_HW_STATE] = 0x8c, + [REG_EM_POWER_TBL] = 0x90, + [REG_FREQ_LATENCY] = 0x110, + }, +}; + +static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv) +{ + priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL); + if (IS_ERR(priv->fdvfs)) + return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs), + "failed to get fdvfs iomem\n"); + + return 0; +} + +static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = { + .init = mtk_cpufreq_hw_mt8196_init, + .reg_offsets = { + [REG_FREQ_LUT_TABLE] = 0x0, + [REG_FREQ_ENABLE] = 0x84, + [REG_FREQ_PERF_STATE] = 0x88, + [REG_FREQ_HW_STATE] = 0x8c, + [REG_EM_POWER_TBL] = 0x90, + [REG_FREQ_LATENCY] = 0x114, + }, + .is_hybrid_dvfs = true, }; static int __maybe_unused mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW, unsigned long *KHz) { - struct mtk_cpufreq_data *data; + struct mtk_cpufreq_domain *data; struct cpufreq_policy *policy; int i; @@ -80,19 +120,38 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW, return 0; } +static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq, + struct cpufreq_policy *policy) +{ + struct mtk_cpufreq_domain *data = policy->driver_data; + struct mtk_cpufreq_priv *priv = data->parent; + unsigned 
int cpu; + + target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ); + for_each_cpu(cpu, policy->real_cpus) { + writel_relaxed(target_freq, priv->fdvfs + cpu * 4); + } +} + static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy, unsigned int index) { - struct mtk_cpufreq_data *data = policy->driver_data; - - writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]); + struct mtk_cpufreq_domain *data = policy->driver_data; + unsigned int target_freq; + + if (data->parent->fdvfs) { + target_freq = policy->freq_table[index].frequency; + mtk_cpufreq_hw_fdvfs_switch(target_freq, policy); + } else { + writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]); + } return 0; } static unsigned int mtk_cpufreq_hw_get(unsigned int cpu) { - struct mtk_cpufreq_data *data; + struct mtk_cpufreq_domain *data; struct cpufreq_policy *policy; unsigned int index; @@ -111,18 +170,21 @@ static unsigned int mtk_cpufreq_hw_get(unsigned int cpu) static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { - struct mtk_cpufreq_data *data = policy->driver_data; + struct mtk_cpufreq_domain *data = policy->driver_data; unsigned int index; index = cpufreq_table_find_index_dl(policy, target_freq, false); - writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]); + if (data->parent->fdvfs) + mtk_cpufreq_hw_fdvfs_switch(target_freq, policy); + else + writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]); return policy->freq_table[index].frequency; } static int mtk_cpu_create_freq_table(struct platform_device *pdev, - struct mtk_cpufreq_data *data) + struct mtk_cpufreq_domain *data) { struct device *dev = &pdev->dev; u32 temp, i, freq, prev_freq = 0; @@ -157,9 +219,9 @@ static int mtk_cpu_create_freq_table(struct platform_device *pdev, static int mtk_cpu_resources_init(struct platform_device *pdev, struct cpufreq_policy *policy, - const u16 *offsets) + struct mtk_cpufreq_priv *priv) { - struct mtk_cpufreq_data *data; + struct mtk_cpufreq_domain *data; struct device *dev = &pdev->dev; struct resource *res; struct of_phandle_args args; @@ -180,6 +242,15 @@ static int mtk_cpu_resources_init(struct platform_device *pdev, index = args.args[0]; of_node_put(args.np); + /* + * In a cpufreq with hybrid DVFS, such as the MT8196, the first declared + * register range is for FDVFS, followed by the frequency domain MMIOs. 
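/*
 * Units sketch for the FDVFS write above: cpufreq frequencies are in kHz
 * and FDVFS_FDIV_HZ is 26 * 1000 (26 MHz expressed in kHz), so one
 * divider step presumably corresponds to the 26 MHz base clock typical of
 * MediaTek SoCs. Each CPU has its own 32-bit request register:
 *
 *	target = 2600000 kHz  ->  DIV_ROUND_UP(2600000, 26000) = 100 steps
 *	CPUn's register lives at fdvfs + n * 4
 *
 * Rounding up ensures the granted frequency is never below the request.
 */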
+ */ + if (priv->variant->is_hybrid_dvfs) + index++; + + data->parent = priv; + res = platform_get_resource(pdev, IORESOURCE_MEM, index); if (!res) { dev_err(dev, "failed to get mem resource %d\n", index); @@ -202,7 +273,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev, data->res = res; for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++) - data->reg_bases[i] = base + offsets[i]; + data->reg_bases[i] = base + priv->variant->reg_offsets[i]; ret = mtk_cpu_create_freq_table(pdev, data); if (ret) { @@ -223,7 +294,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) { struct platform_device *pdev = cpufreq_get_driver_data(); int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS; - struct mtk_cpufreq_data *data; + struct mtk_cpufreq_domain *data; unsigned int latency; int ret; @@ -262,7 +333,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) { - struct mtk_cpufreq_data *data = policy->driver_data; + struct mtk_cpufreq_domain *data = policy->driver_data; struct resource *res = data->res; void __iomem *base = data->base; @@ -275,7 +346,7 @@ static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) static void mtk_cpufreq_register_em(struct cpufreq_policy *policy) { struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power); - struct mtk_cpufreq_data *data = policy->driver_data; + struct mtk_cpufreq_domain *data = policy->driver_data; em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp, &em_cb, policy->cpus, true); @@ -297,6 +368,7 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = { static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev) { + struct mtk_cpufreq_priv *priv; const void *data; int ret, cpu; struct device *cpu_dev; @@ -320,7 +392,20 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev) if (!data) return -EINVAL; - platform_set_drvdata(pdev, (void *) data); + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->variant = data; + priv->dev = &pdev->dev; + + if (priv->variant->init) { + ret = priv->variant->init(priv); + if (ret) + return ret; + } + + platform_set_drvdata(pdev, priv); cpufreq_mtk_hw_driver.driver_data = pdev; ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver); @@ -336,7 +421,8 @@ static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev) } static const struct of_device_id mtk_cpufreq_hw_match[] = { - { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets }, + { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant }, + { .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant }, {} }; MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match); diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index f3f02c4b6888..00de1166188a 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -123,7 +123,7 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info, soc_data->sram_max_volt); return ret; } - } else if (pre_vproc > new_vproc) { + } else { vproc = max(new_vproc, pre_vsram - soc_data->max_volt_shift); ret = regulator_set_voltage(proc_reg, vproc, @@ -320,7 +320,6 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb, struct dev_pm_opp *new_opp; struct mtk_cpu_dvfs_info *info; unsigned long freq, volt; - struct cpufreq_policy *policy; int ret = 0; info = container_of(nb, struct 
mtk_cpu_dvfs_info, opp_nb); @@ -353,12 +352,12 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb, } dev_pm_opp_put(new_opp); - policy = cpufreq_cpu_get(info->opp_cpu); - if (policy) { + + struct cpufreq_policy *policy __free(put_cpufreq_policy) + = cpufreq_cpu_get(info->opp_cpu); + if (policy) cpufreq_driver_target(policy, freq / 1000, CPUFREQ_RELATION_L); - cpufreq_cpu_put(policy); - } } } diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 54f8117103c8..765a5bb81829 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -200,6 +200,10 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev, case QCOM_ID_IPQ9574: drv->versions = 1 << (unsigned int)(*speedbin); break; + case QCOM_ID_IPQ5424: + case QCOM_ID_IPQ5404: + drv->versions = (*speedbin == 0x3b) ? BIT(1) : BIT(0); + break; case QCOM_ID_MSM8996SG: case QCOM_ID_APQ8096SG: drv->versions = 1 << ((unsigned int)(*speedbin) + 4); @@ -591,6 +595,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_u { .compatible = "qcom,msm8996", .data = &match_data_kryo }, { .compatible = "qcom,qcs404", .data = &match_data_qcs404 }, { .compatible = "qcom,ipq5332", .data = &match_data_kryo }, + { .compatible = "qcom,ipq5424", .data = &match_data_kryo }, { .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 }, { .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 }, { .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 }, diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 76c888ed8d16..4215621deb3f 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -554,17 +554,15 @@ out_dmc0: static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr) { + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(0); int ret; - struct cpufreq_policy *policy; - policy = cpufreq_cpu_get(0); if (!policy) { pr_debug("cpufreq: get no policy for cpu0\n"); return NOTIFY_BAD; } ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0); - cpufreq_cpu_put(policy); if (ret < 0) return NOTIFY_BAD; diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index ef078426bfd5..38c165d526d1 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -15,6 +15,7 @@ #include <linux/energy_model.h> #include <linux/export.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/pm_opp.h> #include <linux/pm_qos.h> #include <linux/slab.h> @@ -424,6 +425,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev) return true; } + /* + * Older Broadcom STB chips had a "clocks" property for CPU node(s) + * that did not match the SCMI performance protocol node, if we got + * there, it means we had such an older Device Tree, therefore return + * true to preserve backwards compatibility. + */ + if (of_machine_is_compatible("brcm,brcmstb")) + return true; + return false; } diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 9c0b01e00508..642ddb9ea217 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -89,11 +89,9 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy, static int sh_cpufreq_verify(struct cpufreq_policy_data *policy) { struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); - struct cpufreq_frequency_table *freq_table; - freq_table = cpuclk->nr_freqs ? 
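/*
 * How the new IPQ5424/IPQ5404 speedbin decode above feeds OPP selection:
 * drv->versions is handed to the OPP core as the "supported hardware"
 * mask, and each OPP in DT carries an opp-supported-hw cell; an OPP stays
 * enabled only when the two masks intersect. A hypothetical standalone
 * decode helper for this case:
 */
static u32 example_ipq5424_versions(u8 speedbin)
{
	/* fuse value 0x3b selects the second speed grade, else the first */
	return speedbin == 0x3b ? BIT(1) : BIT(0);
}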
cpuclk->freq_table : NULL; - if (freq_table) - return cpufreq_frequency_table_verify(policy, freq_table); + if (policy->freq_table) + return cpufreq_frequency_table_verify(policy); cpufreq_verify_within_cpu_limits(policy); diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 0b66df4ed513..f8b42e981635 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -378,16 +378,16 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor); * DETECT SPEEDSTEP SPEEDS * *********************************************************************/ -unsigned int speedstep_get_freqs(enum speedstep_processor processor, - unsigned int *low_speed, - unsigned int *high_speed, - unsigned int *transition_latency, - void (*set_state) (unsigned int state)) +int speedstep_get_freqs(enum speedstep_processor processor, + unsigned int *low_speed, + unsigned int *high_speed, + unsigned int *transition_latency, + void (*set_state)(unsigned int state)) { unsigned int prev_speed; - unsigned int ret = 0; unsigned long flags; ktime_t tv1, tv2; + int ret = 0; if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) return -EINVAL; diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h index dc762ea786be..48329647d4c4 100644 --- a/drivers/cpufreq/speedstep-lib.h +++ b/drivers/cpufreq/speedstep-lib.h @@ -41,8 +41,8 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor); * SPEEDSTEP_LOW; the second argument is zero so that no * cpufreq_notify_transition calls are initiated. */ -extern unsigned int speedstep_get_freqs(enum speedstep_processor processor, - unsigned int *low_speed, - unsigned int *high_speed, - unsigned int *transition_latency, - void (*set_state) (unsigned int state)); +extern int speedstep_get_freqs(enum speedstep_processor processor, + unsigned int *low_speed, + unsigned int *high_speed, + unsigned int *transition_latency, + void (*set_state)(unsigned int state)); diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index cbabb726c664..4270686fc3e3 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -103,13 +103,12 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy, static unsigned int tegra186_cpufreq_get(unsigned int cpu) { + struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); struct tegra186_cpufreq_cluster *cluster; - struct cpufreq_policy *policy; unsigned int edvd_offset, cluster_id; u32 ndiv; - policy = cpufreq_cpu_get(cpu); if (!policy) return 0; @@ -117,7 +116,6 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu) ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK; cluster_id = data->cpus[policy->cpu].bpmp_cluster_id; cluster = &data->clusters[cluster_id]; - cpufreq_cpu_put(policy); return (cluster->ref_clk_khz * ndiv) / cluster->div; } diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 5a5147277cd0..6ee76f5fe9c5 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -72,7 +72,9 @@ enum { #define AM62P5_EFUSE_O_MPU_OPP 15 #define AM62P5_EFUSE_S_MPU_OPP 19 +#define AM62P5_EFUSE_T_MPU_OPP 20 #define AM62P5_EFUSE_U_MPU_OPP 21 +#define AM62P5_EFUSE_V_MPU_OPP 22 #define AM62P5_SUPPORT_O_MPU_OPP BIT(0) #define AM62P5_SUPPORT_U_MPU_OPP BIT(2) @@ -153,7 +155,9 @@ static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data, 
unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP; switch (efuse) { + case AM62P5_EFUSE_V_MPU_OPP: case AM62P5_EFUSE_U_MPU_OPP: + case AM62P5_EFUSE_T_MPU_OPP: case AM62P5_EFUSE_S_MPU_OPP: calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP; fallthrough; @@ -307,9 +311,10 @@ static struct ti_cpufreq_soc_data am3517_soc_data = { }; static const struct soc_device_attribute k3_cpufreq_soc[] = { - { .family = "AM62X", .revision = "SR1.0" }, - { .family = "AM62AX", .revision = "SR1.0" }, - { .family = "AM62PX", .revision = "SR1.0" }, + { .family = "AM62X", }, + { .family = "AM62AX", }, + { .family = "AM62PX", }, + { .family = "AM62DX", }, { /* sentinel */ } }; @@ -457,6 +462,7 @@ static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = { { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, }, { .compatible = "ti,am625", .data = &am625_soc_data, }, { .compatible = "ti,am62a7", .data = &am62a7_soc_data, }, + { .compatible = "ti,am62d2", .data = &am62a7_soc_data, }, { .compatible = "ti,am62p5", .data = &am62p5_soc_data, }, /* legacy */ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, }, diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c index 7dd1b0c263c7..6ffa16d239b2 100644 --- a/drivers/cpufreq/virtual-cpufreq.c +++ b/drivers/cpufreq/virtual-cpufreq.c @@ -250,7 +250,7 @@ static int virt_cpufreq_offline(struct cpufreq_policy *policy) static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy) { if (policy->freq_table) - return cpufreq_frequency_table_verify(policy, policy->freq_table); + return cpufreq_frequency_table_verify(policy); cpufreq_verify_within_cpu_limits(policy); return 0; diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c index 5f386761b156..7ab6f68b96a8 100644 --- a/drivers/cpuidle/cpuidle-qcom-spm.c +++ b/drivers/cpuidle/cpuidle-qcom-spm.c @@ -86,9 +86,9 @@ static const struct of_device_id qcom_idle_state_match[] = { static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu) { - struct platform_device *pdev = NULL; + struct platform_device *pdev; struct device_node *cpu_node, *saw_node; - struct cpuidle_qcom_spm_data *data = NULL; + struct cpuidle_qcom_spm_data *data; int ret; cpu_node = of_cpu_device_node_get(cpu); @@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu) return -ENODEV; saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); + of_node_put(cpu_node); if (!saw_node) return -ENODEV; pdev = of_find_device_by_node(saw_node); of_node_put(saw_node); - of_node_put(cpu_node); if (!pdev) return -ENODEV; data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL); - if (!data) + if (!data) { + put_device(&pdev->dev); return -ENOMEM; + } data->spm = dev_get_drvdata(&pdev->dev); + put_device(&pdev->dev); if (!data->spm) return -EINVAL; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 0835da449db8..56132e843c99 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -635,8 +635,14 @@ static void __cpuidle_device_init(struct cpuidle_device *dev) static int __cpuidle_register_device(struct cpuidle_device *dev) { struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + unsigned int cpu = dev->cpu; int i, ret; + if (per_cpu(cpuidle_devices, cpu)) { + pr_info("CPU%d: cpuidle device already registered\n", cpu); + return -EEXIST; + } + if (!try_module_get(drv->owner)) return -EINVAL; @@ -648,7 +654,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) 
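/*
 * The extended am62p5_efuse_xlate() switch above uses case fallthrough so
 * that each higher speed grade inherits every lower grade's OPP bits on
 * top of the always-set base grade. Condensed sketch of the cascade (the
 * real function contains further intermediate grades not visible in this
 * hunk):
 */
static unsigned long example_efuse_xlate(unsigned long efuse)
{
	unsigned long mask = AM62P5_SUPPORT_O_MPU_OPP;	/* base grade */

	switch (efuse) {
	case AM62P5_EFUSE_V_MPU_OPP:
	case AM62P5_EFUSE_U_MPU_OPP:
	case AM62P5_EFUSE_T_MPU_OPP:
	case AM62P5_EFUSE_S_MPU_OPP:
		mask |= AM62P5_SUPPORT_U_MPU_OPP;
		fallthrough;		/* inherit the grades below */
	case AM62P5_EFUSE_O_MPU_OPP:
	default:
		break;
	}

	return mask;
}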
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER; } - per_cpu(cpuidle_devices, dev->cpu) = dev; + per_cpu(cpuidle_devices, cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); ret = cpuidle_coupled_register_device(dev); diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index b2e3d0b0a116..4d9aa5ce31f0 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -314,45 +314,47 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (s->exit_latency_ns > latency_req) break; - if (s->target_residency_ns > predicted_ns) { - /* - * Use a physical idle state, not busy polling, unless - * a timer is going to trigger soon enough. - */ - if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && - s->target_residency_ns <= data->next_timer_ns) { - predicted_ns = s->target_residency_ns; - idx = i; - break; - } - if (predicted_ns < TICK_NSEC) - break; - - if (!tick_nohz_tick_stopped()) { - /* - * If the state selected so far is shallow, - * waking up early won't hurt, so retain the - * tick in that case and let the governor run - * again in the next iteration of the loop. - */ - predicted_ns = drv->states[idx].target_residency_ns; - break; - } + if (s->target_residency_ns <= predicted_ns) { + idx = i; + continue; + } + + /* + * Use a physical idle state, not busy polling, unless a timer + * is going to trigger soon enough. + */ + if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && + s->target_residency_ns <= data->next_timer_ns) { + predicted_ns = s->target_residency_ns; + idx = i; + break; + } + if (predicted_ns < TICK_NSEC) + break; + + if (!tick_nohz_tick_stopped()) { /* - * If the state selected so far is shallow and this - * state's target residency matches the time till the - * closest timer event, select this one to avoid getting - * stuck in the shallow one for too long. + * If the state selected so far is shallow, waking up + * early won't hurt, so retain the tick in that case and + * let the governor run again in the next iteration of + * the idle loop. */ - if (drv->states[idx].target_residency_ns < TICK_NSEC && - s->target_residency_ns <= delta_tick) - idx = i; - - return idx; + predicted_ns = drv->states[idx].target_residency_ns; + break; } - idx = i; + /* + * If the state selected so far is shallow and this state's + * target residency matches the time till the closest timer + * event, select this one to avoid getting stuck in the shallow + * one for too long. 
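
The menu governor rework above and just below changes no selection policy: it inverts the "target residency exceeds the predicted idle duration" test so the common case (this state fits, remember it, keep scanning) is handled first with a continue, pulling the special-case handling out of a nested block into straight-line code. The transformation is behaviour-preserving because every path inside the old nested block ends the scan with a break or return, so nothing ever fell through to the common-case assignment. Schematically (special() is a placeholder):

    /* before: common case buried below a long nested block */
    for (i = first; i < n; i++) {
        if (special(i)) {
            /* long special-case handling; always breaks or returns */
        }
        idx = i;
    }

    /* after: identical behaviour, one indentation level shallower */
    for (i = first; i < n; i++) {
        if (!special(i)) {
            idx = i;
            continue;
        }
        /* same special-case handling, now unnested */
    }
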
+ */ + if (drv->states[idx].target_residency_ns < TICK_NSEC && + s->target_residency_ns <= delta_tick) + idx = i; + + return idx; } if (idx == -1) diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index d6f5da61cb7d..61de64817604 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -27,14 +27,14 @@ static ssize_t show_available_governors(struct device *dev, mutex_lock(&cpuidle_lock); list_for_each_entry(tmp, &cpuidle_governors, governor_list) { - if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2))) + if (i >= (ssize_t)(PAGE_SIZE - (CPUIDLE_NAME_LEN + 2))) goto out; - i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name); + i += sysfs_emit_at(buf, i, "%.*s ", CPUIDLE_NAME_LEN, tmp->name); } out: - i+= sprintf(&buf[i], "\n"); + i += sysfs_emit_at(buf, i, "\n"); mutex_unlock(&cpuidle_lock); return i; } @@ -49,9 +49,9 @@ static ssize_t show_current_driver(struct device *dev, spin_lock(&cpuidle_driver_lock); drv = cpuidle_get_driver(); if (drv) - ret = sprintf(buf, "%s\n", drv->name); + ret = sysfs_emit(buf, "%s\n", drv->name); else - ret = sprintf(buf, "none\n"); + ret = sysfs_emit(buf, "none\n"); spin_unlock(&cpuidle_driver_lock); return ret; @@ -65,9 +65,9 @@ static ssize_t show_current_governor(struct device *dev, mutex_lock(&cpuidle_lock); if (cpuidle_curr_governor) - ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name); + ret = sysfs_emit(buf, "%s\n", cpuidle_curr_governor->name); else - ret = sprintf(buf, "none\n"); + ret = sysfs_emit(buf, "none\n"); mutex_unlock(&cpuidle_lock); return ret; @@ -230,7 +230,7 @@ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store) static ssize_t show_state_##_name(struct cpuidle_state *state, \ struct cpuidle_state_usage *state_usage, char *buf) \ { \ - return sprintf(buf, "%u\n", state->_name);\ + return sysfs_emit(buf, "%u\n", state->_name);\ } #define define_show_state_ull_function(_name) \ @@ -238,7 +238,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \ struct cpuidle_state_usage *state_usage, \ char *buf) \ { \ - return sprintf(buf, "%llu\n", state_usage->_name);\ + return sysfs_emit(buf, "%llu\n", state_usage->_name);\ } #define define_show_state_str_function(_name) \ @@ -247,8 +247,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \ char *buf) \ { \ if (state->_name[0] == '\0')\ - return sprintf(buf, "<null>\n");\ - return sprintf(buf, "%s\n", state->_name);\ + return sysfs_emit(buf, "<null>\n");\ + return sysfs_emit(buf, "%s\n", state->_name);\ } #define define_show_state_time_function(_name) \ @@ -256,7 +256,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \ struct cpuidle_state_usage *state_usage, \ char *buf) \ { \ - return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \ + return sysfs_emit(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \ } define_show_state_time_function(exit_latency) @@ -273,14 +273,14 @@ static ssize_t show_state_time(struct cpuidle_state *state, struct cpuidle_state_usage *state_usage, char *buf) { - return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns)); + return sysfs_emit(buf, "%llu\n", ktime_to_us(state_usage->time_ns)); } static ssize_t show_state_disable(struct cpuidle_state *state, struct cpuidle_state_usage *state_usage, char *buf) { - return sprintf(buf, "%llu\n", + return sysfs_emit(buf, "%llu\n", state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER); } @@ -310,7 +310,7 @@ static ssize_t show_state_default_status(struct cpuidle_state *state, struct 
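
The sysfs.c conversion above and below swaps sprintf()/scnprintf() in show() callbacks for sysfs_emit() and sysfs_emit_at(), which encode the sysfs contract that 'buf' is a full page: they bound all writes to PAGE_SIZE and complain if handed a buffer that is not the start of a sysfs page, instead of silently overrunning. The two forms, in a hypothetical attribute:

    /* single value: sysfs_emit() formats at offset 0 */
    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        return sysfs_emit(buf, "%d\n", 42);
    }

    /* accumulation: sysfs_emit_at() appends at a running offset */
    static ssize_t example_list_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
    {
        int i, len = 0;

        for (i = 0; i < 3; i++)
            len += sysfs_emit_at(buf, len, "item%d ", i);
        len += sysfs_emit_at(buf, len, "\n");
        return len;
    }
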
cpuidle_state_usage *state_usage, char *buf) { - return sprintf(buf, "%s\n", + return sysfs_emit(buf, "%s\n", state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled"); } @@ -358,7 +358,7 @@ static ssize_t show_state_s2idle_##_name(struct cpuidle_state *state, \ struct cpuidle_state_usage *state_usage, \ char *buf) \ { \ - return sprintf(buf, "%llu\n", state_usage->s2idle_##_name);\ + return sysfs_emit(buf, "%llu\n", state_usage->s2idle_##_name);\ } define_show_state_s2idle_ull_function(usage); @@ -550,7 +550,7 @@ static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf) ssize_t ret; spin_lock(&cpuidle_driver_lock); - ret = sprintf(buf, "%s\n", drv ? drv->name : "none"); + ret = sysfs_emit(buf, "%s\n", drv ? drv->name : "none"); spin_unlock(&cpuidle_driver_lock); return ret; diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 0470d7c175f4..5a2c9badcc64 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -34,15 +34,18 @@ /* DDRMON_CTRL */ #define DDRMON_CTRL 0x04 +#define DDRMON_CTRL_LPDDR5 BIT(6) #define DDRMON_CTRL_DDR4 BIT(5) #define DDRMON_CTRL_LPDDR4 BIT(4) #define DDRMON_CTRL_HARDWARE_EN BIT(3) #define DDRMON_CTRL_LPDDR23 BIT(2) #define DDRMON_CTRL_SOFTWARE_EN BIT(1) #define DDRMON_CTRL_TIMER_CNT_EN BIT(0) -#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_DDR4 | \ +#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_LPDDR5 | \ + DDRMON_CTRL_DDR4 | \ DDRMON_CTRL_LPDDR4 | \ DDRMON_CTRL_LPDDR23) +#define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7) #define DDRMON_CH0_WR_NUM 0x20 #define DDRMON_CH0_RD_NUM 0x24 @@ -116,12 +119,60 @@ struct rockchip_dfi { int buswidth[DMC_MAX_CHANNELS]; int ddrmon_stride; bool ddrmon_ctrl_single; + u32 lp5_bank_mode; + bool lp5_ckr; /* true if in 4:1 command-to-data clock ratio mode */ + unsigned int count_multiplier; /* number of data clocks per count */ }; +static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl, + u32 *mask) +{ + u32 ddrmon_ver; + + *mask = DDRMON_CTRL_DDR_TYPE_MASK; + + switch (dfi->ddr_type) { + case ROCKCHIP_DDRTYPE_LPDDR2: + case ROCKCHIP_DDRTYPE_LPDDR3: + *ctrl = DDRMON_CTRL_LPDDR23; + break; + case ROCKCHIP_DDRTYPE_LPDDR4: + case ROCKCHIP_DDRTYPE_LPDDR4X: + *ctrl = DDRMON_CTRL_LPDDR4; + break; + case ROCKCHIP_DDRTYPE_LPDDR5: + ddrmon_ver = readl_relaxed(dfi->regs); + if (ddrmon_ver < 0x40) { + *ctrl = DDRMON_CTRL_LPDDR5 | dfi->lp5_bank_mode; + *mask |= DDRMON_CTRL_LP5_BANK_MODE_MASK; + break; + } + + /* + * As it is unknown whether the unpleasant special case + * behaviour used by the vendor kernel is needed for any + * shipping hardware, ask users to report if they have + * some of that hardware. 
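
Two details of the rockchip-dfi LPDDR5 support, continued in the lines below, are worth spelling out. First, the DDR-type to DDRMON_CTRL translation is factored into rockchip_dfi_ddrtype_to_ctrl(), which for LPDDR5 reads a version word from the start of the register block and rejects unknown DDRMON versions with -EOPNOTSUPP rather than guessing. Second, cycle counts are scaled by count_multiplier, the number of data clocks per monitor count (2 on rk3588, doubled again when the lp5_ckr 4:1 command-to-data clock-ratio flag is set). The control writes use the Rockchip "hiword mask" idiom, where the upper 16 bits of a write select which of the lower 16 bits take effect, so no read-modify-write is needed. A sketch, with the macro as it is commonly defined in Rockchip drivers (the exact definition lives in the driver):

    #include <linux/io.h>

    #define DDRMON_CTRL                 0x04
    #define HIWORD_UPDATE(val, mask)    ((val) | (mask) << 16)

    static void example_set_ddr_type(void __iomem *dfi_regs,
                                     u32 ctrl, u32 ctrl_mask)
    {
        /* only the bits named in ctrl_mask change; the rest are kept */
        writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask),
                       dfi_regs + DDRMON_CTRL);
    }
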
+ */ + dev_err(&dfi->edev->dev, + "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n", + ddrmon_ver); + return -EOPNOTSUPP; + default: + dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n", + dfi->ddr_type); + return -EOPNOTSUPP; + } + + return 0; +} + static int rockchip_dfi_enable(struct rockchip_dfi *dfi) { void __iomem *dfi_regs = dfi->regs; int i, ret = 0; + u32 ctrl; + u32 ctrl_mask; mutex_lock(&dfi->mutex); @@ -135,8 +186,11 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi) goto out; } + ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl, &ctrl_mask); + if (ret) + goto out; + for (i = 0; i < dfi->max_channels; i++) { - u32 ctrl = 0; if (!(dfi->channel_mask & BIT(i))) continue; @@ -146,21 +200,7 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi) DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN), dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); - /* set ddr type to dfi */ - switch (dfi->ddr_type) { - case ROCKCHIP_DDRTYPE_LPDDR2: - case ROCKCHIP_DDRTYPE_LPDDR3: - ctrl = DDRMON_CTRL_LPDDR23; - break; - case ROCKCHIP_DDRTYPE_LPDDR4: - case ROCKCHIP_DDRTYPE_LPDDR4X: - ctrl = DDRMON_CTRL_LPDDR4; - break; - default: - break; - } - - writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK), + writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask), dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); /* enable count, use software mode */ @@ -435,7 +475,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event) switch (event->attr.config) { case PERF_EVENT_CYCLES: - count = total.c[0].clock_cycles; + count = total.c[0].clock_cycles * dfi->count_multiplier; break; case PERF_EVENT_READ_BYTES: for (i = 0; i < dfi->max_channels; i++) @@ -651,10 +691,14 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi) break; case ROCKCHIP_DDRTYPE_LPDDR4: case ROCKCHIP_DDRTYPE_LPDDR4X: + case ROCKCHIP_DDRTYPE_LPDDR5: dfi->burst_len = 16; break; } + if (!dfi->count_multiplier) + dfi->count_multiplier = 1; + ret = perf_pmu_register(pmu, "rockchip_ddr", -1); if (ret) return ret; @@ -726,7 +770,7 @@ static int rk3568_dfi_init(struct rockchip_dfi *dfi) static int rk3588_dfi_init(struct rockchip_dfi *dfi) { struct regmap *regmap_pmu = dfi->regmap_pmu; - u32 reg2, reg3, reg4; + u32 reg2, reg3, reg4, reg6; regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, ®2); regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, ®3); @@ -751,6 +795,15 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi) dfi->max_channels = 4; dfi->ddrmon_stride = 0x4000; + dfi->count_multiplier = 2; + + if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) { + regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, ®6); + dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7; + dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6); + if (dfi->lp5_ckr) + dfi->count_multiplier *= 2; + } return 0; }; diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c index 22fe9e631f8a..4c22be728f6a 100644 --- a/drivers/devfreq/mtk-cci-devfreq.c +++ b/drivers/devfreq/mtk-cci-devfreq.c @@ -86,7 +86,7 @@ static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage) soc_data->sram_max_volt); return ret; } - } else if (pre_voltage > new_voltage) { + } else { voltage = max(new_voltage, pre_vsram - soc_data->max_volt_shift); ret = regulator_set_voltage(drv->proc_reg, voltage, @@ -386,7 +386,8 @@ out_disable_cci_clk: out_free_resources: if (regulator_is_enabled(drv->proc_reg)) regulator_disable(drv->proc_reg); - if (drv->sram_reg && 
regulator_is_enabled(drv->sram_reg)) + if (!IS_ERR_OR_NULL(drv->sram_reg) && + regulator_is_enabled(drv->sram_reg)) regulator_disable(drv->sram_reg); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 395c6be901ce..dcea66aadfa3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2665,7 +2665,7 @@ static int amdgpu_pmops_thaw(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); /* do not resume device if it's normal hibernation */ - if (!pm_hibernate_is_recovering()) + if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend()) return 0; return amdgpu_device_resume(drm_dev, true); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 79997553d8f9..b934523593d9 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -597,8 +597,6 @@ config HID_LED config HID_LENOVO tristate "Lenovo / Thinkpad devices" - depends on ACPI - select ACPI_PLATFORM_PROFILE select NEW_LEDS select LEDS_CLASS help diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c index 0f2cbae39b2b..7017bfa59093 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c @@ -39,8 +39,12 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type) struct amdtp_hid_data *hid_data = hid->driver_data; struct amdtp_cl_data *cli_data = hid_data->cli_data; struct request_list *req_list = &cli_data->req_list; + struct amd_input_data *in_data = cli_data->in_data; + struct amd_mp2_dev *mp2; int i; + mp2 = container_of(in_data, struct amd_mp2_dev, in_data); + guard(mutex)(&mp2->lock); for (i = 0; i < cli_data->num_hid_devices; i++) { if (cli_data->hid_sensor_hubs[i] == hid) { struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL); @@ -75,6 +79,8 @@ void amd_sfh_work(struct work_struct *work) u8 report_id, node_type; u8 report_size = 0; + mp2 = container_of(in_data, struct amd_mp2_dev, in_data); + guard(mutex)(&mp2->lock); req_node = list_last_entry(&req_list->list, struct request_list, list); list_del(&req_node->list); current_index = req_node->current_index; @@ -83,7 +89,6 @@ void amd_sfh_work(struct work_struct *work) node_type = req_node->report_type; kfree(req_node); - mp2 = container_of(in_data, struct amd_mp2_dev, in_data); mp2_ops = mp2->mp2_ops; if (node_type == HID_FEATURE_REPORT) { report_size = mp2_ops->get_feat_rep(sensor_index, report_id, @@ -107,6 +112,8 @@ void amd_sfh_work(struct work_struct *work) cli_data->cur_hid_dev = current_index; cli_data->sensor_requested_cnt[current_index] = 0; amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]); + if (!list_empty(&req_list->list)) + schedule_delayed_work(&cli_data->work, 0); } void amd_sfh_work_buffer(struct work_struct *work) @@ -117,9 +124,10 @@ void amd_sfh_work_buffer(struct work_struct *work) u8 report_size; int i; + mp2 = container_of(in_data, struct amd_mp2_dev, in_data); + guard(mutex)(&mp2->lock); for (i = 0; i < cli_data->num_hid_devices; i++) { if (cli_data->sensor_sts[i] == SENSOR_ENABLED) { - mp2 = container_of(in_data, struct amd_mp2_dev, in_data); report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i], cli_data->report_id[i], in_data); hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT, diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h index f44a3bb2fbd4..78f830c133e5 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h +++ 
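
In the mtk-cci error path just above, the sram supply is optional and so is presumably held as an ERR_PTR when absent (the *_get_optional() regulator convention); checking !IS_ERR_OR_NULL() before regulator_is_enabled() keeps the cleanup path from dereferencing that sentinel. The shape of the check, mirroring the hunk:

    /* optional supply: absence is encoded as an ERR_PTR, not NULL */
    if (!IS_ERR_OR_NULL(drv->sram_reg) &&
        regulator_is_enabled(drv->sram_reg))
        regulator_disable(drv->sram_reg);
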
b/drivers/hid/amd-sfh-hid/amd_sfh_common.h @@ -10,6 +10,7 @@ #ifndef AMD_SFH_COMMON_H #define AMD_SFH_COMMON_H +#include <linux/mutex.h> #include <linux/pci.h> #include "amd_sfh_hid.h" @@ -59,6 +60,8 @@ struct amd_mp2_dev { u32 mp2_acs; struct sfh_dev_status dev_en; struct work_struct work; + /* mp2 to protect data */ + struct mutex lock; u8 init_done; u8 rver; }; diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c index 2983af969579..1d9f955573aa 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c @@ -466,6 +466,10 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i if (!privdata->cl_data) return -ENOMEM; + rc = devm_mutex_init(&pdev->dev, &privdata->lock); + if (rc) + return rc; + privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data; if (privdata->sfh1_1_ops) { if (boot_cpu_data.x86 >= 0x1A) diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index d27dcfb2b9e4..8db9d4e7c3b0 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -974,7 +974,10 @@ static int asus_input_mapping(struct hid_device *hdev, case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP); break; case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN); break; case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE); break; + case 0x4e: asus_map_key_clear(KEY_FN_ESC); break; + case 0x7e: asus_map_key_clear(KEY_EMOJI_PICKER); break; + case 0x8b: asus_map_key_clear(KEY_PROG1); break; /* ProArt Creator Hub key */ case 0x6b: asus_map_key_clear(KEY_F21); break; /* ASUS touchpad toggle */ case 0x38: asus_map_key_clear(KEY_PROG1); break; /* ROG key */ case 0xba: asus_map_key_clear(KEY_PROG2); break; /* Fn+C ASUS Splendid */ diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 482f62a78c41..5a95ea3bec98 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -229,10 +229,12 @@ static int cp2112_gpio_set_unlocked(struct cp2112_device *dev, ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - if (ret < 0) + if (ret != CP2112_GPIO_SET_LENGTH) { hid_err(hdev, "error setting GPIO values: %d\n", ret); + return ret < 0 ? ret : -EIO; + } - return ret; + return 0; } static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset, @@ -309,9 +311,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip, * Set gpio value when output direction is already set, * as specified in AN495, Rev. 0.2, cpt. 
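
The amd-sfh changes above serialize the request-list and report handling behind a new per-device mutex, using two pieces of modern kernel plumbing: devm_mutex_init() ties the mutex's teardown to the device's lifetime (it can fail, hence the error check in probe), and guard(mutex)(...) from <linux/cleanup.h> takes the lock and releases it automatically when the enclosing scope exits, which is why the converted functions gain no matching unlock calls. A minimal sketch of the guard pattern; names are illustrative:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static int example_locked_update(struct mutex *lock, int *counter)
    {
        guard(mutex)(lock);     /* dropped automatically at any return */

        if (*counter < 0)
            return -EINVAL;     /* no explicit unlock needed */
        (*counter)++;
        return 0;
    }
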
4.4 */ - cp2112_gpio_set_unlocked(dev, offset, value); - - return 0; + return cp2112_gpio_set_unlocked(dev, offset, value); } static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index b3121fa7a72d..654879814f97 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -32,8 +32,6 @@ #include <linux/leds.h> #include <linux/workqueue.h> -#include <linux/platform_profile.h> - #include "hid-ids.h" /* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */ @@ -734,7 +732,7 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data) report_key_event(input, KEY_RFKILL); return 1; } - platform_profile_cycle(); + report_key_event(input, KEY_PERFORMANCE); return 1; case TP_X12_RAW_HOTKEY_FN_F10: /* TAB1 has PICKUP Phone and TAB2 use Snipping tool*/ diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c index 854926b3cfd4..a2643ae790d6 100644 --- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c +++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c @@ -997,6 +997,8 @@ static const struct pci_device_id quicki2c_pci_tbl[] = { { PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2, &ptl_ddata) }, { PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1, &ptl_ddata) }, { PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2, &ptl_ddata) }, + { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT1, &ptl_ddata) }, + { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT2, &ptl_ddata) }, { } }; MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl); diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h index d412eafcf9ea..4e60a7de4727 100644 --- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h +++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h @@ -13,6 +13,8 @@ #define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A #define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448 #define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A +#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT1 0x4D48 +#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT2 0x4D4A /* Packet size value, the unit is 16 bytes */ #define MAX_PACKET_SIZE_VALUE_LNL 256 diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c index 5e5f179dd113..84314989dc53 100644 --- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c +++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c @@ -976,6 +976,8 @@ static const struct pci_device_id quickspi_pci_tbl[] = { {PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT2, &ptl), }, {PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT1, &ptl), }, {PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), }, + {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT1, &ptl), }, + {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT2, &ptl), }, {} }; MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl); diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h index 6fdf674b21c5..f3532d866749 100644 --- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h +++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h @@ -19,6 +19,8 @@ #define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT2 0xE34B #define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT1 
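
The cp2112 fix above hinges on hid_hw_raw_request() returning the number of bytes actually transferred, or a negative errno: a short transfer used to be treated as success, and cp2112_gpio_direction_output() discarded the result entirely. The new shape of the check, mirroring the hunk (REPORT_ID and LEN stand in for the driver's constants):

    ret = hid_hw_raw_request(hdev, REPORT_ID, buf, LEN,
                             HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
    if (ret != LEN)
        return ret < 0 ? ret : -EIO;    /* errno, or short write */
    return 0;
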
0xE449 #define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B +#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT1 0x4D49 +#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT2 0x4D4B /* HIDSPI special ACPI parameters DSM methods */ #define ACPI_QUICKSPI_REVISION_NUM 2 diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 91a7b7e7c0c8..9ba83954c255 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -259,7 +259,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 3, .target_residency = 6, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -267,7 +267,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", @@ -275,7 +275,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 20, .target_residency = 80, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -283,7 +283,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, .target_residency = 800, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -296,7 +296,7 @@ static struct cpuidle_state snb_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 2, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -304,7 +304,7 @@ static struct cpuidle_state snb_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", @@ -312,7 +312,7 @@ static struct cpuidle_state snb_cstates[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, .target_residency = 211, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -320,7 +320,7 @@ static struct cpuidle_state snb_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 104, .target_residency = 345, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", @@ -328,7 +328,7 @@ static struct cpuidle_state snb_cstates[] __initdata = { .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 109, .target_residency = 345, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -341,7 +341,7 @@ static struct cpuidle_state byt_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6N", @@ -349,7 +349,7 @@ static struct cpuidle_state byt_cstates[] __initdata = { .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, .target_residency = 275, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6S", @@ -357,7 +357,7 @@ static struct cpuidle_state byt_cstates[] __initdata = { .flags = MWAIT2flg(0x52) | 
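
The long run of intel_idle changes through these C-state tables is a single mechanical substitution: ".enter = &intel_idle" becomes ".enter = intel_idle". In C a function designator used in an initializer already decays to a function pointer, so the & was redundant; the change is cosmetic and the generated code is identical. For example:

    static void f(void) { }

    struct ops { void (*cb)(void); };

    /* both initializers store the same pointer */
    static struct ops a = { .cb = f  };
    static struct ops b = { .cb = &f };
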
CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 500, .target_residency = 560, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", @@ -365,7 +365,7 @@ static struct cpuidle_state byt_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, .target_residency = 4000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7S", @@ -373,7 +373,7 @@ static struct cpuidle_state byt_cstates[] __initdata = { .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, .target_residency = 20000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -386,7 +386,7 @@ static struct cpuidle_state cht_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6N", @@ -394,7 +394,7 @@ static struct cpuidle_state cht_cstates[] __initdata = { .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, .target_residency = 275, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6S", @@ -402,7 +402,7 @@ static struct cpuidle_state cht_cstates[] __initdata = { .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, .target_residency = 560, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", @@ -410,7 +410,7 @@ static struct cpuidle_state cht_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, .target_residency = 4000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7S", @@ -418,7 +418,7 @@ static struct cpuidle_state cht_cstates[] __initdata = { .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, .target_residency = 20000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -431,7 +431,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -439,7 +439,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", @@ -447,7 +447,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, .target_residency = 156, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -455,7 +455,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, .target_residency = 300, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", @@ -463,7 +463,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = { .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 87, .target_residency = 300, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -476,7 +476,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = { .flags = 
MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -484,7 +484,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 80, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", @@ -492,7 +492,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, .target_residency = 156, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -500,7 +500,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 82, .target_residency = 300, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -513,7 +513,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -521,7 +521,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 250, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", @@ -529,7 +529,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, .target_residency = 300, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -537,7 +537,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 84, .target_residency = 400, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -550,7 +550,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -558,7 +558,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 500, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", @@ -566,7 +566,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, .target_residency = 600, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -574,7 +574,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 88, .target_residency = 700, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -587,7 +587,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 2, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -595,7 +595,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, 
.exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", @@ -603,7 +603,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 33, .target_residency = 100, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -611,7 +611,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, .target_residency = 400, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", @@ -619,7 +619,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 166, .target_residency = 500, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", @@ -627,7 +627,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, .target_residency = 900, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", @@ -635,7 +635,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 600, .target_residency = 1800, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", @@ -643,7 +643,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2600, .target_residency = 7700, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -655,7 +655,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 2, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -663,7 +663,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", @@ -671,7 +671,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 40, .target_residency = 100, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -679,7 +679,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, .target_residency = 400, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", @@ -687,7 +687,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 166, .target_residency = 500, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", @@ -695,7 +695,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 300, .target_residency = 900, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", @@ -703,7 +703,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { .flags = MWAIT2flg(0x50) | 
CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 600, .target_residency = 1800, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", @@ -711,7 +711,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2600, .target_residency = 7700, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -724,7 +724,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 2, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -732,7 +732,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C3", @@ -740,7 +740,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 70, .target_residency = 100, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -748,7 +748,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 85, .target_residency = 200, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", @@ -756,7 +756,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 124, .target_residency = 800, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", @@ -764,7 +764,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 200, .target_residency = 800, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", @@ -772,7 +772,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 480, .target_residency = 5000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", @@ -780,7 +780,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 890, .target_residency = 5000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -793,7 +793,7 @@ static struct cpuidle_state skx_cstates[] __initdata = { .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE, .exit_latency = 2, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -801,7 +801,7 @@ static struct cpuidle_state skx_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -809,7 +809,7 @@ static struct cpuidle_state skx_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 133, .target_residency = 600, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = 
intel_idle_s2idle, }, { .enter = NULL } @@ -822,7 +822,7 @@ static struct cpuidle_state icx_cstates[] __initdata = { .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE, .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -830,7 +830,7 @@ static struct cpuidle_state icx_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 4, .target_residency = 4, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -838,7 +838,7 @@ static struct cpuidle_state icx_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 170, .target_residency = 600, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -861,7 +861,7 @@ static struct cpuidle_state adl_cstates[] __initdata = { .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE, .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -869,7 +869,7 @@ static struct cpuidle_state adl_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 2, .target_residency = 4, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -877,7 +877,7 @@ static struct cpuidle_state adl_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 220, .target_residency = 600, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", @@ -885,7 +885,7 @@ static struct cpuidle_state adl_cstates[] __initdata = { .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 280, .target_residency = 800, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", @@ -893,7 +893,7 @@ static struct cpuidle_state adl_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 680, .target_residency = 2000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -906,7 +906,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE, .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -914,7 +914,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 2, .target_residency = 4, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -922,7 +922,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 170, .target_residency = 500, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", @@ -930,7 +930,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, .target_residency = 600, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", @@ -938,7 +938,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 230, .target_residency = 700, - .enter = 
&intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -951,7 +951,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -959,7 +959,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 420, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", @@ -967,7 +967,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 310, .target_residency = 930, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -980,7 +980,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = { .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE, .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -988,7 +988,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 2, .target_residency = 4, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -996,7 +996,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 195, .target_residency = 585, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", @@ -1004,7 +1004,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = { .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 260, .target_residency = 1040, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", @@ -1012,7 +1012,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 660, .target_residency = 1980, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1025,7 +1025,7 @@ static struct cpuidle_state spr_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -1033,7 +1033,7 @@ static struct cpuidle_state spr_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 2, .target_residency = 4, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -1042,7 +1042,7 @@ static struct cpuidle_state spr_cstates[] __initdata = { CPUIDLE_FLAG_INIT_XSTATE, .exit_latency = 290, .target_residency = 800, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1055,7 +1055,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -1063,7 +1063,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 4, .target_residency = 4, - .enter = &intel_idle, + .enter 
= intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -1073,7 +1073,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = { CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 170, .target_residency = 650, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6P", @@ -1083,7 +1083,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = { CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 210, .target_residency = 1000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1096,7 +1096,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -1104,7 +1104,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 4, .target_residency = 4, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -1114,7 +1114,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = { CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 220, .target_residency = 650, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6P", @@ -1124,7 +1124,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = { CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 240, .target_residency = 750, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1137,7 +1137,7 @@ static struct cpuidle_state atom_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C2", @@ -1145,7 +1145,7 @@ static struct cpuidle_state atom_cstates[] __initdata = { .flags = MWAIT2flg(0x10), .exit_latency = 20, .target_residency = 80, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C4", @@ -1153,7 +1153,7 @@ static struct cpuidle_state atom_cstates[] __initdata = { .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, .target_residency = 400, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -1161,7 +1161,7 @@ static struct cpuidle_state atom_cstates[] __initdata = { .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 560, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1173,7 +1173,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 4, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C4", @@ -1181,7 +1181,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = { .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, .target_residency = 400, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -1189,7 +1189,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = { .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 560, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7", @@ -1197,7 +1197,7 @@ static 
struct cpuidle_state tangier_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1200, .target_residency = 4000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", @@ -1205,7 +1205,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = { .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, .target_residency = 20000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1217,7 +1217,7 @@ static struct cpuidle_state avn_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 2, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -1225,7 +1225,7 @@ static struct cpuidle_state avn_cstates[] __initdata = { .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 15, .target_residency = 45, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1237,7 +1237,7 @@ static struct cpuidle_state knl_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 1, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle }, { .name = "C6", @@ -1245,7 +1245,7 @@ static struct cpuidle_state knl_cstates[] __initdata = { .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 120, .target_residency = 500, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle }, { .enter = NULL } @@ -1258,7 +1258,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 2, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -1266,7 +1266,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -1274,7 +1274,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 133, .target_residency = 133, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C7s", @@ -1282,7 +1282,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = { .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 155, .target_residency = 155, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C8", @@ -1290,7 +1290,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = { .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 1000, .target_residency = 1000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C9", @@ -1298,7 +1298,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = { .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 2000, .target_residency = 2000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C10", @@ -1306,7 +1306,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = { .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 10000, .target_residency = 10000, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1319,7 +1319,7 
@@ static struct cpuidle_state dnv_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 2, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -1327,7 +1327,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -1335,7 +1335,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 50, .target_residency = 500, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1352,7 +1352,7 @@ static struct cpuidle_state snr_cstates[] __initdata = { .flags = MWAIT2flg(0x00), .exit_latency = 2, .target_residency = 2, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -1360,7 +1360,7 @@ static struct cpuidle_state snr_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 15, .target_residency = 25, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6", @@ -1368,7 +1368,7 @@ static struct cpuidle_state snr_cstates[] __initdata = { .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 130, .target_residency = 500, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1381,7 +1381,7 @@ static struct cpuidle_state grr_cstates[] __initdata = { .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -1389,7 +1389,7 @@ static struct cpuidle_state grr_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 2, .target_residency = 10, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6S", @@ -1397,7 +1397,7 @@ static struct cpuidle_state grr_cstates[] __initdata = { .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 500, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } @@ -1410,7 +1410,7 @@ static struct cpuidle_state srf_cstates[] __initdata = { .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 1, .target_residency = 1, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C1E", @@ -1418,7 +1418,7 @@ static struct cpuidle_state srf_cstates[] __initdata = { .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 2, .target_residency = 10, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6S", @@ -1427,7 +1427,7 @@ static struct cpuidle_state srf_cstates[] __initdata = { CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 270, .target_residency = 700, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .name = "C6SP", @@ -1436,7 +1436,7 @@ static struct cpuidle_state srf_cstates[] __initdata = { CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 310, .target_residency = 900, - .enter = &intel_idle, + .enter = intel_idle, .enter_s2idle = intel_idle_s2idle, }, { .enter = NULL } diff --git 
a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 028d9f031dde..8b506417ad2f 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -233,6 +233,7 @@ static u16 get_legacy_obj_type(u16 opcode) { switch (opcode) { case MLX5_CMD_OP_CREATE_RQ: + case MLX5_CMD_OP_CREATE_RMP: return MLX5_EVENT_QUEUE_TYPE_RQ; case MLX5_CMD_OP_CREATE_QP: return MLX5_EVENT_QUEUE_TYPE_QP; diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index 65fbd098f9e9..4c842368289f 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -711,6 +711,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid) iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev); mutex_unlock(&igroup->lock); + iommufd_hw_pagetable_put(idev->ictx, hwpt); + /* Caller must destroy hwpt */ return hwpt; } @@ -1057,7 +1059,6 @@ void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid) hwpt = iommufd_hw_pagetable_detach(idev, pasid); if (!hwpt) return; - iommufd_hw_pagetable_put(idev->ictx, hwpt); refcount_dec(&idev->obj.users); } EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, "IOMMUFD"); diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c index fc4de63b0bce..e23d9ee4fe38 100644 --- a/drivers/iommu/iommufd/eventq.c +++ b/drivers/iommu/iommufd/eventq.c @@ -393,12 +393,12 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name, const struct file_operations *fops) { struct file *filep; - int fdno; spin_lock_init(&eventq->lock); INIT_LIST_HEAD(&eventq->deliver); init_waitqueue_head(&eventq->wait_queue); + /* The filep is fput() by the core code during failure */ filep = anon_inode_getfile(name, fops, eventq, O_RDWR); if (IS_ERR(filep)) return PTR_ERR(filep); @@ -408,10 +408,7 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name, eventq->filep = filep; refcount_inc(&eventq->obj.users); - fdno = get_unused_fd_flags(O_CLOEXEC); - if (fdno < 0) - fput(filep); - return fdno; + return get_unused_fd_flags(O_CLOEXEC); } static const struct file_operations iommufd_fault_fops = @@ -452,7 +449,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd) return 0; out_put_fdno: put_unused_fd(fdno); - fput(fault->common.filep); return rc; } @@ -536,7 +532,6 @@ int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd) out_put_fdno: put_unused_fd(fdno); - fput(veventq->common.filep); out_abort: iommufd_object_abort_and_destroy(ucmd->ictx, &veventq->common.obj); out_unlock_veventqs: diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index 0da2a81eedfa..627f9b78483a 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -454,9 +454,8 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx, if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) { struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt); - lockdep_assert_not_held(&hwpt_paging->ioas->mutex); - if (hwpt_paging->auto_domain) { + lockdep_assert_not_held(&hwpt_paging->ioas->mutex); iommufd_object_put_and_try_destroy(ictx, &hwpt->obj); return; } diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index 15af7ced0501..ce775fbbae94 100644 --- a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -23,6 +23,7 @@ #include "iommufd_test.h" struct iommufd_object_ops { + size_t file_offset; void (*pre_destroy)(struct iommufd_object *obj); void (*destroy)(struct iommufd_object 
*obj); void (*abort)(struct iommufd_object *obj); @@ -121,6 +122,10 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj) old = xas_store(&xas, NULL); xa_unlock(&ictx->objects); WARN_ON(old != XA_ZERO_ENTRY); + + if (WARN_ON(!refcount_dec_and_test(&obj->users))) + return; + kfree(obj); } @@ -131,10 +136,30 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj) void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx, struct iommufd_object *obj) { - if (iommufd_object_ops[obj->type].abort) - iommufd_object_ops[obj->type].abort(obj); + const struct iommufd_object_ops *ops = &iommufd_object_ops[obj->type]; + + if (ops->file_offset) { + struct file **filep = ((void *)obj) + ops->file_offset; + + /* + * A file should hold a users refcount while the file is open + * and put it back in its release. The file should hold a + * pointer to obj in its private data. Normal fput() is + * deferred to a workqueue and can get out of order with the + * following kfree(obj). Using the sync version ensures the + * release happens immediately. During abort we require the file + * refcount is one at this point - meaning the object alloc + * function cannot do anything to allow another thread to take a + * refcount prior to a guaranteed success. + */ + if (*filep) + __fput_sync(*filep); + } + + if (ops->abort) + ops->abort(obj); else - iommufd_object_ops[obj->type].destroy(obj); + ops->destroy(obj); iommufd_object_abort(ictx, obj); } @@ -550,16 +575,23 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma) if (vma->vm_flags & VM_EXEC) return -EPERM; + mtree_lock(&ictx->mt_mmap); /* vma->vm_pgoff carries a page-shifted start position to an immap */ immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT); - if (!immap) + if (!immap || !refcount_inc_not_zero(&immap->owner->users)) { + mtree_unlock(&ictx->mt_mmap); return -ENXIO; + } + mtree_unlock(&ictx->mt_mmap); + /* * mtree_load() returns the immap for any contained mmio_addr, so only * allow the exact immap thing to be mapped */ - if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length) - return -ENXIO; + if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length) { + rc = -ENXIO; + goto err_refcount; + } vma->vm_pgoff = 0; vma->vm_private_data = immap; @@ -570,10 +602,11 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma) immap->mmio_addr >> PAGE_SHIFT, length, vma->vm_page_prot); if (rc) - return rc; + goto err_refcount; + return 0; - /* vm_ops.open won't be called for mmap itself.
*/ - refcount_inc(&immap->owner->users); +err_refcount: + refcount_dec(&immap->owner->users); return rc; } @@ -651,6 +684,12 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx) } EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, "IOMMUFD"); +#define IOMMUFD_FILE_OFFSET(_struct, _filep, _obj) \ + .file_offset = (offsetof(_struct, _filep) + \ + BUILD_BUG_ON_ZERO(!__same_type( \ + struct file *, ((_struct *)NULL)->_filep)) + \ + BUILD_BUG_ON_ZERO(offsetof(_struct, _obj))) + static const struct iommufd_object_ops iommufd_object_ops[] = { [IOMMUFD_OBJ_ACCESS] = { .destroy = iommufd_access_destroy_object, @@ -661,6 +700,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = { }, [IOMMUFD_OBJ_FAULT] = { .destroy = iommufd_fault_destroy, + IOMMUFD_FILE_OFFSET(struct iommufd_fault, common.filep, common.obj), }, [IOMMUFD_OBJ_HW_QUEUE] = { .destroy = iommufd_hw_queue_destroy, @@ -683,6 +723,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = { [IOMMUFD_OBJ_VEVENTQ] = { .destroy = iommufd_veventq_destroy, .abort = iommufd_veventq_abort, + IOMMUFD_FILE_OFFSET(struct iommufd_veventq, common.filep, common.obj), }, [IOMMUFD_OBJ_VIOMMU] = { .destroy = iommufd_viommu_destroy, diff --git a/drivers/opp/core.c b/drivers/opp/core.c index edbd60501cf0..bba4f7daff8c 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -476,6 +476,16 @@ static unsigned long _read_bw(struct dev_pm_opp *opp, int index) return opp->bandwidth[index].peak; } +static unsigned long _read_opp_key(struct dev_pm_opp *opp, int index, + struct dev_pm_opp_key *key) +{ + key->bw = opp->bandwidth ? opp->bandwidth[index].peak : 0; + key->freq = opp->rates[index]; + key->level = opp->level; + + return true; +} + /* Generic comparison helpers */ static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, unsigned long opp_key, unsigned long key) @@ -509,6 +519,22 @@ static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, return false; } +static bool _compare_opp_key_exact(struct dev_pm_opp **opp, + struct dev_pm_opp *temp_opp, struct dev_pm_opp_key *opp_key, + struct dev_pm_opp_key *key) +{ + bool level_match = (key->level == OPP_LEVEL_UNSET || opp_key->level == key->level); + bool freq_match = (key->freq == 0 || opp_key->freq == key->freq); + bool bw_match = (key->bw == 0 || opp_key->bw == key->bw); + + if (freq_match && level_match && bw_match) { + *opp = temp_opp; + return true; + } + + return false; +} + /* Generic key finding helpers */ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, unsigned long *key, int index, bool available, @@ -541,6 +567,37 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, return opp; } +static struct dev_pm_opp *_opp_table_find_opp_key(struct opp_table *opp_table, + struct dev_pm_opp_key *key, bool available, + unsigned long (*read)(struct dev_pm_opp *opp, int index, + struct dev_pm_opp_key *key), + bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, + struct dev_pm_opp_key *opp_key, struct dev_pm_opp_key *key), + bool (*assert)(struct opp_table *opp_table, unsigned int index)) +{ + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + struct dev_pm_opp_key temp_key; + + /* Assert that the requirement is met */ + if (!assert(opp_table, 0)) + return ERR_PTR(-EINVAL); + + guard(mutex)(&opp_table->lock); + + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { + if (temp_opp->available == available) { + read(temp_opp, 0, &temp_key); + if (compare(&opp, temp_opp, &temp_key, 
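Taken together, the eventq.c and main.c hunks above centralize file cleanup: allocation paths now only reserve an fd, and on failure the generic abort path locates the object's struct file through the recorded file_offset and drops it with __fput_sync() before the object is freed. A condensed sketch of that offset-based lookup, with hypothetical demo_* names rather than the exact iommufd types:

#include <linux/file.h>         /* __fput_sync() */

struct demo_base {
        int type;               /* stands in for iommufd_object, at offset 0 */
};

struct demo_ops {
        size_t file_offset;     /* 0 means "this type embeds no file" */
};

struct demo_obj {
        struct demo_base base;  /* kept first, so file_offset is nonzero */
        struct file *filep;
};

/* Abort-time cleanup: find and synchronously drop the embedded file. */
static void demo_abort_file(void *obj, const struct demo_ops *ops)
{
        if (ops->file_offset) {
                struct file **filep = (void *)((char *)obj + ops->file_offset);

                /*
                 * __fput_sync() runs ->release() immediately instead of via
                 * a workqueue, so it cannot race with the kfree() of obj
                 * that follows in the real abort path.
                 */
                if (*filep)
                        __fput_sync(*filep);
        }
}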
key)) { + /* Increment the reference count of OPP */ + dev_pm_opp_get(opp); + break; + } + } + } + + return opp; +} + static struct dev_pm_opp * _find_key(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), @@ -633,6 +690,48 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); /** + * dev_pm_opp_find_key_exact() - Search for an OPP with exact key set + * @dev: Device for which the OPP is being searched + * @key: OPP key set to match + * @available: true/false - match for available OPP + * + * Search for an exact match of the key set in the OPP table. + * + * Return: A matching opp on success, else ERR_PTR in case of error. + * Possible error values: + * EINVAL: for bad pointers + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * Note: 'available' is a modifier for the search. If 'available' == true, + * then the match is for exact matching key and is available in the stored + * OPP table. If false, the match is for exact key which is not available. + * + * This provides a mechanism to enable an OPP which is not available currently + * or the opposite as well. + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev, + struct dev_pm_opp_key *key, + bool available) +{ + struct opp_table *opp_table __free(put_opp_table) = _find_opp_table(dev); + + if (IS_ERR(opp_table)) { + dev_err(dev, "%s: OPP table not found (%ld)\n", __func__, + PTR_ERR(opp_table)); + return ERR_CAST(opp_table); + } + + return _opp_table_find_opp_key(opp_table, key, available, + _read_opp_key, _compare_opp_key_exact, + assert_single_clk); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_key_exact); + +/** * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the * clock corresponding to the index * @dev: Device for which we do this operation diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c index 1b2f132d76f0..b405dfa20891 100644 --- a/drivers/pinctrl/mediatek/pinctrl-airoha.c +++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c @@ -108,6 +108,9 @@ #define JTAG_UDI_EN_MASK BIT(4) #define JTAG_DFD_EN_MASK BIT(3) +#define REG_FORCE_GPIO_EN 0x0228 +#define FORCE_GPIO_EN(n) BIT(n) + /* LED MAP */ #define REG_LAN_LED0_MAPPING 0x027c #define REG_LAN_LED1_MAPPING 0x0280 @@ -719,16 +722,16 @@ static const struct airoha_pinctrl_func_group mdio_func_group[] = { .name = "mdio", .regmap[0] = { AIROHA_FUNC_MUX, - REG_GPIO_PON_MODE, - GPIO_SGMII_MDIO_MODE_MASK, - GPIO_SGMII_MDIO_MODE_MASK - }, - .regmap[1] = { - AIROHA_FUNC_MUX, REG_GPIO_2ND_I2C_MODE, GPIO_MDC_IO_MASTER_MODE_MODE, GPIO_MDC_IO_MASTER_MODE_MODE }, + .regmap[1] = { + AIROHA_FUNC_MUX, + REG_FORCE_GPIO_EN, + FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2), + FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2) + }, .regmap_size = 2, }, }; @@ -1752,8 +1755,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { .regmap[0] = { AIROHA_FUNC_MUX, REG_GPIO_2ND_I2C_MODE, - GPIO_LAN3_LED0_MODE_MASK, - GPIO_LAN3_LED0_MODE_MASK + GPIO_LAN3_LED1_MODE_MASK, + GPIO_LAN3_LED1_MODE_MASK }, .regmap[1] = { AIROHA_FUNC_MUX, @@ -1816,8 +1819,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { .regmap[0] = { AIROHA_FUNC_MUX, REG_GPIO_2ND_I2C_MODE, - GPIO_LAN3_LED0_MODE_MASK, - GPIO_LAN3_LED0_MODE_MASK + GPIO_LAN3_LED1_MODE_MASK, + 
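The new dev_pm_opp_find_key_exact() collapses the frequency, level and bandwidth lookups into one call in which every key field can opt out of matching. A hedged usage sketch (the 1.2 GHz value and the demo_find_opp() wrapper are hypothetical):

#include <linux/pm_opp.h>

static int demo_find_opp(struct device *dev)
{
        struct dev_pm_opp_key key = {
                .freq = 1200000000UL,           /* match 1.2 GHz exactly */
                .level = OPP_LEVEL_UNSET,       /* do not match on level */
                .bw = 0,                        /* do not match on bandwidth */
        };
        struct dev_pm_opp *opp;

        opp = dev_pm_opp_find_key_exact(dev, &key, true);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        /* ... inspect or act on the matched OPP ... */

        dev_pm_opp_put(opp);    /* the lookup took a reference */
        return 0;
}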
GPIO_LAN3_LED1_MODE_MASK }, .regmap[1] = { AIROHA_FUNC_MUX, @@ -1880,8 +1883,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { .regmap[0] = { AIROHA_FUNC_MUX, REG_GPIO_2ND_I2C_MODE, - GPIO_LAN3_LED0_MODE_MASK, - GPIO_LAN3_LED0_MODE_MASK + GPIO_LAN3_LED1_MODE_MASK, + GPIO_LAN3_LED1_MODE_MASK }, .regmap[1] = { AIROHA_FUNC_MUX, @@ -1944,8 +1947,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = { .regmap[0] = { AIROHA_FUNC_MUX, REG_GPIO_2ND_I2C_MODE, - GPIO_LAN3_LED0_MODE_MASK, - GPIO_LAN3_LED0_MODE_MASK + GPIO_LAN3_LED1_MODE_MASK, + GPIO_LAN3_LED1_MODE_MASK }, .regmap[1] = { AIROHA_FUNC_MUX, diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c index 5ad7cc438068..a25eb2018acd 100644 --- a/drivers/powercap/idle_inject.c +++ b/drivers/powercap/idle_inject.c @@ -133,7 +133,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer) duration_us = READ_ONCE(ii_dev->run_duration_us); duration_us += READ_ONCE(ii_dev->idle_duration_us); - hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC)); + hrtimer_forward_now(timer, us_to_ktime(duration_us)); return HRTIMER_RESTART; } @@ -232,8 +232,7 @@ int idle_inject_start(struct idle_inject_device *ii_dev) idle_inject_wakeup(ii_dev); hrtimer_start(&ii_dev->timer, - ns_to_ktime((idle_duration_us + run_duration_us) * - NSEC_PER_USEC), + us_to_ktime(idle_duration_us + run_duration_us), HRTIMER_MODE_REL); return 0; diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index efc2a81f50e5..f426276e2b6b 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -514,6 +514,11 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) if (max_active_zones) { if (nactive > max_active_zones) { + if (bdev_max_active_zones(bdev) == 0) { + max_active_zones = 0; + zone_info->max_active_zones = 0; + goto validate; + } btrfs_err(device->fs_info, "zoned: %u active zones on %s exceeds max_active_zones %u", nactive, rcu_dereference(device->name), @@ -526,6 +531,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags); } +validate: /* Validate superblock log */ nr_zones = BTRFS_NR_SB_LOG_ZONES; for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c index 6550bd9f002c..74dfb6496095 100644 --- a/fs/smb/server/transport_rdma.c +++ b/fs/smb/server/transport_rdma.c @@ -148,7 +148,7 @@ struct smb_direct_transport { wait_queue_head_t wait_send_pending; atomic_t send_pending; - struct delayed_work post_recv_credits_work; + struct work_struct post_recv_credits_work; struct work_struct send_immediate_work; struct work_struct disconnect_work; @@ -367,8 +367,8 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id) spin_lock_init(&t->lock_new_recv_credits); - INIT_DELAYED_WORK(&t->post_recv_credits_work, - smb_direct_post_recv_credits); + INIT_WORK(&t->post_recv_credits_work, + smb_direct_post_recv_credits); INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work); INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work); @@ -399,9 +399,9 @@ static void free_transport(struct smb_direct_transport *t) wait_event(t->wait_send_pending, atomic_read(&t->send_pending) == 0); - cancel_work_sync(&t->disconnect_work); - cancel_delayed_work_sync(&t->post_recv_credits_work); - cancel_work_sync(&t->send_immediate_work); + disable_work_sync(&t->disconnect_work); + 
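The idle_inject.c hunks above replace an open-coded microsecond conversion with the us_to_ktime() helper; assuming ktime_t's nanosecond representation, the two expressions are equivalent and only the spelling of the scaling changes:

        u64 duration_us = 5000; /* hypothetical 5 ms period */

        ktime_t a = ns_to_ktime(duration_us * NSEC_PER_USEC);
        ktime_t b = us_to_ktime(duration_us);
        /* a == b: the helper folds in the NSEC_PER_USEC multiply. */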
disable_work_sync(&t->post_recv_credits_work); + disable_work_sync(&t->send_immediate_work); if (t->qp) { ib_drain_qp(t->qp); @@ -615,8 +615,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc) wake_up_interruptible(&t->wait_send_credits); if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count)) - mod_delayed_work(smb_direct_wq, - &t->post_recv_credits_work, 0); + queue_work(smb_direct_wq, &t->post_recv_credits_work); if (data_length) { enqueue_reassembly(t, recvmsg, (int)data_length); @@ -773,8 +772,7 @@ again: st->count_avail_recvmsg += queue_removed; if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) { spin_unlock(&st->receive_credit_lock); - mod_delayed_work(smb_direct_wq, - &st->post_recv_credits_work, 0); + queue_work(smb_direct_wq, &st->post_recv_credits_work); } else { spin_unlock(&st->receive_credit_lock); } @@ -801,7 +799,7 @@ read_rfc1002_done: static void smb_direct_post_recv_credits(struct work_struct *work) { struct smb_direct_transport *t = container_of(work, - struct smb_direct_transport, post_recv_credits_work.work); + struct smb_direct_transport, post_recv_credits_work); struct smb_direct_recvmsg *recvmsg; int receive_credits, credits = 0; int ret; @@ -1734,7 +1732,7 @@ static int smb_direct_prepare_negotiation(struct smb_direct_transport *t) goto out_err; } - smb_direct_post_recv_credits(&t->post_recv_credits_work.work); + smb_direct_post_recv_credits(&t->post_recv_credits_work); return 0; out_err: put_recvmsg(t, recvmsg); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 95f3807c8c55..40966512ea18 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -780,11 +780,10 @@ struct cpufreq_frequency_table { else -int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table); +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy); + +int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy); -int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, - struct cpufreq_frequency_table *table); int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy); int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index cf477beae4bb..789406d95e69 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -98,6 +98,25 @@ struct dev_pm_opp_data { unsigned long u_volt; }; +/** + * struct dev_pm_opp_key - Key used to identify OPP entries + * @freq: Frequency in Hz. Use 0 if frequency is not to be matched. + * @level: Performance level associated with the OPP entry. + * Use OPP_LEVEL_UNSET if level is not to be matched. + * @bw: Bandwidth associated with the OPP entry. + * Use 0 if bandwidth is not to be matched. + * + * This structure is used to uniquely identify an OPP entry based on + * frequency, performance level, and bandwidth. Each field can be + * selectively ignored during matching by setting it to its respective + * NOP value. 
+ */ +struct dev_pm_opp_key { + unsigned long freq; + unsigned int level; + u32 bw; +}; + #if defined(CONFIG_PM_OPP) struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); @@ -131,6 +150,10 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available); +struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev, + struct dev_pm_opp_key *key, + bool available); + struct dev_pm_opp * dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, u32 index, bool available); @@ -289,6 +312,13 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, return ERR_PTR(-EOPNOTSUPP); } +static inline struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev, + struct dev_pm_opp_key *key, + bool available) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline struct dev_pm_opp * dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, u32 index, bool available) diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 317ae31e89b3..b02876f1ae38 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -418,6 +418,12 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) { } #endif /* CONFIG_HIBERNATION */ +#if defined(CONFIG_HIBERNATION) && defined(CONFIG_SUSPEND) +bool pm_hibernation_mode_is_suspend(void); +#else +static inline bool pm_hibernation_mode_is_suspend(void) { return false; } +#endif + int arch_resume_nosmt(void); #ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV diff --git a/include/soc/rockchip/rk3588_grf.h b/include/soc/rockchip/rk3588_grf.h index 630b35a55064..02a7b2432d99 100644 --- a/include/soc/rockchip/rk3588_grf.h +++ b/include/soc/rockchip/rk3588_grf.h @@ -12,7 +12,11 @@ #define RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12) #define RK3588_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28) -#define RK3588_PMUGRF_OS_REG4 0x210 -#define RK3588_PMUGRF_OS_REG5 0x214 +#define RK3588_PMUGRF_OS_REG4 0x210 +#define RK3588_PMUGRF_OS_REG5 0x214 +#define RK3588_PMUGRF_OS_REG6 0x218 +#define RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE GENMASK(2, 1) +/* Whether the LPDDR5 is in 2:1 (= 0) or 4:1 (= 1) CKR a.k.a. 
DQS mode */ +#define RK3588_PMUGRF_OS_REG6_LP5_CKR BIT(0) #endif /* __SOC_RK3588_GRF_H */ diff --git a/include/soc/rockchip/rockchip_grf.h b/include/soc/rockchip/rockchip_grf.h index e46fd72aea8d..41c7bb26fd53 100644 --- a/include/soc/rockchip/rockchip_grf.h +++ b/include/soc/rockchip/rockchip_grf.h @@ -13,6 +13,7 @@ enum { ROCKCHIP_DDRTYPE_LPDDR3 = 6, ROCKCHIP_DDRTYPE_LPDDR4 = 7, ROCKCHIP_DDRTYPE_LPDDR4X = 8, + ROCKCHIP_DDRTYPE_LPDDR5 = 9, }; #endif /* __SOC_ROCKCHIP_GRF_H */ diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 8df55397414a..5f17d2e8e954 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -799,7 +799,7 @@ void em_adjust_cpu_capacity(unsigned int cpu) static void em_check_capacity_update(void) { cpumask_var_t cpu_done_mask; - int cpu; + int cpu, failed_cpus = 0; if (!zalloc_cpumask_var(&cpu_done_mask, GFP_KERNEL)) { pr_warn("no free memory\n"); @@ -817,10 +817,8 @@ static void em_check_capacity_update(void) policy = cpufreq_cpu_get(cpu); if (!policy) { - pr_debug("Accessing cpu%d policy failed\n", cpu); - schedule_delayed_work(&em_update_work, - msecs_to_jiffies(1000)); - break; + failed_cpus++; + continue; } cpufreq_cpu_put(policy); @@ -835,6 +833,9 @@ static void em_check_capacity_update(void) em_adjust_new_capacity(cpu, dev, pd); } + if (failed_cpus) + schedule_delayed_work(&em_update_work, msecs_to_jiffies(1000)); + free_cpumask_var(cpu_done_mask); } diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 2f66ab453823..14e85ff23551 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -80,6 +80,17 @@ static const struct platform_hibernation_ops *hibernation_ops; static atomic_t hibernate_atomic = ATOMIC_INIT(1); +#ifdef CONFIG_SUSPEND +/** + * pm_hibernation_mode_is_suspend - Check if hibernation has been set to suspend + */ +bool pm_hibernation_mode_is_suspend(void) +{ + return hibernation_mode == HIBERNATION_SUSPEND; +} +EXPORT_SYMBOL_GPL(pm_hibernation_mode_is_suspend); +#endif + bool hibernate_acquire(void) { return atomic_add_unless(&hibernate_atomic, -1, 0); @@ -695,19 +706,13 @@ static void power_down(void) #ifdef CONFIG_SUSPEND if (hibernation_mode == HIBERNATION_SUSPEND) { + pm_restore_gfp_mask(); error = suspend_devices_and_enter(mem_sleep_current); - if (error) { - hibernation_mode = hibernation_ops ? - HIBERNATION_PLATFORM : - HIBERNATION_SHUTDOWN; - } else { - /* Restore swap signature. */ - error = swsusp_unmark(); - if (error) - pr_err("Swap will be unusable! Try swapon -a.\n"); + if (!error) + goto exit; - return; - } + hibernation_mode = hibernation_ops ? HIBERNATION_PLATFORM : + HIBERNATION_SHUTDOWN; } #endif @@ -718,10 +723,9 @@ static void power_down(void) case HIBERNATION_PLATFORM: error = hibernation_platform_enter(); if (error == -EAGAIN || error == -EBUSY) { - swsusp_unmark(); events_check_enabled = false; pr_info("Wakeup event detected during hibernation, rolling back.\n"); - return; + goto exit; } fallthrough; case HIBERNATION_SHUTDOWN: @@ -740,6 +744,15 @@ static void power_down(void) pr_crit("Power down manually\n"); while (1) cpu_relax(); + +exit: + /* Match the pm_restore_gfp_mask() call in hibernate(). */ + pm_restrict_gfp_mask(); + + /* Restore swap signature. */ + error = swsusp_unmark(); + if (error) + pr_err("Swap will be unusable! 
Try swapon -a.\n"); } static int load_image_and_restore(void) diff --git a/kernel/power/process.c b/kernel/power/process.c index dc0dfc349f22..8ff68ebaa1e0 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -132,7 +132,6 @@ int freeze_processes(void) if (!pm_freezing) static_branch_inc(&freezer_active); - pm_wakeup_clear(0); pm_freezing = true; error = try_to_freeze_tasks(true); if (!error) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 501df0676a61..645f42e40478 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -363,7 +363,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size) * * One radix tree is represented by one struct mem_zone_bm_rtree. There are * two linked lists for the nodes of the tree, one for the inner nodes and - * one for the leave nodes. The linked leave nodes are used for fast linear + * one for the leaf nodes. The linked leaf nodes are used for fast linear * access of the memory bitmap. * * The struct rtree_node represents one node of the radix tree. diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index b4ca17c2fecf..4bb4686c1c08 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -595,6 +595,7 @@ static int enter_state(suspend_state_t state) } pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]); + pm_wakeup_clear(0); pm_suspend_clear_flags(); error = suspend_prepare(state); if (error) diff --git a/kernel/power/swap.c b/kernel/power/swap.c index ad13c461b657..0beff7eeaaba 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -712,7 +712,7 @@ static int save_compressed_image(struct swap_map_handle *handle, goto out_clean; } - data = vzalloc(array_size(nr_threads, sizeof(*data))); + data = vcalloc(nr_threads, sizeof(*data)); if (!data) { pr_err("Failed to allocate %s data\n", hib_comp_algo); ret = -ENOMEM; @@ -1225,14 +1225,14 @@ static int load_compressed_image(struct swap_map_handle *handle, nr_threads = num_online_cpus() - 1; nr_threads = clamp_val(nr_threads, 1, CMP_THREADS); - page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page))); + page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page)); if (!page) { pr_err("Failed to allocate %s page\n", hib_comp_algo); ret = -ENOMEM; goto out_clean; } - data = vzalloc(array_size(nr_threads, sizeof(*data))); + data = vcalloc(nr_threads, sizeof(*data)); if (!data) { pr_err("Failed to allocate %s data\n", hib_comp_algo); ret = -ENOMEM; diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c index 7174e1c1a392..537c6992bb63 100644 --- a/kernel/sched/ext_idle.c +++ b/kernel/sched/ext_idle.c @@ -856,6 +856,32 @@ static bool check_builtin_idle_enabled(void) return false; } +/* + * Determine whether @p is a migration-disabled task in the context of BPF + * code. + * + * We can't simply check whether @p->migration_disabled is set in a + * sched_ext callback, because migration is always disabled for the current + * task while running BPF code. + * + * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) respectively + * disable and re-enable migration. For this reason, the current task + * inside a sched_ext callback is always a migration-disabled task. + * + * Therefore, when @p->migration_disabled == 1, check whether @p is the + * current task or not: if it is, then migration was not disabled before + * entering the callback, otherwise migration was disabled. + * + * Returns true if @p is migration-disabled, false otherwise. 
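The kernel/power/swap.c hunks above move from open-coded array_size() arithmetic to the two-argument allocation helpers; the overflow-checked multiply is preserved, just folded into the allocator, and vcalloc() keeps the zeroing that vzalloc() provided:

        data = vzalloc(array_size(nr_threads, sizeof(*data)));         /* before */
        data = vcalloc(nr_threads, sizeof(*data));                     /* after  */

        page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));   /* before */
        page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page));         /* after  */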
+ */ +static bool is_bpf_migration_disabled(const struct task_struct *p) +{ + if (p->migration_disabled == 1) + return p != current; + else + return p->migration_disabled; +} + static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags, const struct cpumask *allowed, u64 flags) { @@ -898,7 +924,7 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f * selection optimizations and simply check whether the previously * used CPU is idle and within the allowed cpumask. */ - if (p->nr_cpus_allowed == 1 || is_migration_disabled(p)) { + if (p->nr_cpus_allowed == 1 || is_bpf_migration_disabled(p)) { if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) && scx_idle_test_and_clear_cpu(prev_cpu)) cpu = prev_cpu; diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs index afc15e72a7c3..eea57ba95f24 100644 --- a/rust/kernel/cpufreq.rs +++ b/rust/kernel/cpufreq.rs @@ -543,7 +543,7 @@ impl Policy { pub fn cpus(&mut self) -> &mut cpumask::Cpumask { // SAFETY: The pointer to `cpus` is valid for writing and remains valid for the lifetime of // the returned reference. - unsafe { cpumask::CpumaskVar::as_mut_ref(&mut self.as_mut_ref().cpus) } + unsafe { cpumask::CpumaskVar::from_raw_mut(&mut self.as_mut_ref().cpus) } } /// Sets clock for the [`Policy`]. diff --git a/rust/kernel/cpumask.rs b/rust/kernel/cpumask.rs index 3fcbff438670..c1d17826ae7b 100644 --- a/rust/kernel/cpumask.rs +++ b/rust/kernel/cpumask.rs @@ -212,6 +212,7 @@ impl Cpumask { /// } /// assert_eq!(mask2.weight(), count); /// ``` +#[repr(transparent)] pub struct CpumaskVar { #[cfg(CONFIG_CPUMASK_OFFSTACK)] ptr: NonNull<Cpumask>, @@ -270,7 +271,7 @@ impl CpumaskVar { /// /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime /// of the returned reference. - pub unsafe fn as_mut_ref<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self { + pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self { // SAFETY: Guaranteed by the safety requirements of the function. // // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the @@ -284,7 +285,7 @@ impl CpumaskVar { /// /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime /// of the returned reference. - pub unsafe fn as_ref<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self { + pub unsafe fn from_raw<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self { // SAFETY: Guaranteed by the safety requirements of the function. 
// // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the diff --git a/rust/kernel/opp.rs b/rust/kernel/opp.rs index 08126035d2c6..2c763fa9276d 100644 --- a/rust/kernel/opp.rs +++ b/rust/kernel/opp.rs @@ -12,11 +12,12 @@ use crate::{ clk::Hertz, cpumask::{Cpumask, CpumaskVar}, device::Device, - error::{code::*, from_err_ptr, from_result, to_result, Error, Result, VTABLE_DEFAULT_ERROR}, + error::{code::*, from_err_ptr, from_result, to_result, Result, VTABLE_DEFAULT_ERROR}, ffi::c_ulong, prelude::*, str::CString, - types::{ARef, AlwaysRefCounted, Opaque}, + sync::aref::{ARef, AlwaysRefCounted}, + types::Opaque, }; #[cfg(CONFIG_CPU_FREQ)] @@ -162,7 +163,7 @@ impl From<MicroWatt> for c_ulong { /// use kernel::device::Device; /// use kernel::error::Result; /// use kernel::opp::{Data, MicroVolt, Token}; -/// use kernel::types::ARef; +/// use kernel::sync::aref::ARef; /// /// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> { /// let data = Data::new(freq, volt, level, false); @@ -211,7 +212,7 @@ impl Drop for Token { /// use kernel::device::Device; /// use kernel::error::Result; /// use kernel::opp::{Data, MicroVolt, Token}; -/// use kernel::types::ARef; +/// use kernel::sync::aref::ARef; /// /// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> { /// let data = Data::new(freq, volt, level, false); @@ -262,7 +263,7 @@ impl Data { /// use kernel::clk::Hertz; /// use kernel::error::Result; /// use kernel::opp::{OPP, SearchType, Table}; -/// use kernel::types::ARef; +/// use kernel::sync::aref::ARef; /// /// fn find_opp(table: &Table, freq: Hertz) -> Result<ARef<OPP>> { /// let opp = table.opp_from_freq(freq, Some(true), None, SearchType::Exact)?; @@ -335,7 +336,7 @@ impl Drop for ConfigToken { /// use kernel::error::Result; /// use kernel::opp::{Config, ConfigOps, ConfigToken}; /// use kernel::str::CString; -/// use kernel::types::ARef; +/// use kernel::sync::aref::ARef; /// use kernel::macros::vtable; /// /// #[derive(Default)] @@ -500,11 +501,8 @@ impl<T: ConfigOps + Default> Config<T> { // requirements. The OPP core guarantees not to access fields of [`Config`] after this call // and so we don't need to save a copy of them for future use. let ret = unsafe { bindings::dev_pm_opp_set_config(dev.as_raw(), &mut config) }; - if ret < 0 { - Err(Error::from_errno(ret)) - } else { - Ok(ConfigToken(ret)) - } + + to_result(ret).map(|()| ConfigToken(ret)) } /// Config's clk callback. @@ -581,7 +579,7 @@ impl<T: ConfigOps + Default> Config<T> { /// use kernel::device::Device; /// use kernel::error::Result; /// use kernel::opp::Table; -/// use kernel::types::ARef; +/// use kernel::sync::aref::ARef; /// /// fn get_table(dev: &ARef<Device>, mask: &mut Cpumask, freq: Hertz) -> Result<Table> { /// let mut opp_table = Table::from_of_cpumask(dev, mask)?; @@ -713,11 +711,8 @@ impl Table { // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety // requirements. let ret = unsafe { bindings::dev_pm_opp_get_opp_count(self.dev.as_raw()) }; - if ret < 0 { - Err(Error::from_errno(ret)) - } else { - Ok(ret as u32) - } + + to_result(ret).map(|()| ret as u32) } /// Returns max clock latency (in nanoseconds) of the [`OPP`]s in the [`Table`]. 
diff --git a/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py b/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py index feb9f9421c7b..875b086550d1 100755 --- a/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py +++ b/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py @@ -11,7 +11,7 @@ Prerequisites: gnuplot 5.0 or higher gnuplot-py 1.8 or higher (Most of the distributions have these required packages. They may be called - gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... ) + gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... ) Kernel config for Linux trace is enabled diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c index 651fc9f13c08..45c14323a618 100644 --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c @@ -113,7 +113,7 @@ static bool fail_nth_next(struct __test_metadata *_metadata, * necessarily mean a test failure, just that the limit has to be made * bigger. */ - ASSERT_GT(400, nth_state->iteration); + ASSERT_GT(1000, nth_state->iteration); if (nth_state->iteration != 0) { ssize_t res; ssize_t res2; |
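The raised iteration bound in iommufd_fail_nth.c reflects how fail-nth testing walks an operation's failure points one call at a time: arm the nth fallible call, run the operation, and stop once a run completes without consuming the armed fault. A minimal userspace sketch of that loop, modeled on the Documentation/fault-injection example; op() is a hypothetical stand-in for the syscall sequence under test:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int op(void)
{
        /* Hypothetical: exercise the fallible operation under test. */
        return 0;
}

int main(void)
{
        char buf[16];
        int fd = open("/proc/self/fail-nth", O_RDWR);

        if (fd < 0)
                return 1;

        for (int nth = 1; nth < 1000; nth++) {
                snprintf(buf, sizeof(buf), "%d", nth);
                if (pwrite(fd, buf, strlen(buf), 0) < 0)  /* arm call nth */
                        break;
                int res = op();
                ssize_t n = pread(fd, buf, sizeof(buf) - 1, 0);
                if (n <= 0)
                        break;
                buf[n] = '\0';
                /*
                 * A nonzero read-back means the armed fault was never
                 * consumed: op() succeeded past all injection points.
                 */
                if (res == 0 && atoi(buf))
                        break;
        }
        close(fd);
        return 0;
}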