From d29d67357db8dad08f97162bbeb51e4975ee67fd Mon Sep 17 00:00:00 2001
From: Ashwin Chaugule
Date: Thu, 12 Nov 2015 19:52:30 -0500
Subject: ACPI / CPPC: Use h/w reduced version of the PCCT structure

CPPC is enabled only on platforms which support the h/w reduced ACPI
specification, so use the h/w reduced version of the PCCT consistently
when dereferencing PCCT contents.

Fixes: 337aadff8e45 (ACPI: Introduce CPU performance controls using CPPC)
Signed-off-by: Ashwin Chaugule
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/cppc_acpi.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 0bbf84bdcdc4..9361966f1b61 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 static int register_pcc_channel(unsigned pcc_subspace_idx)
 {
-	struct acpi_pcct_subspace *cppc_ss;
+	struct acpi_pcct_hw_reduced *cppc_ss;
 	unsigned int len;

 	if (pcc_subspace_idx >= 0) {
--
cgit v1.2.3

From 6f9b36cd2497660e254c67ed131f0b297ed8bf40 Mon Sep 17 00:00:00 2001
From: Dmitry Torokhov
Date: Thu, 12 Nov 2015 10:26:57 -0800
Subject: PM / wakeirq: check that wake IRQ is valid before accepting it

Check that the IRQ number passed to dev_pm_set_wake_irq() and
dev_pm_set_dedicated_wake_irq() is valid (not negative) before
accepting it.

Signed-off-by: Dmitry Torokhov
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/wakeirq.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'drivers')

diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index eb6e67451dec..0d77cd6fd8d1 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
 	struct wake_irq *wirq;
 	int err;

+	if (irq < 0)
+		return -EINVAL;
+
 	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
 	if (!wirq)
 		return -ENOMEM;
@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	struct wake_irq *wirq;
 	int err;

+	if (irq < 0)
+		return -EINVAL;
+
 	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
 	if (!wirq)
 		return -ENOMEM;
--
cgit v1.2.3
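The new test matters because the common IRQ lookup helpers return a negative errno rather than a valid IRQ number on failure. Below is a minimal sketch of the call pattern the check guards against; foo_probe() and its device are hypothetical and only illustrate the intended usage, not code from this series.

/* Hypothetical consumer: platform_get_irq() may return a negative
 * errno (-ENXIO, -EPROBE_DEFER, ...), which must never be handed to
 * dev_pm_set_wake_irq() as if it were an IRQ number.
 */
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/pm_wakeirq.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int err;

	if (irq < 0)
		return irq;	/* propagate the error, don't register it */

	device_init_wakeup(&pdev->dev, true);

	/* With the check added above, passing a negative value here now
	 * fails fast with -EINVAL instead of being stored as an "IRQ".
	 */
	err = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (err)
		return err;

	return 0;
}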
From add68d6aa9e2492e51707ca603ada5b26c626757 Mon Sep 17 00:00:00 2001
From: Chris Bainbridge
Date: Thu, 12 Nov 2015 18:05:37 +0000
Subject: ACPI / SMBus: Fix boot stalls / high CPU caused by reentrant code

In the SBS initialisation, a reentrant call to wait_event_timeout()
causes an intermittent boot stall of several minutes, usually following
the "Switching to clocksource tsc" message. Another symptom of this bug
is high CPU usage from programs (Firefox, upowerd) querying the battery
state. This is caused by:

1. drivers/acpi/sbshc.c wait_transaction_complete() calls
   wait_event_timeout():

   if (wait_event_timeout(hc->wait, smb_check_done(hc),
                          msecs_to_jiffies(timeout)))

2. ___wait_event sets the task state to uninterruptible
3. ___wait_event calls the "condition" smb_check_done()
4. smb_check_done (sbshc.c) calls through to ec_read() in
   drivers/acpi/ec.c
5. ec_guard() is reached, which calls wait_event_timeout():

   if (wait_event_timeout(ec->wait, ec_transaction_completed(ec),
                          guard))

   i.e. wait_event_timeout() is being called again inside the
   evaluation of the previous wait_event_timeout() condition
6. The EC IRQ handler calls wake_up() and wakes up the sleeping task
   in ec_guard()
7. The task is now in state running even though the wait "condition"
   is still being evaluated
8. The "condition" check returns false, so ___wait_event calls
   schedule_timeout()
9. Since the task state is running, the scheduler immediately
   schedules it again
10. This loop usually repeats for around 250 seconds even though the
    original wait_event_timeout was only 1000 ms. The timeout is
    incorrect because each call to schedule_timeout() usually returns
    immediately, taking less than 1 ms, so the jiffies timeout counter
    is not decremented. The task is now stuck in a running state, and
    so is highly likely to be immediately rescheduled, which takes less
    than a jiffy. The loop will never exit if all schedule_timeout()
    calls take less than a jiffy.

Fix this by replacing the SMBus reads in the wait_event_timeout()
condition with checks of a boolean value that is updated by the EC
query handler.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=107191
Link: https://lkml.org/lkml/2015/11/6/776
Signed-off-by: Chris Bainbridge
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/sbshc.c | 26 +++++++-------------------
 1 file changed, 7 insertions(+), 19 deletions(-)

(limited to 'drivers')

diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index bf034f8b7c1a..e2900518cf7e 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -30,6 +30,7 @@ struct acpi_smb_hc {
 	u8 query_bit;
 	smbus_alarm_callback callback;
 	void *context;
+	bool done;
 };

 static int acpi_smbus_hc_add(struct acpi_device *device);
@@ -100,27 +101,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
 	return ec_write(hc->offset + address, data);
 }

-static inline int smb_check_done(struct acpi_smb_hc *hc)
-{
-	union acpi_smb_status status = {.raw = 0};
-	smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
-	return status.fields.done && (status.fields.status == SMBUS_OK);
-}
-
 static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
 {
-	if (wait_event_timeout(hc->wait, smb_check_done(hc),
-			       msecs_to_jiffies(timeout)))
+	if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
 		return 0;
-	/*
-	 * After the timeout happens, OS will try to check the status of SMbus.
-	 * If the status is what OS expected, it will be regarded as the bogus
-	 * timeout.
-	 */
-	if (smb_check_done(hc))
-		return 0;
-	else
-		return -ETIME;
+	return -ETIME;
 }

 static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
@@ -135,6 +120,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
 	}

 	mutex_lock(&hc->lock);
+	hc->done = false;
 	if (macbook)
 		udelay(5);
 	if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
@@ -235,8 +221,10 @@ static int smbus_alarm(void *context)
 	if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
 		return 0;
 	/* Check if it is only a completion notify */
-	if (status.fields.done)
+	if (status.fields.done && status.fields.status == SMBUS_OK) {
+		hc->done = true;
 		wake_up(&hc->wait);
+	}
 	if (!status.fields.alarm)
 		return 0;
 	mutex_lock(&hc->lock);
--
cgit v1.2.3
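The broader rule behind this fix: the condition expression given to wait_event_timeout() is evaluated while the task is already being put into an uninterruptible sleep, so it must be a cheap, non-sleeping check; the real status inspection belongs in the path that sets a flag and calls wake_up(). Below is a minimal sketch of that pattern with hypothetical names (foo_dev, foo_notify, foo_wait_done) that merely mirror the hc->done / smbus_alarm() arrangement above.

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct foo_dev {
	wait_queue_head_t wait;
	bool done;			/* set by the completion path only */
};

/* Called from the device's event/notify path once the transaction has
 * finished; any status register reads happen here, not in the waiter.
 */
static void foo_notify(struct foo_dev *fd)
{
	fd->done = true;
	wake_up(&fd->wait);
}

static int foo_wait_done(struct foo_dev *fd, int timeout_ms)
{
	/* The condition is a plain flag test: it cannot call back into
	 * code that sleeps while this task is already mid-sleep, which
	 * is what caused the boot stalls fixed above.
	 */
	if (wait_event_timeout(fd->wait, fd->done,
			       msecs_to_jiffies(timeout_ms)))
		return 0;
	return -ETIME;
}

As in acpi_smbus_transaction() above, the flag would be cleared under the transaction lock before each new transaction is started.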
From a76032e0abef027df83f7f053580aa297de54818 Mon Sep 17 00:00:00 2001
From: Chris Bainbridge
Date: Thu, 12 Nov 2015 19:24:47 +0000
Subject: Revert "ACPI / SBS: Add 5 us delay to fix SBS hangs on MacBook"

Revert commit 3349fb64b292 (ACPI / SBS: Add 5 us delay to fix SBS hangs
on MacBook), since the delay introduced by it is no longer necessary
after commit add68d6aa9e2 (ACPI / SMBus: Fix boot stalls / high CPU
caused by reentrant code).

Signed-off-by: Chris Bainbridge
[ rjw: Changelog ]
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/sbshc.c | 22 ----------------------
 1 file changed, 22 deletions(-)

(limited to 'drivers')

diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index e2900518cf7e..2fa8304171e0 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,7 +14,6 @@
 #include 
 #include 
 #include 
-#include 
 #include "sbshc.h"

 #define PREFIX "ACPI: "
@@ -89,8 +88,6 @@ enum acpi_smb_offset {
 	ACPI_SMB_ALARM_DATA = 0x26,	/* 2 bytes alarm data */
 };

-static bool macbook;
-
 static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
 {
 	return ec_read(hc->offset + address, data);
@@ -121,8 +118,6 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,

 	mutex_lock(&hc->lock);
 	hc->done = false;
-	if (macbook)
-		udelay(5);
 	if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
 		goto end;
 	if (temp) {
@@ -250,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
 			      acpi_handle handle, acpi_ec_query_func func,
 			      void *data);

-static int macbook_dmi_match(const struct dmi_system_id *d)
-{
-	pr_debug("Detected MacBook, enabling workaround\n");
-	macbook = true;
-	return 0;
-}
-
-static struct dmi_system_id acpi_smbus_dmi_table[] = {
-	{ macbook_dmi_match, "Apple MacBook", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
-	},
-	{ },
-};
-
 static int acpi_smbus_hc_add(struct acpi_device *device)
 {
 	int status;
 	unsigned long long val;
 	struct acpi_smb_hc *hc;

-	dmi_check_system(acpi_smbus_dmi_table);
-
 	if (!device)
 		return -EINVAL;
--
cgit v1.2.3

From 4981c2b7abfe92a7ad3c9888b8a1ada552d49406 Mon Sep 17 00:00:00 2001
From: Markus Elfring
Date: Sun, 15 Nov 2015 22:42:27 +0100
Subject: ACPI-EC: Drop unnecessary check made before calling acpi_ec_delete_query()

The acpi_ec_delete_query() function tests whether its argument is NULL
and returns immediately if so. Thus the test around the call is not
needed.

This issue was detected by using the Coccinelle software.

Signed-off-by: Markus Elfring
[ rjw: Subject ]
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/ec.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f61a7c834540..b420fb46669d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
 	}

 err_exit:
-	if (result && q)
+	if (result)
 		acpi_ec_delete_query(q);
 	if (data)
 		*data = value;
--
cgit v1.2.3
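The pattern behind this cleanup generalizes: when a destruction helper tolerates NULL, as kfree() does, callers need not guard the call. A small illustrative sketch with hypothetical foo_* names (not the actual EC code) shows the shape of the change Coccinelle flags:

#include <linux/slab.h>

struct foo_query {
	void *payload;
};

/* NULL-tolerant cleanup helper, in the style of kfree(). */
static void foo_delete_query(struct foo_query *q)
{
	if (!q)
		return;
	kfree(q->payload);
	kfree(q);
}

static void foo_finish(struct foo_query *q, int result)
{
	/* Before: if (result && q) foo_delete_query(q);
	 * After:  the redundant NULL test is dropped because the helper
	 *         already handles it.
	 */
	if (result)
		foo_delete_query(q);
}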
From 799281a3c481a738801bf17e3079a6f91df56cd3 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Wed, 18 Nov 2015 23:29:56 +0100
Subject: Revert "cpufreq: intel_pstate: Avoid calculation for max/min"

Revert commit 4ef451487019 (cpufreq: intel_pstate: Avoid calculation
for max/min), as it depends on commit 37afb0003242 (cpufreq:
intel_pstate: Use ACPI perf configuration) that causes problems and
needs to be reverted.

Conflicts:
	drivers/cpufreq/intel_pstate.c

Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/intel_pstate.c | 48 +++++------------------------------------
 1 file changed, 5 insertions(+), 43 deletions(-)

(limited to 'drivers')

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2e31d097def6..d444000ce148 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -163,8 +163,6 @@ struct perf_limits {
 	int max_sysfs_pct;
 	int min_policy_pct;
 	int min_sysfs_pct;
-	int max_perf_ctl;
-	int min_perf_ctl;
 };

 static struct perf_limits performance_limits = {
@@ -191,8 +189,6 @@ static struct perf_limits powersave_limits = {
 	.max_sysfs_pct = 100,
 	.min_policy_pct = 0,
 	.min_sysfs_pct = 0,
-	.max_perf_ctl = 0,
-	.min_perf_ctl = 0,
 };

 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -938,23 +934,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	 * policy, or by cpu specific default values determined through
 	 * experimentation.
 	 */
-	if (limits->max_perf_ctl && limits->max_sysfs_pct >=
-					limits->max_policy_pct) {
-		*max = limits->max_perf_ctl;
-	} else {
-		max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
-					limits->max_perf));
-		*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
-			       cpu->pstate.turbo_pstate);
-	}
+	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
+	*max = clamp_t(int, max_perf_adj,
+			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

-	if (limits->min_perf_ctl) {
-		*min = limits->min_perf_ctl;
-	} else {
-		min_perf = fp_toint(mul_fp(int_tofp(max_perf),
-				    limits->min_perf));
-		*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
-	}
+	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
+	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }

 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -1229,12 +1214,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)

 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
-#if IS_ENABLED(CONFIG_ACPI)
-	struct cpudata *cpu;
-	int i;
-#endif
-	pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
-		 policy->cpuinfo.max_freq, policy->max);
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;

@@ -1270,23 +1249,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
 				  int_tofp(100));

-#if IS_ENABLED(CONFIG_ACPI)
-	cpu = all_cpu_data[policy->cpu];
-	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
-		int control;
-
-		control = convert_to_native_pstate_format(cpu, i);
-		if (control * cpu->pstate.scaling == policy->max)
-			limits->max_perf_ctl = control;
-		if (control * cpu->pstate.scaling == policy->min)
-			limits->min_perf_ctl = control;
-	}
-
-	pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
-		 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
-		 limits->max_perf_ctl);
-#endif
-
 	if (hwp_active)
 		intel_pstate_hwp_set();
--
cgit v1.2.3

From 6ee11e413c495390dacabd96ad462eea9de9dfbd Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Thu, 19 Nov 2015 00:20:42 +0100
Subject: Revert "cpufreq: intel_pstate: Use ACPI perf configuration"

Revert commit 37afb0003242 (cpufreq: intel_pstate: Use ACPI perf
configuration) that is reported to cause a regression on a system where
invalid data are returned by the ACPI _PSS object.
Since that commit makes assumptions regarding the _PSS output correctness that may turn out to be overly optimistic in general, there is a concern that it may introduce regression on more systems, so it's better to revert it now and we'll revisit the underlying issue in the next cycle with a more robust solution. Conflicts: drivers/cpufreq/intel_pstate.c Fixes: 37afb0003242 (cpufreq: intel_pstate: Use ACPI perf configuration) Reported-by: Borislav Petkov Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/Kconfig.x86 | 1 - drivers/cpufreq/intel_pstate.c | 171 +---------------------------------------- 2 files changed, 1 insertion(+), 171 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index adbd1de1cea5..c59bdcb83217 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -5,7 +5,6 @@ config X86_INTEL_PSTATE bool "Intel P state control" depends on X86 - select ACPI_PROCESSOR if ACPI help This driver provides a P state for Intel core processors. The driver implements an internal governor and will become diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index d444000ce148..ef05944d8b16 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -34,10 +34,6 @@ #include #include -#if IS_ENABLED(CONFIG_ACPI) -#include -#endif - #define BYT_RATIOS 0x66a #define BYT_VIDS 0x66b #define BYT_TURBO_RATIOS 0x66c @@ -117,9 +113,6 @@ struct cpudata { u64 prev_mperf; u64 prev_tsc; struct sample sample; -#if IS_ENABLED(CONFIG_ACPI) - struct acpi_processor_performance acpi_perf_data; -#endif }; static struct cpudata **all_cpu_data; @@ -150,7 +143,6 @@ struct cpu_defaults { static struct pstate_adjust_policy pid_params; static struct pstate_funcs pstate_funcs; static int hwp_active; -static int no_acpi_perf; struct perf_limits { int no_turbo; @@ -197,153 +189,6 @@ static struct perf_limits *limits = &performance_limits; static struct perf_limits *limits = &powersave_limits; #endif -#if IS_ENABLED(CONFIG_ACPI) -/* - * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and - * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and - * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state - * ratio, out of it only high 8 bits are used. For example 0x1700 is setting - * target ratio 0x17. The _PSS control value stores in a format which can be - * directly written to PERF_CTL MSR. But in intel_pstate driver this shift - * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()). - * This function converts the _PSS control value to intel pstate driver format - * for comparison and assignment. 
- */ -static int convert_to_native_pstate_format(struct cpudata *cpu, int index) -{ - return cpu->acpi_perf_data.states[index].control >> 8; -} - -static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) -{ - struct cpudata *cpu; - int ret; - bool turbo_absent = false; - int max_pstate_index; - int min_pss_ctl, max_pss_ctl, turbo_pss_ctl; - int i; - - cpu = all_cpu_data[policy->cpu]; - - pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n", - cpu->pstate.min_pstate, cpu->pstate.max_pstate, - cpu->pstate.turbo_pstate); - - if (!cpu->acpi_perf_data.shared_cpu_map && - zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map, - GFP_KERNEL, cpu_to_node(policy->cpu))) { - return -ENOMEM; - } - - ret = acpi_processor_register_performance(&cpu->acpi_perf_data, - policy->cpu); - if (ret) - return ret; - - /* - * Check if the control value in _PSS is for PERF_CTL MSR, which should - * guarantee that the states returned by it map to the states in our - * list directly. - */ - if (cpu->acpi_perf_data.control_register.space_id != - ACPI_ADR_SPACE_FIXED_HARDWARE) - return -EIO; - - pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu); - for (i = 0; i < cpu->acpi_perf_data.state_count; i++) - pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", - (i == cpu->acpi_perf_data.state ? '*' : ' '), i, - (u32) cpu->acpi_perf_data.states[i].core_frequency, - (u32) cpu->acpi_perf_data.states[i].power, - (u32) cpu->acpi_perf_data.states[i].control); - - /* - * If there is only one entry _PSS, simply ignore _PSS and continue as - * usual without taking _PSS into account - */ - if (cpu->acpi_perf_data.state_count < 2) - return 0; - - turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); - min_pss_ctl = convert_to_native_pstate_format(cpu, - cpu->acpi_perf_data.state_count - 1); - /* Check if there is a turbo freq in _PSS */ - if (turbo_pss_ctl <= cpu->pstate.max_pstate && - turbo_pss_ctl > cpu->pstate.min_pstate) { - pr_debug("intel_pstate: no turbo range exists in _PSS\n"); - limits->no_turbo = limits->turbo_disabled = 1; - cpu->pstate.turbo_pstate = cpu->pstate.max_pstate; - turbo_absent = true; - } - - /* Check if the max non turbo p state < Intel P state max */ - max_pstate_index = turbo_absent ? 0 : 1; - max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index); - if (max_pss_ctl < cpu->pstate.max_pstate && - max_pss_ctl > cpu->pstate.min_pstate) - cpu->pstate.max_pstate = max_pss_ctl; - - /* check If min perf > Intel P State min */ - if (min_pss_ctl > cpu->pstate.min_pstate && - min_pss_ctl < cpu->pstate.max_pstate) { - cpu->pstate.min_pstate = min_pss_ctl; - policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling; - } - - if (turbo_absent) - policy->cpuinfo.max_freq = cpu->pstate.max_pstate * - cpu->pstate.scaling; - else { - policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * - cpu->pstate.scaling; - /* - * The _PSS table doesn't contain whole turbo frequency range. - * This just contains +1 MHZ above the max non turbo frequency, - * with control value corresponding to max turbo ratio. But - * when cpufreq set policy is called, it will call with this - * max frequency, which will cause a reduced performance as - * this driver uses real max turbo frequency as the max - * frequeny. So correct this frequency in _PSS table to - * correct max turbo frequency based on the turbo ratio. - * Also need to convert to MHz as _PSS freq is in MHz. 
- */ - cpu->acpi_perf_data.states[0].core_frequency = - turbo_pss_ctl * 100; - } - - pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n", - cpu->pstate.min_pstate, cpu->pstate.max_pstate, - cpu->pstate.turbo_pstate); - pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n", - policy->cpuinfo.max_freq, policy->cpuinfo.min_freq); - - return 0; -} - -static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) -{ - struct cpudata *cpu; - - if (!no_acpi_perf) - return 0; - - cpu = all_cpu_data[policy->cpu]; - acpi_processor_unregister_performance(policy->cpu); - return 0; -} - -#else -static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) -{ - return 0; -} - -static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) -{ - return 0; -} -#endif - static inline void pid_reset(struct _pid *pid, int setpoint, int busy, int deadband, int integral) { pid->setpoint = setpoint; @@ -1303,30 +1148,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; - if (!no_acpi_perf) - intel_pstate_init_perf_limits(policy); - /* - * If there is no acpi perf data or error, we ignore and use Intel P - * state calculated limits, So this is not fatal error. - */ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; cpumask_set_cpu(policy->cpu, policy->cpus); return 0; } -static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) -{ - return intel_pstate_exit_perf_limits(policy); -} - static struct cpufreq_driver intel_pstate_driver = { .flags = CPUFREQ_CONST_LOOPS, .verify = intel_pstate_verify_policy, .setpolicy = intel_pstate_set_policy, .get = intel_pstate_get, .init = intel_pstate_cpu_init, - .exit = intel_pstate_cpu_exit, .stop_cpu = intel_pstate_stop_cpu, .name = "intel_pstate", }; @@ -1368,6 +1201,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs) } #if IS_ENABLED(CONFIG_ACPI) +#include static bool intel_pstate_no_acpi_pss(void) { @@ -1563,9 +1397,6 @@ static int __init intel_pstate_setup(char *str) force_load = 1; if (!strcmp(str, "hwp_only")) hwp_only = 1; - if (!strcmp(str, "no_acpi")) - no_acpi_perf = 1; - return 0; } early_param("intel_pstate", intel_pstate_setup); -- cgit v1.2.3 From 938d21a2a6370241c86d515ca574aaaa9e8812f2 Mon Sep 17 00:00:00 2001 From: Philippe Longepe Date: Mon, 9 Nov 2015 17:40:46 -0800 Subject: cpufreq: intel_pstate: Replace BYT with ATOM Rename symbol and function names starting with "BYT" or "byt" to start with "ATOM" or "atom", respectively, so as to make it clear that they may apply to Atom in general and not just to Baytrail (the goal is to support several Atoms architectures eventually). This should not lead to any functional changes. Signed-off-by: Philippe Longepe Signed-off-by: Stephane Gasparini Acked-by: Srinivas Pandruvada [ rjw : Changelog ] Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/intel_pstate.c | 58 +++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ef05944d8b16..5f124e98f439 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -34,10 +34,10 @@ #include #include -#define BYT_RATIOS 0x66a -#define BYT_VIDS 0x66b -#define BYT_TURBO_RATIOS 0x66c -#define BYT_TURBO_VIDS 0x66d +#define ATOM_RATIOS 0x66a +#define ATOM_VIDS 0x66b +#define ATOM_TURBO_RATIOS 0x66c +#define ATOM_TURBO_VIDS 0x66d #define FRAC_BITS 8 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) @@ -528,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); } -static int byt_get_min_pstate(void) +static int atom_get_min_pstate(void) { u64 value; - rdmsrl(BYT_RATIOS, value); + rdmsrl(ATOM_RATIOS, value); return (value >> 8) & 0x7F; } -static int byt_get_max_pstate(void) +static int atom_get_max_pstate(void) { u64 value; - rdmsrl(BYT_RATIOS, value); + rdmsrl(ATOM_RATIOS, value); return (value >> 16) & 0x7F; } -static int byt_get_turbo_pstate(void) +static int atom_get_turbo_pstate(void) { u64 value; - rdmsrl(BYT_TURBO_RATIOS, value); + rdmsrl(ATOM_TURBO_RATIOS, value); return value & 0x7F; } -static void byt_set_pstate(struct cpudata *cpudata, int pstate) +static void atom_set_pstate(struct cpudata *cpudata, int pstate) { u64 val; int32_t vid_fp; @@ -577,10 +577,10 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); } -#define BYT_BCLK_FREQS 5 -static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800}; +#define ATOM_BCLK_FREQS 5 +static int atom_freq_table[ATOM_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800}; -static int byt_get_scaling(void) +static int atom_get_scaling(void) { u64 value; int i; @@ -588,16 +588,16 @@ static int byt_get_scaling(void) rdmsrl(MSR_FSB_FREQ, value); i = value & 0x3; - BUG_ON(i > BYT_BCLK_FREQS); + WARN_ON(i > ATOM_BCLK_FREQS); - return byt_freq_table[i] * 100; + return atom_freq_table[i] * 100; } -static void byt_get_vid(struct cpudata *cpudata) +static void atom_get_vid(struct cpudata *cpudata) { u64 value; - rdmsrl(BYT_VIDS, value); + rdmsrl(ATOM_VIDS, value); cpudata->vid.min = int_tofp((value >> 8) & 0x7f); cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( @@ -605,7 +605,7 @@ static void byt_get_vid(struct cpudata *cpudata) int_tofp(cpudata->pstate.max_pstate - cpudata->pstate.min_pstate)); - rdmsrl(BYT_TURBO_VIDS, value); + rdmsrl(ATOM_TURBO_VIDS, value); cpudata->vid.turbo = value & 0x7f; } @@ -726,7 +726,7 @@ static struct cpu_defaults core_params = { }, }; -static struct cpu_defaults byt_params = { +static struct cpu_defaults atom_params = { .pid_policy = { .sample_rate_ms = 10, .deadband = 0, @@ -736,13 +736,13 @@ static struct cpu_defaults byt_params = { .i_gain_pct = 4, }, .funcs = { - .get_max = byt_get_max_pstate, - .get_max_physical = byt_get_max_pstate, - .get_min = byt_get_min_pstate, - .get_turbo = byt_get_turbo_pstate, - .set = byt_set_pstate, - .get_scaling = byt_get_scaling, - .get_vid = byt_get_vid, + .get_max = atom_get_max_pstate, + .get_max_physical = atom_get_max_pstate, + .get_min = atom_get_min_pstate, + .get_turbo = atom_get_turbo_pstate, + .set = atom_set_pstate, + .get_scaling = atom_get_scaling, + .get_vid = atom_get_vid, }, }; @@ -983,7 +983,7 @@ static void 
intel_pstate_timer_func(unsigned long __data) static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x2a, core_params), ICPU(0x2d, core_params), - ICPU(0x37, byt_params), + ICPU(0x37, atom_params), ICPU(0x3a, core_params), ICPU(0x3c, core_params), ICPU(0x3d, core_params), @@ -992,7 +992,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x45, core_params), ICPU(0x46, core_params), ICPU(0x47, core_params), - ICPU(0x4c, byt_params), + ICPU(0x4c, atom_params), ICPU(0x4e, core_params), ICPU(0x4f, core_params), ICPU(0x5e, core_params), -- cgit v1.2.3 From 1421df63c3cf956c69c26ac9660c6e223eeed980 Mon Sep 17 00:00:00 2001 From: Philippe Longepe Date: Mon, 9 Nov 2015 17:40:47 -0800 Subject: cpufreq: intel_pstate: Add separate support for Airmont cores There are two flavors of Atom cores to be supported by intel_pstate, Silvermont and Airmont, so make the driver distinguish between them by adding separate frequency tables. Separate the CPU defaults params for each of them and match the CPU IDs against them as appropriate. Signed-off-by: Philippe Longepe Signed-off-by: Stephane Gasparini Acked-by: Srinivas Pandruvada [ rjw: Subject and changelog ] Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 57 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 46 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 5f124e98f439..001a532e342e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -577,20 +577,35 @@ static void atom_set_pstate(struct cpudata *cpudata, int pstate) wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); } -#define ATOM_BCLK_FREQS 5 -static int atom_freq_table[ATOM_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800}; - -static int atom_get_scaling(void) +static int silvermont_get_scaling(void) { u64 value; int i; + /* Defined in Table 35-6 from SDM (Sept 2015) */ + static int silvermont_freq_table[] = { + 83300, 100000, 133300, 116700, 80000}; rdmsrl(MSR_FSB_FREQ, value); - i = value & 0x3; + i = value & 0x7; + WARN_ON(i > 4); - WARN_ON(i > ATOM_BCLK_FREQS); + return silvermont_freq_table[i]; +} - return atom_freq_table[i] * 100; +static int airmont_get_scaling(void) +{ + u64 value; + int i; + /* Defined in Table 35-10 from SDM (Sept 2015) */ + static int airmont_freq_table[] = { + 83300, 100000, 133300, 116700, 80000, + 93300, 90000, 88900, 87500}; + + rdmsrl(MSR_FSB_FREQ, value); + i = value & 0xF; + WARN_ON(i > 8); + + return airmont_freq_table[i]; } static void atom_get_vid(struct cpudata *cpudata) @@ -726,7 +741,27 @@ static struct cpu_defaults core_params = { }, }; -static struct cpu_defaults atom_params = { +static struct cpu_defaults silvermont_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, + .setpoint = 60, + .p_gain_pct = 14, + .d_gain_pct = 0, + .i_gain_pct = 4, + }, + .funcs = { + .get_max = atom_get_max_pstate, + .get_max_physical = atom_get_max_pstate, + .get_min = atom_get_min_pstate, + .get_turbo = atom_get_turbo_pstate, + .set = atom_set_pstate, + .get_scaling = silvermont_get_scaling, + .get_vid = atom_get_vid, + }, +}; + +static struct cpu_defaults airmont_params = { .pid_policy = { .sample_rate_ms = 10, .deadband = 0, @@ -741,7 +776,7 @@ static struct cpu_defaults atom_params = { .get_min = atom_get_min_pstate, .get_turbo = atom_get_turbo_pstate, .set = atom_set_pstate, - .get_scaling = atom_get_scaling, + .get_scaling = airmont_get_scaling, .get_vid = atom_get_vid, }, }; @@ -983,7 
+1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x2a, core_params),
 	ICPU(0x2d, core_params),
-	ICPU(0x37, atom_params),
+	ICPU(0x37, silvermont_params),
 	ICPU(0x3a, core_params),
 	ICPU(0x3c, core_params),
 	ICPU(0x3d, core_params),
@@ -992,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x45, core_params),
 	ICPU(0x46, core_params),
 	ICPU(0x47, core_params),
-	ICPU(0x4c, atom_params),
+	ICPU(0x4c, airmont_params),
 	ICPU(0x4e, core_params),
 	ICPU(0x4f, core_params),
 	ICPU(0x5e, core_params),
--
cgit v1.2.3

From 2d4ee3036774e394d416cded9d5cf7661ffb4e4f Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Mon, 16 Nov 2015 22:27:55 +0100
Subject: cpufreq: mediatek: fix build error

The recently added mt8173 cpufreq driver relies on the cpu topology
that is always present on ARM64 but optional on ARM32:

drivers/cpufreq/mt8173-cpufreq.c: In function 'mtk_cpufreq_init':
drivers/cpufreq/mt8173-cpufreq.c:441:30: error: 'cpu_topology' undeclared (first use in this function)
  cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling);

This refines the Kconfig dependencies so that we can still build on
ARM32, but only if COMPILE_TEST is selected and the CPU topology code
is present.

Signed-off-by: Arnd Bergmann
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/Kconfig.arm | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers')

diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index cd0391e46c6d..c97d9ea97b48 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MT8173_CPUFREQ
 	bool "Mediatek MT8173 CPUFreq support"
 	depends on ARCH_MEDIATEK && REGULATOR
+	depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
 	depends on !CPU_THERMAL || THERMAL=y
 	select PM_OPP
 	help
--
cgit v1.2.3

From 768acd64d68b232e0d2b9623d9846457355f0c27 Mon Sep 17 00:00:00 2001
From: Suravee Suthikulpanit
Date: Wed, 18 Nov 2015 16:49:52 -0800
Subject: PCI: Fix OF logic in pci_dma_configure()

Fix a bug introduced by a previous commit, which incorrectly checks the
of_node of the end-point device. Instead, it should check the of_node
of the host bridge.

Fixes: 50230713b639 ("PCI: OF: Move of_pci_dma_configure() to pci_dma_configure()")
Reported-by: Robin Murphy
Signed-off-by: Suravee Suthikulpanit
Acked-by: Arnd Bergmann
Signed-off-by: Rafael J. Wysocki
---
 drivers/pci/probe.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers')

diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 064078e11017..dc8fbe5c09d3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1646,8 +1646,8 @@ static void pci_dma_configure(struct pci_dev *dev)
 {
 	struct device *bridge = pci_get_host_bridge_device(dev);

-	if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) {
-		if (bridge->parent)
+	if (IS_ENABLED(CONFIG_OF) &&
+	    bridge->parent && bridge->parent->of_node) {
 		of_dma_configure(&dev->dev, bridge->parent->of_node);
 	} else if (has_acpi_companion(bridge)) {
 		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
--
cgit v1.2.3
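For context, the decision made in pci_dma_configure() after this fix can be condensed as below. This is an illustrative sketch only: it assumes it lives inside drivers/pci (where pci_get_host_bridge_device() and pci_put_host_bridge_device() are available), and it elides the ACPI-side DMA setup, which lies outside the hunk quoted above.

#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/acpi.h>

static void pci_dma_configure_sketch(struct pci_dev *dev)
{
	struct device *bridge = pci_get_host_bridge_device(dev);

	if (IS_ENABLED(CONFIG_OF) &&
	    bridge->parent && bridge->parent->of_node) {
		/* The host bridge, not the PCI endpoint, carries the
		 * device-tree node that describes DMA capabilities.
		 */
		of_dma_configure(&dev->dev, bridge->parent->of_node);
	} else if (has_acpi_companion(bridge)) {
		/* ACPI-described host bridge: DMA setup is derived from
		 * its companion device (details elided here).
		 */
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);

		(void)adev;
	}

	pci_put_host_bridge_device(bridge);
}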