Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 452 |
1 file changed, 316 insertions, 136 deletions
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index fc459c9c00ff..9a515c460a00 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -36,6 +36,7 @@
 #define INTEL_PSTATE_SAMPLING_INTERVAL    (10 * NSEC_PER_MSEC)
 
 #define INTEL_CPUFREQ_TRANSITION_LATENCY      20000
+#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP    5000
 #define INTEL_CPUFREQ_TRANSITION_DELAY        500
 
 #ifdef CONFIG_ACPI
@@ -218,13 +219,13 @@ struct global_params {
  * @epp_policy:		Last saved policy used to set EPP/EPB
  * @epp_default:	Power on default HWP energy performance
  *			preference/bias
- * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
- *			operation
+ * @epp_cached		Cached HWP energy-performance preference value
  * @hwp_req_cached:	Cached value of the last HWP Request MSR
  * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
  * @last_io_update:	Last time when IO wake flag was set
  * @sched_flags:	Store scheduler flags for possible cross CPU update
  * @hwp_boost_min:	Last HWP boosted min performance
+ * @suspended:		Whether or not the driver has been suspended.
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -256,12 +257,13 @@ struct cpudata {
         s16 epp_powersave;
         s16 epp_policy;
         s16 epp_default;
-        s16 epp_saved;
+        s16 epp_cached;
         u64 hwp_req_cached;
         u64 hwp_cap_cached;
         u64 last_io_update;
         unsigned int sched_flags;
         u32 hwp_boost_min;
+        bool suspended;
 };
 
 static struct cpudata **all_cpu_data;
@@ -639,6 +641,32 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
         return index;
 }
 
+static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
+{
+        int ret;
+
+        /*
+         * Use the cached HWP Request MSR value, because in the active mode the
+         * register itself may be updated by intel_pstate_hwp_boost_up() or
+         * intel_pstate_hwp_boost_down() at any time.
+         */
+        u64 value = READ_ONCE(cpu->hwp_req_cached);
+
+        value &= ~GENMASK_ULL(31, 24);
+        value |= (u64)epp << 24;
+        /*
+         * The only other updater of hwp_req_cached in the active mode,
+         * intel_pstate_hwp_set(), is called under the same lock as this
+         * function, so it cannot run in parallel with the update below.
+         */
+        WRITE_ONCE(cpu->hwp_req_cached, value);
+        ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+        if (!ret)
+                cpu->epp_cached = epp;
+
+        return ret;
+}
+
 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
                                               int pref_index, bool use_raw,
                                               u32 raw_epp)
@@ -650,28 +678,20 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
                 epp = cpu_data->epp_default;
 
         if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
-                /*
-                 * Use the cached HWP Request MSR value, because the register
-                 * itself may be updated by intel_pstate_hwp_boost_up() or
-                 * intel_pstate_hwp_boost_down() at any time.
-                 */
-                u64 value = READ_ONCE(cpu_data->hwp_req_cached);
-
-                value &= ~GENMASK_ULL(31, 24);
-
                 if (use_raw)
                         epp = raw_epp;
                 else if (epp == -EINVAL)
                         epp = epp_values[pref_index - 1];
 
-                value |= (u64)epp << 24;
                 /*
-                 * The only other updater of hwp_req_cached in the active mode,
-                 * intel_pstate_hwp_set(), is called under the same lock as this
-                 * function, so it cannot run in parallel with the update below.
+                 * To avoid confusion, refuse to set EPP to any values different
+                 * from 0 (performance) if the current policy is "performance",
+                 * because those values would be overridden.
                  */
-                WRITE_ONCE(cpu_data->hwp_req_cached, value);
-                ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
+                if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+                        return -EBUSY;
+
+                ret = intel_pstate_set_epp(cpu_data, epp);
         } else {
                 if (epp == -EINVAL)
                         epp = (pref_index - 1) << 2;
@@ -697,10 +717,12 @@ static ssize_t show_energy_performance_available_preferences(
 
 cpufreq_freq_attr_ro(energy_performance_available_preferences);
 
+static struct cpufreq_driver intel_pstate;
+
 static ssize_t store_energy_performance_preference(
                 struct cpufreq_policy *policy, const char *buf, size_t count)
 {
-        struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+        struct cpudata *cpu = all_cpu_data[policy->cpu];
         char str_preference[21];
         bool raw = false;
         ssize_t ret;
@@ -725,15 +747,42 @@ static ssize_t store_energy_performance_preference(
                 raw = true;
         }
 
+        /*
+         * This function runs with the policy R/W semaphore held, which
+         * guarantees that the driver pointer will not change while it is
+         * running.
+         */
+        if (!intel_pstate_driver)
+                return -EAGAIN;
+
         mutex_lock(&intel_pstate_limits_lock);
 
-        ret = intel_pstate_set_energy_pref_index(cpu_data, ret, raw, epp);
-        if (!ret)
-                ret = count;
+        if (intel_pstate_driver == &intel_pstate) {
+                ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
+        } else {
+                /*
+                 * In the passive mode the governor needs to be stopped on the
+                 * target CPU before the EPP update and restarted after it,
+                 * which is super-heavy-weight, so make sure it is worth doing
+                 * upfront.
+                 */
+                if (!raw)
+                        epp = ret ? epp_values[ret - 1] : cpu->epp_default;
+
+                if (cpu->epp_cached != epp) {
+                        int err;
+
+                        cpufreq_stop_governor(policy);
+                        ret = intel_pstate_set_epp(cpu, epp);
+                        err = cpufreq_start_governor(policy);
+                        if (!ret)
+                                ret = err;
+                }
+        }
 
         mutex_unlock(&intel_pstate_limits_lock);
 
-        return ret;
+        return ret ?: count;
 }
 
 static ssize_t show_energy_performance_preference(
@@ -787,7 +836,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
 
         rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
         WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
-        if (global.no_turbo)
+        if (global.no_turbo || global.turbo_disabled)
                 *current_max = HWP_GUARANTEED_PERF(cap);
         else
                 *current_max = HWP_HIGHEST_PERF(cap);
@@ -821,12 +870,6 @@ static void intel_pstate_hwp_set(unsigned int cpu)
 
         cpu_data->epp_policy = cpu_data->policy;
 
-        if (cpu_data->epp_saved >= 0) {
-                epp = cpu_data->epp_saved;
-                cpu_data->epp_saved = -EINVAL;
-                goto update_epp;
-        }
-
         if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
                 epp = intel_pstate_get_epp(cpu_data, value);
                 cpu_data->epp_powersave = epp;
@@ -853,7 +896,6 @@ static void intel_pstate_hwp_set(unsigned int cpu)
                 epp = cpu_data->epp_powersave;
         }
 
-update_epp:
         if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                 value &= ~GENMASK_ULL(31, 24);
                 value |= (u64)epp << 24;
@@ -865,14 +907,24 @@ skip_epp:
         wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
 
-static void intel_pstate_hwp_force_min_perf(int cpu)
+static void intel_pstate_hwp_offline(struct cpudata *cpu)
 {
-        u64 value;
+        u64 value = READ_ONCE(cpu->hwp_req_cached);
         int min_perf;
 
-        value = all_cpu_data[cpu]->hwp_req_cached;
+        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+                /*
+                 * In case the EPP has been set to "performance" by the
+                 * active mode "performance" scaling algorithm, replace that
+                 * temporary value with the cached EPP one.
+                 */
+                value &= ~GENMASK_ULL(31, 24);
+                value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+                WRITE_ONCE(cpu->hwp_req_cached, value);
+        }
+
         value &= ~GENMASK_ULL(31, 0);
-        min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached);
+        min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached);
 
         /* Set hwp_max = hwp_min */
         value |= HWP_MAX_PERF(min_perf);
@@ -882,19 +934,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
         if (boot_cpu_has(X86_FEATURE_HWP_EPP))
                 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
 
-        wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
-}
-
-static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
-{
-        struct cpudata *cpu_data = all_cpu_data[policy->cpu];
-
-        if (!hwp_active)
-                return 0;
-
-        cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
-
-        return 0;
+        wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
 }
 
 #define POWER_CTL_EE_ENABLE    1
@@ -921,8 +961,28 @@ static void set_power_ctl_ee_state(bool input)
 
 static void intel_pstate_hwp_enable(struct cpudata *cpudata);
 
+static void intel_pstate_hwp_reenable(struct cpudata *cpu)
+{
+        intel_pstate_hwp_enable(cpu);
+        wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+}
+
+static int intel_pstate_suspend(struct cpufreq_policy *policy)
+{
+        struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+        pr_debug("CPU %d suspending\n", cpu->cpu);
+
+        cpu->suspended = true;
+
+        return 0;
+}
+
 static int intel_pstate_resume(struct cpufreq_policy *policy)
 {
+        struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+        pr_debug("CPU %d resuming\n", cpu->cpu);
 
         /* Only restore if the system default is changed */
         if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
@@ -930,18 +990,16 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
         else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
                 set_power_ctl_ee_state(false);
 
-        if (!hwp_active)
-                return 0;
+        if (cpu->suspended && hwp_active) {
+                mutex_lock(&intel_pstate_limits_lock);
 
-        mutex_lock(&intel_pstate_limits_lock);
-
-        if (policy->cpu == 0)
-                intel_pstate_hwp_enable(all_cpu_data[policy->cpu]);
+                /* Re-enable HWP, because "online" has not done that. */
+                intel_pstate_hwp_reenable(cpu);
 
-        all_cpu_data[policy->cpu]->epp_policy = 0;
-        intel_pstate_hwp_set(policy->cpu);
+                mutex_unlock(&intel_pstate_limits_lock);
+        }
 
-        mutex_unlock(&intel_pstate_limits_lock);
+        cpu->suspended = false;
 
         return 0;
 }
@@ -1145,8 +1203,6 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
         return count;
 }
 
-static struct cpufreq_driver intel_pstate;
-
 static void update_qos_request(enum freq_qos_req_type type)
 {
         int max_state, turbo_max, freq, i, perf_pct;
@@ -1330,9 +1386,10 @@ static const struct attribute_group intel_pstate_attr_group = {
 
 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
 
+static struct kobject *intel_pstate_kobject;
+
 static void __init intel_pstate_sysfs_expose_params(void)
 {
-        struct kobject *intel_pstate_kobject;
         int rc;
 
         intel_pstate_kobject = kobject_create_and_add("intel_pstate",
@@ -1357,17 +1414,31 @@ static void __init intel_pstate_sysfs_expose_params(void)
         rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
         WARN_ON(rc);
 
-        if (hwp_active) {
-                rc = sysfs_create_file(intel_pstate_kobject,
-                                       &hwp_dynamic_boost.attr);
-                WARN_ON(rc);
-        }
-
         if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
                 rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
                 WARN_ON(rc);
         }
 }
+
+static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
+{
+        int rc;
+
+        if (!hwp_active)
+                return;
+
+        rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
+        WARN_ON_ONCE(rc);
+}
+
+static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
+{
+        if (!hwp_active)
+                return;
+
+        sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
+}
+
 
 /************************** sysfs end ************************/
 
@@ -1377,7 +1448,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
                 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
         wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
-        cpudata->epp_policy = 0;
         if (cpudata->epp_default == -EINVAL)
                 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
@@ -2046,25 +2116,31 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
                 all_cpu_data[cpunum] = cpu;
 
-                cpu->epp_default = -EINVAL;
-                cpu->epp_powersave = -EINVAL;
-                cpu->epp_saved = -EINVAL;
-        }
+                cpu->cpu = cpunum;
 
-        cpu = all_cpu_data[cpunum];
-
-        cpu->cpu = cpunum;
+                cpu->epp_default = -EINVAL;
 
-        if (hwp_active) {
-                const struct x86_cpu_id *id;
+                if (hwp_active) {
+                        const struct x86_cpu_id *id;
 
-                intel_pstate_hwp_enable(cpu);
+                        intel_pstate_hwp_enable(cpu);
 
-                id = x86_match_cpu(intel_pstate_hwp_boost_ids);
-                if (id && intel_pstate_acpi_pm_profile_server())
-                        hwp_boost = true;
+                        id = x86_match_cpu(intel_pstate_hwp_boost_ids);
+                        if (id && intel_pstate_acpi_pm_profile_server())
+                                hwp_boost = true;
+                }
+        } else if (hwp_active) {
+                /*
+                 * Re-enable HWP in case this happens after a resume from ACPI
+                 * S3 if the CPU was offline during the whole system/resume
+                 * cycle.
+                 */
+                intel_pstate_hwp_reenable(cpu);
         }
 
+        cpu->epp_powersave = -EINVAL;
+        cpu->epp_policy = 0;
+
         intel_pstate_get_cpu_pstates(cpu);
 
         pr_debug("controlling: cpu %d\n", cpunum);
@@ -2245,27 +2321,61 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
         return 0;
 }
 
-static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
 {
-        intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
+        struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+        pr_debug("CPU %d going offline\n", cpu->cpu);
+
+        if (cpu->suspended)
+                return 0;
+
+        /*
+         * If the CPU is an SMT thread and it goes offline with the performance
+         * settings different from the minimum, it will prevent its sibling
+         * from getting to lower performance levels, so force the minimum
+         * performance on CPU offline to prevent that from happening.
+         */
+        if (hwp_active)
+                intel_pstate_hwp_offline(cpu);
+        else
+                intel_pstate_set_min_pstate(cpu);
+
+        intel_pstate_exit_perf_limits(policy);
+
+        return 0;
 }
 
-static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
 {
-        pr_debug("CPU %d exiting\n", policy->cpu);
+        struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+        pr_debug("CPU %d going online\n", cpu->cpu);
+
+        intel_pstate_init_acpi_perf_limits(policy);
 
-        intel_pstate_clear_update_util_hook(policy->cpu);
         if (hwp_active) {
-                intel_pstate_hwp_save_state(policy);
-                intel_pstate_hwp_force_min_perf(policy->cpu);
-        } else {
-                intel_cpufreq_stop_cpu(policy);
+                /*
+                 * Re-enable HWP and clear the "suspended" flag to let "resume"
+                 * know that it need not do that.
+                 */
+                intel_pstate_hwp_reenable(cpu);
+                cpu->suspended = false;
         }
+
+        return 0;
+}
+
+static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
+{
+        pr_debug("CPU %d stopping\n", policy->cpu);
+
+        intel_pstate_clear_update_util_hook(policy->cpu);
 }
 
 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
 {
-        intel_pstate_exit_perf_limits(policy);
+        pr_debug("CPU %d exiting\n", policy->cpu);
 
         policy->fast_switch_possible = false;
 
@@ -2326,6 +2436,12 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
          */
         policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
+        if (hwp_active) {
+                struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+                cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
+        }
+
         return 0;
 }
 
@@ -2333,11 +2449,13 @@ static struct cpufreq_driver intel_pstate = {
         .flags          = CPUFREQ_CONST_LOOPS,
         .verify         = intel_pstate_verify_policy,
         .setpolicy      = intel_pstate_set_policy,
-        .suspend        = intel_pstate_hwp_save_state,
+        .suspend        = intel_pstate_suspend,
         .resume         = intel_pstate_resume,
         .init           = intel_pstate_cpu_init,
         .exit           = intel_pstate_cpu_exit,
         .stop_cpu       = intel_pstate_stop_cpu,
+        .offline        = intel_pstate_cpu_offline,
+        .online         = intel_pstate_cpu_online,
         .update_limits  = intel_pstate_update_limits,
         .name           = "intel_pstate",
 };
@@ -2390,13 +2508,71 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
                 fp_toint(cpu->iowait_boost * 100));
 }
 
+static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
+                                     bool fast_switch)
+{
+        u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
+
+        value &= ~HWP_MIN_PERF(~0L);
+        value |= HWP_MIN_PERF(target_pstate);
+
+        /*
+         * The entire MSR needs to be updated in order to update the HWP min
+         * field in it, so opportunistically update the max too if needed.
+         */
+        value &= ~HWP_MAX_PERF(~0L);
+        value |= HWP_MAX_PERF(cpu->max_perf_ratio);
+
+        if (value == prev)
+                return;
+
+        WRITE_ONCE(cpu->hwp_req_cached, value);
+        if (fast_switch)
+                wrmsrl(MSR_HWP_REQUEST, value);
+        else
+                wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+}
+
+static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
+                                          u32 target_pstate, bool fast_switch)
+{
+        if (fast_switch)
+                wrmsrl(MSR_IA32_PERF_CTL,
+                       pstate_funcs.get_val(cpu, target_pstate));
+        else
+                wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+                              pstate_funcs.get_val(cpu, target_pstate));
+}
+
+static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
+                                       bool fast_switch)
+{
+        int old_pstate = cpu->pstate.current_pstate;
+
+        target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+        if (target_pstate != old_pstate) {
+                cpu->pstate.current_pstate = target_pstate;
+                if (hwp_active)
+                        intel_cpufreq_adjust_hwp(cpu, target_pstate,
+                                                 fast_switch);
+                else
+                        intel_cpufreq_adjust_perf_ctl(cpu, target_pstate,
+                                                      fast_switch);
+        }
+
+        intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
+                            INTEL_PSTATE_TRACE_TARGET, old_pstate);
+
+        return target_pstate;
+}
+
 static int intel_cpufreq_target(struct cpufreq_policy *policy,
                                 unsigned int target_freq,
                                 unsigned int relation)
 {
         struct cpudata *cpu = all_cpu_data[policy->cpu];
         struct cpufreq_freqs freqs;
-        int target_pstate, old_pstate;
+        int target_pstate;
 
         update_turbo_state();
 
@@ -2404,6 +2580,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
         freqs.new = target_freq;
 
         cpufreq_freq_transition_begin(policy, &freqs);
+
         switch (relation) {
         case CPUFREQ_RELATION_L:
                 target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
@@ -2415,15 +2592,11 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
                 target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
                 break;
         }
-        target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-        old_pstate = cpu->pstate.current_pstate;
-        if (target_pstate != cpu->pstate.current_pstate) {
-                cpu->pstate.current_pstate = target_pstate;
-                wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
-                              pstate_funcs.get_val(cpu, target_pstate));
-        }
+
+        target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
+
         freqs.new = target_pstate * cpu->pstate.scaling;
-        intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate);
+
         cpufreq_freq_transition_end(policy, &freqs, false);
 
         return 0;
@@ -2433,15 +2606,14 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
                                               unsigned int target_freq)
 {
         struct cpudata *cpu = all_cpu_data[policy->cpu];
-        int target_pstate, old_pstate;
+        int target_pstate;
 
         update_turbo_state();
 
         target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
-        target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-        old_pstate = cpu->pstate.current_pstate;
-        intel_pstate_update_pstate(cpu, target_pstate);
-        intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+
+        target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
+
         return target_pstate * cpu->pstate.scaling;
 }
 
@@ -2461,7 +2633,6 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
                 return ret;
 
         policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
-        policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
         /* This reflects the intel_pstate_get_cpu_pstates() setting. */
         policy->cur = policy->cpuinfo.min_freq;
 
@@ -2473,10 +2644,18 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
         cpu = all_cpu_data[policy->cpu];
 
-        if (hwp_active)
+        if (hwp_active) {
+                u64 value;
+
                 intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
-        else
+                policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
+                rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+                WRITE_ONCE(cpu->hwp_req_cached, value);
+                cpu->epp_cached = intel_pstate_get_epp(cpu, value);
+        } else {
                 turbo_max = cpu->pstate.turbo_pstate;
+                policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
+        }
 
         min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
         min_freq *= cpu->pstate.scaling;
@@ -2531,7 +2710,10 @@ static struct cpufreq_driver intel_cpufreq = {
         .fast_switch    = intel_cpufreq_fast_switch,
         .init           = intel_cpufreq_cpu_init,
         .exit           = intel_cpufreq_cpu_exit,
-        .stop_cpu       = intel_cpufreq_stop_cpu,
+        .offline        = intel_pstate_cpu_offline,
+        .online         = intel_pstate_cpu_online,
+        .suspend        = intel_pstate_suspend,
+        .resume         = intel_pstate_resume,
         .update_limits  = intel_pstate_update_limits,
         .name           = "intel_cpufreq",
 };
@@ -2553,6 +2735,7 @@ static void intel_pstate_driver_cleanup(void)
                 }
         }
         put_online_cpus();
+
         intel_pstate_driver = NULL;
 }
 
@@ -2560,6 +2743,9 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 {
         int ret;
 
+        if (driver == &intel_pstate)
+                intel_pstate_sysfs_expose_hwp_dynamic_boost();
+
         memset(&global, 0, sizeof(global));
 
         global.max_perf_pct = 100;
@@ -2575,17 +2761,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
         return 0;
 }
 
-static int intel_pstate_unregister_driver(void)
-{
-        if (hwp_active)
-                return -EBUSY;
-
-        cpufreq_unregister_driver(intel_pstate_driver);
-        intel_pstate_driver_cleanup();
-
-        return 0;
-}
-
 static ssize_t intel_pstate_show_status(char *buf)
 {
         if (!intel_pstate_driver)
@@ -2597,20 +2772,24 @@ static ssize_t intel_pstate_show_status(char *buf)
 
 static int intel_pstate_update_status(const char *buf, size_t size)
 {
-        int ret;
+        if (size == 3 && !strncmp(buf, "off", size)) {
+                if (!intel_pstate_driver)
+                        return -EINVAL;
 
-        if (size == 3 && !strncmp(buf, "off", size))
-                return intel_pstate_driver ?
-                        intel_pstate_unregister_driver() : -EINVAL;
+                if (hwp_active)
+                        return -EBUSY;
+
+                cpufreq_unregister_driver(intel_pstate_driver);
+                intel_pstate_driver_cleanup();
+                return 0;
+        }
 
         if (size == 6 && !strncmp(buf, "active", size)) {
                 if (intel_pstate_driver) {
                         if (intel_pstate_driver == &intel_pstate)
                                 return 0;
 
-                        ret = intel_pstate_unregister_driver();
-                        if (ret)
-                                return ret;
+                        cpufreq_unregister_driver(intel_pstate_driver);
                 }
 
                 return intel_pstate_register_driver(&intel_pstate);
@@ -2621,9 +2800,8 @@ static int intel_pstate_update_status(const char *buf, size_t size)
                         if (intel_pstate_driver == &intel_cpufreq)
                                 return 0;
 
-                        ret = intel_pstate_unregister_driver();
-                        if (ret)
-                                return ret;
+                        cpufreq_unregister_driver(intel_pstate_driver);
+                        intel_pstate_sysfs_hide_hwp_dynamic_boost();
                 }
 
                 return intel_pstate_register_driver(&intel_cpufreq);
@@ -2835,7 +3013,10 @@ static int __init intel_pstate_init(void)
                         hwp_active++;
                         hwp_mode_bdw = id->driver_data;
                         intel_pstate.attr = hwp_cpufreq_attrs;
-                        default_driver = &intel_pstate;
+                        intel_cpufreq.attr = hwp_cpufreq_attrs;
+                        if (!default_driver)
+                                default_driver = &intel_pstate;
+
                         goto hwp_cpu_matched;
                 }
         } else {
@@ -2906,14 +3087,13 @@ static int __init intel_pstate_setup(char *str)
         if (!str)
                 return -EINVAL;
 
-        if (!strcmp(str, "disable")) {
+        if (!strcmp(str, "disable"))
                 no_load = 1;
-        } else if (!strcmp(str, "active")) {
+        else if (!strcmp(str, "active"))
                 default_driver = &intel_pstate;
-        } else if (!strcmp(str, "passive")) {
+        else if (!strcmp(str, "passive"))
                 default_driver = &intel_cpufreq;
-                no_hwp = 1;
-        }
+
         if (!strcmp(str, "no_hwp")) {
                 pr_info("HWP disabled\n");
                 no_hwp = 1;
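Note on the MSR manipulation seen throughout the hunks above: the driver keeps a cached image of MSR_HWP_REQUEST (hwp_req_cached), rewrites individual fields in that image, and then pushes the whole value with a single wrmsrl()/wrmsrl_on_cpu(). Per the HWP register layout, the minimum performance level sits in bits 7:0, the maximum in bits 15:8, and the energy-performance preference (EPP) in bits 31:24. The sketch below is not part of the patch; it is a minimal userspace-style C illustration of that read-modify-write pattern on a plain 64-bit value, with hypothetical helper names (the kernel code itself uses GENMASK_ULL(), HWP_MIN_PERF()/HWP_MAX_PERF() and wrmsrl_on_cpu()).

/*
 * Illustration only -- not part of the patch above.  Mimics the
 * read-modify-write pattern applied to the cached MSR_HWP_REQUEST value.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t hwp_req_set_epp(uint64_t req, uint8_t epp)
{
        req &= ~(0xffULL << 24);        /* clear the EPP field, bits 31:24 */
        req |= (uint64_t)epp << 24;     /* insert the new preference */
        return req;
}

static uint64_t hwp_req_set_min_max(uint64_t req, uint8_t min, uint8_t max)
{
        req &= ~0xffffULL;              /* clear min (bits 7:0) and max (bits 15:8) */
        req |= min | ((uint64_t)max << 8);
        return req;
}

int main(void)
{
        uint64_t req = 0;

        req = hwp_req_set_min_max(req, 10, 40); /* hypothetical P-state range */
        req = hwp_req_set_epp(req, 128);        /* mid-range preference value */
        printf("HWP_REQUEST image: %#018llx\n", (unsigned long long)req);
        return 0;
}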