Diffstat (limited to 'drivers/acpi')
-rw-r--r--   drivers/acpi/ec.c                    | 64
-rw-r--r--   drivers/acpi/processor_throttling.c  | 69
-rw-r--r--   drivers/acpi/resource.c              | 10
3 files changed, 106 insertions(+), 37 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 959d41acc108..d7d32c28829b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -67,6 +67,8 @@ enum ec_command {
 #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
+                               * when trying to clear the EC */
 
 enum {
         EC_FLAGS_QUERY_PENDING, /* Query is pending */
@@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec);
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+        int i, status;
+        u8 value = 0;
+
+        for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+                status = acpi_ec_query_unlocked(ec, &value);
+                if (status || !value)
+                        break;
+        }
+
+        if (unlikely(i == ACPI_EC_CLEAR_MAX))
+                pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+        else
+                pr_info("%d stale EC events cleared\n", i);
+}
+
 void acpi_ec_block_transactions(void)
 {
         struct acpi_ec *ec = first_ec;
@@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void)
         mutex_lock(&ec->mutex);
         /* Allow transactions to be carried out again */
         clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+        if (EC_FLAGS_CLEAR_ON_RESUME)
+                acpi_ec_clear(ec);
+
         mutex_unlock(&ec->mutex);
 }
 
@@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device)
 
         /* EC is fully operational, allow queries */
         clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+        /* Clear stale _Q events if hardware might require that */
+        if (EC_FLAGS_CLEAR_ON_RESUME) {
+                mutex_lock(&ec->mutex);
+                acpi_ec_clear(ec);
+                mutex_unlock(&ec->mutex);
+        }
         return ret;
 }
 
@@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
         return 0;
 }
 
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+        pr_debug("Detected system needing EC poll on resume.\n");
+        EC_FLAGS_CLEAR_ON_RESUME = 1;
+        return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
         {
         ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
         ec_validate_ecdt, "ASUS hardware", {
         DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
         DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+        {
+        ec_clear_on_resume, "Samsung hardware", {
+        DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
         {},
 };
 
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 28baa05b8018..84243c32e29c 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -56,6 +56,12 @@ struct throttling_tstate {
         int target_state; /* target T-state */
 };
 
+struct acpi_processor_throttling_arg {
+        struct acpi_processor *pr;
+        int target_state;
+        bool force;
+};
+
 #define THROTTLING_PRECHANGE      (1)
 #define THROTTLING_POSTCHANGE     (2)
 
@@ -1060,16 +1066,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
         return 0;
 }
 
+static long acpi_processor_throttling_fn(void *data)
+{
+        struct acpi_processor_throttling_arg *arg = data;
+        struct acpi_processor *pr = arg->pr;
+
+        return pr->throttling.acpi_processor_set_throttling(pr,
+                        arg->target_state, arg->force);
+}
+
 int acpi_processor_set_throttling(struct acpi_processor *pr,
                                                 int state, bool force)
 {
-        cpumask_var_t saved_mask;
         int ret = 0;
         unsigned int i;
         struct acpi_processor *match_pr;
         struct acpi_processor_throttling *p_throttling;
+        struct acpi_processor_throttling_arg arg;
         struct throttling_tstate t_state;
-        cpumask_var_t online_throttling_cpus;
 
         if (!pr)
                 return -EINVAL;
@@ -1080,14 +1094,6 @@
         if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                 return -EINVAL;
 
-        if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
-                return -ENOMEM;
-
-        if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
-                free_cpumask_var(saved_mask);
-                return -ENOMEM;
-        }
-
         if (cpu_is_offline(pr->id)) {
                 /*
                  * the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1096,17 +1102,15 @@
                 return -ENODEV;
         }
 
-        cpumask_copy(saved_mask, &current->cpus_allowed);
         t_state.target_state = state;
         p_throttling = &(pr->throttling);
-        cpumask_and(online_throttling_cpus, cpu_online_mask,
-                    p_throttling->shared_cpu_map);
+
         /*
          * The throttling notifier will be called for every
          * affected cpu in order to get one proper T-state.
          * The notifier event is THROTTLING_PRECHANGE.
          */
-        for_each_cpu(i, online_throttling_cpus) {
+        for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
                 t_state.cpu = i;
                 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
                                                         &t_state);
@@ -1118,21 +1122,18 @@
          * it can be called only for the cpu pointed by pr.
          */
         if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-                /* FIXME: use work_on_cpu() */
-                if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
-                        /* Can't migrate to the pr->id CPU. Exit */
-                        ret = -ENODEV;
-                        goto exit;
-                }
-                ret = p_throttling->acpi_processor_set_throttling(pr,
-                                                t_state.target_state, force);
+                arg.pr = pr;
+                arg.target_state = state;
+                arg.force = force;
+                ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
         } else {
                 /*
                  * When the T-state coordination is SW_ALL or HW_ALL,
                  * it is necessary to set T-state for every affected
                  * cpus.
                  */
-                for_each_cpu(i, online_throttling_cpus) {
+                for_each_cpu_and(i, cpu_online_mask,
+                    p_throttling->shared_cpu_map) {
                         match_pr = per_cpu(processors, i);
                         /*
                          * If the pointer is invalid, we will report the
@@ -1153,13 +1154,12 @@
                                            "on CPU %d\n", i));
                                 continue;
                         }
-                        t_state.cpu = i;
-                        /* FIXME: use work_on_cpu() */
-                        if (set_cpus_allowed_ptr(current, cpumask_of(i)))
-                                continue;
-                        ret = match_pr->throttling.
-                                acpi_processor_set_throttling(
-                                match_pr, t_state.target_state, force);
+
+                        arg.pr = match_pr;
+                        arg.target_state = state;
+                        arg.force = force;
+                        ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+                                &arg);
                 }
         }
         /*
@@ -1168,17 +1168,12 @@
          * affected cpu to update the T-states.
          * The notifier event is THROTTLING_POSTCHANGE
          */
-        for_each_cpu(i, online_throttling_cpus) {
+        for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
                 t_state.cpu = i;
                 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
                                                         &t_state);
         }
-        /* restore the previous state */
-        /* FIXME: use work_on_cpu() */
-        set_cpus_allowed_ptr(current, saved_mask);
-exit:
-        free_cpumask_var(online_throttling_cpus);
-        free_cpumask_var(saved_mask);
+
         return ret;
 }
 
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b7201fc6f1e1..0bdacc5e26a3 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
         switch (ares->type) {
         case ACPI_RESOURCE_TYPE_MEMORY24:
                 memory24 = &ares->data.memory24;
+                if (!memory24->address_length)
+                        return false;
                 acpi_dev_get_memresource(res, memory24->minimum,
                                          memory24->address_length,
                                          memory24->write_protect);
                 break;
         case ACPI_RESOURCE_TYPE_MEMORY32:
                 memory32 = &ares->data.memory32;
+                if (!memory32->address_length)
+                        return false;
                 acpi_dev_get_memresource(res, memory32->minimum,
                                          memory32->address_length,
                                          memory32->write_protect);
                 break;
         case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                 fixed_memory32 = &ares->data.fixed_memory32;
+                if (!fixed_memory32->address_length)
+                        return false;
                 acpi_dev_get_memresource(res, fixed_memory32->address,
                                          fixed_memory32->address_length,
                                          fixed_memory32->write_protect);
@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
         switch (ares->type) {
         case ACPI_RESOURCE_TYPE_IO:
                 io = &ares->data.io;
+                if (!io->address_length)
+                        return false;
                 acpi_dev_get_ioresource(res, io->minimum,
                                         io->address_length,
                                         io->io_decode);
                 break;
         case ACPI_RESOURCE_TYPE_FIXED_IO:
                 fixed_io = &ares->data.fixed_io;
+                if (!fixed_io->address_length)
+                        return false;
                 acpi_dev_get_ioresource(res, fixed_io->address,
                                         fixed_io->address_length,
                                         ACPI_DECODE_10);
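Note on the ec.c change: the new ec_dmi_table entry relies on the kernel's standard DMI quirk machinery. During EC init, dmi_check_system() walks the table and invokes the callback of every entry whose DMI_MATCH strings all appear in the firmware's DMI data, which is how EC_FLAGS_CLEAR_ON_RESUME gets set on the affected Samsung machines. A minimal stand-alone sketch of that pattern, with hypothetical quirk_cb/quirk_table/apply_quirks names (struct dmi_system_id, DMI_MATCH and dmi_check_system() are the real interfaces from <linux/dmi.h>):

#include <linux/dmi.h>
#include <linux/printk.h>

static int quirk_cb(const struct dmi_system_id *id)
{
        pr_info("applying quirk for %s\n", id->ident);
        return 0;       /* 0: keep scanning so later entries can match too */
}

static const struct dmi_system_id quirk_table[] = {
        {
                .callback = quirk_cb,
                .ident = "Samsung hardware",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR,
                                  "SAMSUNG ELECTRONICS CO., LTD."),
                },
        },
        {}      /* empty entry terminates the table */
};

static void apply_quirks(void)
{
        /* Returns the number of entries that matched. */
        dmi_check_system(quirk_table);
}

Matching on DMI_SYS_VENDOR alone, as the patch does, deliberately catches every Samsung model, in line with the comment that models beyond the 530U/535U/540U/550P/900X series are likely affected.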
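Note on the processor_throttling.c change: work_on_cpu() queues the callback on the target CPU's bound worker thread and blocks until it returns, so the caller no longer has to migrate itself with set_cpus_allowed_ptr() and restore its affinity afterwards; that is exactly the saved_mask/goto exit bookkeeping the patch deletes. A minimal sketch of the pattern, assuming a throwaway test module (demo_arg, demo_fn and demo_init are hypothetical names; work_on_cpu() is the real API from <linux/workqueue.h>):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

struct demo_arg {               /* mirrors acpi_processor_throttling_arg */
        int target_state;
};

static long demo_fn(void *data)
{
        struct demo_arg *arg = data;

        /* Executes on the CPU handed to work_on_cpu(), not the caller's. */
        pr_info("demo: state %d applied on CPU %d\n",
                arg->target_state, smp_processor_id());
        return 0;
}

static int __init demo_init(void)
{
        struct demo_arg arg = { .target_state = 1 };

        /* Queue demo_fn() on CPU 0's worker and wait for its return value. */
        return (int)work_on_cpu(0, demo_fn, &arg);
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because work_on_cpu() sleeps, it is only usable from process context, which acpi_processor_set_throttling() already guarantees.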