Diffstat (limited to 'drivers/acpi')
-rw-r--r--  drivers/acpi/Kconfig                 |   3
-rw-r--r--  drivers/acpi/Makefile                |   1
-rw-r--r--  drivers/acpi/acpi_lpss.c             |   2
-rw-r--r--  drivers/acpi/acpi_platform.c         |   2
-rw-r--r--  drivers/acpi/acpi_video.c            |   5
-rw-r--r--  drivers/acpi/acpica/dbnames.c        |   1
-rw-r--r--  drivers/acpi/acpica/dbobject.c       |  23
-rw-r--r--  drivers/acpi/acpica/dsdebug.c        |  12
-rw-r--r--  drivers/acpi/acpica/exconfig.c       |  11
-rw-r--r--  drivers/acpi/acpica/nsdump.c         |   3
-rw-r--r--  drivers/acpi/acpica/psloop.c         |  51
-rw-r--r--  drivers/acpi/acpica/psobject.c       |  30
-rw-r--r--  drivers/acpi/acpica/pswalk.c         |  34
-rw-r--r--  drivers/acpi/acpica/uterror.c        |  10
-rw-r--r--  drivers/acpi/acpica/utosi.c          |   1
-rw-r--r--  drivers/acpi/apei/erst.c             |   3
-rw-r--r--  drivers/acpi/apei/ghes.c             |  16
-rw-r--r--  drivers/acpi/apei/hest.c             |   3
-rw-r--r--  drivers/acpi/fan.c                   |   4
-rw-r--r--  drivers/acpi/nfit/core.c             |  18
-rw-r--r--  drivers/acpi/pci_root.c              |  17
-rw-r--r--  drivers/acpi/pptt.c                  | 655
-rw-r--r--  drivers/acpi/processor_perflib.c     |   5
-rw-r--r--  drivers/acpi/processor_throttling.c  |   5
-rw-r--r--  drivers/acpi/sysfs.c                 |   6
-rw-r--r--  drivers/acpi/tables.c                |   2
26 files changed, 870 insertions, 53 deletions
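A recurring pattern in the hunks below is the conversion of open-coded "count * sizeof(...)" allocations to the overflow-checked helpers kcalloc(), kmalloc_array(), kvmalloc_array() and devm_kcalloc(). A minimal sketch of the idea follows; the wrapper function and its name are hypothetical and not part of this series:

#include <linux/ioport.h>
#include <linux/slab.h>

/*
 * Illustrative only -- not part of this series.  'count' stands in for any
 * firmware-provided element count.  kcalloc() checks the count * size
 * multiplication for overflow and returns NULL instead of a short buffer,
 * whereas the old kzalloc(count * sizeof(...)) form could silently wrap.
 * kmalloc_array()/kvmalloc_array() are the non-zeroing equivalents used in
 * other hunks of this diff.
 */
static struct resource *alloc_fw_resources(size_t count)
{
	return kcalloc(count, sizeof(struct resource), GFP_KERNEL);
}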
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 516d7b36d6fb..b533eeb6139d 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -547,6 +547,9 @@ config ACPI_CONFIGFS if ARM64 source "drivers/acpi/arm64/Kconfig" + +config ACPI_PPTT + bool endif config TPS68470_PMIC_OPREGION diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 48e202752754..6d59aa109a91 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -88,6 +88,7 @@ obj-$(CONFIG_ACPI_BGRT) += bgrt.o obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o obj-$(CONFIG_ACPI_SPCR_TABLE) += spcr.o obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o +obj-$(CONFIG_ACPI_PPTT) += pptt.o # processor has its own "processor." module_param namespace processor-y := processor_driver.o diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index cb6ac5c65c2e..38a286975c31 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -233,11 +233,13 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = { static const struct lpss_device_desc byt_pwm_dev_desc = { .flags = LPSS_SAVE_CTX, + .prv_offset = 0x800, .setup = byt_pwm_setup, }; static const struct lpss_device_desc bsw_pwm_dev_desc = { .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .prv_offset = 0x800, .setup = bsw_pwm_setup, }; diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 88cd949003f3..eaa60c94205a 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -82,7 +82,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, if (count < 0) { return NULL; } else if (count > 0) { - resources = kzalloc(count * sizeof(struct resource), + resources = kcalloc(count, sizeof(struct resource), GFP_KERNEL); if (!resources) { dev_err(&adev->dev, "No memory for resources\n"); diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 2f2e737be0f8..f0b52266b3ac 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -832,8 +832,9 @@ int acpi_video_get_levels(struct acpi_device *device, * in order to account for buggy BIOS which don't export the first two * special levels (see below) */ - br->levels = kmalloc((obj->package.count + ACPI_VIDEO_FIRST_LEVEL) * - sizeof(*br->levels), GFP_KERNEL); + br->levels = kmalloc_array(obj->package.count + ACPI_VIDEO_FIRST_LEVEL, + sizeof(*br->levels), + GFP_KERNEL); if (!br->levels) { result = -ENOMEM; goto out_free; diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c index dc94de91033e..992bd7b92540 100644 --- a/drivers/acpi/acpica/dbnames.c +++ b/drivers/acpi/acpica/dbnames.c @@ -322,6 +322,7 @@ acpi_db_walk_and_match_name(acpi_handle obj_handle, acpi_os_printf("Could Not get pathname for object %p\n", obj_handle); } else { + info.count = 0; info.owner_id = ACPI_OWNER_ID_MAX; info.debug_level = ACPI_UINT32_MAX; info.display_type = ACPI_DISPLAY_SUMMARY | ACPI_DISPLAY_SHORT; diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c index 58c3253b533a..a1c76bf21122 100644 --- a/drivers/acpi/acpica/dbobject.c +++ b/drivers/acpi/acpica/dbobject.c @@ -35,6 +35,15 @@ void acpi_db_dump_method_info(acpi_status status, struct acpi_walk_state *walk_state) { struct acpi_thread_state *thread; + struct acpi_namespace_node *node; + + node = walk_state->method_node; + + /* There are no locals or arguments for the module-level code case */ + + if (node == acpi_gbl_root_node) { + return; + } /* Ignore control codes, they are not errors */ @@ -384,8 +393,14 @@ void 
acpi_db_decode_locals(struct acpi_walk_state *walk_state) struct acpi_namespace_node *node; u8 display_locals = FALSE; - obj_desc = walk_state->method_desc; node = walk_state->method_node; + obj_desc = walk_state->method_desc; + + /* There are no locals for the module-level code case */ + + if (node == acpi_gbl_root_node) { + return; + } if (!node) { acpi_os_printf @@ -452,6 +467,12 @@ void acpi_db_decode_arguments(struct acpi_walk_state *walk_state) node = walk_state->method_node; obj_desc = walk_state->method_desc; + /* There are no arguments for the module-level code case */ + + if (node == acpi_gbl_root_node) { + return; + } + if (!node) { acpi_os_printf ("No method node (Executing subtree for buffer or opregion)\n"); diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c index 70a2fca60306..9d33f0bb2885 100644 --- a/drivers/acpi/acpica/dsdebug.c +++ b/drivers/acpi/acpica/dsdebug.c @@ -162,9 +162,15 @@ acpi_ds_dump_method_stack(acpi_status status, op->common.next = NULL; #ifdef ACPI_DISASSEMBLER - acpi_os_printf("Failed at "); - acpi_dm_disassemble(next_walk_state, op, - ACPI_UINT32_MAX); + if (walk_state->method_node != + acpi_gbl_root_node) { + + /* More verbose if not module-level code */ + + acpi_os_printf("Failed at "); + acpi_dm_disassemble(next_walk_state, op, + ACPI_UINT32_MAX); + } #endif op->common.next = next; } diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index f85c6f3271f6..2373a7492151 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -490,6 +490,17 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle) ACPI_WARNING((AE_INFO, "Received request to unload an ACPI table")); /* + * May 2018: Unload is no longer supported for the following reasons: + * 1) A correct implementation on some hosts may not be possible. + * 2) Other ACPI implementations do not correctly/fully support it. + * 3) It requires host device driver support which does not exist. + * (To properly support namespace unload out from underneath.) + * 4) This AML operator has never been seen in the field. 
+ */ + ACPI_EXCEPTION((AE_INFO, AE_NOT_IMPLEMENTED, + "AML Unload operator is not supported")); + + /* * Validate the handle * Although the handle is partially validated in acpi_ex_reconfiguration() * when it calls acpi_ex_resolve_operands(), the handle is more completely diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 4bdbd1d8431b..90ccffcd770b 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -170,6 +170,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, } type = this_node->type; + info->count++; /* Check if the owner matches */ @@ -639,6 +640,7 @@ acpi_ns_dump_objects(acpi_object_type type, return; } + info.count = 0; info.debug_level = ACPI_LV_TABLES; info.owner_id = owner_id; info.display_type = display_type; @@ -649,6 +651,7 @@ acpi_ns_dump_objects(acpi_object_type type, acpi_ns_dump_one_object, NULL, (void *)&info, NULL); + acpi_os_printf("\nNamespace node count: %u\n\n", info.count); (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); } diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 68422afc365f..bc5f05906bd1 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -515,6 +515,22 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } + if (walk_state->opcode == AML_SCOPE_OP) { + /* + * If the scope op fails to parse, skip the body of the + * scope op because the parse failure indicates that the + * device may not exist. + */ + walk_state->parser_state.aml = + walk_state->aml + 1; + walk_state->parser_state.aml = + acpi_ps_get_next_package_end + (&walk_state->parser_state); + walk_state->aml = + walk_state->parser_state.aml; + ACPI_ERROR((AE_INFO, + "Skipping Scope block")); + } continue; } @@ -557,7 +573,40 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } - + if ((walk_state->control_state) && + ((walk_state->control_state->control. + opcode == AML_IF_OP) + || (walk_state->control_state->control. + opcode == AML_WHILE_OP))) { + /* + * If the if/while op fails to parse, we will skip parsing + * the body of the op. + */ + parser_state->aml = + walk_state->control_state->control. + aml_predicate_start + 1; + parser_state->aml = + acpi_ps_get_next_package_end + (parser_state); + walk_state->aml = parser_state->aml; + + ACPI_ERROR((AE_INFO, + "Skipping While/If block")); + if (*walk_state->aml == AML_ELSE_OP) { + ACPI_ERROR((AE_INFO, + "Skipping Else block")); + walk_state->parser_state.aml = + walk_state->aml + 1; + walk_state->parser_state.aml = + acpi_ps_get_next_package_end + (parser_state); + walk_state->aml = + parser_state->aml; + } + ACPI_FREE(acpi_ut_pop_generic_state + (&walk_state->control_state)); + } + op = NULL; continue; } } diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index 7d9d0151ee54..3138e7a00da8 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -12,6 +12,7 @@ #include "acparser.h" #include "amlcode.h" #include "acconvert.h" +#include "acnamesp.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psobject") @@ -549,6 +550,21 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, do { if (*op) { + /* + * These Opcodes need to be removed from the namespace because they + * get created even if these opcodes cannot be created due to + * errors. 
+ */ + if (((*op)->common.aml_opcode == AML_REGION_OP) + || ((*op)->common.aml_opcode == + AML_DATA_REGION_OP)) { + acpi_ns_delete_children((*op)->common. + node); + acpi_ns_remove_node((*op)->common.node); + (*op)->common.node = NULL; + acpi_ps_delete_parse_tree(*op); + } + status2 = acpi_ps_complete_this_op(walk_state, *op); if (ACPI_FAILURE(status2)) { @@ -574,6 +590,20 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, #endif walk_state->prev_op = NULL; walk_state->prev_arg_types = walk_state->arg_types; + + if (walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL) { + /* + * There was something that went wrong while executing code at the + * module-level. We need to skip parsing whatever caused the + * error and keep going. One runtime error during the table load + * should not cause the entire table to not be loaded. This is + * because there could be correct AML beyond the parts that caused + * the runtime error. + */ + ACPI_ERROR((AE_INFO, + "Ignore error and continue table load")); + return_ACPI_STATUS(AE_OK); + } return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index e0a442b8648b..bd6af8c87d48 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -25,22 +25,48 @@ ACPI_MODULE_NAME("pswalk") * DESCRIPTION: Delete a portion of or an entire parse tree. * ******************************************************************************/ +#include "amlcode.h" void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root) { union acpi_parse_object *op = subtree_root; union acpi_parse_object *next = NULL; union acpi_parse_object *parent = NULL; + u32 level = 0; ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root); + ACPI_DEBUG_PRINT((ACPI_DB_PARSE_TREES, " root %p\n", subtree_root)); + /* Visit all nodes in the subtree */ while (op) { - - /* Check if we are not ascending */ - if (op != parent) { + /* This is the descending case */ + + if (ACPI_IS_DEBUG_ENABLED + (ACPI_LV_PARSE_TREES, _COMPONENT)) { + + /* This debug option will print the entire parse tree */ + + acpi_os_printf(" %*.s%s %p", (level * 4), + " ", + acpi_ps_get_opcode_name(op-> + common. 
+ aml_opcode), + op); + + if (op->named.aml_opcode == AML_INT_NAMEPATH_OP) { + acpi_os_printf(" %4.4s", + op->common.value.string); + } + if (op->named.aml_opcode == AML_STRING_OP) { + acpi_os_printf(" %s", + op->common.value.string); + } + acpi_os_printf("\n"); + } + /* Look for an argument or child of the current op */ next = acpi_ps_get_arg(op, 0); @@ -49,6 +75,7 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root) /* Still going downward in tree (Op is not completed yet) */ op = next; + level++; continue; } } @@ -69,6 +96,7 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root) if (next) { op = next; } else { + level--; op = parent; } } diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index 12d4a0f6b8d2..5a64ddaed8a3 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c @@ -182,20 +182,20 @@ acpi_ut_prefixed_namespace_error(const char *module_name, switch (lookup_status) { case AE_ALREADY_EXISTS: - acpi_os_printf(ACPI_MSG_BIOS_ERROR); + acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); message = "Failure creating"; break; case AE_NOT_FOUND: - acpi_os_printf(ACPI_MSG_BIOS_ERROR); - message = "Failure looking up"; + acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); + message = "Could not resolve"; break; default: - acpi_os_printf(ACPI_MSG_ERROR); - message = "Failure looking up"; + acpi_os_printf("\n" ACPI_MSG_ERROR); + message = "Failure resolving"; break; } diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 1b415fa90cf8..64b63c81994b 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -69,6 +69,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = { {"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */ {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */ {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */ + {"Windows 2017.2", NULL, 0, ACPI_OSI_WIN_10_RS3}, /* Windows 10 version 1709 - Added 02/2018 */ /* Feature Group Strings */ diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 9bff853e85f3..3c5ea7cb693e 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -524,7 +524,8 @@ retry: pr_warn(FW_WARN "too many record IDs!\n"); return 0; } - new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL); + new_entries = kvmalloc_array(new_size, sizeof(entries[0]), + GFP_KERNEL); if (!new_entries) return -ENOMEM; memcpy(new_entries, entries, diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 1efefe919555..02c6fd9caff7 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -481,7 +481,7 @@ static void ghes_do_proc(struct ghes *ghes, if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); - ghes_edac_report_mem_error(ghes, sev, mem_err); + ghes_edac_report_mem_error(sev, mem_err); arch_apei_report_mem_error(sev, mem_err); ghes_handle_memory_failure(gdata, sev); @@ -1087,10 +1087,6 @@ static int ghes_probe(struct platform_device *ghes_dev) goto err; } - rc = ghes_edac_register(ghes, &ghes_dev->dev); - if (rc < 0) - goto err; - switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE); @@ -1102,14 +1098,14 @@ static int ghes_probe(struct platform_device *ghes_dev) if (rc) { pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error 
source: %d\n", generic->header.source_id); - goto err_edac_unreg; + goto err; } rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED, "GHES IRQ", ghes); if (rc) { pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", generic->header.source_id); - goto err_edac_unreg; + goto err; } break; @@ -1132,14 +1128,16 @@ static int ghes_probe(struct platform_device *ghes_dev) default: BUG(); } + platform_set_drvdata(ghes_dev, ghes); + ghes_edac_register(ghes, &ghes_dev->dev); + /* Handle any pending errors right away */ ghes_proc(ghes); return 0; -err_edac_unreg: - ghes_edac_unregister(ghes); + err: if (ghes) { ghes_fini(ghes); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 9cb74115a43d..b1e9f81ebeea 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -195,7 +195,8 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count) struct ghes_arr ghes_arr; ghes_arr.count = 0; - ghes_arr.ghes_devs = kmalloc(sizeof(void *) * ghes_count, GFP_KERNEL); + ghes_arr.ghes_devs = kmalloc_array(ghes_count, sizeof(void *), + GFP_KERNEL); if (!ghes_arr.ghes_devs) return -ENOMEM; diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 3563103590c6..fe0183d48dcd 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -298,8 +298,8 @@ static int acpi_fan_get_fps(struct acpi_device *device) } fan->fps_count = obj->package.count - 1; /* minus revision field */ - fan->fps = devm_kzalloc(&device->dev, - fan->fps_count * sizeof(struct acpi_fan_fps), + fan->fps = devm_kcalloc(&device->dev, + fan->fps_count, sizeof(struct acpi_fan_fps), GFP_KERNEL); if (!fan->fps) { dev_err(&device->dev, "Not enough memory\n"); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index e2235ed3e4be..d15814e1727f 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1082,9 +1082,10 @@ static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, continue; nfit_mem->nfit_flush = nfit_flush; flush = nfit_flush->flush; - nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, - flush->hint_count - * sizeof(struct resource), GFP_KERNEL); + nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev, + flush->hint_count, + sizeof(struct resource), + GFP_KERNEL); if (!nfit_mem->flush_wpq) return -ENOMEM; for (i = 0; i < flush->hint_count; i++) { @@ -1978,19 +1979,8 @@ static ssize_t range_index_show(struct device *dev, } static DEVICE_ATTR_RO(range_index); -static ssize_t ecc_unit_size_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nd_region *nd_region = to_nd_region(dev); - struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); - - return sprintf(buf, "%d\n", nfit_spa->clear_err_unit); -} -static DEVICE_ATTR_RO(ecc_unit_size); - static struct attribute *acpi_nfit_region_attributes[] = { &dev_attr_range_index.attr, - &dev_attr_ecc_unit_size.attr, NULL, }; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 0da18bde6a16..7433035ded95 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -153,6 +153,7 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = { { OSC_PCI_EXPRESS_PME_CONTROL, "PME" }, { OSC_PCI_EXPRESS_AER_CONTROL, "AER" }, { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" }, + { OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" }, }; static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word, @@ -472,9 +473,17 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) } control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL - | 
OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | OSC_PCI_EXPRESS_PME_CONTROL; + if (IS_ENABLED(CONFIG_PCIEASPM)) + control |= OSC_PCI_EXPRESS_LTR_CONTROL; + + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL; + + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC)) + control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL; + if (pci_aer_available()) { if (aer_acpi_firmware_first()) dev_info(&device->dev, @@ -900,11 +909,15 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, host_bridge = to_pci_host_bridge(bus->bridge); if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)) - host_bridge->native_hotplug = 0; + host_bridge->native_pcie_hotplug = 0; + if (!(root->osc_control_set & OSC_PCI_SHPC_NATIVE_HP_CONTROL)) + host_bridge->native_shpc_hotplug = 0; if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL)) host_bridge->native_aer = 0; if (!(root->osc_control_set & OSC_PCI_EXPRESS_PME_CONTROL)) host_bridge->native_pme = 0; + if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL)) + host_bridge->native_ltr = 0; pci_scan_child_bus(bus); pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info, diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c new file mode 100644 index 000000000000..e5ea1974d1e3 --- /dev/null +++ b/drivers/acpi/pptt.c @@ -0,0 +1,655 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pptt.c - parsing of Processor Properties Topology Table (PPTT) + * + * Copyright (C) 2018, ARM + * + * This file implements parsing of the Processor Properties Topology Table + * which is optionally used to describe the processor and cache topology. + * Due to the relative pointers used throughout the table, this doesn't + * leverage the existing subtable parsing in the kernel. + * + * The PPTT structure is an inverted tree, with each node potentially + * holding one or two inverted tree data structures describing + * the caches available at that level. Each cache structure optionally + * contains properties describing the cache at a given level which can be + * used to override hardware probed values. 
+ */ +#define pr_fmt(fmt) "ACPI PPTT: " fmt + +#include <linux/acpi.h> +#include <linux/cacheinfo.h> +#include <acpi/processor.h> + +static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr, + u32 pptt_ref) +{ + struct acpi_subtable_header *entry; + + /* there isn't a subtable at reference 0 */ + if (pptt_ref < sizeof(struct acpi_subtable_header)) + return NULL; + + if (pptt_ref + sizeof(struct acpi_subtable_header) > table_hdr->length) + return NULL; + + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, pptt_ref); + + if (entry->length == 0) + return NULL; + + if (pptt_ref + entry->length > table_hdr->length) + return NULL; + + return entry; +} + +static struct acpi_pptt_processor *fetch_pptt_node(struct acpi_table_header *table_hdr, + u32 pptt_ref) +{ + return (struct acpi_pptt_processor *)fetch_pptt_subtable(table_hdr, pptt_ref); +} + +static struct acpi_pptt_cache *fetch_pptt_cache(struct acpi_table_header *table_hdr, + u32 pptt_ref) +{ + return (struct acpi_pptt_cache *)fetch_pptt_subtable(table_hdr, pptt_ref); +} + +static struct acpi_subtable_header *acpi_get_pptt_resource(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *node, + int resource) +{ + u32 *ref; + + if (resource >= node->number_of_priv_resources) + return NULL; + + ref = ACPI_ADD_PTR(u32, node, sizeof(struct acpi_pptt_processor)); + ref += resource; + + return fetch_pptt_subtable(table_hdr, *ref); +} + +static inline bool acpi_pptt_match_type(int table_type, int type) +{ + return ((table_type & ACPI_PPTT_MASK_CACHE_TYPE) == type || + table_type & ACPI_PPTT_CACHE_TYPE_UNIFIED & type); +} + +/** + * acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache + * @table_hdr: Pointer to the head of the PPTT table + * @local_level: passed res reflects this cache level + * @res: cache resource in the PPTT we want to walk + * @found: returns a pointer to the requested level if found + * @level: the requested cache level + * @type: the requested cache type + * + * Attempt to find a given cache level, while counting the max number + * of cache levels for the cache node. + * + * Given a pptt resource, verify that it is a cache node, then walk + * down each level of caches, counting how many levels are found + * as well as checking the cache type (icache, dcache, unified). If a + * level & type match, then we set found, and continue the search. + * Once the entire cache branch has been walked return its max + * depth. + * + * Return: The cache structure and the level we terminated with. + */ +static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, + int local_level, + struct acpi_subtable_header *res, + struct acpi_pptt_cache **found, + int level, int type) +{ + struct acpi_pptt_cache *cache; + + if (res->type != ACPI_PPTT_TYPE_CACHE) + return 0; + + cache = (struct acpi_pptt_cache *) res; + while (cache) { + local_level++; + + if (local_level == level && + cache->flags & ACPI_PPTT_CACHE_TYPE_VALID && + acpi_pptt_match_type(cache->attributes, type)) { + if (*found != NULL && cache != *found) + pr_warn("Found duplicate cache level/type unable to determine uniqueness\n"); + + pr_debug("Found cache @ level %d\n", level); + *found = cache; + /* + * continue looking at this node's resource list + * to verify that we don't find a duplicate + * cache node. 
+ */ + } + cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache); + } + return local_level; +} + +static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + int *starting_level, int level, + int type) +{ + struct acpi_subtable_header *res; + int number_of_levels = *starting_level; + int resource = 0; + struct acpi_pptt_cache *ret = NULL; + int local_level; + + /* walk down from processor node */ + while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) { + resource++; + + local_level = acpi_pptt_walk_cache(table_hdr, *starting_level, + res, &ret, level, type); + /* + * we are looking for the max depth. Since its potentially + * possible for a given node to have resources with differing + * depths verify that the depth we have found is the largest. + */ + if (number_of_levels < local_level) + number_of_levels = local_level; + } + if (number_of_levels > *starting_level) + *starting_level = number_of_levels; + + return ret; +} + +/** + * acpi_count_levels() - Given a PPTT table, and a cpu node, count the caches + * @table_hdr: Pointer to the head of the PPTT table + * @cpu_node: processor node we wish to count caches for + * + * Given a processor node containing a processing unit, walk into it and count + * how many levels exist solely for it, and then walk up each level until we hit + * the root node (ignore the package level because it may be possible to have + * caches that exist across packages). Count the number of cache levels that + * exist at each level on the way up. + * + * Return: Total number of levels found. + */ +static int acpi_count_levels(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node) +{ + int total_levels = 0; + + do { + acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0); + cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); + } while (cpu_node); + + return total_levels; +} + +/** + * acpi_pptt_leaf_node() - Given a processor node, determine if its a leaf + * @table_hdr: Pointer to the head of the PPTT table + * @node: passed node is checked to see if its a leaf + * + * Determine if the *node parameter is a leaf node by iterating the + * PPTT table, looking for nodes which reference it. + * + * Return: 0 if we find a node referencing the passed node (or table error), + * or 1 if we don't. + */ +static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *node) +{ + struct acpi_subtable_header *entry; + unsigned long table_end; + u32 node_entry; + struct acpi_pptt_processor *cpu_node; + u32 proc_sz; + + table_end = (unsigned long)table_hdr + table_hdr->length; + node_entry = ACPI_PTR_DIFF(node, table_hdr); + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor *); + + while ((unsigned long)entry + proc_sz < table_end) { + cpu_node = (struct acpi_pptt_processor *)entry; + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && + cpu_node->parent == node_entry) + return 0; + if (entry->length == 0) + return 0; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + + } + return 1; +} + +/** + * acpi_find_processor_node() - Given a PPTT table find the requested processor + * @table_hdr: Pointer to the head of the PPTT table + * @acpi_cpu_id: cpu we are searching for + * + * Find the subtable entry describing the provided processor. 
+ * This is done by iterating the PPTT table looking for processor nodes + * which have an acpi_processor_id that matches the acpi_cpu_id parameter + * passed into the function. If we find a node that matches this criteria + * we verify that its a leaf node in the topology rather than depending + * on the valid flag, which doesn't need to be set for leaf nodes. + * + * Return: NULL, or the processors acpi_pptt_processor* + */ +static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_header *table_hdr, + u32 acpi_cpu_id) +{ + struct acpi_subtable_header *entry; + unsigned long table_end; + struct acpi_pptt_processor *cpu_node; + u32 proc_sz; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor *); + + /* find the processor structure associated with this cpuid */ + while ((unsigned long)entry + proc_sz < table_end) { + cpu_node = (struct acpi_pptt_processor *)entry; + + if (entry->length == 0) { + pr_warn("Invalid zero length subtable\n"); + break; + } + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && + acpi_cpu_id == cpu_node->acpi_processor_id && + acpi_pptt_leaf_node(table_hdr, cpu_node)) { + return (struct acpi_pptt_processor *)entry; + } + + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + } + + return NULL; +} + +static int acpi_find_cache_levels(struct acpi_table_header *table_hdr, + u32 acpi_cpu_id) +{ + int number_of_levels = 0; + struct acpi_pptt_processor *cpu; + + cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id); + if (cpu) + number_of_levels = acpi_count_levels(table_hdr, cpu); + + return number_of_levels; +} + +static u8 acpi_cache_type(enum cache_type type) +{ + switch (type) { + case CACHE_TYPE_DATA: + pr_debug("Looking for data cache\n"); + return ACPI_PPTT_CACHE_TYPE_DATA; + case CACHE_TYPE_INST: + pr_debug("Looking for instruction cache\n"); + return ACPI_PPTT_CACHE_TYPE_INSTR; + default: + case CACHE_TYPE_UNIFIED: + pr_debug("Looking for unified cache\n"); + /* + * It is important that ACPI_PPTT_CACHE_TYPE_UNIFIED + * contains the bit pattern that will match both + * ACPI unified bit patterns because we use it later + * to match both cases. 
+ */ + return ACPI_PPTT_CACHE_TYPE_UNIFIED; + } +} + +static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *table_hdr, + u32 acpi_cpu_id, + enum cache_type type, + unsigned int level, + struct acpi_pptt_processor **node) +{ + int total_levels = 0; + struct acpi_pptt_cache *found = NULL; + struct acpi_pptt_processor *cpu_node; + u8 acpi_type = acpi_cache_type(type); + + pr_debug("Looking for CPU %d's level %d cache type %d\n", + acpi_cpu_id, level, acpi_type); + + cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id); + + while (cpu_node && !found) { + found = acpi_find_cache_level(table_hdr, cpu_node, + &total_levels, level, acpi_type); + *node = cpu_node; + cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); + } + + return found; +} + +/* total number of attributes checked by the properties code */ +#define PPTT_CHECKED_ATTRIBUTES 4 + +/** + * update_cache_properties() - Update cacheinfo for the given processor + * @this_leaf: Kernel cache info structure being updated + * @found_cache: The PPTT node describing this cache instance + * @cpu_node: A unique reference to describe this cache instance + * + * The ACPI spec implies that the fields in the cache structures are used to + * extend and correct the information probed from the hardware. Lets only + * set fields that we determine are VALID. + * + * Return: nothing. Side effect of updating the global cacheinfo + */ +static void update_cache_properties(struct cacheinfo *this_leaf, + struct acpi_pptt_cache *found_cache, + struct acpi_pptt_processor *cpu_node) +{ + int valid_flags = 0; + + this_leaf->fw_token = cpu_node; + if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID) { + this_leaf->size = found_cache->size; + valid_flags++; + } + if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID) { + this_leaf->coherency_line_size = found_cache->line_size; + valid_flags++; + } + if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID) { + this_leaf->number_of_sets = found_cache->number_of_sets; + valid_flags++; + } + if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID) { + this_leaf->ways_of_associativity = found_cache->associativity; + valid_flags++; + } + if (found_cache->flags & ACPI_PPTT_WRITE_POLICY_VALID) { + switch (found_cache->attributes & ACPI_PPTT_MASK_WRITE_POLICY) { + case ACPI_PPTT_CACHE_POLICY_WT: + this_leaf->attributes = CACHE_WRITE_THROUGH; + break; + case ACPI_PPTT_CACHE_POLICY_WB: + this_leaf->attributes = CACHE_WRITE_BACK; + break; + } + } + if (found_cache->flags & ACPI_PPTT_ALLOCATION_TYPE_VALID) { + switch (found_cache->attributes & ACPI_PPTT_MASK_ALLOCATION_TYPE) { + case ACPI_PPTT_CACHE_READ_ALLOCATE: + this_leaf->attributes |= CACHE_READ_ALLOCATE; + break; + case ACPI_PPTT_CACHE_WRITE_ALLOCATE: + this_leaf->attributes |= CACHE_WRITE_ALLOCATE; + break; + case ACPI_PPTT_CACHE_RW_ALLOCATE: + case ACPI_PPTT_CACHE_RW_ALLOCATE_ALT: + this_leaf->attributes |= + CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE; + break; + } + } + /* + * If the above flags are valid, and the cache type is NOCACHE + * update the cache type as well. 
+ */ + if (this_leaf->type == CACHE_TYPE_NOCACHE && + valid_flags == PPTT_CHECKED_ATTRIBUTES) + this_leaf->type = CACHE_TYPE_UNIFIED; +} + +static void cache_setup_acpi_cpu(struct acpi_table_header *table, + unsigned int cpu) +{ + struct acpi_pptt_cache *found_cache; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); + struct cacheinfo *this_leaf; + unsigned int index = 0; + struct acpi_pptt_processor *cpu_node = NULL; + + while (index < get_cpu_cacheinfo(cpu)->num_leaves) { + this_leaf = this_cpu_ci->info_list + index; + found_cache = acpi_find_cache_node(table, acpi_cpu_id, + this_leaf->type, + this_leaf->level, + &cpu_node); + pr_debug("found = %p %p\n", found_cache, cpu_node); + if (found_cache) + update_cache_properties(this_leaf, + found_cache, + cpu_node); + + index++; + } +} + +/* Passing level values greater than this will result in search termination */ +#define PPTT_ABORT_PACKAGE 0xFF + +static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu, + int level, int flag) +{ + struct acpi_pptt_processor *prev_node; + + while (cpu && level) { + if (cpu->flags & flag) + break; + pr_debug("level %d\n", level); + prev_node = fetch_pptt_node(table_hdr, cpu->parent); + if (prev_node == NULL) + break; + cpu = prev_node; + level--; + } + return cpu; +} + +/** + * topology_get_acpi_cpu_tag() - Find a unique topology value for a feature + * @table: Pointer to the head of the PPTT table + * @cpu: Kernel logical cpu number + * @level: A level that terminates the search + * @flag: A flag which terminates the search + * + * Get a unique value given a cpu, and a topology level, that can be + * matched to determine which cpus share common topological features + * at that level. + * + * Return: Unique value, or -ENOENT if unable to locate cpu + */ +static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, + unsigned int cpu, int level, int flag) +{ + struct acpi_pptt_processor *cpu_node; + u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); + + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (cpu_node) { + cpu_node = acpi_find_processor_package_id(table, cpu_node, + level, flag); + /* Only the first level has a guaranteed id */ + if (level == 0) + return cpu_node->acpi_processor_id; + return ACPI_PTR_DIFF(cpu_node, table); + } + pr_warn_once("PPTT table found, but unable to locate core %d (%d)\n", + cpu, acpi_cpu_id); + return -ENOENT; +} + +static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag) +{ + struct acpi_table_header *table; + acpi_status status; + int retval; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + pr_warn_once("No PPTT table found, cpu topology may be inaccurate\n"); + return -ENOENT; + } + retval = topology_get_acpi_cpu_tag(table, cpu, level, flag); + pr_debug("Topology Setup ACPI cpu %d, level %d ret = %d\n", + cpu, level, retval); + acpi_put_table(table); + + return retval; +} + +/** + * acpi_find_last_cache_level() - Determines the number of cache levels for a PE + * @cpu: Kernel logical cpu number + * + * Given a logical cpu number, returns the number of levels of cache represented + * in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0 + * indicating we didn't find any cache levels. + * + * Return: Cache levels visible to this core. 
+ */ +int acpi_find_last_cache_level(unsigned int cpu) +{ + u32 acpi_cpu_id; + struct acpi_table_header *table; + int number_of_levels = 0; + acpi_status status; + + pr_debug("Cache Setup find last level cpu=%d\n", cpu); + + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + pr_warn_once("No PPTT table found, cache topology may be inaccurate\n"); + } else { + number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id); + acpi_put_table(table); + } + pr_debug("Cache Setup find last level level=%d\n", number_of_levels); + + return number_of_levels; +} + +/** + * cache_setup_acpi() - Override CPU cache topology with data from the PPTT + * @cpu: Kernel logical cpu number + * + * Updates the global cache info provided by cpu_get_cacheinfo() + * when there are valid properties in the acpi_pptt_cache nodes. A + * successful parse may not result in any updates if none of the + * cache levels have any valid flags set. Futher, a unique value is + * associated with each known CPU cache entry. This unique value + * can be used to determine whether caches are shared between cpus. + * + * Return: -ENOENT on failure to find table, or 0 on success + */ +int cache_setup_acpi(unsigned int cpu) +{ + struct acpi_table_header *table; + acpi_status status; + + pr_debug("Cache Setup ACPI cpu %d\n", cpu); + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + pr_warn_once("No PPTT table found, cache topology may be inaccurate\n"); + return -ENOENT; + } + + cache_setup_acpi_cpu(table, cpu); + acpi_put_table(table); + + return status; +} + +/** + * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu + * @cpu: Kernel logical cpu number + * @level: The topological level for which we would like a unique ID + * + * Determine a topology unique ID for each thread/core/cluster/mc_grouping + * /socket/etc. This ID can then be used to group peers, which will have + * matching ids. + * + * The search terminates when either the requested level is found or + * we reach a root node. Levels beyond the termination point will return the + * same unique ID. The unique id for level 0 is the acpi processor id. All + * other levels beyond this use a generated value to uniquely identify + * a topological feature. + * + * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found. + * Otherwise returns a value which represents a unique topological feature. + */ +int find_acpi_cpu_topology(unsigned int cpu, int level) +{ + return find_acpi_cpu_topology_tag(cpu, level, 0); +} + +/** + * find_acpi_cpu_cache_topology() - Determine a unique cache topology value + * @cpu: Kernel logical cpu number + * @level: The cache level for which we would like a unique ID + * + * Determine a unique ID for each unified cache in the system + * + * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found. + * Otherwise returns a value which represents a unique topological feature. 
+ */ +int find_acpi_cpu_cache_topology(unsigned int cpu, int level) +{ + struct acpi_table_header *table; + struct acpi_pptt_cache *found_cache; + acpi_status status; + u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); + struct acpi_pptt_processor *cpu_node = NULL; + int ret = -1; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + pr_warn_once("No PPTT table found, topology may be inaccurate\n"); + return -ENOENT; + } + + found_cache = acpi_find_cache_node(table, acpi_cpu_id, + CACHE_TYPE_UNIFIED, + level, + &cpu_node); + if (found_cache) + ret = ACPI_PTR_DIFF(cpu_node, table); + + acpi_put_table(table); + + return ret; +} + + +/** + * find_acpi_cpu_topology_package() - Determine a unique cpu package value + * @cpu: Kernel logical cpu number + * + * Determine a topology unique package ID for the given cpu. + * This ID can then be used to group peers, which will have matching ids. + * + * The search terminates when either a level is found with the PHYSICAL_PACKAGE + * flag set or we reach a root node. + * + * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found. + * Otherwise returns a value which represents the package for this cpu. + */ +int find_acpi_cpu_topology_package(unsigned int cpu) +{ + return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE, + ACPI_PPTT_PHYSICAL_PACKAGE); +} diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index a651ab3490d8..a303fd0e108c 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -343,8 +343,9 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) pr->performance->state_count = pss->package.count; pr->performance->states = - kmalloc(sizeof(struct acpi_processor_px) * pss->package.count, - GFP_KERNEL); + kmalloc_array(pss->package.count, + sizeof(struct acpi_processor_px), + GFP_KERNEL); if (!pr->performance->states) { result = -ENOMEM; goto end; diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 7f9aff4b8d62..fbc936cf2025 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -534,8 +534,9 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr) pr->throttling.state_count = tss->package.count; pr->throttling.states_tss = - kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count, - GFP_KERNEL); + kmalloc_array(tss->package.count, + sizeof(struct acpi_processor_tx_tss), + GFP_KERNEL); if (!pr->throttling.states_tss) { result = -ENOMEM; goto end; diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 4fc59c3bc673..41324f0b1bee 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -857,12 +857,12 @@ void acpi_irq_stats_init(void) num_gpes = acpi_current_gpe_count; num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA; - all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1), + all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *), GFP_KERNEL); if (all_attrs == NULL) return; - all_counters = kzalloc(sizeof(struct event_counter) * (num_counters), + all_counters = kcalloc(num_counters, sizeof(struct event_counter), GFP_KERNEL); if (all_counters == NULL) goto fail; @@ -871,7 +871,7 @@ void acpi_irq_stats_init(void) if (ACPI_FAILURE(status)) goto fail; - counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters), + counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute), GFP_KERNEL); if (counter_attrs == NULL) goto fail; diff --git 
a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 4a3410aa6540..a3d012b08fc5 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -462,7 +462,7 @@ static const char * const table_sigs[] = { ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT, - ACPI_SIG_NFIT, ACPI_SIG_HMAT, NULL }; + ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, NULL }; #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) |
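For readers unfamiliar with the new PPTT interfaces added by pptt.c above, the sketch below shows how a caller might use them. The helper names and return conventions (negative errno on failure, level 0 meaning the ACPI processor ID) are taken from the hunks in this diff; the surrounding function, its name, the chosen headers and the pr_debug() output are purely illustrative and not part of this series:

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/printk.h>

/*
 * Hypothetical consumer of the PPTT helpers introduced above; prototypes
 * are assumed to be visible via the ACPI/cacheinfo headers.
 */
static int example_parse_pptt_for_cpu(unsigned int cpu)
{
	int package_id, core_id, cache_levels;

	/* Unique tag shared by every CPU in the same physical package. */
	package_id = find_acpi_cpu_topology_package(cpu);
	if (package_id < 0)
		return package_id;	/* no PPTT, or this CPU is not described */

	/* Level 0 returns the ACPI processor ID of the leaf node itself. */
	core_id = find_acpi_cpu_topology(cpu, 0);

	/* Number of cache levels the PPTT describes for this CPU. */
	cache_levels = acpi_find_last_cache_level(cpu);

	pr_debug("cpu%u: package tag %d, core id %d, %d cache levels\n",
		 cpu, package_id, core_id, cache_levels);

	/* Overlay any valid PPTT cache properties onto the cacheinfo leaves. */
	return cache_setup_acpi(cpu);
}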