Diffstat (limited to 'drivers/acpi')
-rw-r--r--   drivers/acpi/acpi_lpss.c     | 26
-rw-r--r--   drivers/acpi/acpica/psloop.c | 19
-rw-r--r--   drivers/acpi/arm64/iort.c    | 48
3 files changed, 64 insertions(+), 29 deletions(-)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f8fecfec5df9..9706613eecf9 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP		BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
+static bool lpss_iosf_d3_entered;
 
 static void lpss_iosf_enter_d3_state(void)
 {
@@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)
 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
 			LPSS_IOSF_GPIODEF0, value1, mask1);
+
+	lpss_iosf_d3_entered = true;
+
 exit:
 	mutex_unlock(&lpss_iosf_mutex);
 }
@@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)
 
 	mutex_lock(&lpss_iosf_mutex);
 
+	if (!lpss_iosf_d3_entered)
+		goto exit;
+
+	lpss_iosf_d3_entered = false;
+
 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
 			LPSS_IOSF_GPIODEF0, value1, mask1);
 
@@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void)
 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
 			LPSS_IOSF_PMCSR, value2, mask2);
 
+exit:
 	mutex_unlock(&lpss_iosf_mutex);
 }
 
-static int acpi_lpss_suspend(struct device *dev, bool runtime)
+static int acpi_lpss_suspend(struct device *dev, bool wakeup)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
-	bool wakeup = runtime || device_may_wakeup(dev);
 	int ret;
 
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime)
 	 * wrong status for devices being about to be powered off. See
 	 * lpss_iosf_enter_d3_state() for further information.
 	 */
-	if ((runtime || !pm_suspend_via_firmware()) &&
+	if (acpi_target_system_state() == ACPI_STATE_S0 &&
 	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_enter_d3_state();
 
 	return ret;
 }
 
-static int acpi_lpss_resume(struct device *dev, bool runtime)
+static int acpi_lpss_resume(struct device *dev)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;
@@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime)
 	 * This call is kept first to be in symmetry with
 	 * acpi_lpss_runtime_suspend() one.
 	 */
-	if ((runtime || !pm_resume_via_firmware()) &&
-	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_exit_d3_state();
 
 	ret = acpi_dev_resume(dev);
@@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
 		return 0;
 
 	ret = pm_generic_suspend_late(dev);
-	return ret ? ret : acpi_lpss_suspend(dev, false);
+	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
 }
 
 static int acpi_lpss_resume_early(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev, false);
+	int ret = acpi_lpss_resume(dev);
 
 	return ret ? ret : pm_generic_resume_early(dev);
 }
@@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 
 static int acpi_lpss_runtime_resume(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev, true);
+	int ret = acpi_lpss_resume(dev);
 
 	return ret ? ret : pm_generic_runtime_resume(dev);
 }
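The acpi_lpss.c hunks above pair lpss_iosf_enter_d3_state() and lpss_iosf_exit_d3_state() through the new lpss_iosf_d3_entered flag, so the exit path only undoes work the enter path actually performed. Below is a minimal user-space sketch of that guard pattern, not the kernel code itself; the names and the pthread mutex stand in for the LPSS-specific details.

/*
 * Sketch: an exit routine that is a no-op unless the matching enter
 * routine ran, tracked by a flag under the same lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t d3_lock = PTHREAD_MUTEX_INITIALIZER;
static bool d3_entered;

static void enter_d3_state(void)
{
	pthread_mutex_lock(&d3_lock);
	/* ... program the device for D3 here ... */
	d3_entered = true;		/* remember that the work was done */
	pthread_mutex_unlock(&d3_lock);
}

static void exit_d3_state(void)
{
	pthread_mutex_lock(&d3_lock);
	if (!d3_entered)		/* resume without a prior suspend: nothing to undo */
		goto unlock;
	d3_entered = false;
	/* ... undo the D3 programming here ... */
unlock:
	pthread_mutex_unlock(&d3_lock);
}

int main(void)
{
	exit_d3_state();	/* harmless no-op, the flag is still false */
	enter_d3_state();
	exit_d3_state();	/* undoes exactly one enter */
	printf("balanced enter/exit done\n");
	return 0;
}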
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index ee840be150b5..44f35ab3347d 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -709,15 +709,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
 			} else if ((walk_state->
 				    parse_flags & ACPI_PARSE_MODULE_LEVEL)
+				   && status != AE_CTRL_TRANSFER
 				   && ACPI_FAILURE(status)) {
 				/*
-				 * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
-				 * executing it as a control method. However, if we encounter
-				 * an error while loading the table, we need to keep trying to
-				 * load the table rather than aborting the table load. Set the
-				 * status to AE_OK to proceed with the table load. If we get a
-				 * failure at this point, it means that the dispatcher got an
-				 * error while processing Op (most likely an AML operand error.
+				 * ACPI_PARSE_MODULE_LEVEL flag means that we are currently
+				 * loading a table by executing it as a control method.
+				 * However, if we encounter an error while loading the table,
+				 * we need to keep trying to load the table rather than
+				 * aborting the table load (setting the status to AE_OK
+				 * continues the table load). If we get a failure at this
+				 * point, it means that the dispatcher got an error while
+				 * processing Op (most likely an AML operand error) or a
+				 * control method was called from module level and the
+				 * dispatcher returned AE_CTRL_TRANSFER. In the latter case,
+				 * leave the status alone, there's nothing wrong with it.
 				 */
 				status = AE_OK;
 			}
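The psloop.c hunk above narrows the module-level error swallowing so that AE_CTRL_TRANSFER is passed through untouched. The stand-alone sketch below models only that filtering decision; the status values and the ACPI_PARSE_MODULE_LEVEL bit are placeholders for this sketch, not ACPICA's real definitions.

#include <stdio.h>

enum status { AE_OK, AE_AML_OPERAND_TYPE, AE_CTRL_TRANSFER };

#define ACPI_FAILURE(s)			((s) != AE_OK)
#define ACPI_PARSE_MODULE_LEVEL		0x0080	/* placeholder bit for this sketch */

/*
 * While a table is loaded as a method (module level), convert ordinary
 * failures to AE_OK so the load continues, but leave AE_CTRL_TRANSFER
 * alone: it signals a control transfer, not an error.
 */
static enum status filter_module_level_status(unsigned parse_flags, enum status status)
{
	if ((parse_flags & ACPI_PARSE_MODULE_LEVEL) &&
	    status != AE_CTRL_TRANSFER && ACPI_FAILURE(status))
		return AE_OK;	/* keep loading the table despite the error */
	return status;		/* AE_OK and AE_CTRL_TRANSFER pass through */
}

int main(void)
{
	/* operand error is swallowed -> prints 0 (AE_OK) */
	printf("%d\n", filter_module_level_status(ACPI_PARSE_MODULE_LEVEL, AE_AML_OPERAND_TYPE));
	/* control transfer is preserved -> prints 2 (AE_CTRL_TRANSFER) */
	printf("%d\n", filter_module_level_status(ACPI_PARSE_MODULE_LEVEL, AE_CTRL_TRANSFER));
	return 0;
}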
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 7a3a541046ed..08f26db2da7e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -947,6 +947,24 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
 	return 0;
 }
 
+static int rc_dma_get_range(struct device *dev, u64 *size)
+{
+	struct acpi_iort_node *node;
+	struct acpi_iort_root_complex *rc;
+
+	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
+			      iort_match_node_callback, dev);
+	if (!node || node->revision < 1)
+		return -ENODEV;
+
+	rc = (struct acpi_iort_root_complex *)node->node_data;
+
+	*size = rc->memory_address_limit >= 64 ? U64_MAX :
+			1ULL<<rc->memory_address_limit;
+
+	return 0;
+}
+
 /**
  * iort_dma_setup() - Set-up device DMA parameters.
  *
@@ -960,25 +978,28 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
 	int ret, msb;
 
 	/*
-	 * Set default coherent_dma_mask to 32 bit. Drivers are expected to
-	 * setup the correct supported mask.
+	 * If @dev is expected to be DMA-capable then the bus code that created
+	 * it should have initialised its dma_mask pointer by this point. For
+	 * now, we'll continue the legacy behaviour of coercing it to the
+	 * coherent mask if not, but we'll no longer do so quietly.
 	 */
-	if (!dev->coherent_dma_mask)
-		dev->coherent_dma_mask = DMA_BIT_MASK(32);
-
-	/*
-	 * Set it to coherent_dma_mask by default if the architecture
-	 * code has not set it.
-	 */
-	if (!dev->dma_mask)
+	if (!dev->dma_mask) {
+		dev_warn(dev, "DMA mask not set\n");
 		dev->dma_mask = &dev->coherent_dma_mask;
+	}
 
-	size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+	if (dev->coherent_dma_mask)
+		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+	else
+		size = 1ULL << 32;
 
-	if (dev_is_pci(dev))
+	if (dev_is_pci(dev)) {
 		ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
-	else
+		if (ret == -ENODEV)
+			ret = rc_dma_get_range(dev, &size);
+	} else {
 		ret = nc_dma_get_range(dev, &size);
+	}
 
 	if (!ret) {
 		msb = fls64(dmaaddr + size - 1);
@@ -993,6 +1014,7 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
 		 * Limit coherent and dma mask based on size
 		 * retrieved from firmware.
 		 */
+		dev->bus_dma_mask = mask;
 		dev->coherent_dma_mask = mask;
 		*dev->dma_mask = mask;
 	}
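The iort.c hunks above derive a DMA window from the root complex's memory_address_limit and then clamp the device masks with fls64(). The user-space sketch below walks through that arithmetic for one example limit; fls64_sketch() and dma_bit_mask() only mirror the kernel's fls64()/DMA_BIT_MASK() for illustration, and the example limit of 42 bits is made up.

#include <stdint.h>
#include <stdio.h>

#define U64_MAX UINT64_MAX

static int fls64_sketch(uint64_t x)		/* position of highest set bit, 1-based */
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static uint64_t dma_bit_mask(int bits)		/* lowest 'bits' bits set */
{
	return bits >= 64 ? U64_MAX : (1ULL << bits) - 1;
}

int main(void)
{
	unsigned memory_address_limit = 42;	/* example value from an IORT RC node */
	uint64_t dmaaddr = 0;

	/* rc_dma_get_range(): limit in bits -> addressable size in bytes */
	uint64_t size = memory_address_limit >= 64 ? U64_MAX :
			1ULL << memory_address_limit;

	/* iort_dma_setup(): clamp the masks to the highest addressable bit */
	int msb = fls64_sketch(dmaaddr + size - 1);
	uint64_t mask = dma_bit_mask(msb);

	/* prints size = 0x40000000000, mask = 0x3ffffffffff */
	printf("size = 0x%llx, mask = 0x%llx\n",
	       (unsigned long long)size, (unsigned long long)mask);
	return 0;
}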