Diffstat (limited to 'drivers/mmc/host/sdhci-msm.c')
-rw-r--r--	drivers/mmc/host/sdhci-msm.c	139
1 file changed, 133 insertions(+), 6 deletions(-)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3d0bb5e2e09b..c3a160c18047 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -15,6 +15,7 @@
 #include <linux/regulator/consumer.h>
 
 #include "sdhci-pltfm.h"
+#include "cqhci.h"
 
 #define CORE_MCI_VERSION		0x50
 #define CORE_VERSION_MAJOR_SHIFT	28
@@ -122,6 +123,10 @@
 #define msm_host_writel(msm_host, val, host, offset) \
 	msm_host->var_ops->msm_writel_relaxed(val, host, offset)
 
+/* CQHCI vendor specific registers */
+#define CQHCI_VENDOR_CFG1		0xA00
+#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN	(0x3 << 13)
+
 struct sdhci_msm_offset {
 	u32 core_hc_mode;
 	u32 core_mci_data_cnt;
@@ -1567,6 +1572,127 @@ out:
 	__sdhci_msm_set_clock(host, clock);
 }
 
+/*****************************************************************************\
+ *                                                                           *
+ *                     MSM Command Queue Engine (CQE)                        *
+ *                                                                           *
+\*****************************************************************************/
+
+static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
+{
+	int cmd_error = 0;
+	int data_error = 0;
+
+	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+		return intmask;
+
+	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+	return 0;
+}
+
+static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	u32 ctrl;
+
+	/*
+	 * When CQE is halted, the legacy SDHCI path operates only
+	 * on 16-byte descriptors in 64-bit mode.
+	 */
+	if (host->flags & SDHCI_USE_64_BIT_DMA)
+		host->desc_sz = 16;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	/*
+	 * During CQE command transfers, the command complete bit gets
+	 * latched. So s/w should clear the command complete interrupt status
+	 * when CQE is either halted or disabled. Otherwise an unexpected
+	 * SDHCI legacy interrupt gets triggered when CQE is halted/disabled.
+	 */
+	ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
+	ctrl |= SDHCI_INT_RESPONSE;
+	sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
+	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	sdhci_cqe_disable(mmc, recovery);
+}
+
+static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
+	.enable		= sdhci_cqe_enable,
+	.disable	= sdhci_msm_cqe_disable,
+};
+
+static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
+				  struct platform_device *pdev)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+	struct cqhci_host *cq_host;
+	bool dma64;
+	u32 cqcfg;
+	int ret;
+
+	/*
+	 * When CQE is halted, the SDHC operates only on 16-byte ADMA
+	 * descriptors, so ensure the ADMA table is allocated for them.
+	 */
+	if (host->caps & SDHCI_CAN_64BIT)
+		host->alloc_desc_sz = 16;
+
+	ret = sdhci_setup_host(host);
+	if (ret)
+		return ret;
+
+	cq_host = cqhci_pltfm_init(pdev);
+	if (IS_ERR(cq_host)) {
+		ret = PTR_ERR(cq_host);
+		dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
+		goto cleanup;
+	}
+
+	msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+	cq_host->ops = &sdhci_msm_cqhci_ops;
+
+	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+
+	ret = cqhci_init(cq_host, host->mmc, dma64);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
+			mmc_hostname(host->mmc), ret);
+		goto cleanup;
+	}
+
+	/* Disable CQE reset due to the CQE enable signal */
+	cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
+	cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
+	cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
+
+	/*
+	 * The SDHC expects 12-byte ADMA descriptors until CQE is enabled,
+	 * so limit desc_sz to 12 so that the data commands sent during
+	 * card initialization (before CQE gets enabled) execute without
+	 * any issues.
+	 */
+	if (host->flags & SDHCI_USE_64_BIT_DMA)
+		host->desc_sz = 12;
+
+	ret = __sdhci_add_host(host);
+	if (ret)
+		goto cleanup;
+
+	dev_info(&pdev->dev, "%s: CQE init: success\n",
+		 mmc_hostname(host->mmc));
+	return ret;
+
+cleanup:
+	sdhci_cleanup_host(host);
+	return ret;
+}
+
 /*
  * Platform specific register write functions. This is so that, if any
  * register write needs to be followed up by platform specific actions,
@@ -1731,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
 	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
 	.write_w = sdhci_msm_writew,
 	.write_b = sdhci_msm_writeb,
+	.irq = sdhci_msm_cqe_irq,
 };
 
 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1746,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 	struct sdhci_host *host;
 	struct sdhci_pltfm_host *pltfm_host;
 	struct sdhci_msm_host *msm_host;
-	struct resource *core_memres;
 	struct clk *clk;
 	int ret;
 	u16 host_version, core_minor;
@@ -1754,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 	u8 core_major;
 	const struct sdhci_msm_offset *msm_offset;
 	const struct sdhci_msm_variant_info *var_info;
+	struct device_node *node = pdev->dev.of_node;
 
 	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
 	if (IS_ERR(host))
@@ -1847,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 	}
 
 	if (!msm_host->mci_removed) {
-		core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
-				core_memres);
-
+		msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
 		if (IS_ERR(msm_host->core_mem)) {
 			ret = PTR_ERR(msm_host->core_mem);
 			goto clk_disable;
@@ -1952,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 	pm_runtime_use_autosuspend(&pdev->dev);
 
 	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
-	ret = sdhci_add_host(host);
+	if (of_property_read_bool(node, "supports-cqe"))
+		ret = sdhci_msm_cqe_add_host(host, pdev);
+	else
+		ret = sdhci_add_host(host);
 	if (ret)
 		goto pm_runtime_disable;
 	sdhci_msm_set_regulator_caps(msm_host);
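A note on the descriptor-size juggling above: a 64-bit ADMA2 descriptor occupies 12 bytes in the legacy SDHCI layout and 16 bytes in the layout the controller uses once CQE is active. That is why sdhci_msm_cqe_add_host() allocates the table at the 16-byte stride (alloc_desc_sz = 16) but walks it at 12 bytes (desc_sz = 12) until CQE is enabled, and why sdhci_msm_cqe_disable() widens desc_sz back to 16 while CQE is merely halted. Below is a minimal standalone sketch of the two layouts; the struct and field names are illustrative, modeled on (not copied from) the ADMA2 descriptor in sdhci.h:

/* Illustrative only: names modeled on the ADMA2 descriptor in sdhci.h. */
#include <stdint.h>

/* 12-byte ADMA2 descriptor: what the SDHC consumes on the legacy
 * (pre-CQE) path, e.g. for data commands issued during card init. */
struct adma2_64_desc_12 {
	uint16_t cmd;		/* attribute bits: valid, end, int, act */
	uint16_t len;		/* transfer length in bytes */
	uint32_t addr_lo;	/* DMA address, lower 32 bits */
	uint32_t addr_hi;	/* DMA address, upper 32 bits */
} __attribute__((packed, aligned(4)));

/* 16-byte ADMA2 descriptor: the same fields plus a reserved word;
 * the stride the controller expects once CQE is running. */
struct adma2_64_desc_16 {
	uint16_t cmd;
	uint16_t len;
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t reserved;	/* padding word present only at this stride */
} __attribute__((packed, aligned(4)));

/* Allocating at the wider stride up front means the driver can switch
 * desc_sz between 12 and 16 without reallocating the ADMA table. */
_Static_assert(sizeof(struct adma2_64_desc_12) == 12, "12-byte stride");
_Static_assert(sizeof(struct adma2_64_desc_16) == 16, "16-byte stride");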
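Usage note: the CQE path is opted into per board via the supports-cqe device-tree property on the SDHCI node, checked with of_property_read_bool() in probe above. Boards whose DT lacks the property keep the plain sdhci_add_host() path and are unaffected by this change.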