Diffstat (limited to 'drivers/mmc')
47 files changed, 3870 insertions, 922 deletions
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index 42e89060cd41..2f38a7ad07e0 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -14,7 +14,7 @@ config PWRSEQ_EMMC config PWRSEQ_SD8787 tristate "HW reset support for SD8787 BT + Wifi module" - depends on OF && (MWIFIEX || BT_MRVL_SDIO) + depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO) help This selects hardware reset support for the SD8787 BT + Wifi module. By default this option is set to n. diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index a0b9102c4c6e..c35b5b08bb33 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1371,6 +1371,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, if (brq->data.blocks > 1) { /* + * Some SD cards in SPI mode return a CRC error or even lock up + * completely when trying to read the last block using a + * multiblock read command. + */ + if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) && + (blk_rq_pos(req) + blk_rq_sectors(req) == + get_capacity(md->disk))) + brq->data.blocks--; + + /* * After a read error, we redo the request one sector * at a time in order to accurately determine which * sectors can be read successfully. @@ -2698,7 +2708,7 @@ static int mmc_add_disk(struct mmc_blk_data *md) int ret; struct mmc_card *card = md->queue.card; - device_add_disk(md->parent, md->disk); + device_add_disk(md->parent, md->disk, NULL); md->force_ro.show = force_ro_show; md->force_ro.store = force_ro_store; sysfs_attr_init(&md->force_ro.attr); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index abf9e884386c..f57f5de54206 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host) host->caps |= MMC_CAP_NEEDS_POLL; ret = mmc_gpiod_request_cd(host, "cd", 0, true, - cd_debounce_delay_ms, + cd_debounce_delay_ms * 1000, &cd_gpio_invert); if (!ret) dev_info(host->parent, "Got CD GPIO\n"); diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index a8b9fee4d62a..ece34c734693 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c @@ -40,17 +40,21 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq, struct gpio_descs *reset_gpios = pwrseq->reset_gpios; if (!IS_ERR(reset_gpios)) { - int i, *values; + unsigned long *values; int nvalues = reset_gpios->ndescs; - values = kmalloc_array(nvalues, sizeof(int), GFP_KERNEL); + values = bitmap_alloc(nvalues, GFP_KERNEL); if (!values) return; - for (i = 0; i < nvalues; i++) - values[i] = value; + if (value) + bitmap_fill(values, nvalues); + else + bitmap_zero(values, nvalues); + + gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc, + reset_gpios->info, values); - gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc, values); kfree(values); } } diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 2a833686784b..86803a3a04dc 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, if (debounce) { ret = gpiod_set_debounce(desc, debounce); if (ret < 0) - ctx->cd_debounce_delay_ms = debounce; + ctx->cd_debounce_delay_ms = debounce / 1000; } if (gpio_invert) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 694d0828215d..1b58739d9744 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -34,6 +34,16 @@ config 
MMC_QCOM_DML if unsure, say N. +config MMC_STM32_SDMMC + bool "STMicroelectronics STM32 SDMMC Controller" + depends on MMC_ARMMMCI + default y + help + This selects the STMicroelectronics STM32 SDMMC host controller. + If you have a STM32 sdmmc host with internal DMA say Y here. + + If unsure, say N. + config MMC_PXA tristate "Intel PXA25x/26x/27x Multimedia Card Interface support" depends on ARCH_PXA @@ -345,6 +355,7 @@ config MMC_SDHCI_IPROC tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller" depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST depends on MMC_SDHCI_PLTFM + depends on OF || ACPI default ARCH_BCM_IPROC select MMC_SDHCI_IO_ACCESSORS help @@ -592,6 +603,19 @@ config MMC_SDRICOH_CS To compile this driver as a module, choose M here: the module will be called sdricoh_cs. +config MMC_SDHCI_SPRD + tristate "Spreadtrum SDIO host Controller" + depends on ARCH_SPRD + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + help + This selects the SDIO Host Controller in Spreadtrum + SoCs, this driver supports R11(IP version: R11P0). + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + config MMC_TMIO_CORE tristate @@ -622,14 +646,24 @@ config MMC_SDHI_SYS_DMAC config MMC_SDHI_INTERNAL_DMAC tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering" - depends on ARM64 || COMPILE_TEST + depends on ARM64 || ARCH_R8A77470 || COMPILE_TEST depends on MMC_SDHI - default MMC_SDHI if ARM64 + default MMC_SDHI if (ARM64 || ARCH_R8A77470) help This provides DMA support for SDHI SD/SDIO controllers using on-chip bus mastering. This supports the controllers found in arm64 based SoCs. +config MMC_UNIPHIER + tristate "UniPhier SD/eMMC Host Controller support" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on OF + select MMC_TMIO_CORE + help + This provides support for the SD/eMMC controller found in + UniPhier SoCs. The eMMC variant of this controller is used + only for 32-bit SoCs. + config MMC_CB710 tristate "ENE CB710 MMC/SD Interface support" depends on PCI @@ -772,7 +806,7 @@ config MMC_SH_MMCIF config MMC_JZ4740 tristate "Ingenic JZ47xx SD/Multimedia Card Interface support" - depends on MACH_JZ4740 || MACH_JZ4780 + depends on MIPS help This selects support for the SD/MMC controller on Ingenic JZ4740, JZ4750, JZ4770 and JZ4780 SoCs. 
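The block.c hunk above works around SD cards that, in SPI mode, return a CRC error or even lock up when a multi-block read covers the card's very last sector: the read is shortened by one block so the final sector is never requested with a multi-block command. A minimal sketch of that check, using the kernel helpers visible in the hunk (the standalone helper below is hypothetical, not the in-tree code):

	/* Hypothetical helper mirroring the test added in mmc_blk_data_prep() */
	static bool mmc_spi_read_hits_last_sector(struct mmc_card *card,
						  struct request *req,
						  sector_t capacity)
	{
		return mmc_host_is_spi(card->host) &&
		       rq_data_dir(req) == READ &&
		       blk_rq_pos(req) + blk_rq_sectors(req) == capacity;
	}

	/* Caller side, as in the hunk: only relevant for multi-block reads */
	if (brq->data.blocks > 1 &&
	    mmc_spi_read_hits_last_sector(card, req, get_capacity(md->disk)))
		brq->data.blocks--;

The shortened request completes with one sector outstanding, which is then re-issued and read with a single-block command.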
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index ce8398e6f2c0..720d37777098 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_MMC_ARMMMCI) += armmmci.o armmmci-y := mmci.o armmmci-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o +armmmci-$(CONFIG_MMC_STM32_SDMMC) += mmci_stm32_sdmmc.o obj-$(CONFIG_MMC_PXA) += pxamci.o obj-$(CONFIG_MMC_MXC) += mxcmmc.o obj-$(CONFIG_MMC_MXS) += mxs-mmc.o @@ -42,6 +43,7 @@ obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o obj-$(CONFIG_MMC_SDHI_INTERNAL_DMAC) += renesas_sdhi_internal_dmac.o +obj-$(CONFIG_MMC_UNIPHIER) += uniphier-sd.o obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o octeon-mmc-objs := cavium.o cavium-octeon.o @@ -91,6 +93,7 @@ obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o +obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o obj-$(CONFIG_MMC_CQHCI) += cqhci.o ifeq ($(CONFIG_CB710_DEBUG),y) diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index ab47b018716a..d46c3439b508 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -253,6 +253,8 @@ static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing) if (timing == MMC_TIMING_MMC_HS400) { dqs |= DATA_STROBE_EN; strobe = DQS_CTRL_RD_DELAY(strobe, priv->dqs_delay); + } else if (timing == MMC_TIMING_UHS_SDR104) { + dqs &= 0xffffff00; } else { dqs &= ~DATA_STROBE_EN; } @@ -312,6 +314,15 @@ static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) if (ios->bus_width == MMC_BUS_WIDTH_8) wanted <<= 1; break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + clksel = (priv->sdr_timing & 0xfff8ffff) | + (priv->ciu_div << 16); + break; + case MMC_TIMING_UHS_DDR50: + clksel = (priv->ddr_timing & 0xfff8ffff) | + (priv->ciu_div << 16); + break; default: clksel = priv->sdr_timing; } diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c index f9b333ff259e..bc51cef47c47 100644 --- a/drivers/mmc/host/dw_mmc-hi3798cv200.c +++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c @@ -23,6 +23,12 @@ struct hi3798cv200_priv { struct clk *drive_clk; }; +static unsigned long dw_mci_hi3798cv200_caps[] = { + MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23 +}; + static void dw_mci_hi3798cv200_set_ios(struct dw_mci *host, struct mmc_ios *ios) { struct hi3798cv200_priv *priv = host->priv; @@ -160,6 +166,8 @@ disable_sample_clk: } static const struct dw_mci_drv_data hi3798cv200_data = { + .caps = dw_mci_hi3798cv200_caps, + .num_caps = ARRAY_SIZE(dw_mci_hi3798cv200_caps), .init = dw_mci_hi3798cv200_init, .set_ios = dw_mci_hi3798cv200_set_ios, .execute_tuning = dw_mci_hi3798cv200_execute_tuning, diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 993386c9ea50..0c1efd5100b7 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -115,7 +115,7 @@ enum jz4740_mmc_version { JZ_MMC_JZ4740, - JZ_MMC_JZ4750, + JZ_MMC_JZ4725B, JZ_MMC_JZ4780, }; @@ -176,7 +176,7 @@ struct jz4740_mmc_host { static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host, uint32_t val) { - if (host->version >= JZ_MMC_JZ4750) + if (host->version >= JZ_MMC_JZ4725B) return writel(val, host->base + JZ_REG_MMC_IMASK); else return 
writew(val, host->base + JZ_REG_MMC_IMASK); @@ -1012,6 +1012,7 @@ static void jz4740_mmc_free_gpios(struct platform_device *pdev) static const struct of_device_id jz4740_mmc_of_match[] = { { .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 }, + { .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B }, { .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 }, {}, }; diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 2cfec33178c1..abe253c262a2 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -294,7 +294,7 @@ static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: vdd = 0; - /* fall-through: */ + /* fall through */ case MMC_POWER_UP: if (!IS_ERR(mmc->supply.vmmc)) { host->error = mmc_regulator_set_ocr(mmc, diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 1841d250e9e2..82bab35fff41 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -28,8 +28,7 @@ #include <linux/amba/bus.h> #include <linux/clk.h> #include <linux/scatterlist.h> -#include <linux/gpio.h> -#include <linux/of_gpio.h> +#include <linux/of.h> #include <linux/regulator/consumer.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> @@ -37,6 +36,7 @@ #include <linux/pm_runtime.h> #include <linux/types.h> #include <linux/pinctrl/consumer.h> +#include <linux/reset.h> #include <asm/div64.h> #include <asm/io.h> @@ -46,41 +46,77 @@ #define DRIVER_NAME "mmci-pl18x" +#ifdef CONFIG_DMA_ENGINE +void mmci_variant_init(struct mmci_host *host); +#else +static inline void mmci_variant_init(struct mmci_host *host) {} +#endif + +#ifdef CONFIG_MMC_STM32_SDMMC +void sdmmc_variant_init(struct mmci_host *host); +#else +static inline void sdmmc_variant_init(struct mmci_host *host) {} +#endif + static unsigned int fmax = 515633; static struct variant_data variant_arm = { .fifosize = 16 * 4, .fifohalfsize = 8 * 4, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 16, + .datactrl_blocksz = 11, + .datactrl_dpsm_enable = MCI_DPSM_ENABLE, .pwrreg_powerup = MCI_PWR_UP, .f_max = 100000000, .reversed_irq_handling = true, .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, .start_err = MCI_STARTBITERR, .opendrain = MCI_ROD, + .init = mmci_variant_init, }; static struct variant_data variant_arm_extended_fifo = { .fifosize = 128 * 4, .fifohalfsize = 64 * 4, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 16, + .datactrl_blocksz = 11, + .datactrl_dpsm_enable = MCI_DPSM_ENABLE, .pwrreg_powerup = MCI_PWR_UP, .f_max = 100000000, .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, .start_err = MCI_STARTBITERR, .opendrain = MCI_ROD, + .init = mmci_variant_init, }; static struct variant_data variant_arm_extended_fifo_hwfc = { .fifosize = 128 * 4, .fifohalfsize = 64 * 4, .clkreg_enable = MCI_ARM_HWFCEN, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 16, + .datactrl_blocksz = 11, + .datactrl_dpsm_enable = MCI_DPSM_ENABLE, .pwrreg_powerup = MCI_PWR_UP, .f_max = 100000000, .mmcimask1 = true, + .irq_pio_mask = 
MCI_IRQ_PIO_MASK, .start_err = MCI_STARTBITERR, .opendrain = MCI_ROD, + .init = mmci_variant_init, }; static struct variant_data variant_u300 = { @@ -88,7 +124,13 @@ static struct variant_data variant_u300 = { .fifohalfsize = 8 * 4, .clkreg_enable = MCI_ST_U300_HWFCEN, .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 16, + .datactrl_blocksz = 11, + .datactrl_dpsm_enable = MCI_DPSM_ENABLE, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .pwrreg_powerup = MCI_PWR_ON, @@ -97,8 +139,10 @@ static struct variant_data variant_u300 = { .pwrreg_clkgate = true, .pwrreg_nopower = true, .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, .start_err = MCI_STARTBITERR, .opendrain = MCI_OD, + .init = mmci_variant_init, }; static struct variant_data variant_nomadik = { @@ -106,7 +150,13 @@ static struct variant_data variant_nomadik = { .fifohalfsize = 8 * 4, .clkreg = MCI_CLK_ENABLE, .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 24, + .datactrl_blocksz = 11, + .datactrl_dpsm_enable = MCI_DPSM_ENABLE, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -116,8 +166,10 @@ static struct variant_data variant_nomadik = { .pwrreg_clkgate = true, .pwrreg_nopower = true, .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, .start_err = MCI_STARTBITERR, .opendrain = MCI_OD, + .init = mmci_variant_init, }; static struct variant_data variant_ux500 = { @@ -127,7 +179,13 @@ static struct variant_data variant_ux500 = { .clkreg_enable = MCI_ST_UX500_HWFCEN, .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 24, + .datactrl_blocksz = 11, + .datactrl_dpsm_enable = MCI_DPSM_ENABLE, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -141,8 +199,10 @@ static struct variant_data variant_ux500 = { .busy_detect_mask = MCI_ST_BUSYENDMASK, .pwrreg_nopower = true, .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, .start_err = MCI_STARTBITERR, .opendrain = MCI_OD, + .init = mmci_variant_init, }; static struct variant_data variant_ux500v2 = { @@ -152,8 +212,14 @@ static struct variant_data variant_ux500v2 = { .clkreg_enable = MCI_ST_UX500_HWFCEN, .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE, .datalength_bits = 24, + .datactrl_blocksz = 11, + .datactrl_dpsm_enable = MCI_DPSM_ENABLE, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -168,8 +234,10 @@ static struct variant_data variant_ux500v2 = { .busy_detect_mask = MCI_ST_BUSYENDMASK, .pwrreg_nopower = true, .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, .start_err = MCI_STARTBITERR, .opendrain = MCI_OD, + .init = mmci_variant_init, }; static struct variant_data variant_stm32 = { @@ -179,7 +247,14 @@ 
static struct variant_data variant_stm32 = { .clkreg_enable = MCI_ST_UX500_HWFCEN, .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .irq_pio_mask = MCI_IRQ_PIO_MASK, .datalength_bits = 24, + .datactrl_blocksz = 11, + .datactrl_dpsm_enable = MCI_DPSM_ENABLE, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -187,6 +262,26 @@ static struct variant_data variant_stm32 = { .f_max = 48000000, .pwrreg_clkgate = true, .pwrreg_nopower = true, + .init = mmci_variant_init, +}; + +static struct variant_data variant_stm32_sdmmc = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, + .f_max = 208000000, + .stm32_clkdiv = true, + .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC, + .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC, + .cmdreg_srsp = MCI_CPSM_STM32_SRSP, + .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS, + .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK, + .datactrl_first = true, + .datacnt_useless = true, + .datalength_bits = 25, + .datactrl_blocksz = 14, + .stm32_idmabsize_mask = GENMASK(12, 5), + .init = sdmmc_variant_init, }; static struct variant_data variant_qcom = { @@ -197,15 +292,22 @@ static struct variant_data variant_qcom = { MCI_QCOM_CLK_SELECT_IN_FBCLK, .clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8, .datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, .data_cmd_enable = MCI_CPSM_QCOM_DATCMD, .blksz_datactrl4 = true, .datalength_bits = 24, + .datactrl_blocksz = 11, + .datactrl_dpsm_enable = MCI_DPSM_ENABLE, .pwrreg_powerup = MCI_PWR_UP, .f_max = 208000000, .explicit_mclk_control = true, .qcom_fifo = true, .qcom_dml = true, .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, .start_err = MCI_STARTBITERR, .opendrain = MCI_ROD, .init = qcom_variant_init, @@ -226,24 +328,6 @@ static int mmci_card_busy(struct mmc_host *mmc) return busy; } -/* - * Validate mmc prerequisites - */ -static int mmci_validate_data(struct mmci_host *host, - struct mmc_data *data) -{ - if (!data) - return 0; - - if (!is_power_of_2(data->blksz)) { - dev_err(mmc_dev(host->mmc), - "unsupported block size (%d bytes)\n", data->blksz); - return -EINVAL; - } - - return 0; -} - static void mmci_reg_delay(struct mmci_host *host) { /* @@ -262,7 +346,7 @@ static void mmci_reg_delay(struct mmci_host *host) /* * This must be called with host->lock held */ -static void mmci_write_clkreg(struct mmci_host *host, u32 clk) +void mmci_write_clkreg(struct mmci_host *host, u32 clk) { if (host->clk_reg != clk) { host->clk_reg = clk; @@ -273,7 +357,7 @@ static void mmci_write_clkreg(struct mmci_host *host, u32 clk) /* * This must be called with host->lock held */ -static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) +void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) { if (host->pwr_reg != pwr) { host->pwr_reg = pwr; @@ -357,6 +441,135 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) mmci_write_clkreg(host, clk); } +void mmci_dma_release(struct mmci_host *host) +{ + if (host->ops && host->ops->dma_release) + host->ops->dma_release(host); + + host->use_dma = false; +} + +void mmci_dma_setup(struct mmci_host *host) +{ + if (!host->ops || 
!host->ops->dma_setup) + return; + + if (host->ops->dma_setup(host)) + return; + + /* initialize pre request cookie */ + host->next_cookie = 1; + + host->use_dma = true; +} + +/* + * Validate mmc prerequisites + */ +static int mmci_validate_data(struct mmci_host *host, + struct mmc_data *data) +{ + if (!data) + return 0; + + if (!is_power_of_2(data->blksz)) { + dev_err(mmc_dev(host->mmc), + "unsupported block size (%d bytes)\n", data->blksz); + return -EINVAL; + } + + if (host->ops && host->ops->validate_data) + return host->ops->validate_data(host, data); + + return 0; +} + +int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next) +{ + int err; + + if (!host->ops || !host->ops->prep_data) + return 0; + + err = host->ops->prep_data(host, data, next); + + if (next && !err) + data->host_cookie = ++host->next_cookie < 0 ? + 1 : host->next_cookie; + + return err; +} + +void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data, + int err) +{ + if (host->ops && host->ops->unprep_data) + host->ops->unprep_data(host, data, err); + + data->host_cookie = 0; +} + +void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) +{ + WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie); + + if (host->ops && host->ops->get_next_data) + host->ops->get_next_data(host, data); +} + +int mmci_dma_start(struct mmci_host *host, unsigned int datactrl) +{ + struct mmc_data *data = host->data; + int ret; + + if (!host->use_dma) + return -EINVAL; + + ret = mmci_prep_data(host, data, false); + if (ret) + return ret; + + if (!host->ops || !host->ops->dma_start) + return -EINVAL; + + /* Okay, go for it. */ + dev_vdbg(mmc_dev(host->mmc), + "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", + data->sg_len, data->blksz, data->blocks, data->flags); + + host->ops->dma_start(host, &datactrl); + + /* Trigger the DMA transfer */ + mmci_write_datactrlreg(host, datactrl); + + /* + * Let the MMCI say when the data is ended and it's time + * to fire next DMA request. When that happens, MMCI will + * call mmci_data_end() + */ + writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, + host->base + MMCIMASK0); + return 0; +} + +void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) +{ + if (!host->use_dma) + return; + + if (host->ops && host->ops->dma_finalize) + host->ops->dma_finalize(host, data); +} + +void mmci_dma_error(struct mmci_host *host) +{ + if (!host->use_dma) + return; + + if (host->ops && host->ops->dma_error) + host->ops->dma_error(host); +} + static void mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) { @@ -378,7 +591,7 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) if (host->singleirq) { unsigned int mask0 = readl(base + MMCIMASK0); - mask0 &= ~MCI_IRQ1MASK; + mask0 &= ~variant->irq_pio_mask; mask0 |= mask; writel(mask0, base + MMCIMASK0); @@ -415,31 +628,50 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) * no custom DMA interfaces are supported. 
*/ #ifdef CONFIG_DMA_ENGINE -static void mmci_dma_setup(struct mmci_host *host) +struct mmci_dmae_next { + struct dma_async_tx_descriptor *desc; + struct dma_chan *chan; +}; + +struct mmci_dmae_priv { + struct dma_chan *cur; + struct dma_chan *rx_channel; + struct dma_chan *tx_channel; + struct dma_async_tx_descriptor *desc_current; + struct mmci_dmae_next next_data; +}; + +int mmci_dmae_setup(struct mmci_host *host) { const char *rxname, *txname; + struct mmci_dmae_priv *dmae; - host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); - host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); + dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL); + if (!dmae) + return -ENOMEM; - /* initialize pre request cookie */ - host->next_data.cookie = 1; + host->dma_priv = dmae; + + dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), + "rx"); + dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), + "tx"); /* * If only an RX channel is specified, the driver will * attempt to use it bidirectionally, however if it is * is specified but cannot be located, DMA will be disabled. */ - if (host->dma_rx_channel && !host->dma_tx_channel) - host->dma_tx_channel = host->dma_rx_channel; + if (dmae->rx_channel && !dmae->tx_channel) + dmae->tx_channel = dmae->rx_channel; - if (host->dma_rx_channel) - rxname = dma_chan_name(host->dma_rx_channel); + if (dmae->rx_channel) + rxname = dma_chan_name(dmae->rx_channel); else rxname = "none"; - if (host->dma_tx_channel) - txname = dma_chan_name(host->dma_tx_channel); + if (dmae->tx_channel) + txname = dma_chan_name(dmae->tx_channel); else txname = "none"; @@ -450,66 +682,84 @@ static void mmci_dma_setup(struct mmci_host *host) * Limit the maximum segment size in any SG entry according to * the parameters of the DMA engine device. */ - if (host->dma_tx_channel) { - struct device *dev = host->dma_tx_channel->device->dev; + if (dmae->tx_channel) { + struct device *dev = dmae->tx_channel->device->dev; unsigned int max_seg_size = dma_get_max_seg_size(dev); if (max_seg_size < host->mmc->max_seg_size) host->mmc->max_seg_size = max_seg_size; } - if (host->dma_rx_channel) { - struct device *dev = host->dma_rx_channel->device->dev; + if (dmae->rx_channel) { + struct device *dev = dmae->rx_channel->device->dev; unsigned int max_seg_size = dma_get_max_seg_size(dev); if (max_seg_size < host->mmc->max_seg_size) host->mmc->max_seg_size = max_seg_size; } - if (host->ops && host->ops->dma_setup) - host->ops->dma_setup(host); + if (!dmae->tx_channel || !dmae->rx_channel) { + mmci_dmae_release(host); + return -EINVAL; + } + + return 0; } /* * This is used in or so inline it * so it can be discarded. 
*/ -static inline void mmci_dma_release(struct mmci_host *host) +void mmci_dmae_release(struct mmci_host *host) { - if (host->dma_rx_channel) - dma_release_channel(host->dma_rx_channel); - if (host->dma_tx_channel) - dma_release_channel(host->dma_tx_channel); - host->dma_rx_channel = host->dma_tx_channel = NULL; -} + struct mmci_dmae_priv *dmae = host->dma_priv; -static void mmci_dma_data_error(struct mmci_host *host) -{ - dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); - dmaengine_terminate_all(host->dma_current); - host->dma_in_progress = false; - host->dma_current = NULL; - host->dma_desc_current = NULL; - host->data->host_cookie = 0; + if (dmae->rx_channel) + dma_release_channel(dmae->rx_channel); + if (dmae->tx_channel) + dma_release_channel(dmae->tx_channel); + dmae->rx_channel = dmae->tx_channel = NULL; } static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { + struct mmci_dmae_priv *dmae = host->dma_priv; struct dma_chan *chan; if (data->flags & MMC_DATA_READ) - chan = host->dma_rx_channel; + chan = dmae->rx_channel; else - chan = host->dma_tx_channel; + chan = dmae->tx_channel; dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); } -static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) +void mmci_dmae_error(struct mmci_host *host) { + struct mmci_dmae_priv *dmae = host->dma_priv; + + if (!dma_inprogress(host)) + return; + + dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); + dmaengine_terminate_all(dmae->cur); + host->dma_in_progress = false; + dmae->cur = NULL; + dmae->desc_current = NULL; + host->data->host_cookie = 0; + + mmci_dma_unmap(host, host->data); +} + +void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data) +{ + struct mmci_dmae_priv *dmae = host->dma_priv; u32 status; int i; + if (!dma_inprogress(host)) + return; + /* Wait up to 1ms for the DMA to complete */ for (i = 0; ; i++) { status = readl(host->base + MMCISTATUS); @@ -525,13 +775,12 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) * contiguous buffers. On TX, we'll get a FIFO underrun error. */ if (status & MCI_RXDATAAVLBLMASK) { - mmci_dma_data_error(host); + mmci_dma_error(host); if (!data->error) data->error = -EIO; - } - - if (!data->host_cookie) + } else if (!data->host_cookie) { mmci_dma_unmap(host, data); + } /* * Use of DMA with scatter-gather is impossible. 
@@ -543,15 +792,16 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) } host->dma_in_progress = false; - host->dma_current = NULL; - host->dma_desc_current = NULL; + dmae->cur = NULL; + dmae->desc_current = NULL; } /* prepares DMA channel and DMA descriptor, returns non-zero on failure */ -static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, +static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, struct dma_chan **dma_chan, struct dma_async_tx_descriptor **dma_desc) { + struct mmci_dmae_priv *dmae = host->dma_priv; struct variant_data *variant = host->variant; struct dma_slave_config conf = { .src_addr = host->phybase + MMCIFIFO, @@ -570,10 +820,10 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, if (data->flags & MMC_DATA_READ) { conf.direction = DMA_DEV_TO_MEM; - chan = host->dma_rx_channel; + chan = dmae->rx_channel; } else { conf.direction = DMA_MEM_TO_DEV; - chan = host->dma_tx_channel; + chan = dmae->tx_channel; } /* If there's no DMA channel, fall back to PIO */ @@ -610,160 +860,137 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, return -ENOMEM; } -static inline int mmci_dma_prep_data(struct mmci_host *host, - struct mmc_data *data) +int mmci_dmae_prep_data(struct mmci_host *host, + struct mmc_data *data, + bool next) { + struct mmci_dmae_priv *dmae = host->dma_priv; + struct mmci_dmae_next *nd = &dmae->next_data; + + if (!host->use_dma) + return -EINVAL; + + if (next) + return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc); /* Check if next job is already prepared. */ - if (host->dma_current && host->dma_desc_current) + if (dmae->cur && dmae->desc_current) return 0; /* No job were prepared thus do it now. */ - return __mmci_dma_prep_data(host, data, &host->dma_current, - &host->dma_desc_current); -} - -static inline int mmci_dma_prep_next(struct mmci_host *host, - struct mmc_data *data) -{ - struct mmci_host_next *nd = &host->next_data; - return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); + return _mmci_dmae_prep_data(host, data, &dmae->cur, + &dmae->desc_current); } -static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl) { - int ret; + struct mmci_dmae_priv *dmae = host->dma_priv; struct mmc_data *data = host->data; - ret = mmci_dma_prep_data(host, host->data); - if (ret) - return ret; - - /* Okay, go for it. */ - dev_vdbg(mmc_dev(host->mmc), - "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", - data->sg_len, data->blksz, data->blocks, data->flags); host->dma_in_progress = true; - dmaengine_submit(host->dma_desc_current); - dma_async_issue_pending(host->dma_current); + dmaengine_submit(dmae->desc_current); + dma_async_issue_pending(dmae->cur); if (host->variant->qcom_dml) dml_start_xfer(host, data); - datactrl |= MCI_DPSM_DMAENABLE; + *datactrl |= MCI_DPSM_DMAENABLE; - /* Trigger the DMA transfer */ - mmci_write_datactrlreg(host, datactrl); - - /* - * Let the MMCI say when the data is ended and it's time - * to fire next DMA request. 
When that happens, MMCI will - * call mmci_data_end() - */ - writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, - host->base + MMCIMASK0); return 0; } -static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) +void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data) { - struct mmci_host_next *next = &host->next_data; - - WARN_ON(data->host_cookie && data->host_cookie != next->cookie); - WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan)); - - host->dma_desc_current = next->dma_desc; - host->dma_current = next->dma_chan; - next->dma_desc = NULL; - next->dma_chan = NULL; -} + struct mmci_dmae_priv *dmae = host->dma_priv; + struct mmci_dmae_next *next = &dmae->next_data; -static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq) -{ - struct mmci_host *host = mmc_priv(mmc); - struct mmc_data *data = mrq->data; - struct mmci_host_next *nd = &host->next_data; - - if (!data) + if (!host->use_dma) return; - BUG_ON(data->host_cookie); + WARN_ON(!data->host_cookie && (next->desc || next->chan)); - if (mmci_validate_data(host, data)) - return; - - if (!mmci_dma_prep_next(host, data)) - data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; + dmae->desc_current = next->desc; + dmae->cur = next->chan; + next->desc = NULL; + next->chan = NULL; } -static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, - int err) +void mmci_dmae_unprep_data(struct mmci_host *host, + struct mmc_data *data, int err) + { - struct mmci_host *host = mmc_priv(mmc); - struct mmc_data *data = mrq->data; + struct mmci_dmae_priv *dmae = host->dma_priv; - if (!data || !data->host_cookie) + if (!host->use_dma) return; mmci_dma_unmap(host, data); if (err) { - struct mmci_host_next *next = &host->next_data; + struct mmci_dmae_next *next = &dmae->next_data; struct dma_chan *chan; if (data->flags & MMC_DATA_READ) - chan = host->dma_rx_channel; + chan = dmae->rx_channel; else - chan = host->dma_tx_channel; + chan = dmae->tx_channel; dmaengine_terminate_all(chan); - if (host->dma_desc_current == next->dma_desc) - host->dma_desc_current = NULL; + if (dmae->desc_current == next->desc) + dmae->desc_current = NULL; - if (host->dma_current == next->dma_chan) { + if (dmae->cur == next->chan) { host->dma_in_progress = false; - host->dma_current = NULL; + dmae->cur = NULL; } - next->dma_desc = NULL; - next->dma_chan = NULL; - data->host_cookie = 0; + next->desc = NULL; + next->chan = NULL; } } -#else -/* Blank functions if the DMA engine is not available */ -static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) -{ -} -static inline void mmci_dma_setup(struct mmci_host *host) -{ -} +static struct mmci_host_ops mmci_variant_ops = { + .prep_data = mmci_dmae_prep_data, + .unprep_data = mmci_dmae_unprep_data, + .get_next_data = mmci_dmae_get_next_data, + .dma_setup = mmci_dmae_setup, + .dma_release = mmci_dmae_release, + .dma_start = mmci_dmae_start, + .dma_finalize = mmci_dmae_finalize, + .dma_error = mmci_dmae_error, +}; -static inline void mmci_dma_release(struct mmci_host *host) +void mmci_variant_init(struct mmci_host *host) { + host->ops = &mmci_variant_ops; } +#endif -static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) +static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq) { -} + struct mmci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; -static inline void mmci_dma_finalize(struct mmci_host *host, - struct mmc_data *data) -{ -} + if (!data) + return; 
-static inline void mmci_dma_data_error(struct mmci_host *host) -{ + WARN_ON(data->host_cookie); + + if (mmci_validate_data(host, data)) + return; + + mmci_prep_data(host, data, true); } -static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, + int err) { - return -ENOSYS; -} + struct mmci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; -#define mmci_pre_request NULL -#define mmci_post_request NULL + if (!data || !data->host_cookie) + return; -#endif + mmci_unprep_data(host, data, err); +} static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) { @@ -793,11 +1020,11 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) BUG_ON(1 << blksz_bits != data->blksz); if (variant->blksz_datactrl16) - datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); + datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16); else if (variant->blksz_datactrl4) - datactrl = MCI_DPSM_ENABLE | (data->blksz << 4); + datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4); else - datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; + datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4; if (data->flags & MMC_DATA_READ) datactrl |= MCI_DPSM_DIRECTION; @@ -831,7 +1058,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) * Attempt to use DMA operation mode, if this * should fail, fall back to PIO mode */ - if (!mmci_dma_start_data(host, datactrl)) + if (!mmci_dma_start(host, datactrl)) return; /* IRQ mode, map the SG list for CPU reading/writing */ @@ -868,16 +1095,19 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", cmd->opcode, cmd->arg, cmd->flags); - if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { + if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) { writel(0, base + MMCICOMMAND); mmci_reg_delay(host); } - c |= cmd->opcode | MCI_CPSM_ENABLE; + c |= cmd->opcode | host->variant->cmdreg_cpsm_enable; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) - c |= MCI_CPSM_LONGRSP; - c |= MCI_CPSM_RESPONSE; + c |= host->variant->cmdreg_lrsp_crc; + else if (cmd->flags & MMC_RSP_CRC) + c |= host->variant->cmdreg_srsp_crc; + else + c |= host->variant->cmdreg_srsp; } if (/*interrupt*/0) c |= MCI_CPSM_INTERRUPT; @@ -895,21 +1125,22 @@ static void mmci_data_irq(struct mmci_host *host, struct mmc_data *data, unsigned int status) { + unsigned int status_err; + /* Make sure we have data to handle */ if (!data) return; /* First check for errors */ - if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT | - host->variant->start_err | - MCI_TXUNDERRUN | MCI_RXOVERRUN)) { + status_err = status & (host->variant->start_err | + MCI_DATACRCFAIL | MCI_DATATIMEOUT | + MCI_TXUNDERRUN | MCI_RXOVERRUN); + + if (status_err) { u32 remain, success; /* Terminate the DMA transfer */ - if (dma_inprogress(host)) { - mmci_dma_data_error(host); - mmci_dma_unmap(host, data); - } + mmci_dma_error(host); /* * Calculate how far we are into the transfer. Note that @@ -918,22 +1149,26 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, * can be as much as a FIFO-worth of data ahead. This * matters for FIFO overruns only. 
*/ - remain = readl(host->base + MMCIDATACNT); - success = data->blksz * data->blocks - remain; + if (!host->variant->datacnt_useless) { + remain = readl(host->base + MMCIDATACNT); + success = data->blksz * data->blocks - remain; + } else { + success = 0; + } dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", - status, success); - if (status & MCI_DATACRCFAIL) { + status_err, success); + if (status_err & MCI_DATACRCFAIL) { /* Last block was not successful */ success -= 1; data->error = -EILSEQ; - } else if (status & MCI_DATATIMEOUT) { + } else if (status_err & MCI_DATATIMEOUT) { data->error = -ETIMEDOUT; - } else if (status & MCI_STARTBITERR) { + } else if (status_err & MCI_STARTBITERR) { data->error = -ECOMM; - } else if (status & MCI_TXUNDERRUN) { + } else if (status_err & MCI_TXUNDERRUN) { data->error = -EIO; - } else if (status & MCI_RXOVERRUN) { + } else if (status_err & MCI_RXOVERRUN) { if (success > host->variant->fifosize) success -= host->variant->fifosize; else @@ -947,8 +1182,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); if (status & MCI_DATAEND || data->error) { - if (dma_inprogress(host)) - mmci_dma_finalize(host, data); + mmci_dma_finalize(host, data); + mmci_stop_data(host); if (!data->error) @@ -1055,16 +1290,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, if ((!sbc && !cmd->data) || cmd->error) { if (host->data) { /* Terminate the DMA transfer */ - if (dma_inprogress(host)) { - mmci_dma_data_error(host); - mmci_dma_unmap(host, host->data); - } + mmci_dma_error(host); + mmci_stop_data(host); } mmci_request_end(host, host->mrq); } else if (sbc) { mmci_start_command(host, host->mrq->cmd, 0); - } else if (!(cmd->data->flags & MMC_DATA_READ)) { + } else if (!host->variant->datactrl_first && + !(cmd->data->flags & MMC_DATA_READ)) { mmci_start_data(host, cmd->data); } } @@ -1264,7 +1498,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) if (status & host->mask1_reg) mmci_pio_irq(irq, dev_id); - status &= ~MCI_IRQ1MASK; + status &= ~host->variant->irq_pio_mask; } /* @@ -1328,7 +1562,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) if (mrq->data) mmci_get_next_data(host, mrq->data); - if (mrq->data && mrq->data->flags & MMC_DATA_READ) + if (mrq->data && + (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ)) mmci_start_data(host, mrq->data); if (mrq->sbc) @@ -1438,8 +1673,16 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) spin_lock_irqsave(&host->lock, flags); - mmci_set_clkreg(host, ios->clock); - mmci_write_pwrreg(host, pwr); + if (host->ops && host->ops->set_clkreg) + host->ops->set_clkreg(host, ios->clock); + else + mmci_set_clkreg(host, ios->clock); + + if (host->ops && host->ops->set_pwrreg) + host->ops->set_pwrreg(host, pwr); + else + mmci_write_pwrreg(host, pwr); + mmci_reg_delay(host); spin_unlock_irqrestore(&host->lock, flags); @@ -1518,6 +1761,12 @@ static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) host->pwr_reg_add |= MCI_ST_CMDDIREN; if (of_get_property(np, "st,sig-pin-fbclk", NULL)) host->pwr_reg_add |= MCI_ST_FBCLKEN; + if (of_get_property(np, "st,sig-dir", NULL)) + host->pwr_reg_add |= MCI_STM32_DIRPOL; + if (of_get_property(np, "st,neg-edge", NULL)) + host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE; + if (of_get_property(np, "st,use-ckin", NULL)) + host->clk_reg_add |= MCI_STM32_CLK_SELCKIN; if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) 
mmc->caps |= MMC_CAP_MMC_HIGHSPEED; @@ -1644,6 +1893,8 @@ static int mmci_probe(struct amba_device *dev, */ if (variant->st_clkdiv) mmc->f_min = DIV_ROUND_UP(host->mclk, 257); + else if (variant->stm32_clkdiv) + mmc->f_min = DIV_ROUND_UP(host->mclk, 2046); else if (variant->explicit_mclk_control) mmc->f_min = clk_round_rate(host->clk, 100000); else @@ -1665,6 +1916,12 @@ static int mmci_probe(struct amba_device *dev, dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); + host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL); + if (IS_ERR(host->rst)) { + ret = PTR_ERR(host->rst); + goto clk_disable; + } + /* Get regulators and the supported OCR mask */ ret = mmc_regulator_get_supply(mmc); if (ret) @@ -1675,13 +1932,6 @@ static int mmci_probe(struct amba_device *dev, else if (plat->ocr_mask) dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); - /* DT takes precedence over platform data. */ - if (!np) { - if (!plat->cd_invert) - mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; - mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - } - /* We support these capabilities. */ mmc->caps |= MMC_CAP_CMD23; @@ -1727,13 +1977,13 @@ static int mmci_probe(struct amba_device *dev, /* * Block size can be up to 2048 bytes, but must be a power of two. */ - mmc->max_blk_size = 1 << 11; + mmc->max_blk_size = 1 << variant->datactrl_blocksz; /* * Limit the number of blocks transferred so that we don't overflow * the maximum request size. */ - mmc->max_blk_count = mmc->max_req_size >> 11; + mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz; spin_lock_init(&host->lock); @@ -1749,30 +1999,16 @@ static int mmci_probe(struct amba_device *dev, * - not using DT but using a descriptor table, or * - using a table of descriptors ALONGSIDE DT, or * look up these descriptors named "cd" and "wp" right here, fail - * silently of these do not exist and proceed to try platform data + * silently of these do not exist */ if (!np) { ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); - if (ret < 0) { - if (ret == -EPROBE_DEFER) - goto clk_disable; - else if (gpio_is_valid(plat->gpio_cd)) { - ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0); - if (ret) - goto clk_disable; - } - } + if (ret == -EPROBE_DEFER) + goto clk_disable; ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); - if (ret < 0) { - if (ret == -EPROBE_DEFER) - goto clk_disable; - else if (gpio_is_valid(plat->gpio_wp)) { - ret = mmc_gpio_request_ro(mmc, plat->gpio_wp); - if (ret) - goto clk_disable; - } - } + if (ret == -EPROBE_DEFER) + goto clk_disable; } ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, @@ -1789,7 +2025,7 @@ static int mmci_probe(struct amba_device *dev, goto clk_disable; } - writel(MCI_IRQENABLE, host->base + MMCIMASK0); + writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0); amba_set_drvdata(dev, mmc); @@ -1876,7 +2112,8 @@ static void mmci_restore(struct mmci_host *host) writel(host->datactrl_reg, host->base + MMCIDATACTRL); writel(host->pwr_reg, host->base + MMCIPOWER); } - writel(MCI_IRQENABLE, host->base + MMCIMASK0); + writel(MCI_IRQENABLE | host->variant->start_err, + host->base + MMCIMASK0); mmci_reg_delay(host); spin_unlock_irqrestore(&host->lock, flags); @@ -1971,6 +2208,11 @@ static const struct amba_id mmci_ids[] = { .mask = 0x00ffffff, .data = &variant_stm32, }, + { + .id = 0x10153180, + .mask = 0xf0ffffff, + .data = &variant_stm32_sdmmc, + }, /* Qualcomm variants */ { .id = 0x00051180, diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 
517591d219e9..550dd3914461 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -23,6 +23,14 @@ #define MCI_ST_DATA31DIREN (1 << 5) #define MCI_ST_FBCLKEN (1 << 7) #define MCI_ST_DATA74DIREN (1 << 8) +/* + * The STM32 sdmmc does not have PWR_UP/OD/ROD + * and uses the power register for + */ +#define MCI_STM32_PWR_CYC 0x02 +#define MCI_STM32_VSWITCH BIT(2) +#define MCI_STM32_VSWITCHEN BIT(3) +#define MCI_STM32_DIRPOL BIT(4) #define MMCICLOCK 0x004 #define MCI_CLK_ENABLE (1 << 8) @@ -50,6 +58,19 @@ #define MCI_QCOM_CLK_SELECT_IN_FBCLK BIT(15) #define MCI_QCOM_CLK_SELECT_IN_DDR_MODE (BIT(14) | BIT(15)) +/* Modified on STM32 sdmmc */ +#define MCI_STM32_CLK_CLKDIV_MSK GENMASK(9, 0) +#define MCI_STM32_CLK_WIDEBUS_4 BIT(14) +#define MCI_STM32_CLK_WIDEBUS_8 BIT(15) +#define MCI_STM32_CLK_NEGEDGE BIT(16) +#define MCI_STM32_CLK_HWFCEN BIT(17) +#define MCI_STM32_CLK_DDR BIT(18) +#define MCI_STM32_CLK_BUSSPEED BIT(19) +#define MCI_STM32_CLK_SEL_MSK GENMASK(21, 20) +#define MCI_STM32_CLK_SELCK (0 << 20) +#define MCI_STM32_CLK_SELCKIN (1 << 20) +#define MCI_STM32_CLK_SELFBCK (2 << 20) + #define MMCIARGUMENT 0x008 /* The command register controls the Command Path State Machine (CPSM) */ @@ -72,6 +93,15 @@ #define MCI_CPSM_QCOM_CCSDISABLE BIT(15) #define MCI_CPSM_QCOM_AUTO_CMD19 BIT(16) #define MCI_CPSM_QCOM_AUTO_CMD21 BIT(21) +/* Command register in STM32 sdmmc versions */ +#define MCI_CPSM_STM32_CMDTRANS BIT(6) +#define MCI_CPSM_STM32_CMDSTOP BIT(7) +#define MCI_CPSM_STM32_WAITRESP_MASK GENMASK(9, 8) +#define MCI_CPSM_STM32_NORSP (0 << 8) +#define MCI_CPSM_STM32_SRSP_CRC (1 << 8) +#define MCI_CPSM_STM32_SRSP (2 << 8) +#define MCI_CPSM_STM32_LRSP_CRC (3 << 8) +#define MCI_CPSM_STM32_ENABLE BIT(12) #define MMCIRESPCMD 0x010 #define MMCIRESPONSE0 0x014 @@ -130,6 +160,8 @@ #define MCI_ST_SDIOIT (1 << 22) #define MCI_ST_CEATAEND (1 << 23) #define MCI_ST_CARDBUSY (1 << 24) +/* Extended status bits for the STM32 variants */ +#define MCI_STM32_BUSYD0 BIT(20) #define MMCICLEAR 0x038 #define MCI_CMDCRCFAILCLR (1 << 0) @@ -175,21 +207,45 @@ #define MCI_ST_SDIOITMASK (1 << 22) #define MCI_ST_CEATAENDMASK (1 << 23) #define MCI_ST_BUSYENDMASK (1 << 24) +/* Extended status bits for the STM32 variants */ +#define MCI_STM32_BUSYD0ENDMASK BIT(21) #define MMCIMASK1 0x040 #define MMCIFIFOCNT 0x048 #define MMCIFIFO 0x080 /* to 0x0bc */ +/* STM32 sdmmc registers for IDMA (Internal DMA) */ +#define MMCI_STM32_IDMACTRLR 0x050 +#define MMCI_STM32_IDMAEN BIT(0) +#define MMCI_STM32_IDMALLIEN BIT(1) + +#define MMCI_STM32_IDMABSIZER 0x054 +#define MMCI_STM32_IDMABNDT_SHIFT 5 +#define MMCI_STM32_IDMABNDT_MASK GENMASK(12, 5) + +#define MMCI_STM32_IDMABASE0R 0x058 + +#define MMCI_STM32_IDMALAR 0x64 +#define MMCI_STM32_IDMALA_MASK GENMASK(13, 0) +#define MMCI_STM32_ABR BIT(29) +#define MMCI_STM32_ULS BIT(30) +#define MMCI_STM32_ULA BIT(31) + +#define MMCI_STM32_IDMABAR 0x68 + #define MCI_IRQENABLE \ - (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \ - MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ - MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK) + (MCI_CMDCRCFAILMASK | MCI_DATACRCFAILMASK | MCI_CMDTIMEOUTMASK | \ + MCI_DATATIMEOUTMASK | MCI_TXUNDERRUNMASK | MCI_RXOVERRUNMASK | \ + MCI_CMDRESPENDMASK | MCI_CMDSENTMASK) /* These interrupts are directed to IRQ1 when two IRQ lines are available */ -#define MCI_IRQ1MASK \ +#define MCI_IRQ_PIO_MASK \ (MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \ MCI_TXFIFOHALFEMPTYMASK) +#define MCI_IRQ_PIO_STM32_MASK \ + (MCI_RXFIFOHALFFULLMASK | 
MCI_TXFIFOHALFEMPTYMASK) + #define NR_SG 128 #define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain" @@ -204,6 +260,10 @@ struct mmci_host; * @clkreg_enable: enable value for MMCICLOCK register * @clkreg_8bit_bus_enable: enable value for 8 bit bus * @clkreg_neg_edge_enable: enable value for inverted data/cmd output + * @cmdreg_cpsm_enable: enable value for CPSM + * @cmdreg_lrsp_crc: enable value for long response with crc + * @cmdreg_srsp_crc: enable value for short response with crc + * @cmdreg_srsp: enable value for short response without crc * @datalength_bits: number of bits in the MMCIDATALENGTH register * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY * is asserted (likewise for RX) @@ -212,11 +272,17 @@ struct mmci_host; * @data_cmd_enable: enable value for data commands. * @st_sdio: enable ST specific SDIO logic * @st_clkdiv: true if using a ST-specific clock divider algorithm + * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm * @datactrl_mask_ddrmode: ddr mode mask in datactrl register. * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl * register * @datactrl_mask_sdio: SDIO enable mask in datactrl register + * @datactrl_blksz: block size in power of two + * @datactrl_dpsm_enable: enable value for DPSM + * @datactrl_first: true if data must be setup before send command + * @datacnt_useless: true if you could not use datacnt register to read + * remaining data * @pwrreg_powerup: power up value for MMCIPOWER register * @f_max: maximum clk frequency supported by the controller. * @signal_direction: input/out direction of bus signals can be indicated @@ -233,53 +299,75 @@ struct mmci_host; * @qcom_dml: enables qcom specific dma glue for dma transfers. * @reversed_irq_handling: handle data irq before cmd irq. * @mmcimask1: true if variant have a MMCIMASK1 register. + * @irq_pio_mask: bitmask used to manage interrupt pio transfert in mmcimask + * register * @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS * register. * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register + * @dma_lli: true if variant has dma link list feature. + * @stm32_idmabsize_mask: stm32 sdmmc idma buffer size. 
*/ struct variant_data { unsigned int clkreg; unsigned int clkreg_enable; unsigned int clkreg_8bit_bus_enable; unsigned int clkreg_neg_edge_enable; + unsigned int cmdreg_cpsm_enable; + unsigned int cmdreg_lrsp_crc; + unsigned int cmdreg_srsp_crc; + unsigned int cmdreg_srsp; unsigned int datalength_bits; unsigned int fifosize; unsigned int fifohalfsize; unsigned int data_cmd_enable; unsigned int datactrl_mask_ddrmode; unsigned int datactrl_mask_sdio; - bool st_sdio; - bool st_clkdiv; - bool blksz_datactrl16; - bool blksz_datactrl4; + unsigned int datactrl_blocksz; + unsigned int datactrl_dpsm_enable; + u8 datactrl_first:1; + u8 datacnt_useless:1; + u8 st_sdio:1; + u8 st_clkdiv:1; + u8 stm32_clkdiv:1; + u8 blksz_datactrl16:1; + u8 blksz_datactrl4:1; u32 pwrreg_powerup; u32 f_max; - bool signal_direction; - bool pwrreg_clkgate; - bool busy_detect; + u8 signal_direction:1; + u8 pwrreg_clkgate:1; + u8 busy_detect:1; u32 busy_dpsm_flag; u32 busy_detect_flag; u32 busy_detect_mask; - bool pwrreg_nopower; - bool explicit_mclk_control; - bool qcom_fifo; - bool qcom_dml; - bool reversed_irq_handling; - bool mmcimask1; + u8 pwrreg_nopower:1; + u8 explicit_mclk_control:1; + u8 qcom_fifo:1; + u8 qcom_dml:1; + u8 reversed_irq_handling:1; + u8 mmcimask1:1; + unsigned int irq_pio_mask; u32 start_err; u32 opendrain; + u8 dma_lli:1; + u32 stm32_idmabsize_mask; void (*init)(struct mmci_host *host); }; /* mmci variant callbacks */ struct mmci_host_ops { - void (*dma_setup)(struct mmci_host *host); -}; - -struct mmci_host_next { - struct dma_async_tx_descriptor *dma_desc; - struct dma_chan *dma_chan; - s32 cookie; + int (*validate_data)(struct mmci_host *host, struct mmc_data *data); + int (*prep_data)(struct mmci_host *host, struct mmc_data *data, + bool next); + void (*unprep_data)(struct mmci_host *host, struct mmc_data *data, + int err); + void (*get_next_data)(struct mmci_host *host, struct mmc_data *data); + int (*dma_setup)(struct mmci_host *host); + void (*dma_release)(struct mmci_host *host); + int (*dma_start)(struct mmci_host *host, unsigned int *datactrl); + void (*dma_finalize)(struct mmci_host *host, struct mmc_data *data); + void (*dma_error)(struct mmci_host *host); + void (*set_clkreg)(struct mmci_host *host, unsigned int desired); + void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr); }; struct mmci_host { @@ -290,7 +378,9 @@ struct mmci_host { struct mmc_data *data; struct mmc_host *mmc; struct clk *clk; - bool singleirq; + u8 singleirq:1; + + struct reset_control *rst; spinlock_t lock; @@ -301,10 +391,11 @@ struct mmci_host { u32 pwr_reg; u32 pwr_reg_add; u32 clk_reg; + u32 clk_reg_add; u32 datactrl_reg; u32 busy_status; u32 mask1_reg; - bool vqmmc_enabled; + u8 vqmmc_enabled:1; struct mmci_platform_data *plat; struct mmci_host_ops *ops; struct variant_data *variant; @@ -323,18 +414,25 @@ struct mmci_host { unsigned int size; int (*get_rx_fifocnt)(struct mmci_host *h, u32 status, int remain); -#ifdef CONFIG_DMA_ENGINE - /* DMA stuff */ - struct dma_chan *dma_current; - struct dma_chan *dma_rx_channel; - struct dma_chan *dma_tx_channel; - struct dma_async_tx_descriptor *dma_desc_current; - struct mmci_host_next next_data; - bool dma_in_progress; + u8 use_dma:1; + u8 dma_in_progress:1; + void *dma_priv; -#define dma_inprogress(host) ((host)->dma_in_progress) -#else -#define dma_inprogress(host) (0) -#endif + s32 next_cookie; }; +#define dma_inprogress(host) ((host)->dma_in_progress) + +void mmci_write_clkreg(struct mmci_host *host, u32 clk); +void mmci_write_pwrreg(struct mmci_host 
*host, u32 pwr); + +int mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, + bool next); +void mmci_dmae_unprep_data(struct mmci_host *host, struct mmc_data *data, + int err); +void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data); +int mmci_dmae_setup(struct mmci_host *host); +void mmci_dmae_release(struct mmci_host *host); +int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl); +void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data); +void mmci_dmae_error(struct mmci_host *host); diff --git a/drivers/mmc/host/mmci_qcom_dml.c b/drivers/mmc/host/mmci_qcom_dml.c index be3fab5db83f..25d0a75533ea 100644 --- a/drivers/mmc/host/mmci_qcom_dml.c +++ b/drivers/mmc/host/mmci_qcom_dml.c @@ -119,19 +119,23 @@ static int of_get_dml_pipe_index(struct device_node *np, const char *name) } /* Initialize the dml hardware connected to SD Card controller */ -static void qcom_dma_setup(struct mmci_host *host) +static int qcom_dma_setup(struct mmci_host *host) { u32 config; void __iomem *base; int consumer_id, producer_id; struct device_node *np = host->mmc->parent->of_node; + if (mmci_dmae_setup(host)) + return -EINVAL; + consumer_id = of_get_dml_pipe_index(np, "tx"); producer_id = of_get_dml_pipe_index(np, "rx"); if (producer_id < 0 || consumer_id < 0) { host->variant->qcom_dml = false; - return; + mmci_dmae_release(host); + return -EINVAL; } base = host->base + DML_OFFSET; @@ -175,10 +179,19 @@ static void qcom_dma_setup(struct mmci_host *host) /* Make sure dml initialization is finished */ mb(); + + return 0; } static struct mmci_host_ops qcom_variant_ops = { + .prep_data = mmci_dmae_prep_data, + .unprep_data = mmci_dmae_unprep_data, + .get_next_data = mmci_dmae_get_next_data, .dma_setup = qcom_dma_setup, + .dma_release = mmci_dmae_release, + .dma_start = mmci_dmae_start, + .dma_finalize = mmci_dmae_finalize, + .dma_error = mmci_dmae_error, }; void qcom_variant_init(struct mmci_host *host) diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c new file mode 100644 index 000000000000..cfbfc6f1048f --- /dev/null +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Ludovic.barre@st.com for STMicroelectronics. 
+ */ +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/reset.h> +#include <linux/scatterlist.h> +#include "mmci.h" + +#define SDMMC_LLI_BUF_LEN PAGE_SIZE +#define SDMMC_IDMA_BURST BIT(MMCI_STM32_IDMABNDT_SHIFT) + +struct sdmmc_lli_desc { + u32 idmalar; + u32 idmabase; + u32 idmasize; +}; + +struct sdmmc_priv { + dma_addr_t sg_dma; + void *sg_cpu; +}; + +int sdmmc_idma_validate_data(struct mmci_host *host, + struct mmc_data *data) +{ + struct scatterlist *sg; + int i; + + /* + * idma has constraints on idmabase & idmasize for each element + * excepted the last element which has no constraint on idmasize + */ + for_each_sg(data->sg, sg, data->sg_len - 1, i) { + if (!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32)) || + !IS_ALIGNED(sg_dma_len(data->sg), SDMMC_IDMA_BURST)) { + dev_err(mmc_dev(host->mmc), + "unaligned scatterlist: ofst:%x length:%d\n", + data->sg->offset, data->sg->length); + return -EINVAL; + } + } + + if (!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32))) { + dev_err(mmc_dev(host->mmc), + "unaligned last scatterlist: ofst:%x length:%d\n", + data->sg->offset, data->sg->length); + return -EINVAL; + } + + return 0; +} + +static int _sdmmc_idma_prep_data(struct mmci_host *host, + struct mmc_data *data) +{ + int n_elem; + + n_elem = dma_map_sg(mmc_dev(host->mmc), + data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + + if (!n_elem) { + dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n"); + return -EINVAL; + } + + return 0; +} + +static int sdmmc_idma_prep_data(struct mmci_host *host, + struct mmc_data *data, bool next) +{ + /* Check if job is already prepared. */ + if (!next && data->host_cookie == host->next_cookie) + return 0; + + return _sdmmc_idma_prep_data(host, data); +} + +static void sdmmc_idma_unprep_data(struct mmci_host *host, + struct mmc_data *data, int err) +{ + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); +} + +static int sdmmc_idma_setup(struct mmci_host *host) +{ + struct sdmmc_priv *idma; + + idma = devm_kzalloc(mmc_dev(host->mmc), sizeof(*idma), GFP_KERNEL); + if (!idma) + return -ENOMEM; + + host->dma_priv = idma; + + if (host->variant->dma_lli) { + idma->sg_cpu = dmam_alloc_coherent(mmc_dev(host->mmc), + SDMMC_LLI_BUF_LEN, + &idma->sg_dma, GFP_KERNEL); + if (!idma->sg_cpu) { + dev_err(mmc_dev(host->mmc), + "Failed to alloc IDMA descriptor\n"); + return -ENOMEM; + } + host->mmc->max_segs = SDMMC_LLI_BUF_LEN / + sizeof(struct sdmmc_lli_desc); + host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask; + } else { + host->mmc->max_segs = 1; + host->mmc->max_seg_size = host->mmc->max_req_size; + } + + return 0; +} + +static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl) + +{ + struct sdmmc_priv *idma = host->dma_priv; + struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu; + struct mmc_data *data = host->data; + struct scatterlist *sg; + int i; + + if (!host->variant->dma_lli || data->sg_len == 1) { + writel_relaxed(sg_dma_address(data->sg), + host->base + MMCI_STM32_IDMABASE0R); + writel_relaxed(MMCI_STM32_IDMAEN, + host->base + MMCI_STM32_IDMACTRLR); + return 0; + } + + for_each_sg(data->sg, sg, data->sg_len, i) { + desc[i].idmalar = (i + 1) * sizeof(struct sdmmc_lli_desc); + desc[i].idmalar |= MMCI_STM32_ULA | MMCI_STM32_ULS + | MMCI_STM32_ABR; + desc[i].idmabase = sg_dma_address(sg); + desc[i].idmasize = sg_dma_len(sg); + } + + /* notice the end of link list */ + desc[data->sg_len - 1].idmalar &= 
~MMCI_STM32_ULA; + + dma_wmb(); + writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR); + writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR); + writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R); + writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER); + writel_relaxed(MMCI_STM32_IDMAEN | MMCI_STM32_IDMALLIEN, + host->base + MMCI_STM32_IDMACTRLR); + + return 0; +} + +static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data) +{ + writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR); +} + +static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired) +{ + unsigned int clk = 0, ddr = 0; + + if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 || + host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) + ddr = MCI_STM32_CLK_DDR; + + /* + * cclk = mclk / (2 * clkdiv) + * clkdiv 0 => bypass + * in ddr mode bypass is not possible + */ + if (desired) { + if (desired >= host->mclk && !ddr) { + host->cclk = host->mclk; + } else { + clk = DIV_ROUND_UP(host->mclk, 2 * desired); + if (clk > MCI_STM32_CLK_CLKDIV_MSK) + clk = MCI_STM32_CLK_CLKDIV_MSK; + host->cclk = host->mclk / (2 * clk); + } + } else { + /* + * while power-on phase the clock can't be define to 0, + * Only power-off and power-cyc deactivate the clock. + * if desired clock is 0, set max divider + */ + clk = MCI_STM32_CLK_CLKDIV_MSK; + host->cclk = host->mclk / (2 * clk); + } + + /* Set actual clock for debug */ + if (host->mmc->ios.power_mode == MMC_POWER_ON) + host->mmc->actual_clock = host->cclk; + else + host->mmc->actual_clock = 0; + + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) + clk |= MCI_STM32_CLK_WIDEBUS_4; + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) + clk |= MCI_STM32_CLK_WIDEBUS_8; + + clk |= MCI_STM32_CLK_HWFCEN; + clk |= host->clk_reg_add; + clk |= ddr; + + /* + * SDMMC_FBCK is selected when an external Delay Block is needed + * with SDR104. + */ + if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50) { + clk |= MCI_STM32_CLK_BUSSPEED; + if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) { + clk &= ~MCI_STM32_CLK_SEL_MSK; + clk |= MCI_STM32_CLK_SELFBCK; + } + } + + mmci_write_clkreg(host, clk); +} + +static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr) +{ + struct mmc_ios ios = host->mmc->ios; + + pwr = host->pwr_reg_add; + + if (ios.power_mode == MMC_POWER_OFF) { + /* Only a reset could power-off sdmmc */ + reset_control_assert(host->rst); + udelay(2); + reset_control_deassert(host->rst); + + /* + * Set the SDMMC in Power-cycle state. + * This will make that the SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK + * are driven low, to prevent the Card from being supplied + * through the signal lines. + */ + mmci_write_pwrreg(host, MCI_STM32_PWR_CYC | pwr); + } else if (ios.power_mode == MMC_POWER_ON) { + /* + * After power-off (reset): the irq mask defined in probe + * functionis lost + * ault irq mask (probe) must be activated + */ + writel(MCI_IRQENABLE | host->variant->start_err, + host->base + MMCIMASK0); + + /* + * After a power-cycle state, we must set the SDMMC in + * Power-off. The SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are + * driven high. 
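/*
 * Worked example (hypothetical mclk and requested rates, not from the
 * patch) for the divider computation in mmci_sdmmc_set_clkreg() above,
 * where cclk = mclk / (2 * clkdiv) and clkdiv == 0 means bypass:
 *
 *   mclk = 48 MHz, desired = 400 kHz (non-DDR):
 *     clk  = DIV_ROUND_UP(48000000, 2 * 400000) = 60
 *     cclk = 48000000 / (2 * 60)                = 400 kHz
 *
 *   mclk = 48 MHz, desired = 25 MHz:
 *     clk  = DIV_ROUND_UP(48000000, 2 * 25000000) = 1
 *     cclk = 48000000 / (2 * 1)                   = 24 MHz
 *
 * Rounding the divider up keeps the actual card clock at or below the
 * requested rate.
 */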
Then we can set the SDMMC to Power-on state + */ + mmci_write_pwrreg(host, MCI_PWR_OFF | pwr); + mdelay(1); + mmci_write_pwrreg(host, MCI_PWR_ON | pwr); + } +} + +static struct mmci_host_ops sdmmc_variant_ops = { + .validate_data = sdmmc_idma_validate_data, + .prep_data = sdmmc_idma_prep_data, + .unprep_data = sdmmc_idma_unprep_data, + .dma_setup = sdmmc_idma_setup, + .dma_start = sdmmc_idma_start, + .dma_finalize = sdmmc_idma_finalize, + .set_clkreg = mmci_sdmmc_set_clkreg, + .set_pwrreg = mmci_sdmmc_set_pwrreg, +}; + +void sdmmc_variant_init(struct mmci_host *host) +{ + host->ops = &sdmmc_variant_ops; +} diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 04841386b65d..6334cc752d8b 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -87,6 +87,13 @@ #define SDC_FIFO_CFG 0x228 /*--------------------------------------------------------------------------*/ +/* Top Pad Register Offset */ +/*--------------------------------------------------------------------------*/ +#define EMMC_TOP_CONTROL 0x00 +#define EMMC_TOP_CMD 0x04 +#define EMMC50_PAD_DS_TUNE 0x0c + +/*--------------------------------------------------------------------------*/ /* Register Mask */ /*--------------------------------------------------------------------------*/ @@ -261,6 +268,23 @@ #define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */ #define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */ +/* EMMC_TOP_CONTROL mask */ +#define PAD_RXDLY_SEL (0x1 << 0) /* RW */ +#define DELAY_EN (0x1 << 1) /* RW */ +#define PAD_DAT_RD_RXDLY2 (0x1f << 2) /* RW */ +#define PAD_DAT_RD_RXDLY (0x1f << 7) /* RW */ +#define PAD_DAT_RD_RXDLY2_SEL (0x1 << 12) /* RW */ +#define PAD_DAT_RD_RXDLY_SEL (0x1 << 13) /* RW */ +#define DATA_K_VALUE_SEL (0x1 << 14) /* RW */ +#define SDC_RX_ENH_EN (0x1 << 15) /* TW */ + +/* EMMC_TOP_CMD mask */ +#define PAD_CMD_RXDLY2 (0x1f << 0) /* RW */ +#define PAD_CMD_RXDLY (0x1f << 5) /* RW */ +#define PAD_CMD_RD_RXDLY2_SEL (0x1 << 10) /* RW */ +#define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */ +#define PAD_CMD_TX_DLY (0x1f << 12) /* RW */ + #define REQ_CMD_EIO (0x1 << 0) #define REQ_CMD_TMO (0x1 << 1) #define REQ_DAT_ERR (0x1 << 2) @@ -333,6 +357,9 @@ struct msdc_save_para { u32 emmc50_cfg0; u32 emmc50_cfg3; u32 sdc_fifo_cfg; + u32 emmc_top_control; + u32 emmc_top_cmd; + u32 emmc50_pad_ds_tune; }; struct mtk_mmc_compatible { @@ -351,6 +378,8 @@ struct msdc_tune_para { u32 iocon; u32 pad_tune; u32 pad_cmd_tune; + u32 emmc_top_control; + u32 emmc_top_cmd; }; struct msdc_delay_phase { @@ -372,6 +401,7 @@ struct msdc_host { int error; void __iomem *base; /* host base address */ + void __iomem *top_base; /* host top register base address */ struct msdc_dma dma; /* dma channel */ u64 dma_mask; @@ -387,10 +417,10 @@ struct msdc_host { struct clk *src_clk; /* msdc source clock */ struct clk *h_clk; /* msdc h_clk */ + struct clk *bus_clk; /* bus clock which used to access register */ struct clk *src_clk_cg; /* msdc source clock control gate */ u32 mclk; /* mmc subsystem clock frequency */ u32 src_clk_freq; /* source clock frequency */ - u32 sclk; /* SD/MS bus clock frequency */ unsigned char timing; bool vqmmc_enabled; u32 latch_ck; @@ -429,6 +459,18 @@ static const struct mtk_mmc_compatible mt8173_compat = { .support_64g = false, }; +static const struct mtk_mmc_compatible mt8183_compat = { + .clk_div_bits = 12, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = true, + .stop_clk_fix = true, + .enhance_rx = true, + 
.support_64g = true, +}; + static const struct mtk_mmc_compatible mt2701_compat = { .clk_div_bits = 12, .hs400_tune = false, @@ -468,6 +510,7 @@ static const struct mtk_mmc_compatible mt7622_compat = { static const struct of_device_id msdc_of_ids[] = { { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat}, { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat}, + { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat}, { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat}, { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat}, { .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat}, @@ -635,10 +678,10 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) host->timeout_ns = ns; host->timeout_clks = clks; - if (host->sclk == 0) { + if (host->mmc->actual_clock == 0) { timeout = 0; } else { - clk_ns = 1000000000UL / host->sclk; + clk_ns = 1000000000UL / host->mmc->actual_clock; timeout = (ns + clk_ns - 1) / clk_ns + clks; /* in 1048576 sclk cycle unit */ timeout = (timeout + (0x1 << 20) - 1) >> 20; @@ -660,12 +703,14 @@ static void msdc_gate_clock(struct msdc_host *host) { clk_disable_unprepare(host->src_clk_cg); clk_disable_unprepare(host->src_clk); + clk_disable_unprepare(host->bus_clk); clk_disable_unprepare(host->h_clk); } static void msdc_ungate_clock(struct msdc_host *host) { clk_prepare_enable(host->h_clk); + clk_prepare_enable(host->bus_clk); clk_prepare_enable(host->src_clk); clk_prepare_enable(host->src_clk_cg); while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) @@ -683,6 +728,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) if (!hz) { dev_dbg(host->dev, "set mclk to 0\n"); host->mclk = 0; + host->mmc->actual_clock = 0; sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); return; } @@ -761,7 +807,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) cpu_relax(); sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); - host->sclk = sclk; + host->mmc->actual_clock = sclk; host->mclk = hz; host->timing = timing; /* need because clk changed. 
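/*
 * Worked example (hypothetical numbers, not from the patch) for
 * msdc_set_timeout() above, now that it uses host->mmc->actual_clock:
 * with actual_clock = 50 MHz, ns = 100000000 (100 ms) and clks = 0:
 *
 *   clk_ns  = 1000000000 / 50000000            = 20
 *   timeout = (100000000 + 20 - 1) / 20 + 0    = 5000000 sclk cycles
 *   timeout = (5000000 + (1 << 20) - 1) >> 20  = 5   (1048576-cycle units)
 */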
*/ @@ -772,14 +818,30 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) * mmc_select_hs400() will drop to 50Mhz and High speed mode, * tune result of hs200/200Mhz is not suitable for 50Mhz */ - if (host->sclk <= 52000000) { + if (host->mmc->actual_clock <= 52000000) { writel(host->def_tune_para.iocon, host->base + MSDC_IOCON); - writel(host->def_tune_para.pad_tune, host->base + tune_reg); + if (host->top_base) { + writel(host->def_tune_para.emmc_top_control, + host->top_base + EMMC_TOP_CONTROL); + writel(host->def_tune_para.emmc_top_cmd, + host->top_base + EMMC_TOP_CMD); + } else { + writel(host->def_tune_para.pad_tune, + host->base + tune_reg); + } } else { writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON); - writel(host->saved_tune_para.pad_tune, host->base + tune_reg); writel(host->saved_tune_para.pad_cmd_tune, host->base + PAD_CMD_TUNE); + if (host->top_base) { + writel(host->saved_tune_para.emmc_top_control, + host->top_base + EMMC_TOP_CONTROL); + writel(host->saved_tune_para.emmc_top_cmd, + host->top_base + EMMC_TOP_CMD); + } else { + writel(host->saved_tune_para.pad_tune, + host->base + tune_reg); + } } if (timing == MMC_TIMING_MMC_HS400 && @@ -787,7 +849,8 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) sdr_set_field(host->base + PAD_CMD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, host->hs400_cmd_int_delay); - dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing); + dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock, + timing); } static inline u32 msdc_cmd_find_resp(struct msdc_host *host, @@ -1055,6 +1118,7 @@ static void msdc_start_command(struct msdc_host *host, WARN_ON(host->cmd); host->cmd = cmd; + mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); if (!msdc_cmd_is_ready(host, mrq, cmd)) return; @@ -1066,7 +1130,6 @@ static void msdc_start_command(struct msdc_host *host, cmd->error = 0; rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd); - mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask); writel(cmd->arg, host->base + SDC_ARG); @@ -1351,7 +1414,12 @@ static void msdc_init_hw(struct msdc_host *host) val = readl(host->base + MSDC_INT); writel(val, host->base + MSDC_INT); - writel(0, host->base + tune_reg); + if (host->top_base) { + writel(0, host->top_base + EMMC_TOP_CONTROL); + writel(0, host->top_base + EMMC_TOP_CMD); + } else { + writel(0, host->base + tune_reg); + } writel(0, host->base + MSDC_IOCON); sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0); writel(0x403c0046, host->base + MSDC_PATCH_BIT); @@ -1375,8 +1443,12 @@ static void msdc_init_hw(struct msdc_host *host) sdr_set_field(host->base + MSDC_PATCH_BIT2, MSDC_PB2_RESPWAIT, 3); if (host->dev_comp->enhance_rx) { - sdr_set_bits(host->base + SDC_ADV_CFG0, - SDC_RX_ENHANCE_EN); + if (host->top_base) + sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, + SDC_RX_ENH_EN); + else + sdr_set_bits(host->base + SDC_ADV_CFG0, + SDC_RX_ENHANCE_EN); } else { sdr_set_field(host->base + MSDC_PATCH_BIT2, MSDC_PB2_RESPSTSENSEL, 2); @@ -1394,11 +1466,26 @@ static void msdc_init_hw(struct msdc_host *host) sdr_set_bits(host->base + MSDC_PATCH_BIT2, MSDC_PB2_SUPPORT_64G); if (host->dev_comp->data_tune) { - sdr_set_bits(host->base + tune_reg, - MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL); + if (host->top_base) { + sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY_SEL); + sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL, + DATA_K_VALUE_SEL); + 
sdr_set_bits(host->top_base + EMMC_TOP_CMD, + PAD_CMD_RD_RXDLY_SEL); + } else { + sdr_set_bits(host->base + tune_reg, + MSDC_PAD_TUNE_RD_SEL | + MSDC_PAD_TUNE_CMD_SEL); + } } else { /* choose clock tune */ - sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RXDLYSEL); + if (host->top_base) + sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, + PAD_RXDLY_SEL); + else + sdr_set_bits(host->base + tune_reg, + MSDC_PAD_TUNE_RXDLYSEL); } /* Configure to enable SDIO mode. @@ -1413,9 +1500,20 @@ static void msdc_init_hw(struct msdc_host *host) sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3); host->def_tune_para.iocon = readl(host->base + MSDC_IOCON); - host->def_tune_para.pad_tune = readl(host->base + tune_reg); host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); - host->saved_tune_para.pad_tune = readl(host->base + tune_reg); + if (host->top_base) { + host->def_tune_para.emmc_top_control = + readl(host->top_base + EMMC_TOP_CONTROL); + host->def_tune_para.emmc_top_cmd = + readl(host->top_base + EMMC_TOP_CMD); + host->saved_tune_para.emmc_top_control = + readl(host->top_base + EMMC_TOP_CONTROL); + host->saved_tune_para.emmc_top_cmd = + readl(host->top_base + EMMC_TOP_CMD); + } else { + host->def_tune_para.pad_tune = readl(host->base + tune_reg); + host->saved_tune_para.pad_tune = readl(host->base + tune_reg); + } dev_dbg(host->dev, "init hardware done!"); } @@ -1563,6 +1661,30 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) return delay_phase; } +static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value) +{ + u32 tune_reg = host->dev_comp->pad_tune_reg; + + if (host->top_base) + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, + value); + else + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, + value); +} + +static inline void msdc_set_data_delay(struct msdc_host *host, u32 value) +{ + u32 tune_reg = host->dev_comp->pad_tune_reg; + + if (host->top_base) + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY, value); + else + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, + value); +} + static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); @@ -1583,8 +1705,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); for (i = 0 ; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + tune_reg, - MSDC_PAD_TUNE_CMDRDLY, i); + msdc_set_cmd_delay(host, i); /* * Using the same parameters, it may sometimes pass the test, * but sometimes it may fail. To make sure the parameters are @@ -1608,8 +1729,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); for (i = 0; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + tune_reg, - MSDC_PAD_TUNE_CMDRDLY, i); + msdc_set_cmd_delay(host, i); /* * Using the same parameters, it may sometimes pass the test, * but sometimes it may fail. 
To make sure the parameters are @@ -1633,15 +1753,13 @@ skip_fall: final_maxlen = final_fall_delay.maxlen; if (final_maxlen == final_rise_delay.maxlen) { sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, - final_rise_delay.final_phase); final_delay = final_rise_delay.final_phase; } else { sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, - final_fall_delay.final_phase); final_delay = final_fall_delay.final_phase; } + msdc_set_cmd_delay(host, final_delay); + if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay) goto skip_internal; @@ -1716,7 +1834,6 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) u32 rise_delay = 0, fall_delay = 0; struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; u8 final_delay, final_maxlen; - u32 tune_reg = host->dev_comp->pad_tune_reg; int i, ret; sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, @@ -1724,8 +1841,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); for (i = 0 ; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + tune_reg, - MSDC_PAD_TUNE_DATRRDLY, i); + msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) rise_delay |= (1 << i); @@ -1739,8 +1855,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); for (i = 0; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + tune_reg, - MSDC_PAD_TUNE_DATRRDLY, i); + msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) fall_delay |= (1 << i); @@ -1752,29 +1867,97 @@ skip_fall: if (final_maxlen == final_rise_delay.maxlen) { sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - sdr_set_field(host->base + tune_reg, - MSDC_PAD_TUNE_DATRRDLY, - final_rise_delay.final_phase); final_delay = final_rise_delay.final_phase; } else { sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - sdr_set_field(host->base + tune_reg, - MSDC_PAD_TUNE_DATRRDLY, - final_fall_delay.final_phase); final_delay = final_fall_delay.final_phase; } + msdc_set_data_delay(host, final_delay); dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay); return final_delay == 0xff ? -EIO : 0; } +/* + * MSDC IP which supports data tune + async fifo can do CMD/DAT tune + * together, which can save the tuning time. 
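/*
 * Minimal sketch (not from the patch) of the window selection the tuning
 * loops above rely on: each of the PAD_DELAY_MAX delay taps is tried and
 * recorded as one pass/fail bit, and the final phase is taken from the
 * middle of the longest run of passing taps, which is roughly what
 * get_best_delay() (not shown in this hunk) does. The helper name below is
 * hypothetical and ignores the wrap-around case.
 */
static u8 middle_of_longest_window(u32 pass_bits, unsigned int ntaps)
{
	unsigned int i, len = 0, best_len = 0, best_start = 0, start = 0;

	for (i = 0; i < ntaps; i++) {
		if (pass_bits & (1U << i)) {
			if (!len)
				start = i;
			len++;
			if (len > best_len) {
				best_len = len;
				best_start = start;
			}
		} else {
			len = 0;
		}
	}

	/* 0xff means "no working tap", matching the -EIO check above */
	return best_len ? best_start + best_len / 2 : 0xff;
}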
+ */ +static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) +{ + struct msdc_host *host = mmc_priv(mmc); + u32 rise_delay = 0, fall_delay = 0; + struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; + u8 final_delay, final_maxlen; + int i, ret; + + sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, + host->latch_ck); + + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + sdr_clr_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + for (i = 0 ; i < PAD_DELAY_MAX; i++) { + msdc_set_cmd_delay(host, i); + msdc_set_data_delay(host, i); + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) + rise_delay |= (1 << i); + } + final_rise_delay = get_best_delay(host, rise_delay); + /* if rising edge has enough margin, then do not scan falling edge */ + if (final_rise_delay.maxlen >= 12 || + (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) + goto skip_fall; + + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + sdr_set_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + for (i = 0; i < PAD_DELAY_MAX; i++) { + msdc_set_cmd_delay(host, i); + msdc_set_data_delay(host, i); + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) + fall_delay |= (1 << i); + } + final_fall_delay = get_best_delay(host, fall_delay); + +skip_fall: + final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); + if (final_maxlen == final_rise_delay.maxlen) { + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + sdr_clr_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + final_delay = final_rise_delay.final_phase; + } else { + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + sdr_set_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + final_delay = final_fall_delay.final_phase; + } + + msdc_set_cmd_delay(host, final_delay); + msdc_set_data_delay(host, final_delay); + + dev_dbg(host->dev, "Final pad delay: %x\n", final_delay); + return final_delay == 0xff ? 
-EIO : 0; +} + static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); int ret; u32 tune_reg = host->dev_comp->pad_tune_reg; + if (host->dev_comp->data_tune && host->dev_comp->async_fifo) { + ret = msdc_tune_together(mmc, opcode); + if (host->hs400_mode) { + sdr_clr_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + msdc_set_data_delay(host, 0); + } + goto tune_done; + } if (host->hs400_mode && host->dev_comp->hs400_tune) ret = hs400_tune_response(mmc, opcode); @@ -1790,9 +1973,16 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) dev_err(host->dev, "Tune data fail!\n"); } +tune_done: host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); host->saved_tune_para.pad_tune = readl(host->base + tune_reg); host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); + if (host->top_base) { + host->saved_tune_para.emmc_top_control = readl(host->top_base + + EMMC_TOP_CONTROL); + host->saved_tune_para.emmc_top_cmd = readl(host->top_base + + EMMC_TOP_CMD); + } return ret; } @@ -1801,7 +1991,11 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) struct msdc_host *host = mmc_priv(mmc); host->hs400_mode = true; - writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); + if (host->top_base) + writel(host->hs400_ds_delay, + host->top_base + EMMC50_PAD_DS_TUNE); + else + writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); /* hs400 mode must set it to 0 */ sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS); /* to improve read performance, set outstanding to 2 */ @@ -1884,6 +2078,11 @@ static int msdc_drv_probe(struct platform_device *pdev) goto host_free; } + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + host->top_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->top_base)) + host->top_base = NULL; + ret = mmc_regulator_get_supply(mmc); if (ret) goto host_free; @@ -1900,6 +2099,9 @@ static int msdc_drv_probe(struct platform_device *pdev) goto host_free; } + host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk"); + if (IS_ERR(host->bus_clk)) + host->bus_clk = NULL; /*source clock control gate is optional clock*/ host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg"); if (IS_ERR(host->src_clk_cg)) @@ -2049,7 +2251,6 @@ static void msdc_save_reg(struct msdc_host *host) host->save_para.msdc_cfg = readl(host->base + MSDC_CFG); host->save_para.iocon = readl(host->base + MSDC_IOCON); host->save_para.sdc_cfg = readl(host->base + SDC_CFG); - host->save_para.pad_tune = readl(host->base + tune_reg); host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT); host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1); host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2); @@ -2058,6 +2259,16 @@ static void msdc_save_reg(struct msdc_host *host) host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0); host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3); host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG); + if (host->top_base) { + host->save_para.emmc_top_control = + readl(host->top_base + EMMC_TOP_CONTROL); + host->save_para.emmc_top_cmd = + readl(host->top_base + EMMC_TOP_CMD); + host->save_para.emmc50_pad_ds_tune = + readl(host->top_base + EMMC50_PAD_DS_TUNE); + } else { + host->save_para.pad_tune = readl(host->base + tune_reg); + } } static void msdc_restore_reg(struct msdc_host *host) @@ -2067,7 +2278,6 @@ static void msdc_restore_reg(struct msdc_host *host) 
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG); writel(host->save_para.iocon, host->base + MSDC_IOCON); writel(host->save_para.sdc_cfg, host->base + SDC_CFG); - writel(host->save_para.pad_tune, host->base + tune_reg); writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT); writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1); writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2); @@ -2076,6 +2286,16 @@ static void msdc_restore_reg(struct msdc_host *host) writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0); writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3); writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG); + if (host->top_base) { + writel(host->save_para.emmc_top_control, + host->top_base + EMMC_TOP_CONTROL); + writel(host->save_para.emmc_top_cmd, + host->top_base + EMMC_TOP_CMD); + writel(host->save_para.emmc50_pad_ds_tune, + host->top_base + EMMC50_PAD_DS_TUNE); + } else { + writel(host->save_para.pad_tune, host->base + tune_reg); + } } static int msdc_runtime_suspend(struct device *dev) diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index de4e6e5bf304..4d17032d15ee 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -728,7 +728,6 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) static irqreturn_t mxcmci_irq(int irq, void *devid) { struct mxcmci_host *host = devid; - unsigned long flags; bool sdio_irq; u32 stat; @@ -740,9 +739,9 @@ static irqreturn_t mxcmci_irq(int irq, void *devid) dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); - spin_lock_irqsave(&host->lock, flags); + spin_lock(&host->lock); sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio; - spin_unlock_irqrestore(&host->lock, flags); + spin_unlock(&host->lock); if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE))) mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 68760d4a5d3d..467d889a1638 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -30,7 +30,6 @@ #include <linux/clk.h> #include <linux/of.h> #include <linux/of_irq.h> -#include <linux/of_gpio.h> #include <linux/of_device.h> #include <linux/mmc/host.h> #include <linux/mmc/core.h> @@ -38,7 +37,6 @@ #include <linux/mmc/slot-gpio.h> #include <linux/io.h> #include <linux/irq.h> -#include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> @@ -198,7 +196,6 @@ struct omap_hsmmc_host { struct dma_chan *rx_chan; int response_busy; int context_loss; - int protect_card; int reqs_blocked; int req_in_progress; unsigned long clk_rate; @@ -207,16 +204,6 @@ struct omap_hsmmc_host { #define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */ struct omap_hsmmc_next next_data; struct omap_hsmmc_platform_data *pdata; - - /* return MMC cover switch state, can be NULL if not supported. 
- * - * possible return values: - * 0 - closed - * 1 - open - */ - int (*get_cover_state)(struct device *dev); - - int (*card_detect)(struct device *dev); }; struct omap_mmc_of_data { @@ -226,20 +213,6 @@ struct omap_mmc_of_data { static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host); -static int omap_hsmmc_card_detect(struct device *dev) -{ - struct omap_hsmmc_host *host = dev_get_drvdata(dev); - - return mmc_gpio_get_cd(host->mmc); -} - -static int omap_hsmmc_get_cover_state(struct device *dev) -{ - struct omap_hsmmc_host *host = dev_get_drvdata(dev); - - return mmc_gpio_get_cd(host->mmc); -} - static int omap_hsmmc_enable_supply(struct mmc_host *mmc) { int ret; @@ -484,38 +457,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) return 0; } -static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id); - -static int omap_hsmmc_gpio_init(struct mmc_host *mmc, - struct omap_hsmmc_host *host, - struct omap_hsmmc_platform_data *pdata) -{ - int ret; - - if (gpio_is_valid(pdata->gpio_cod)) { - ret = mmc_gpio_request_cd(mmc, pdata->gpio_cod, 0); - if (ret) - return ret; - - host->get_cover_state = omap_hsmmc_get_cover_state; - mmc_gpio_set_cd_isr(mmc, omap_hsmmc_cover_irq); - } else if (gpio_is_valid(pdata->gpio_cd)) { - ret = mmc_gpio_request_cd(mmc, pdata->gpio_cd, 0); - if (ret) - return ret; - - host->card_detect = omap_hsmmc_card_detect; - } - - if (gpio_is_valid(pdata->gpio_wp)) { - ret = mmc_gpio_request_ro(mmc, pdata->gpio_wp); - if (ret) - return ret; - } - - return 0; -} - /* * Start clock to the card */ @@ -781,9 +722,6 @@ static void send_init_stream(struct omap_hsmmc_host *host) int reg = 0; unsigned long timeout; - if (host->protect_card) - return; - disable_irq(host->irq); OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); @@ -804,29 +742,6 @@ static void send_init_stream(struct omap_hsmmc_host *host) enable_irq(host->irq); } -static inline -int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host) -{ - int r = 1; - - if (host->get_cover_state) - r = host->get_cover_state(host->dev); - return r; -} - -static ssize_t -omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); - struct omap_hsmmc_host *host = mmc_priv(mmc); - - return sprintf(buf, "%s\n", - omap_hsmmc_cover_is_closed(host) ? 
"closed" : "open"); -} - -static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL); - static ssize_t omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr, char *buf) @@ -1247,44 +1162,6 @@ err: return ret; } -/* Protect the card while the cover is open */ -static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host) -{ - if (!host->get_cover_state) - return; - - host->reqs_blocked = 0; - if (host->get_cover_state(host->dev)) { - if (host->protect_card) { - dev_info(host->dev, "%s: cover is closed, " - "card is now accessible\n", - mmc_hostname(host->mmc)); - host->protect_card = 0; - } - } else { - if (!host->protect_card) { - dev_info(host->dev, "%s: cover is open, " - "card is now inaccessible\n", - mmc_hostname(host->mmc)); - host->protect_card = 1; - } - } -} - -/* - * irq handler when (cell-phone) cover is mounted/removed - */ -static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id) -{ - struct omap_hsmmc_host *host = dev_id; - - sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); - - omap_hsmmc_protect_card(host); - mmc_detect_change(host->mmc, (HZ * 200) / 1000); - return IRQ_HANDLED; -} - static void omap_hsmmc_dma_callback(void *param) { struct omap_hsmmc_host *host = param; @@ -1555,24 +1432,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) BUG_ON(host->req_in_progress); BUG_ON(host->dma_ch != -1); - if (host->protect_card) { - if (host->reqs_blocked < 3) { - /* - * Ensure the controller is left in a consistent - * state by resetting the command and data state - * machines. - */ - omap_hsmmc_reset_controller_fsm(host, SRD); - omap_hsmmc_reset_controller_fsm(host, SRC); - host->reqs_blocked += 1; - } - req->cmd->error = -EBADF; - if (req->data) - req->data->error = -EBADF; - req->cmd->retries = 0; - mmc_request_done(mmc, req); - return; - } else if (host->reqs_blocked) + if (host->reqs_blocked) host->reqs_blocked = 0; WARN_ON(host->mrq != NULL); host->mrq = req; @@ -1646,15 +1506,6 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) omap_hsmmc_set_bus_mode(host); } -static int omap_hsmmc_get_cd(struct mmc_host *mmc) -{ - struct omap_hsmmc_host *host = mmc_priv(mmc); - - if (!host->card_detect) - return -ENOSYS; - return host->card_detect(host->dev); -} - static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card) { struct omap_hsmmc_host *host = mmc_priv(mmc); @@ -1793,7 +1644,7 @@ static struct mmc_host_ops omap_hsmmc_ops = { .pre_req = omap_hsmmc_pre_req, .request = omap_hsmmc_request, .set_ios = omap_hsmmc_set_ios, - .get_cd = omap_hsmmc_get_cd, + .get_cd = mmc_gpio_get_cd, .get_ro = mmc_gpio_get_ro, .init_card = omap_hsmmc_init_card, .enable_sdio_irq = omap_hsmmc_enable_sdio_irq, @@ -1920,10 +1771,6 @@ static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev) if (of_find_property(np, "ti,dual-volt", NULL)) pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT; - pdata->gpio_cd = -EINVAL; - pdata->gpio_cod = -EINVAL; - pdata->gpio_wp = -EINVAL; - if (of_find_property(np, "ti,non-removable", NULL)) { pdata->nonremovable = true; pdata->no_regulator_off_init = true; @@ -2008,10 +1855,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) host->pbias_enabled = 0; host->vqmmc_enabled = 0; - ret = omap_hsmmc_gpio_init(mmc, host, pdata); - if (ret) - goto err_gpio; - platform_set_drvdata(pdev, host); if (pdev->dev.of_node) @@ -2125,8 +1968,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) if (!ret) 
mmc->caps |= MMC_CAP_SDIO_IRQ; - omap_hsmmc_protect_card(host); - mmc_add_host(mmc); if (mmc_pdata(host)->name != NULL) { @@ -2134,12 +1975,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) if (ret < 0) goto err_slot_name; } - if (host->get_cover_state) { - ret = device_create_file(&mmc->class_dev, - &dev_attr_cover_switch); - if (ret < 0) - goto err_slot_name; - } omap_hsmmc_debugfs(mmc); pm_runtime_mark_last_busy(host->dev); @@ -2161,7 +1996,6 @@ err_irq: if (host->dbclk) clk_disable_unprepare(host->dbclk); err1: -err_gpio: mmc_free_host(mmc); err: return ret; @@ -2231,7 +2065,6 @@ static int omap_hsmmc_resume(struct device *dev) if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) omap_hsmmc_conf_bus_power(host); - omap_hsmmc_protect_card(host); pm_runtime_mark_last_busy(host->dev); pm_runtime_put_autosuspend(host->dev); return 0; diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index f13f798d8506..da1e49c45bec 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Renesas Mobile SDHI * * Copyright (C) 2017 Horms Solutions Ltd., Simon Horman * Copyright (C) 2017 Renesas Electronics Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef RENESAS_SDHI_H diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 777e32b0e410..d3ac43c3d0b6 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Renesas SDHI * @@ -6,10 +7,6 @@ * Copyright (C) 2016-17 Horms Solutions, Simon Horman * Copyright (C) 2009 Magnus Damm * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Based on "Compaq ASIC3 support": * * Copyright 2001 Compaq Computer Corporation. @@ -155,6 +152,52 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host, return ret == 0 ? best_freq : clk_get_rate(priv->clk); } +static void renesas_sdhi_set_clock(struct tmio_mmc_host *host, + unsigned int new_clock) +{ + u32 clk = 0, clock; + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + if (new_clock == 0) + goto out; + + /* + * Both HS400 and HS200/SD104 set 200MHz, but some devices need to + * set 400MHz to distinguish the CPG settings in HS400. 
+ */ + if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && + host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400 && + new_clock == 200000000) + new_clock = 400000000; + + clock = renesas_sdhi_clk_update(host, new_clock) / 512; + + for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1) + clock <<= 1; + + /* 1/1 clock is option */ + if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1)) { + if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400)) + clk |= 0xff; + else + clk &= ~0xff; + } + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + +out: + /* HW engineers overrode docs: no sleep needed on R-Car2+ */ + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); +} + static void renesas_sdhi_clk_disable(struct tmio_mmc_host *host) { struct renesas_sdhi *priv = host_to_priv(host); @@ -443,6 +486,19 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host) static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host) { struct renesas_sdhi *priv = host_to_priv(host); + bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400; + + /* + * Skip checking SCC errors when running on 4 taps in HS400 mode as + * any retuning would still result in the same 4 taps being used. + */ + if (!(host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) && + !(host->mmc->ios.timing == MMC_TIMING_MMC_HS200) && + !(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && !use_4tap)) + return false; + + if (mmc_doing_retune(host->mmc)) + return false; /* Check SCC error */ if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) & @@ -620,8 +676,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->write16_hook = renesas_sdhi_write16_hook; host->clk_enable = renesas_sdhi_clk_enable; - host->clk_update = renesas_sdhi_clk_update; host->clk_disable = renesas_sdhi_clk_disable; + host->set_clock = renesas_sdhi_set_clock; host->multi_io_quirk = renesas_sdhi_multi_io_quirk; host->dma_ops = dma_ops; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index ca0b43973769..b6f54102bfdd 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * DMA support for Internal DMAC with SDHI SD/SDIO controller * * Copyright (C) 2016-17 Renesas Electronics Corporation * Copyright (C) 2016-17 Horms Solutions, Simon Horman - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/bitops.h> @@ -35,8 +32,8 @@ /* DM_CM_DTRAN_MODE */ #define DTRAN_MODE_CH_NUM_CH0 0 /* "downstream" = for write commands */ -#define DTRAN_MODE_CH_NUM_CH1 BIT(16) /* "uptream" = for read commands */ -#define DTRAN_MODE_BUS_WID_TH (BIT(5) | BIT(4)) +#define DTRAN_MODE_CH_NUM_CH1 BIT(16) /* "upstream" = for read commands */ +#define DTRAN_MODE_BUS_WIDTH (BIT(5) | BIT(4)) #define DTRAN_MODE_ADDR_MODE BIT(0) /* 1 = Increment address */ /* DM_CM_DTRAN_CTRL */ @@ -116,6 +113,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { }; static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = { + { .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, }, { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_r8a7795_compatible, }, { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_r8a7795_compatible, }, { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, }, @@ -174,7 +172,7 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, struct mmc_data *data) { struct scatterlist *sg = host->sg_ptr; - u32 dtran_mode = DTRAN_MODE_BUS_WID_TH | DTRAN_MODE_ADDR_MODE; + u32 dtran_mode = DTRAN_MODE_BUS_WIDTH | DTRAN_MODE_ADDR_MODE; if (!dma_map_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data))) @@ -201,13 +199,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR, sg_dma_address(sg)); + host->dma_on = true; + return; force_pio_with_unmap: dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data)); force_pio: - host->force_pio = true; renesas_sdhi_internal_dmac_enable_dma(host, false); } @@ -291,16 +290,19 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC * implementation as others may use a different implementation. */ -static const struct soc_device_attribute gen3_soc_whitelist[] = { +static const struct soc_device_attribute soc_whitelist[] = { /* specific ones */ { .soc_id = "r8a7795", .revision = "ES1.*", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, { .soc_id = "r8a7796", .revision = "ES1.0", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, /* generic ones */ + { .soc_id = "r8a774a1" }, + { .soc_id = "r8a77470" }, { .soc_id = "r8a7795" }, { .soc_id = "r8a7796" }, { .soc_id = "r8a77965" }, + { .soc_id = "r8a77970" }, { .soc_id = "r8a77980" }, { .soc_id = "r8a77995" }, { /* sentinel */ } @@ -308,13 +310,21 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = { static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) { - const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist); + const struct soc_device_attribute *soc = soc_device_match(soc_whitelist); + struct device *dev = &pdev->dev; if (!soc) return -ENODEV; global_flags |= (unsigned long)soc->data; + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + + /* value is max of SD_SECCNT. 
Confirmed by HW engineers */ + dma_set_max_seg_size(dev, 0xffffffff); + return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); } diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index 890f192dedbd..1a4016f635d3 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * DMA support use of SYS DMAC with SDHI SD/SDIO controller * @@ -5,10 +6,6 @@ * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang * Copyright (C) 2017 Horms Solutions, Simon Horman * Copyright (C) 2010-2011 Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/device.h> @@ -213,10 +210,8 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host) goto pio; } - if (sg->length < TMIO_MMC_MIN_DMA_LEN) { - host->force_pio = true; + if (sg->length < TMIO_MMC_MIN_DMA_LEN) return; - } /* The only sg element can be unaligned, use our bounce buffer then */ if (!aligned) { @@ -240,6 +235,7 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host) desc = NULL; ret = cookie; } + host->dma_on = true; } pio: if (!desc) { @@ -286,10 +282,8 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host) goto pio; } - if (sg->length < TMIO_MMC_MIN_DMA_LEN) { - host->force_pio = true; + if (sg->length < TMIO_MMC_MIN_DMA_LEN) return; - } /* The only sg element can be unaligned, use our bounce buffer then */ if (!aligned) { @@ -318,6 +312,7 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host) desc = NULL; ret = cookie; } + host->dma_on = true; } pio: if (!desc) { @@ -498,7 +493,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = { static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) { - if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible && + if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible || + of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) && !soc_device_match(gen3_soc_whitelist)) return -ENODEV; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 32321bd596d8..057e24f4a620 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -76,6 +76,7 @@ struct sdhci_acpi_slot { size_t priv_size; int (*probe_slot)(struct platform_device *, const char *, const char *); int (*remove_slot)(struct platform_device *); + int (*free_slot)(struct platform_device *pdev); int (*setup_host)(struct platform_device *pdev); }; @@ -246,7 +247,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { static bool sdhci_acpi_byt(void) { static const struct x86_cpu_id byt[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, {} }; @@ -470,10 +471,70 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { .priv_size = sizeof(struct intel_host), }; +#define VENDOR_SPECIFIC_PWRCTL_CLEAR_REG 0x1a8 +#define VENDOR_SPECIFIC_PWRCTL_CTL_REG 0x1ac +static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr) +{ + struct sdhci_host *host = ptr; + + sdhci_writel(host, 0x3, VENDOR_SPECIFIC_PWRCTL_CLEAR_REG); + sdhci_writel(host, 0x1, VENDOR_SPECIFIC_PWRCTL_CTL_REG); + + return IRQ_HANDLED; +} + +static int qcom_probe_slot(struct platform_device 
*pdev, const char *hid, + const char *uid) +{ + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct sdhci_host *host = c->host; + int *irq = sdhci_acpi_priv(c); + + *irq = -EINVAL; + + if (strcmp(hid, "QCOM8051")) + return 0; + + *irq = platform_get_irq(pdev, 1); + if (*irq < 0) + return 0; + + return request_threaded_irq(*irq, NULL, sdhci_acpi_qcom_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + "sdhci_qcom", host); +} + +static int qcom_free_slot(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct sdhci_host *host = c->host; + struct acpi_device *adev; + int *irq = sdhci_acpi_priv(c); + const char *hid; + + adev = ACPI_COMPANION(dev); + if (!adev) + return -ENODEV; + + hid = acpi_device_hid(adev); + if (strcmp(hid, "QCOM8051")) + return 0; + + if (*irq < 0) + return 0; + + free_irq(*irq, host); + return 0; +} + static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = { .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION, .quirks2 = SDHCI_QUIRK2_NO_1_8_V, .caps = MMC_CAP_NONREMOVABLE, + .priv_size = sizeof(int), + .probe_slot = qcom_probe_slot, + .free_slot = qcom_free_slot, }; static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = { @@ -756,6 +817,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev) err_cleanup: sdhci_cleanup_host(c->host); err_free: + if (c->slot && c->slot->free_slot) + c->slot->free_slot(pdev); + sdhci_free_host(c->host); return err; } @@ -777,6 +841,10 @@ static int sdhci_acpi_remove(struct platform_device *pdev) dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); sdhci_remove_host(c->host, dead); + + if (c->slot && c->slot->free_slot) + c->slot->free_slot(pdev); + sdhci_free_host(c->host); return 0; diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index dfa58f8b8dfa..3f16d9c90ba2 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -60,6 +60,7 @@ /* Tuning Block Control Register */ #define ESDHC_TBCTL 0x120 #define ESDHC_TB_EN 0x00000004 +#define ESDHC_TBPTR 0x128 /* Control Register for DMA transfer */ #define ESDHC_DMA_SYSCTL 0x40c diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index d0e83db42ae5..0db99057c44f 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -15,6 +15,7 @@ * iProc SDHCI platform driver */ +#include <linux/acpi.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/mmc/host.h> @@ -162,9 +163,19 @@ static void sdhci_iproc_writeb(struct sdhci_host *host, u8 val, int reg) sdhci_iproc_writel(host, newval, reg & ~3); } +static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + if (pltfm_host->clk) + return sdhci_pltfm_clk_get_max_clock(host); + else + return pltfm_host->clock; +} + static const struct sdhci_ops sdhci_iproc_ops = { .set_clock = sdhci_set_clock, - .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .get_max_clock = sdhci_iproc_get_max_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, @@ -178,7 +189,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = { .write_w = sdhci_iproc_writew, .write_b = sdhci_iproc_writeb, .set_clock = sdhci_set_clock, - .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .get_max_clock = sdhci_iproc_get_max_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, @@ 
-256,19 +267,25 @@ static const struct of_device_id sdhci_iproc_of_match[] = { }; MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match); +static const struct acpi_device_id sdhci_iproc_acpi_ids[] = { + { .id = "BRCM5871", .driver_data = (kernel_ulong_t)&iproc_cygnus_data }, + { .id = "BRCM5872", .driver_data = (kernel_ulong_t)&iproc_data }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, sdhci_iproc_acpi_ids); + static int sdhci_iproc_probe(struct platform_device *pdev) { - const struct of_device_id *match; - const struct sdhci_iproc_data *iproc_data; + struct device *dev = &pdev->dev; + const struct sdhci_iproc_data *iproc_data = NULL; struct sdhci_host *host; struct sdhci_iproc_host *iproc_host; struct sdhci_pltfm_host *pltfm_host; int ret; - match = of_match_device(sdhci_iproc_of_match, &pdev->dev); - if (!match) - return -EINVAL; - iproc_data = match->data; + iproc_data = device_get_match_data(dev); + if (!iproc_data) + return -ENODEV; host = sdhci_pltfm_init(pdev, iproc_data->pdata, sizeof(*iproc_host)); if (IS_ERR(host)) @@ -280,19 +297,21 @@ static int sdhci_iproc_probe(struct platform_device *pdev) iproc_host->data = iproc_data; mmc_of_parse(host->mmc); - sdhci_get_of_property(pdev); + sdhci_get_property(pdev); host->mmc->caps |= iproc_host->data->mmc_caps; - pltfm_host->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(pltfm_host->clk)) { - ret = PTR_ERR(pltfm_host->clk); - goto err; - } - ret = clk_prepare_enable(pltfm_host->clk); - if (ret) { - dev_err(&pdev->dev, "failed to enable host clk\n"); - goto err; + if (dev->of_node) { + pltfm_host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pltfm_host->clk)) { + ret = PTR_ERR(pltfm_host->clk); + goto err; + } + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) { + dev_err(dev, "failed to enable host clk\n"); + goto err; + } } if (iproc_host->data->pdata->quirks & SDHCI_QUIRK_MISSING_CAPS) { @@ -307,7 +326,8 @@ static int sdhci_iproc_probe(struct platform_device *pdev) return 0; err_clk: - clk_disable_unprepare(pltfm_host->clk); + if (dev->of_node) + clk_disable_unprepare(pltfm_host->clk); err: sdhci_pltfm_free(pdev); return ret; @@ -317,6 +337,7 @@ static struct platform_driver sdhci_iproc_driver = { .driver = { .name = "sdhci-iproc", .of_match_table = sdhci_iproc_of_match, + .acpi_match_table = ACPI_PTR(sdhci_iproc_acpi_ids), .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_iproc_probe, diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index a40bcc27f187..142c4b802f31 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -107,6 +107,11 @@ struct sdhci_arasan_data { #define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1) }; +struct sdhci_arasan_of_data { + const struct sdhci_arasan_soc_ctl_map *soc_ctl_map; + const struct sdhci_pltfm_data *pdata; +}; + static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = { .baseclkfreq = { .reg = 0xf000, .width = 8, .shift = 8 }, .clockmultiplier = { .reg = 0xf02c, .width = 8, .shift = 0}, @@ -226,6 +231,25 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock) } } +static void sdhci_arasan_am654_set_clock(struct sdhci_host *host, + unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + + if (sdhci_arasan->is_phy_on) { + phy_power_off(sdhci_arasan->phy); + sdhci_arasan->is_phy_on = false; + } + + sdhci_set_clock(host, clock); + + if (clock > PHY_CLK_TOO_SLOW_HZ) { + phy_power_on(sdhci_arasan->phy); + 
sdhci_arasan->is_phy_on = true; + } +} + static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -307,6 +331,33 @@ static const struct sdhci_pltfm_data sdhci_arasan_pdata = { SDHCI_QUIRK2_STOP_WITH_TC, }; +static struct sdhci_arasan_of_data sdhci_arasan_data = { + .pdata = &sdhci_arasan_pdata, +}; + +static const struct sdhci_ops sdhci_arasan_am654_ops = { + .set_clock = sdhci_arasan_am654_set_clock, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_arasan_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_arasan_am654_pdata = { + .ops = &sdhci_arasan_am654_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_INVERTED_WRITE_PROTECT | + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN | + SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400, +}; + +static const struct sdhci_arasan_of_data sdhci_arasan_am654_data = { + .pdata = &sdhci_arasan_am654_pdata, +}; + static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask) { int cmd_error = 0; @@ -363,6 +414,11 @@ static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = { SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, }; +static struct sdhci_arasan_of_data sdhci_arasan_rk3399_data = { + .soc_ctl_map = &rk3399_soc_ctl_map, + .pdata = &sdhci_arasan_cqe_pdata, +}; + #ifdef CONFIG_PM_SLEEP /** * sdhci_arasan_suspend - Suspend method for the driver @@ -462,14 +518,25 @@ static const struct of_device_id sdhci_arasan_of_match[] = { /* SoC-specific compatible strings w/ soc_ctl_map */ { .compatible = "rockchip,rk3399-sdhci-5.1", - .data = &rk3399_soc_ctl_map, + .data = &sdhci_arasan_rk3399_data, + }, + { + .compatible = "ti,am654-sdhci-5.1", + .data = &sdhci_arasan_am654_data, }, - /* Generic compatible below here */ - { .compatible = "arasan,sdhci-8.9a" }, - { .compatible = "arasan,sdhci-5.1" }, - { .compatible = "arasan,sdhci-4.9a" }, - + { + .compatible = "arasan,sdhci-8.9a", + .data = &sdhci_arasan_data, + }, + { + .compatible = "arasan,sdhci-5.1", + .data = &sdhci_arasan_data, + }, + { + .compatible = "arasan,sdhci-4.9a", + .data = &sdhci_arasan_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match); @@ -707,14 +774,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host; struct sdhci_arasan_data *sdhci_arasan; struct device_node *np = pdev->dev.of_node; - const struct sdhci_pltfm_data *pdata; - - if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-5.1")) - pdata = &sdhci_arasan_cqe_pdata; - else - pdata = &sdhci_arasan_pdata; + const struct sdhci_arasan_of_data *data; - host = sdhci_pltfm_init(pdev, pdata, sizeof(*sdhci_arasan)); + match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node); + data = match->data; + host = sdhci_pltfm_init(pdev, data->pdata, sizeof(*sdhci_arasan)); if (IS_ERR(host)) return PTR_ERR(host); @@ -723,8 +787,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev) sdhci_arasan = sdhci_pltfm_priv(pltfm_host); sdhci_arasan->host = host; - match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node); - sdhci_arasan->soc_ctl_map = match->data; + sdhci_arasan->soc_ctl_map = data->soc_ctl_map; node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0); if (node) { @@ -788,7 +851,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev) ret = 
mmc_of_parse(host->mmc); if (ret) { - dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret); goto unreg_clk; } diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index 1b7cd144fb01..a5137845a1c7 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -8,21 +8,51 @@ */ #include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/sizes.h> #include "sdhci-pltfm.h" +#define BOUNDARY_OK(addr, len) \ + ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1))) + struct dwcmshc_priv { struct clk *bus_clk; }; +/* + * If DMA addr spans 128MB boundary, we split the DMA transfer into two + * so that each DMA transfer doesn't exceed the boundary. + */ +static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, unsigned int cmd) +{ + int tmplen, offset; + + if (likely(!len || BOUNDARY_OK(addr, len))) { + sdhci_adma_write_desc(host, desc, addr, len, cmd); + return; + } + + offset = addr & (SZ_128M - 1); + tmplen = SZ_128M - offset; + sdhci_adma_write_desc(host, desc, addr, tmplen, cmd); + + addr += tmplen; + len -= tmplen; + sdhci_adma_write_desc(host, desc, addr, len, cmd); +} + static const struct sdhci_ops sdhci_dwcmshc_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, .set_uhs_signaling = sdhci_set_uhs_signaling, .get_max_clock = sdhci_pltfm_clk_get_max_clock, .reset = sdhci_reset, + .adma_write_desc = dwcmshc_adma_write_desc, }; static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { @@ -36,12 +66,21 @@ static int dwcmshc_probe(struct platform_device *pdev) struct sdhci_host *host; struct dwcmshc_priv *priv; int err; + u32 extra; host = sdhci_pltfm_init(pdev, &sdhci_dwcmshc_pdata, sizeof(struct dwcmshc_priv)); if (IS_ERR(host)) return PTR_ERR(host); + /* + * extra adma table cnt for cross 128M boundary handling. 
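The BOUNDARY_OK() test above is compact enough to deserve a gloss: ORing an address with (SZ_128M - 1) rounds it up to the last byte of its 128 MB window, so the start and the last byte of the transfer compare equal exactly when the whole transfer stays inside one window. A standalone sketch of the same check and of the split arithmetic used by dwcmshc_adma_write_desc() (illustrative only, not the driver code):

#include <linux/sizes.h>
#include <linux/types.h>

/* True if [addr, addr + len) does not cross a 128 MB boundary. */
static bool same_128m_window(dma_addr_t addr, int len)
{
	return (addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1));
}

/*
 * Worked example: addr = 0x07ff0000, len = 0x20000 crosses 0x08000000.
 * The first chunk is SZ_128M - (addr & (SZ_128M - 1)) = 0x10000 bytes,
 * and the remaining 0x10000 bytes become a second ADMA descriptor,
 * which is exactly the offset/tmplen split in the hunk above.
 */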
+ */ + extra = DIV_ROUND_UP_ULL(dma_get_required_mask(&pdev->dev), SZ_128M); + if (extra > SDHCI_MAX_SEGS) + extra = SDHCI_MAX_SEGS; + host->adma_table_cnt += extra; + pltfm_host = sdhci_priv(host); priv = sdhci_pltfm_priv(pltfm_host); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 9cb7554a463d..86fc9f022002 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -78,8 +78,10 @@ struct sdhci_esdhc { u8 vendor_ver; u8 spec_ver; bool quirk_incorrect_hostver; + bool quirk_fixup_tuning; unsigned int peripheral_clock; const struct esdhc_clk_fixup *clk_fixup; + u32 div_ratio; }; /** @@ -580,6 +582,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", clock, host->max_clk / pre_div / div); host->mmc->actual_clock = host->max_clk / pre_div / div; + esdhc->div_ratio = pre_div * div; pre_div >>= 1; div--; @@ -712,9 +715,24 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc, } } +static struct soc_device_attribute soc_fixup_tuning[] = { + { .family = "QorIQ T1040", .revision = "1.0", }, + { .family = "QorIQ T2080", .revision = "1.0", }, + { .family = "QorIQ T1023", .revision = "1.0", }, + { .family = "QorIQ LS1021A", .revision = "1.0", }, + { .family = "QorIQ LS1080A", .revision = "1.0", }, + { .family = "QorIQ LS2080A", .revision = "1.0", }, + { .family = "QorIQ LS1012A", .revision = "1.0", }, + { .family = "QorIQ LS1043A", .revision = "1.*", }, + { .family = "QorIQ LS1046A", .revision = "1.0", }, + { }, +}; + static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); u32 val; /* Use tuning block for tuning procedure */ @@ -728,7 +746,26 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) sdhci_writel(host, val, ESDHC_TBCTL); esdhc_clock_enable(host, true); - return sdhci_execute_tuning(mmc, opcode); + sdhci_execute_tuning(mmc, opcode); + if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) { + + /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and + * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO + */ + val = sdhci_readl(host, ESDHC_TBPTR); + val = (val & ~((0x7f << 8) | 0x7f)) | + (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8); + sdhci_writel(host, val, ESDHC_TBPTR); + + /* program the software tuning mode by setting + * TBCTL[TB_MODE]=2'h3 + */ + val = sdhci_readl(host, ESDHC_TBCTL); + val |= 0x3; + sdhci_writel(host, val, ESDHC_TBCTL); + sdhci_execute_tuning(mmc, opcode); + } + return 0; } #ifdef CONFIG_PM_SLEEP @@ -903,6 +940,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); esdhc = sdhci_pltfm_priv(pltfm_host); + if (soc_device_match(soc_fixup_tuning)) + esdhc->quirk_fixup_tuning = true; + else + esdhc->quirk_fixup_tuning = false; + if (esdhc->vendor_ver == VENDOR_V_22) host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 77e9bc4aaee9..cc3ffeffd7a2 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -490,6 +490,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); break; case PCI_DEVICE_ID_O2_SEABIRD0: + if (chip->pdev->revision == 0x01) + chip->quirks |= 
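The soc_fixup_tuning[] list above is consumed by soc_device_match(), which compares the attributes registered by the platform's SoC identification driver (family, revision, and so on, with glob patterns such as "1.*" allowed) against each entry and returns the first match, or NULL. That is how the eSDHC software-tuning fallback is confined to the affected silicon revisions. The idiom in isolation, with an invented table name and a single illustrative entry:

#include <linux/sys_soc.h>

static const struct soc_device_attribute example_erratum_socs[] = {
	{ .family = "QorIQ LS1043A", .revision = "1.*", },
	{ /* sentinel */ }
};

static bool example_needs_tuning_workaround(void)
{
	/* NULL means the running SoC is not in the list. */
	return soc_device_match(example_erratum_socs) != NULL;
}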
SDHCI_QUIRK_DELAY_AFTER_POWER; + /* fall through */ case PCI_DEVICE_ID_O2_SEABIRD1: /* UnLock WP */ ret = pci_read_config_byte(chip->pdev, diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index 02bea6159d79..b231c9a3f888 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -30,6 +30,7 @@ #include <linux/err.h> #include <linux/module.h> +#include <linux/property.h> #include <linux/of.h> #ifdef CONFIG_PPC #include <asm/machdep.h> @@ -51,11 +52,10 @@ static const struct sdhci_ops sdhci_pltfm_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; -#ifdef CONFIG_OF -static bool sdhci_of_wp_inverted(struct device_node *np) +static bool sdhci_wp_inverted(struct device *dev) { - if (of_get_property(np, "sdhci,wp-inverted", NULL) || - of_get_property(np, "wp-inverted", NULL)) + if (device_property_present(dev, "sdhci,wp-inverted") || + device_property_present(dev, "wp-inverted")) return true; /* Old device trees don't have the wp-inverted property. */ @@ -66,52 +66,64 @@ static bool sdhci_of_wp_inverted(struct device_node *np) #endif /* CONFIG_PPC */ } -void sdhci_get_of_property(struct platform_device *pdev) +#ifdef CONFIG_OF +static void sdhci_get_compatibility(struct platform_device *pdev) { + struct sdhci_host *host = platform_get_drvdata(pdev); struct device_node *np = pdev->dev.of_node; + + if (!np) + return; + + if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) + host->quirks |= SDHCI_QUIRK_BROKEN_DMA; + + if (of_device_is_compatible(np, "fsl,p2020-esdhc") || + of_device_is_compatible(np, "fsl,p1010-esdhc") || + of_device_is_compatible(np, "fsl,t4240-esdhc") || + of_device_is_compatible(np, "fsl,mpc8536-esdhc")) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; +} +#else +void sdhci_get_compatibility(struct platform_device *pdev) {} +#endif /* CONFIG_OF */ + +void sdhci_get_property(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); u32 bus_width; - if (of_get_property(np, "sdhci,auto-cmd12", NULL)) + if (device_property_present(dev, "sdhci,auto-cmd12")) host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; - if (of_get_property(np, "sdhci,1-bit-only", NULL) || - (of_property_read_u32(np, "bus-width", &bus_width) == 0 && + if (device_property_present(dev, "sdhci,1-bit-only") || + (device_property_read_u32(dev, "bus-width", &bus_width) == 0 && bus_width == 1)) host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; - if (sdhci_of_wp_inverted(np)) + if (sdhci_wp_inverted(dev)) host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; - if (of_get_property(np, "broken-cd", NULL)) + if (device_property_present(dev, "broken-cd")) host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; - if (of_get_property(np, "no-1-8-v", NULL)) + if (device_property_present(dev, "no-1-8-v")) host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) - host->quirks |= SDHCI_QUIRK_BROKEN_DMA; - - if (of_device_is_compatible(np, "fsl,p2020-esdhc") || - of_device_is_compatible(np, "fsl,p1010-esdhc") || - of_device_is_compatible(np, "fsl,t4240-esdhc") || - of_device_is_compatible(np, "fsl,mpc8536-esdhc")) - host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + sdhci_get_compatibility(pdev); - of_property_read_u32(np, "clock-frequency", &pltfm_host->clock); + device_property_read_u32(dev, "clock-frequency", &pltfm_host->clock); - if (of_find_property(np, "keep-power-in-suspend", NULL)) + if 
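The sdhci-pltfm rework above (continuing below) replaces the of_*() property accessors with device_property_*(), so the same parsing code serves both device-tree and ACPI-described hosts; only the compatible-string quirks stay behind CONFIG_OF in sdhci_get_compatibility(). A reduced sketch of the unified accessors, using a hypothetical config structure:

#include <linux/device.h>
#include <linux/property.h>
#include <linux/types.h>

struct example_host_cfg {
	bool one_bit_only;
	u32 bus_width;
	u32 clock_frequency;	/* left at 0 when the property is absent */
};

static void example_parse_properties(struct device *dev,
				     struct example_host_cfg *cfg)
{
	/* Presence check works for DT properties and ACPI _DSD alike. */
	cfg->one_bit_only = device_property_present(dev, "sdhci,1-bit-only");

	/* Non-zero return means "not found"; keep a sane default then. */
	if (device_property_read_u32(dev, "bus-width", &cfg->bus_width))
		cfg->bus_width = 4;

	device_property_read_u32(dev, "clock-frequency",
				 &cfg->clock_frequency);
}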
(device_property_present(dev, "keep-power-in-suspend")) host->mmc->pm_caps |= MMC_PM_KEEP_POWER; - if (of_property_read_bool(np, "wakeup-source") || - of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */ + if (device_property_read_bool(dev, "wakeup-source") || + device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; } -#else -void sdhci_get_of_property(struct platform_device *pdev) {} -#endif /* CONFIG_OF */ -EXPORT_SYMBOL_GPL(sdhci_get_of_property); +EXPORT_SYMBOL_GPL(sdhci_get_property); struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, const struct sdhci_pltfm_data *pdata, @@ -184,7 +196,7 @@ int sdhci_pltfm_register(struct platform_device *pdev, if (IS_ERR(host)) return PTR_ERR(host); - sdhci_get_of_property(pdev); + sdhci_get_property(pdev); ret = sdhci_add_host(host); if (ret) diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index 1e91fb1c020e..6109987fc3b5 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h @@ -90,7 +90,12 @@ static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg) } #endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */ -extern void sdhci_get_of_property(struct platform_device *pdev); +void sdhci_get_property(struct platform_device *pdev); + +static inline void sdhci_get_of_property(struct platform_device *pdev) +{ + return sdhci_get_property(pdev); +} extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, const struct sdhci_pltfm_data *pdata, diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index b8e96f392428..1783e29eae04 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -21,17 +21,14 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> -#include <linux/gpio.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> -#include <linux/mmc/slot-gpio.h> #include <linux/platform_data/pxa_sdhci.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/mbus.h> @@ -452,16 +449,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) host->mmc->caps2 |= pdata->host_caps2; if (pdata->pm_caps) host->mmc->pm_caps |= pdata->pm_caps; - - if (gpio_is_valid(pdata->ext_cd_gpio)) { - ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio, - 0); - if (ret) { - dev_err(mmc_dev(host->mmc), - "failed to allocate card detect gpio\n"); - goto err_cd_req; - } - } } pm_runtime_get_noresume(&pdev->dev); @@ -486,7 +473,6 @@ err_add_host: pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); err_of_parse: -err_cd_req: err_mbus_win: clk_disable_unprepare(pxa->clk_io); clk_disable_unprepare(pxa->clk_core); diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index 391d52b467ca..5eada6f87e60 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -11,7 +11,6 @@ #include <linux/mmc/host.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/mmc/slot-gpio.h> #include "sdhci-pltfm.h" @@ -19,10 +18,6 @@ #define SDHCI_SIRF_8BITBUS BIT(3) #define SIRF_TUNING_COUNT 16384 -struct sdhci_sirf_priv { - int gpio_cd; -}; - static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width) { u8 ctrl; @@ -170,9 +165,7 @@ static int sdhci_sirf_probe(struct 
platform_device *pdev) { struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; - struct sdhci_sirf_priv *priv; struct clk *clk; - int gpio_cd; int ret; clk = devm_clk_get(&pdev->dev, NULL); @@ -181,19 +174,12 @@ static int sdhci_sirf_probe(struct platform_device *pdev) return PTR_ERR(clk); } - if (pdev->dev.of_node) - gpio_cd = of_get_named_gpio(pdev->dev.of_node, "cd-gpios", 0); - else - gpio_cd = -EINVAL; - - host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, sizeof(struct sdhci_sirf_priv)); + host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, 0); if (IS_ERR(host)) return PTR_ERR(host); pltfm_host = sdhci_priv(host); pltfm_host->clk = clk; - priv = sdhci_pltfm_priv(pltfm_host); - priv->gpio_cd = gpio_cd; sdhci_get_of_property(pdev); @@ -209,15 +195,11 @@ static int sdhci_sirf_probe(struct platform_device *pdev) * We must request the IRQ after sdhci_add_host(), as the tasklet only * gets setup in sdhci_add_host() and we oops. */ - if (gpio_is_valid(priv->gpio_cd)) { - ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0); - if (ret) { - dev_err(&pdev->dev, "card detect irq request failed: %d\n", - ret); - goto err_request_cd; - } + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + if (ret == -EPROBE_DEFER) + goto err_request_cd; + if (!ret) mmc_gpiod_request_cd_irq(host->mmc); - } return 0; diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index 9247d51f2eed..916b5b09c3d1 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -15,13 +15,11 @@ #include <linux/clk.h> #include <linux/delay.h> -#include <linux/gpio.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/slab.h> @@ -32,7 +30,6 @@ struct spear_sdhci { struct clk *clk; - int card_int_gpio; }; /* sdhci ops */ @@ -43,18 +40,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static void sdhci_probe_config_dt(struct device_node *np, - struct spear_sdhci *host) -{ - int cd_gpio; - - cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); - if (!gpio_is_valid(cd_gpio)) - cd_gpio = -1; - - host->card_int_gpio = cd_gpio; -} - static int sdhci_probe(struct platform_device *pdev) { struct sdhci_host *host; @@ -109,21 +94,13 @@ static int sdhci_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "Error setting desired clk, clk=%lu\n", clk_get_rate(sdhci->clk)); - sdhci_probe_config_dt(pdev->dev.of_node, sdhci); /* - * It is optional to use GPIOs for sdhci card detection. If - * sdhci->card_int_gpio < 0, then use original sdhci lines otherwise - * GPIO lines. We use the built-in GPIO support for this. + * It is optional to use GPIOs for sdhci card detection. If we + * find a descriptor using slot GPIO, we use it. 
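Both the sdhci-sirf and the sdhci-spear conversions here replace open-coded of_get_named_gpio() card-detect handling with the slot-gpio descriptor API: mmc_gpiod_request_cd() looks up an optional "cd" GPIO on its own, and only -EPROBE_DEFER has to abort the probe, since any other failure simply means the controller's native card-detect line is used. The call sequence on its own (helper name is made up):

#include <linux/errno.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int example_setup_card_detect(struct mmc_host *mmc)
{
	int ret;

	/* con_id "cd", index 0, don't override active level, no debounce. */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
	if (ret == -EPROBE_DEFER)
		return ret;		/* GPIO provider not ready yet */
	if (!ret)
		mmc_gpiod_request_cd_irq(mmc);

	/* Any other error: fall back to the controller's own CD signal. */
	return 0;
}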
*/ - if (sdhci->card_int_gpio >= 0) { - ret = mmc_gpio_request_cd(host->mmc, sdhci->card_int_gpio, 0); - if (ret < 0) { - dev_dbg(&pdev->dev, - "failed to request card-detect gpio%d\n", - sdhci->card_int_gpio); - goto disable_clk; - } - } + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + if (ret == -EPROBE_DEFER) + goto disable_clk; ret = sdhci_add_host(host); if (ret) diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c new file mode 100644 index 000000000000..9a822e2e9f0b --- /dev/null +++ b/drivers/mmc/host/sdhci-sprd.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Secure Digital Host Controller +// +// Copyright (C) 2018 Spreadtrum, Inc. +// Author: Chunyan Zhang <chunyan.zhang@unisoc.com> + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/highmem.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> + +#include "sdhci-pltfm.h" + +/* SDHCI_ARGUMENT2 register high 16bit */ +#define SDHCI_SPRD_ARG2_STUFF GENMASK(31, 16) + +#define SDHCI_SPRD_REG_32_DLL_DLY_OFFSET 0x208 +#define SDHCIBSPRD_IT_WR_DLY_INV BIT(5) +#define SDHCI_SPRD_BIT_CMD_DLY_INV BIT(13) +#define SDHCI_SPRD_BIT_POSRD_DLY_INV BIT(21) +#define SDHCI_SPRD_BIT_NEGRD_DLY_INV BIT(29) + +#define SDHCI_SPRD_REG_32_BUSY_POSI 0x250 +#define SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN BIT(25) +#define SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN BIT(24) + +#define SDHCI_SPRD_REG_DEBOUNCE 0x28C +#define SDHCI_SPRD_BIT_DLL_BAK BIT(0) +#define SDHCI_SPRD_BIT_DLL_VAL BIT(1) + +#define SDHCI_SPRD_INT_SIGNAL_MASK 0x1B7F410B + +/* SDHCI_HOST_CONTROL2 */ +#define SDHCI_SPRD_CTRL_HS200 0x0005 +#define SDHCI_SPRD_CTRL_HS400 0x0006 + +/* + * According to the standard specification, BIT(3) of SDHCI_SOFTWARE_RESET is + * reserved, and only used on Spreadtrum's design, the hardware cannot work + * if this bit is cleared. 
+ * 1 : normal work + * 0 : hardware reset + */ +#define SDHCI_HW_RESET_CARD BIT(3) + +#define SDHCI_SPRD_MAX_CUR 0xFFFFFF +#define SDHCI_SPRD_CLK_MAX_DIV 1023 + +#define SDHCI_SPRD_CLK_DEF_RATE 26000000 + +struct sdhci_sprd_host { + u32 version; + struct clk *clk_sdio; + struct clk *clk_enable; + u32 base_rate; + int flags; /* backup of host attribute */ +}; + +#define TO_SPRD_HOST(host) sdhci_pltfm_priv(sdhci_priv(host)) + +static void sdhci_sprd_init_config(struct sdhci_host *host) +{ + u16 val; + + /* set dll backup mode */ + val = sdhci_readl(host, SDHCI_SPRD_REG_DEBOUNCE); + val |= SDHCI_SPRD_BIT_DLL_BAK | SDHCI_SPRD_BIT_DLL_VAL; + sdhci_writel(host, val, SDHCI_SPRD_REG_DEBOUNCE); +} + +static inline u32 sdhci_sprd_readl(struct sdhci_host *host, int reg) +{ + if (unlikely(reg == SDHCI_MAX_CURRENT)) + return SDHCI_SPRD_MAX_CUR; + + return readl_relaxed(host->ioaddr + reg); +} + +static inline void sdhci_sprd_writel(struct sdhci_host *host, u32 val, int reg) +{ + /* SDHCI_MAX_CURRENT is reserved on Spreadtrum's platform */ + if (unlikely(reg == SDHCI_MAX_CURRENT)) + return; + + if (unlikely(reg == SDHCI_SIGNAL_ENABLE || reg == SDHCI_INT_ENABLE)) + val = val & SDHCI_SPRD_INT_SIGNAL_MASK; + + writel_relaxed(val, host->ioaddr + reg); +} + +static inline void sdhci_sprd_writew(struct sdhci_host *host, u16 val, int reg) +{ + /* SDHCI_BLOCK_COUNT is Read Only on Spreadtrum's platform */ + if (unlikely(reg == SDHCI_BLOCK_COUNT)) + return; + + writew_relaxed(val, host->ioaddr + reg); +} + +static inline void sdhci_sprd_writeb(struct sdhci_host *host, u8 val, int reg) +{ + /* + * Since BIT(3) of SDHCI_SOFTWARE_RESET is reserved according to the + * standard specification, sdhci_reset() write this register directly + * without checking other reserved bits, that will clear BIT(3) which + * is defined as hardware reset on Spreadtrum's platform and clearing + * it by mistake will lead the card not work. So here we need to work + * around it. 
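TO_SPRD_HOST() above is the standard sdhci-pltfm layering: sdhci_pltfm_init(), called with sizeof(*sprd_host) in the probe further down, allocates the mmc_host, the sdhci_host, the sdhci_pltfm_host and the vendor area back to back, and each layer is reached from the previous one. The same accessor chain written out, with an invented vendor structure:

#include "sdhci-pltfm.h"

struct example_vendor_host {
	u32 base_rate;
};

static struct example_vendor_host *example_priv(struct sdhci_host *host)
{
	/* mmc_host -> sdhci_host -> sdhci_pltfm_host -> vendor data */
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return sdhci_pltfm_priv(pltfm_host);
}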
+ */ + if (unlikely(reg == SDHCI_SOFTWARE_RESET)) { + if (readb_relaxed(host->ioaddr + reg) & SDHCI_HW_RESET_CARD) + val |= SDHCI_HW_RESET_CARD; + } + + writeb_relaxed(val, host->ioaddr + reg); +} + +static inline void sdhci_sprd_sd_clk_off(struct sdhci_host *host) +{ + u16 ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + + ctrl &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL); +} + +static inline void +sdhci_sprd_set_dll_invert(struct sdhci_host *host, u32 mask, bool en) +{ + u32 dll_dly_offset; + + dll_dly_offset = sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_DLY_OFFSET); + if (en) + dll_dly_offset |= mask; + else + dll_dly_offset &= ~mask; + sdhci_writel(host, dll_dly_offset, SDHCI_SPRD_REG_32_DLL_DLY_OFFSET); +} + +static inline u32 sdhci_sprd_calc_div(u32 base_clk, u32 clk) +{ + u32 div; + + /* select 2x clock source */ + if (base_clk <= clk * 2) + return 0; + + div = (u32) (base_clk / (clk * 2)); + + if ((base_clk / div) > (clk * 2)) + div++; + + if (div > SDHCI_SPRD_CLK_MAX_DIV) + div = SDHCI_SPRD_CLK_MAX_DIV; + + if (div % 2) + div = (div + 1) / 2; + else + div = div / 2; + + return div; +} + +static inline void _sdhci_sprd_set_clock(struct sdhci_host *host, + unsigned int clk) +{ + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + u32 div, val, mask; + + div = sdhci_sprd_calc_div(sprd_host->base_rate, clk); + + clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8); + sdhci_enable_clk(host, clk); + + /* enable auto gate sdhc_enable_auto_gate */ + val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); + mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | + SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; + if (mask != (val & mask)) { + val |= mask; + sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); + } +} + +static void sdhci_sprd_set_clock(struct sdhci_host *host, unsigned int clock) +{ + bool en = false; + + if (clock == 0) { + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + } else if (clock != host->clock) { + sdhci_sprd_sd_clk_off(host); + _sdhci_sprd_set_clock(host, clock); + + if (clock <= 400000) + en = true; + sdhci_sprd_set_dll_invert(host, SDHCI_SPRD_BIT_CMD_DLY_INV | + SDHCI_SPRD_BIT_POSRD_DLY_INV, en); + } else { + _sdhci_sprd_set_clock(host, clock); + } +} + +static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + + return clk_round_rate(sprd_host->clk_sdio, ULONG_MAX); +} + +static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host) +{ + return 400000; +} + +static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u16 ctrl_2; + + if (timing == host->timing) + return; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + switch (timing) { + case MMC_TIMING_UHS_SDR12: + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + break; + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + case MMC_TIMING_UHS_SDR25: + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + break; + case MMC_TIMING_UHS_SDR50: + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + break; + case MMC_TIMING_UHS_SDR104: + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + break; + case MMC_TIMING_MMC_HS200: + ctrl_2 |= SDHCI_SPRD_CTRL_HS200; + break; + case MMC_TIMING_MMC_HS400: + ctrl_2 |= SDHCI_SPRD_CTRL_HS400; + break; + default: + break; + } + + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static void sdhci_sprd_hw_reset(struct sdhci_host *host) +{ + int val; + + /* + * 
Note: don't use sdhci_writeb() API here since it is redirected to + * sdhci_sprd_writeb() in which we have a workaround for + * SDHCI_SOFTWARE_RESET which would make bit SDHCI_HW_RESET_CARD can + * not be cleared. + */ + val = readb_relaxed(host->ioaddr + SDHCI_SOFTWARE_RESET); + val &= ~SDHCI_HW_RESET_CARD; + writeb_relaxed(val, host->ioaddr + SDHCI_SOFTWARE_RESET); + /* wait for 10 us */ + usleep_range(10, 20); + + val |= SDHCI_HW_RESET_CARD; + writeb_relaxed(val, host->ioaddr + SDHCI_SOFTWARE_RESET); + usleep_range(300, 500); +} + +static struct sdhci_ops sdhci_sprd_ops = { + .read_l = sdhci_sprd_readl, + .write_l = sdhci_sprd_writel, + .write_b = sdhci_sprd_writeb, + .set_clock = sdhci_sprd_set_clock, + .get_max_clock = sdhci_sprd_get_max_clock, + .get_min_clock = sdhci_sprd_get_min_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_sprd_set_uhs_signaling, + .hw_reset = sdhci_sprd_hw_reset, +}; + +static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + + host->flags |= sprd_host->flags & SDHCI_AUTO_CMD23; + + /* + * From version 4.10 onward, ARGUMENT2 register is also as 32-bit + * block count register which doesn't support stuff bits of + * CMD23 argument on Spreadtrum's sd host controller. + */ + if (host->version >= SDHCI_SPEC_410 && + mrq->sbc && (mrq->sbc->arg & SDHCI_SPRD_ARG2_STUFF) && + (host->flags & SDHCI_AUTO_CMD23)) + host->flags &= ~SDHCI_AUTO_CMD23; + + sdhci_request(mmc, mrq); +} + +static const struct sdhci_pltfm_data sdhci_sprd_pdata = { + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, + .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | + SDHCI_QUIRK2_USE_32BIT_BLK_CNT, + .ops = &sdhci_sprd_ops, +}; + +static int sdhci_sprd_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_sprd_host *sprd_host; + struct clk *clk; + int ret = 0; + + host = sdhci_pltfm_init(pdev, &sdhci_sprd_pdata, sizeof(*sprd_host)); + if (IS_ERR(host)) + return PTR_ERR(host); + + host->dma_mask = DMA_BIT_MASK(64); + pdev->dev.dma_mask = &host->dma_mask; + host->mmc_host_ops.request = sdhci_sprd_request; + + host->mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_ERASE | MMC_CAP_CMD23; + ret = mmc_of_parse(host->mmc); + if (ret) + goto pltfm_free; + + sprd_host = TO_SPRD_HOST(host); + + clk = devm_clk_get(&pdev->dev, "sdio"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto pltfm_free; + } + sprd_host->clk_sdio = clk; + sprd_host->base_rate = clk_get_rate(sprd_host->clk_sdio); + if (!sprd_host->base_rate) + sprd_host->base_rate = SDHCI_SPRD_CLK_DEF_RATE; + + clk = devm_clk_get(&pdev->dev, "enable"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto pltfm_free; + } + sprd_host->clk_enable = clk; + + ret = clk_prepare_enable(sprd_host->clk_sdio); + if (ret) + goto pltfm_free; + + clk_prepare_enable(sprd_host->clk_enable); + if (ret) + goto clk_disable; + + sdhci_sprd_init_config(host); + host->version = sdhci_readw(host, SDHCI_HOST_VERSION); + sprd_host->version = ((host->version & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT); + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); + + sdhci_enable_v4_mode(host); + + ret = sdhci_setup_host(host); + if (ret) + goto pm_runtime_disable; + + 
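sdhci_sprd_probe(), above and continuing below, brackets its hardware setup with the usual runtime-PM sequence: take a usage reference and mark the device active before enabling runtime PM, then drop to autosuspend once the host has been added. Reduced to just those calls (a sketch, not the probe itself; the 50 ms delay is the value used above):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void example_probe_pm_begin(struct device *dev)
{
	/* Keep the device active while probe still touches the hardware. */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_suspend_ignore_children(dev, true);
}

static void example_probe_pm_end(struct device *dev)
{
	/* Allow autosuspend now that registration has succeeded. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

On the error path the setup is undone with pm_runtime_disable() plus pm_runtime_set_suspended(), which is what the pm_runtime_disable: label below does.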
sprd_host->flags = host->flags; + + ret = __sdhci_add_host(host); + if (ret) + goto err_cleanup_host; + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; + +err_cleanup_host: + sdhci_cleanup_host(host); + +pm_runtime_disable: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + + clk_disable_unprepare(sprd_host->clk_enable); + +clk_disable: + clk_disable_unprepare(sprd_host->clk_sdio); + +pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_sprd_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + struct mmc_host *mmc = host->mmc; + + mmc_remove_host(mmc); + clk_disable_unprepare(sprd_host->clk_sdio); + clk_disable_unprepare(sprd_host->clk_enable); + + mmc_free_host(mmc); + + return 0; +} + +static const struct of_device_id sdhci_sprd_of_match[] = { + { .compatible = "sprd,sdhci-r11", }, + { } +}; +MODULE_DEVICE_TABLE(of, sdhci_sprd_of_match); + +#ifdef CONFIG_PM +static int sdhci_sprd_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + + sdhci_runtime_suspend_host(host); + + clk_disable_unprepare(sprd_host->clk_sdio); + clk_disable_unprepare(sprd_host->clk_enable); + + return 0; +} + +static int sdhci_sprd_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + int ret; + + ret = clk_prepare_enable(sprd_host->clk_enable); + if (ret) + return ret; + + ret = clk_prepare_enable(sprd_host->clk_sdio); + if (ret) { + clk_disable_unprepare(sprd_host->clk_enable); + return ret; + } + + sdhci_runtime_resume_host(host); + + return 0; +} +#endif + +static const struct dev_pm_ops sdhci_sprd_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(sdhci_sprd_runtime_suspend, + sdhci_sprd_runtime_resume, NULL) +}; + +static struct platform_driver sdhci_sprd_driver = { + .probe = sdhci_sprd_probe, + .remove = sdhci_sprd_remove, + .driver = { + .name = "sdhci_sprd_r11", + .of_match_table = of_match_ptr(sdhci_sprd_of_match), + .pm = &sdhci_sprd_pm_ops, + }, +}; +module_platform_driver(sdhci_sprd_driver); + +MODULE_DESCRIPTION("Spreadtrum sdio host controller r11 driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sdhci-sprd-r11"); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 908b23e6a03c..7b95d088fdef 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -16,17 +16,21 @@ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/iopoll.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/pinctrl/consumer.h> +#include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/slot-gpio.h> #include <linux/gpio/consumer.h> +#include <linux/ktime.h> #include "sdhci-pltfm.h" @@ -34,40 +38,96 @@ #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100 #define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000 #define SDHCI_CLOCK_CTRL_TAP_SHIFT 16 +#define SDHCI_CLOCK_CTRL_TRIM_MASK 0x1f000000 +#define SDHCI_CLOCK_CTRL_TRIM_SHIFT 24 #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5) #define 
SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3) #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2) -#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120 -#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8 -#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10 -#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 -#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200 +#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL 0x104 +#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE BIT(31) -#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4 -#define SDHCI_AUTO_CAL_START BIT(31) -#define SDHCI_AUTO_CAL_ENABLE BIT(29) +#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES 0x10c +#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK 0x00003f00 +#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8 -#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) -#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) -#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2) -#define NVQUIRK_ENABLE_SDR50 BIT(3) -#define NVQUIRK_ENABLE_SDR104 BIT(4) -#define NVQUIRK_ENABLE_DDR50 BIT(5) -#define NVQUIRK_HAS_PADCALIB BIT(6) +#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120 +#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8 +#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10 +#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 +#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200 + +#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG 0x1b0 +#define SDHCI_TEGRA_DLLCAL_CALIBRATE BIT(31) + +#define SDHCI_TEGRA_VENDOR_DLLCAL_STA 0x1bc +#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE BIT(31) + +#define SDHCI_VNDR_TUN_CTRL0_0 0x1c0 +#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000 + +#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4 +#define SDHCI_AUTO_CAL_START BIT(31) +#define SDHCI_AUTO_CAL_ENABLE BIT(29) +#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK 0x0000ffff + +#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL 0x1e0 +#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f +#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7 +#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31) + +#define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec +#define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31) + +#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) +#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) +#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2) +#define NVQUIRK_ENABLE_SDR50 BIT(3) +#define NVQUIRK_ENABLE_SDR104 BIT(4) +#define NVQUIRK_ENABLE_DDR50 BIT(5) +#define NVQUIRK_HAS_PADCALIB BIT(6) +#define NVQUIRK_NEEDS_PAD_CONTROL BIT(7) +#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8) struct sdhci_tegra_soc_data { const struct sdhci_pltfm_data *pdata; u32 nvquirks; }; +/* Magic pull up and pull down pad calibration offsets */ +struct sdhci_tegra_autocal_offsets { + u32 pull_up_3v3; + u32 pull_down_3v3; + u32 pull_up_3v3_timeout; + u32 pull_down_3v3_timeout; + u32 pull_up_1v8; + u32 pull_down_1v8; + u32 pull_up_1v8_timeout; + u32 pull_down_1v8_timeout; + u32 pull_up_sdr104; + u32 pull_down_sdr104; + u32 pull_up_hs400; + u32 pull_down_hs400; +}; + struct sdhci_tegra { const struct sdhci_tegra_soc_data *soc_data; struct gpio_desc *power_gpio; bool ddr_signaling; bool pad_calib_required; + bool pad_control_available; struct reset_control *rst; + struct pinctrl *pinctrl_sdmmc; + struct pinctrl_state *pinctrl_state_3v3; + struct pinctrl_state *pinctrl_state_1v8; + + struct sdhci_tegra_autocal_offsets autocal_offsets; + ktime_t last_calib; + + u32 default_tap; + u32 default_trim; + u32 dqs_trim; }; static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) @@ -133,23 +193,149 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) } } +static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable) +{ + bool 
status; + u32 reg; + + reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + status = !!(reg & SDHCI_CLOCK_CARD_EN); + + if (status == enable) + return status; + + if (enable) + reg |= SDHCI_CLOCK_CARD_EN; + else + reg &= ~SDHCI_CLOCK_CARD_EN; + + sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL); + + return status; +} + +static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg) +{ + bool is_tuning_cmd = 0; + bool clk_enabled; + u8 cmd; + + if (reg == SDHCI_COMMAND) { + cmd = SDHCI_GET_CMD(val); + is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK || + cmd == MMC_SEND_TUNING_BLOCK_HS200; + } + + if (is_tuning_cmd) + clk_enabled = tegra_sdhci_configure_card_clk(host, 0); + + writew(val, host->ioaddr + reg); + + if (is_tuning_cmd) { + udelay(1); + tegra_sdhci_configure_card_clk(host, clk_enabled); + } +} + static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) { return mmc_gpio_get_ro(host->mmc); } +static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + int has_1v8, has_3v3; + + /* + * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad + * voltage configuration in order to perform voltage switching. This + * means that valid pinctrl info is required on SDHCI instances capable + * of performing voltage switching. Whether or not an SDHCI instance is + * capable of voltage switching is determined based on the regulator. + */ + + if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL)) + return true; + + if (IS_ERR(host->mmc->supply.vqmmc)) + return false; + + has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc, + 1700000, 1950000); + + has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc, + 2700000, 3600000); + + if (has_1v8 == 1 && has_3v3 == 1) + return tegra_host->pad_control_available; + + /* Fixed voltage, no pad control required. */ + return true; +} + +static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + bool card_clk_enabled = false; + u32 reg; + + /* + * Touching the tap values is a bit tricky on some SoC generations. + * The quirk enables a workaround for a glitch that sometimes occurs if + * the tap values are changed. 
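tegra_sdhci_is_pad_and_regulator_valid() above hinges on regulator_is_supported_voltage(): pad control only matters when vqmmc can actually reach both the 1.8 V and the 3.3 V ranges, otherwise the rail is fixed and no signal-voltage switching will ever be requested. The check on its own (helper name is made up; the voltage windows are the ones used above):

#include <linux/regulator/consumer.h>
#include <linux/types.h>

/* Can this supply serve both I/O voltage ranges needed for UHS switching? */
static bool example_vqmmc_supports_switching(struct regulator *vqmmc)
{
	int has_1v8 = regulator_is_supported_voltage(vqmmc, 1700000, 1950000);
	int has_3v3 = regulator_is_supported_voltage(vqmmc, 2700000, 3600000);

	/* 1 = supported, 0 = not supported, negative values are errors. */
	return has_1v8 == 1 && has_3v3 == 1;
}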
+ */ + + if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP) + card_clk_enabled = tegra_sdhci_configure_card_clk(host, false); + + reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK; + reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT; + sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + + if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP && + card_clk_enabled) { + udelay(1); + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + tegra_sdhci_configure_card_clk(host, card_clk_enabled); + } +} + +static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 val; + + val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); + + if (ios->enhanced_strobe) + val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; + else + val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; + + sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); + +} + static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; - u32 misc_ctrl, clk_ctrl; + u32 misc_ctrl, clk_ctrl, pad_ctrl; sdhci_reset(host, mask); if (!(mask & SDHCI_RESET_ALL)) return; + tegra_sdhci_set_tap(host, tegra_host->default_tap); + misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); @@ -158,15 +344,10 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) SDHCI_MISC_CTRL_ENABLE_DDR50 | SDHCI_MISC_CTRL_ENABLE_SDR104); - clk_ctrl &= ~SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE; + clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK | + SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE); - /* - * If the board does not define a regulator for the SDHCI - * IO voltage, then don't advertise support for UHS modes - * even if the device supports it because the IO voltage - * cannot be configured. - */ - if (!IS_ERR(host->mmc->supply.vqmmc)) { + if (tegra_sdhci_is_pad_and_regulator_valid(host)) { /* Erratum: Enable SDHCI spec v3.00 support */ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; @@ -181,24 +362,237 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; } + clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT; + sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); - if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) + if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) { + pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK; + pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL; + sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + tegra_host->pad_calib_required = true; + } tegra_host->ddr_signaling = false; } -static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) +static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable) { u32 val; - mdelay(1); + /* + * Enable or disable the additional I/O pad used by the drive strength + * calibration process. 
+ */ + val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + + if (enable) + val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD; + else + val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD; + + sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + + if (enable) + usleep_range(1, 2); +} + +static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host, + u16 pdpu) +{ + u32 reg; + + reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); + reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK; + reg |= pdpu; + sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); +} + +static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct sdhci_tegra_autocal_offsets offsets = + tegra_host->autocal_offsets; + struct mmc_ios *ios = &host->mmc->ios; + bool card_clk_enabled; + u16 pdpu; + u32 reg; + int ret; + + switch (ios->timing) { + case MMC_TIMING_UHS_SDR104: + pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104; + break; + case MMC_TIMING_MMC_HS400: + pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400; + break; + default: + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) + pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8; + else + pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3; + } + + tegra_sdhci_set_pad_autocal_offset(host, pdpu); + + card_clk_enabled = tegra_sdhci_configure_card_clk(host, false); + + tegra_sdhci_configure_cal_pad(host, true); + + reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); + reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START; + sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); + + usleep_range(1, 2); + /* 10 ms timeout */ + ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS, + reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE), + 1000, 10000); + + tegra_sdhci_configure_cal_pad(host, false); + + tegra_sdhci_configure_card_clk(host, card_clk_enabled); + + if (ret) { + dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n"); + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) + pdpu = offsets.pull_down_1v8_timeout << 8 | + offsets.pull_up_1v8_timeout; + else + pdpu = offsets.pull_down_3v3_timeout << 8 | + offsets.pull_up_3v3_timeout; + + /* Disable automatic calibration and use fixed offsets */ + reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); + reg &= ~SDHCI_AUTO_CAL_ENABLE; + sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); - val = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); - val |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START; - sdhci_writel(host,val, SDHCI_TEGRA_AUTO_CAL_CONFIG); + tegra_sdhci_set_pad_autocal_offset(host, pdpu); + } +} + +static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct sdhci_tegra_autocal_offsets *autocal = + &tegra_host->autocal_offsets; + int err; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-3v3", + &autocal->pull_up_3v3); + if (err) + autocal->pull_up_3v3 = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-3v3", + &autocal->pull_down_3v3); + if (err) + autocal->pull_down_3v3 = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-1v8", + &autocal->pull_up_1v8); + if (err) + autocal->pull_up_1v8 = 0; + + err = device_property_read_u32(host->mmc->parent, + 
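tegra_sdhci_pad_autocalib() above waits for the calibration engine with readl_poll_timeout(): the macro keeps re-reading the register into its second argument, sleeping roughly sleep_us between reads, and returns -ETIMEDOUT if the condition never becomes true within timeout_us, 0 otherwise. The same pattern as a standalone helper (register offset and bit are placeholders):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_STATUS_REG	0x1ec
#define EXAMPLE_BUSY		BIT(31)

static int example_wait_idle(void __iomem *base)
{
	u32 reg;

	/* Poll every 1000 us, give up after 10 ms, as in the hunk above. */
	return readl_poll_timeout(base + EXAMPLE_STATUS_REG, reg,
				  !(reg & EXAMPLE_BUSY), 1000, 10000);
}

The DLL calibration wait in tegra_sdhci_hs400_dll_cal() further down uses the same macro with a 5 ms budget.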
"nvidia,pad-autocal-pull-down-offset-1v8", + &autocal->pull_down_1v8); + if (err) + autocal->pull_down_1v8 = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-3v3-timeout", + &autocal->pull_up_3v3); + if (err) + autocal->pull_up_3v3_timeout = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-3v3-timeout", + &autocal->pull_down_3v3); + if (err) + autocal->pull_down_3v3_timeout = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-1v8-timeout", + &autocal->pull_up_1v8); + if (err) + autocal->pull_up_1v8_timeout = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-1v8-timeout", + &autocal->pull_down_1v8); + if (err) + autocal->pull_down_1v8_timeout = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-sdr104", + &autocal->pull_up_sdr104); + if (err) + autocal->pull_up_sdr104 = autocal->pull_up_1v8; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-sdr104", + &autocal->pull_down_sdr104); + if (err) + autocal->pull_down_sdr104 = autocal->pull_down_1v8; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-hs400", + &autocal->pull_up_hs400); + if (err) + autocal->pull_up_hs400 = autocal->pull_up_1v8; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-hs400", + &autocal->pull_down_hs400); + if (err) + autocal->pull_down_hs400 = autocal->pull_down_1v8; +} + +static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib); + + /* 100 ms calibration interval is specified in the TRM */ + if (ktime_to_ms(since_calib) > 100) { + tegra_sdhci_pad_autocalib(host); + tegra_host->last_calib = ktime_get(); + } + + sdhci_request(mmc, mrq); +} + +static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + int err; + + err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap", + &tegra_host->default_tap); + if (err) + tegra_host->default_tap = 0; + + err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim", + &tegra_host->default_trim); + if (err) + tegra_host->default_trim = 0; + + err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim", + &tegra_host->dqs_trim); + if (err) + tegra_host->dqs_trim = 0x11; } static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) @@ -237,34 +631,82 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) } } -static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host, - unsigned timing) +static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); - if (timing == MMC_TIMING_UHS_DDR50 || - timing == MMC_TIMING_MMC_DDR52) - tegra_host->ddr_signaling = true; - - sdhci_set_uhs_signaling(host, timing); + return clk_round_rate(pltfm_host->clk, UINT_MAX); } -static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host) +static void 
tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + u32 val; - return clk_round_rate(pltfm_host->clk, UINT_MAX); + val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES); + val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK; + val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT; + sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES); } -static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap) +static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host) { u32 reg; + int err; + + reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG); + reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE; + sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG); + + /* 1 ms sleep, 5 ms timeout */ + err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA, + reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE), + 1000, 5000); + if (err) + dev_err(mmc_dev(host->mmc), + "HS400 delay line calibration timed out\n"); +} - reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); - reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK; - reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT; - sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); +static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host, + unsigned timing) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + bool set_default_tap = false; + bool set_dqs_trim = false; + bool do_hs400_dll_cal = false; + + switch (timing) { + case MMC_TIMING_UHS_SDR50: + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + /* Don't set default tap on tunable modes. */ + break; + case MMC_TIMING_MMC_HS400: + set_dqs_trim = true; + do_hs400_dll_cal = true; + break; + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + tegra_host->ddr_signaling = true; + set_default_tap = true; + break; + default: + set_default_tap = true; + break; + } + + sdhci_set_uhs_signaling(host, timing); + + tegra_sdhci_pad_autocalib(host); + + if (set_default_tap) + tegra_sdhci_set_tap(host, tegra_host->default_tap); + + if (set_dqs_trim) + tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim); + + if (do_hs400_dll_cal) + tegra_sdhci_hs400_dll_cal(host); } static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) @@ -301,6 +743,89 @@ static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) return mmc_send_tuning(host->mmc, opcode, NULL); } +static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + int ret; + + if (!tegra_host->pad_control_available) + return 0; + + if (voltage == MMC_SIGNAL_VOLTAGE_180) { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + tegra_host->pinctrl_state_1v8); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "setting 1.8V failed, ret: %d\n", ret); + } else { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + tegra_host->pinctrl_state_3v3); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "setting 3.3V failed, ret: %d\n", ret); + } + + return ret; +} + +static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + int ret = 0; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + ret = tegra_sdhci_set_padctrl(host, 
ios->signal_voltage); + if (ret < 0) + return ret; + ret = sdhci_start_signal_voltage_switch(mmc, ios); + } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + ret = sdhci_start_signal_voltage_switch(mmc, ios); + if (ret < 0) + return ret; + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage); + } + + if (tegra_host->pad_calib_required) + tegra_sdhci_pad_autocalib(host); + + return ret; +} + +static int tegra_sdhci_init_pinctrl_info(struct device *dev, + struct sdhci_tegra *tegra_host) +{ + tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev); + if (IS_ERR(tegra_host->pinctrl_sdmmc)) { + dev_dbg(dev, "No pinctrl info, err: %ld\n", + PTR_ERR(tegra_host->pinctrl_sdmmc)); + return -1; + } + + tegra_host->pinctrl_state_3v3 = + pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3"); + if (IS_ERR(tegra_host->pinctrl_state_3v3)) { + dev_warn(dev, "Missing 3.3V pad state, err: %ld\n", + PTR_ERR(tegra_host->pinctrl_state_3v3)); + return -1; + } + + tegra_host->pinctrl_state_1v8 = + pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8"); + if (IS_ERR(tegra_host->pinctrl_state_1v8)) { + dev_warn(dev, "Missing 1.8V pad state, err: %ld\n", + PTR_ERR(tegra_host->pinctrl_state_1v8)); + return -1; + } + + tegra_host->pad_control_available = true; + + return 0; +} + static void tegra_sdhci_voltage_switch(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -421,6 +946,19 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = { .pdata = &sdhci_tegra124_pdata, }; +static const struct sdhci_ops tegra210_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, + .read_w = tegra_sdhci_readw, + .write_w = tegra210_sdhci_writew, + .write_l = tegra_sdhci_writel, + .set_clock = tegra_sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = tegra_sdhci_reset, + .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, + .voltage_switch = tegra_sdhci_voltage_switch, + .get_max_clock = tegra_sdhci_get_max_clock, +}; + static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | @@ -429,11 +967,28 @@ static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, - .ops = &tegra114_sdhci_ops, + .ops = &tegra210_sdhci_ops, }; static const struct sdhci_tegra_soc_data soc_data_tegra210 = { .pdata = &sdhci_tegra210_pdata, + .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL | + NVQUIRK_HAS_PADCALIB | + NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | + NVQUIRK_ENABLE_SDR50 | + NVQUIRK_ENABLE_SDR104, +}; + +static const struct sdhci_ops tegra186_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, + .read_w = tegra_sdhci_readw, + .write_l = tegra_sdhci_writel, + .set_clock = tegra_sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = tegra_sdhci_reset, + .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, + .voltage_switch = tegra_sdhci_voltage_switch, + .get_max_clock = tegra_sdhci_get_max_clock, }; static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { @@ -452,11 +1007,16 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { * But it is not supported as of now. 
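Worth noting in sdhci_tegra_start_signal_voltage_switch() above: the pad reconfiguration and the core's regulator switch are ordered differently per direction, with the pads set up before the switch when going to 3.3 V and after it when going to 1.8 V, presumably so the pads are never left in the 1.8 V state while the rail still sits at 3.3 V. The ordering by itself, with a stand-in for the pinctrl call:

#include <linux/mmc/host.h>
#include "sdhci.h"

/* Placeholder for pinctrl_select_state() on the sdmmc pad group. */
static int example_set_pads(struct sdhci_host *host, int signal_voltage)
{
	return 0;
}

static int example_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		/* Pads first, then raise the rail. */
		ret = example_set_pads(host, ios->signal_voltage);
		if (ret < 0)
			return ret;
		return sdhci_start_signal_voltage_switch(mmc, ios);
	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
		/* Lower the rail first, then move the pads. */
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
		if (ret < 0)
			return ret;
		return example_set_pads(host, ios->signal_voltage);
	}

	return 0;
}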
*/ SDHCI_QUIRK2_BROKEN_64_BIT_DMA, - .ops = &tegra114_sdhci_ops, + .ops = &tegra186_sdhci_ops, }; static const struct sdhci_tegra_soc_data soc_data_tegra186 = { .pdata = &sdhci_tegra186_pdata, + .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL | + NVQUIRK_HAS_PADCALIB | + NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | + NVQUIRK_ENABLE_SDR50 | + NVQUIRK_ENABLE_SDR104, }; static const struct of_device_id sdhci_tegra_dt_match[] = { @@ -493,8 +1053,23 @@ static int sdhci_tegra_probe(struct platform_device *pdev) tegra_host = sdhci_pltfm_priv(pltfm_host); tegra_host->ddr_signaling = false; tegra_host->pad_calib_required = false; + tegra_host->pad_control_available = false; tegra_host->soc_data = soc_data; + if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) { + rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host); + if (rc == 0) + host->mmc_host_ops.start_signal_voltage_switch = + sdhci_tegra_start_signal_voltage_switch; + } + + /* Hook to periodically rerun pad calibration */ + if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) + host->mmc_host_ops.request = tegra_sdhci_request; + + host->mmc_host_ops.hs400_enhanced_strobe = + tegra_sdhci_hs400_enhanced_strobe; + rc = mmc_of_parse(host->mmc); if (rc) goto err_parse_dt; @@ -502,6 +1077,10 @@ static int sdhci_tegra_probe(struct platform_device *pdev) if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) host->mmc->caps |= MMC_CAP_1_8V_DDR; + tegra_sdhci_parse_pad_autocal_dt(host); + + tegra_sdhci_parse_tap_and_trim(host); + tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power", GPIOD_OUT_HIGH); if (IS_ERR(tegra_host->power_gpio)) { diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index c335052d0c02..5956e90380e8 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -660,8 +660,8 @@ static int get_dt_pad_ctrl_data(struct sdhci_host *host, return 0; if (of_address_to_resource(np, 1, &iomem)) { - dev_err(mmc_dev(host->mmc), "Unable to find SoC PAD ctrl register address for %s\n", - np->name); + dev_err(mmc_dev(host->mmc), "Unable to find SoC PAD ctrl register address for %pOFn\n", + np); return -EINVAL; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 1b3fbd9bd5c5..99bdae53fa2e 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -123,6 +123,29 @@ EXPORT_SYMBOL_GPL(sdhci_dumpregs); * * \*****************************************************************************/ +static void sdhci_do_enable_v4_mode(struct sdhci_host *host) +{ + u16 ctrl2; + + ctrl2 = sdhci_readb(host, SDHCI_HOST_CONTROL2); + if (ctrl2 & SDHCI_CTRL_V4_MODE) + return; + + ctrl2 |= SDHCI_CTRL_V4_MODE; + sdhci_writeb(host, ctrl2, SDHCI_HOST_CONTROL); +} + +/* + * This can be called before sdhci_add_host() by Vendor's host controller + * driver to enable v4 mode if supported. + */ +void sdhci_enable_v4_mode(struct sdhci_host *host) +{ + host->v4_mode = true; + sdhci_do_enable_v4_mode(host); +} +EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); + static inline bool sdhci_data_line_cmd(struct mmc_command *cmd) { return cmd->data || cmd->flags & MMC_RSP_BUSY; @@ -243,6 +266,52 @@ static void sdhci_set_default_irqs(struct sdhci_host *host) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } +static void sdhci_config_dma(struct sdhci_host *host) +{ + u8 ctrl; + u16 ctrl2; + + if (host->version < SDHCI_SPEC_200) + return; + + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + + /* + * Always adjust the DMA selection as some controllers + * (e.g. 
JMicron) can't do PIO properly when the selection + * is ADMA. + */ + ctrl &= ~SDHCI_CTRL_DMA_MASK; + if (!(host->flags & SDHCI_REQ_USE_DMA)) + goto out; + + /* Note if DMA Select is zero then SDMA is selected */ + if (host->flags & SDHCI_USE_ADMA) + ctrl |= SDHCI_CTRL_ADMA32; + + if (host->flags & SDHCI_USE_64_BIT_DMA) { + /* + * If v4 mode, all supported DMA can be 64-bit addressing if + * controller supports 64-bit system address, otherwise only + * ADMA can support 64-bit addressing. + */ + if (host->v4_mode) { + ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl2 |= SDHCI_CTRL_64BIT_ADDR; + sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); + } else if (host->flags & SDHCI_USE_ADMA) { + /* + * Don't need to undo SDHCI_CTRL_ADMA32 in order to + * set SDHCI_CTRL_ADMA64. + */ + ctrl |= SDHCI_CTRL_ADMA64; + } + } + +out: + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} + static void sdhci_init(struct sdhci_host *host, int soft) { struct mmc_host *mmc = host->mmc; @@ -252,6 +321,9 @@ static void sdhci_init(struct sdhci_host *host, int soft) else sdhci_do_reset(host, SDHCI_RESET_ALL); + if (host->v4_mode) + sdhci_do_enable_v4_mode(host); + sdhci_set_default_irqs(host); host->cqe_on = false; @@ -554,10 +626,10 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) local_irq_restore(*flags); } -static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc, - dma_addr_t addr, int len, unsigned cmd) +void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, unsigned int cmd) { - struct sdhci_adma2_64_desc *dma_desc = desc; + struct sdhci_adma2_64_desc *dma_desc = *desc; /* 32-bit and 64-bit descriptors have these members in same position */ dma_desc->cmd = cpu_to_le16(cmd); @@ -566,6 +638,19 @@ static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc, if (host->flags & SDHCI_USE_64_BIT_DMA) dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32); + + *desc += host->desc_sz; +} +EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); + +static inline void __sdhci_adma_write_desc(struct sdhci_host *host, + void **desc, dma_addr_t addr, + int len, unsigned int cmd) +{ + if (host->ops->adma_write_desc) + host->ops->adma_write_desc(host, desc, addr, len, cmd); + else + sdhci_adma_write_desc(host, desc, addr, len, cmd); } static void sdhci_adma_mark_end(void *desc) @@ -618,28 +703,24 @@ static void sdhci_adma_table_pre(struct sdhci_host *host, } /* tran, valid */ - sdhci_adma_write_desc(host, desc, align_addr, offset, - ADMA2_TRAN_VALID); + __sdhci_adma_write_desc(host, &desc, align_addr, + offset, ADMA2_TRAN_VALID); BUG_ON(offset > 65536); align += SDHCI_ADMA2_ALIGN; align_addr += SDHCI_ADMA2_ALIGN; - desc += host->desc_sz; - addr += offset; len -= offset; } BUG_ON(len > 65536); - if (len) { - /* tran, valid */ - sdhci_adma_write_desc(host, desc, addr, len, - ADMA2_TRAN_VALID); - desc += host->desc_sz; - } + /* tran, valid */ + if (len) + __sdhci_adma_write_desc(host, &desc, addr, len, + ADMA2_TRAN_VALID); /* * If this triggers then we have a calculation bug @@ -656,7 +737,7 @@ static void sdhci_adma_table_pre(struct sdhci_host *host, } } else { /* Add a terminating entry - nop, end, valid */ - sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID); + __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); } } @@ -701,7 +782,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host, } } -static u32 sdhci_sdma_address(struct sdhci_host *host) +static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) { if 
(host->bounce_buffer) return host->bounce_addr; @@ -709,6 +790,17 @@ static u32 sdhci_sdma_address(struct sdhci_host *host) return sg_dma_address(host->data->sg); } +static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) +{ + if (host->v4_mode) { + sdhci_writel(host, addr, SDHCI_ADMA_ADDRESS); + if (host->flags & SDHCI_USE_64_BIT_DMA) + sdhci_writel(host, (u64)addr >> 32, SDHCI_ADMA_ADDRESS_HI); + } else { + sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); + } +} + static unsigned int sdhci_target_timeout(struct sdhci_host *host, struct mmc_command *cmd, struct mmc_data *data) @@ -876,7 +968,6 @@ static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) { - u8 ctrl; struct mmc_data *data = cmd->data; host->data_timeout = 0; @@ -968,30 +1059,11 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) SDHCI_ADMA_ADDRESS_HI); } else { WARN_ON(sg_cnt != 1); - sdhci_writel(host, sdhci_sdma_address(host), - SDHCI_DMA_ADDRESS); + sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); } } - /* - * Always adjust the DMA selection as some controllers - * (e.g. JMicron) can't do PIO properly when the selection - * is ADMA. - */ - if (host->version >= SDHCI_SPEC_200) { - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - ctrl &= ~SDHCI_CTRL_DMA_MASK; - if ((host->flags & SDHCI_REQ_USE_DMA) && - (host->flags & SDHCI_USE_ADMA)) { - if (host->flags & SDHCI_USE_64_BIT_DMA) - ctrl |= SDHCI_CTRL_ADMA64; - else - ctrl |= SDHCI_CTRL_ADMA32; - } else { - ctrl |= SDHCI_CTRL_SDMA; - } - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - } + sdhci_config_dma(host); if (!(host->flags & SDHCI_REQ_USE_DMA)) { int flags; @@ -1010,7 +1082,19 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) /* Set the DMA boundary value and block size */ sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), SDHCI_BLOCK_SIZE); - sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + + /* + * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count + * can be supported, in that case 16-bit block count register must be 0. + */ + if (host->version >= SDHCI_SPEC_410 && host->v4_mode && + (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { + if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) + sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); + sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); + } else { + sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + } } static inline bool sdhci_auto_cmd12(struct sdhci_host *host, @@ -1020,6 +1104,43 @@ static inline bool sdhci_auto_cmd12(struct sdhci_host *host, !mrq->cap_cmd_during_tfr; } +static inline void sdhci_auto_cmd_select(struct sdhci_host *host, + struct mmc_command *cmd, + u16 *mode) +{ + bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && + (cmd->opcode != SD_IO_RW_EXTENDED); + bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); + u16 ctrl2; + + /* + * In case of Version 4.10 or later, use of 'Auto CMD Auto + * Select' is recommended rather than use of 'Auto CMD12 + * Enable' or 'Auto CMD23 Enable'. 
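A minimal sketch of how a vendor sdhci-pltfm driver could opt in to the new 32-bit block count path together with V4 mode; foo_sdhci_probe, foo_sdhci_pdata and the omitted ops/quirks are assumptions, only sdhci_enable_v4_mode() and SDHCI_QUIRK2_USE_32BIT_BLK_CNT come from this series.

#include <linux/platform_device.h>

#include "sdhci-pltfm.h"

static const struct sdhci_pltfm_data foo_sdhci_pdata = {
	/*
	 * Only safe when the upper CMD23 argument bits are not reused;
	 * see the SDHCI_QUIRK2_USE_32BIT_BLK_CNT comment in sdhci.h.
	 * .ops and the remaining quirks are elided from this sketch.
	 */
	.quirks2 = SDHCI_QUIRK2_USE_32BIT_BLK_CNT,
};

static int foo_sdhci_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	int ret;

	host = sdhci_pltfm_init(pdev, &foo_sdhci_pdata, 0);
	if (IS_ERR(host))
		return PTR_ERR(host);

	/*
	 * Must run before sdhci_add_host(): sdhci_setup_host() looks at
	 * host->v4_mode when reading the capabilities, and the 32-bit
	 * block count register is only programmed in V4 mode.
	 */
	sdhci_enable_v4_mode(host);

	ret = sdhci_add_host(host);
	if (ret)
		sdhci_pltfm_free(pdev);

	return ret;
}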
+ */ + if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) { + *mode |= SDHCI_TRNS_AUTO_SEL; + + ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (use_cmd23) + ctrl2 |= SDHCI_CMD23_ENABLE; + else + ctrl2 &= ~SDHCI_CMD23_ENABLE; + sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); + + return; + } + + /* + * If we are sending CMD23, CMD12 never gets sent + * on successful completion (so no Auto-CMD12). + */ + if (use_cmd12) + *mode |= SDHCI_TRNS_AUTO_CMD12; + else if (use_cmd23) + *mode |= SDHCI_TRNS_AUTO_CMD23; +} + static void sdhci_set_transfer_mode(struct sdhci_host *host, struct mmc_command *cmd) { @@ -1048,17 +1169,9 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; - /* - * If we are sending CMD23, CMD12 never gets sent - * on successful completion (so no Auto-CMD12). - */ - if (sdhci_auto_cmd12(host, cmd->mrq) && - (cmd->opcode != SD_IO_RW_EXTENDED)) - mode |= SDHCI_TRNS_AUTO_CMD12; - else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { - mode |= SDHCI_TRNS_AUTO_CMD23; + sdhci_auto_cmd_select(host, cmd, &mode); + if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); - } } if (data->flags & MMC_DATA_READ) @@ -1630,7 +1743,7 @@ EXPORT_SYMBOL_GPL(sdhci_set_power); * * \*****************************************************************************/ -static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) +void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct sdhci_host *host; int present; @@ -1669,6 +1782,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } +EXPORT_SYMBOL_GPL(sdhci_request); void sdhci_set_bus_width(struct sdhci_host *host, int width) { @@ -2219,7 +2333,7 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) } EXPORT_SYMBOL_GPL(sdhci_send_tuning); -static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) +static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) { int i; @@ -2236,13 +2350,13 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", mmc_hostname(host->mmc)); sdhci_abort_tuning(host, opcode); - return; + return -ETIMEDOUT; } ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { if (ctrl & SDHCI_CTRL_TUNED_CLK) - return; /* Success! */ + return 0; /* Success! */ break; } @@ -2254,6 +2368,7 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", mmc_hostname(host->mmc)); sdhci_reset_tuning(host); + return -EAGAIN; } int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) @@ -2315,7 +2430,7 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) sdhci_start_tuning(host); - __sdhci_execute_tuning(host, opcode); + host->tuning_err = __sdhci_execute_tuning(host, opcode); sdhci_end_tuning(host); out: @@ -2802,7 +2917,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) * some controllers are faulty, don't trust them. 
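A short sketch of how a vendor tuning callback could consume the new host->tuning_err value; foo_execute_tuning is an invented name, only sdhci_execute_tuning() and the tuning_err field come from this series.

#include <linux/mmc/host.h>

#include "sdhci.h"

static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	ret = sdhci_execute_tuning(mmc, opcode);
	if (ret)
		return ret;

	/*
	 * sdhci_execute_tuning() can return 0 even after falling back to
	 * the fixed sampling clock; tuning_err records whether the tuning
	 * sequence timed out (-ETIMEDOUT) or exhausted its loops (-EAGAIN),
	 * so a vendor retry/retuning policy can key off it here.
	 */
	if (host->tuning_err)
		dev_dbg(mmc_dev(mmc), "tuning fell back to fixed clock: %d\n",
			host->tuning_err);

	return 0;
}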
*/ if (intmask & SDHCI_INT_DMA_END) { - u32 dmastart, dmanow; + dma_addr_t dmastart, dmanow; dmastart = sdhci_sdma_address(host); dmanow = dmastart + host->data->bytes_xfered; @@ -2810,12 +2925,12 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) * Force update to the next DMA block boundary. */ dmanow = (dmanow & - ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + + ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + SDHCI_DEFAULT_BOUNDARY_SIZE; host->data->bytes_xfered = dmanow - dmastart; - DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n", - dmastart, host->data->bytes_xfered, dmanow); - sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); + DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", + &dmastart, host->data->bytes_xfered, &dmanow); + sdhci_set_sdma_addr(host, dmanow); } if (intmask & SDHCI_INT_DATA_END) { @@ -3322,6 +3437,13 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; + /* + * The DMA table descriptor count is calculated as the maximum + * number of segments times 2, to allow for an alignment + * descriptor for each segment, plus 1 for a nop end descriptor. + */ + host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; + return host; } @@ -3376,6 +3498,9 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1) sdhci_do_reset(host, SDHCI_RESET_ALL); + if (host->v4_mode) + sdhci_do_enable_v4_mode(host); + of_property_read_u64(mmc_dev(host->mmc)->of_node, "sdhci-caps-mask", &dt_caps_mask); of_property_read_u64(mmc_dev(host->mmc)->of_node, @@ -3470,6 +3595,19 @@ static int sdhci_allocate_bounce_buffer(struct sdhci_host *host) return 0; } +static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) +{ + /* + * According to SD Host Controller spec v4.10, bit[27] added from + * version 4.10 in Capabilities Register is used as 64-bit System + * Address support for V4 mode. + */ + if (host->version >= SDHCI_SPEC_410 && host->v4_mode) + return host->caps & SDHCI_CAN_64BIT_V4; + + return host->caps & SDHCI_CAN_64BIT; +} + int sdhci_setup_host(struct sdhci_host *host) { struct mmc_host *mmc; @@ -3506,7 +3644,7 @@ int sdhci_setup_host(struct sdhci_host *host) override_timeout_clk = host->timeout_clk; - if (host->version > SDHCI_SPEC_300) { + if (host->version > SDHCI_SPEC_420) { pr_err("%s: Unknown controller version (%d). You may experience problems.\n", mmc_hostname(mmc), host->version); } @@ -3541,7 +3679,7 @@ int sdhci_setup_host(struct sdhci_host *host) * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to * implement. */ - if (host->caps & SDHCI_CAN_64BIT) + if (sdhci_can_64bit_dma(host)) host->flags |= SDHCI_USE_64_BIT_DMA; if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { @@ -3559,32 +3697,30 @@ int sdhci_setup_host(struct sdhci_host *host) } } - /* SDMA does not support 64-bit DMA */ - if (host->flags & SDHCI_USE_64_BIT_DMA) + /* SDMA does not support 64-bit DMA if v4 mode not set */ + if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) host->flags &= ~SDHCI_USE_SDMA; if (host->flags & SDHCI_USE_ADMA) { dma_addr_t dma; void *buf; - /* - * The DMA descriptor table size is calculated as the maximum - * number of segments times 2, to allow for an alignment - * descriptor for each segment, plus 1 for a nop end descriptor, - * all multipled by the descriptor size. 
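Back-of-the-envelope arithmetic for the descriptor-table sizing above, assuming SDHCI_MAX_SEGS is 128 as defined in sdhci.h and the 96-bit/128-bit descriptor sizes introduced later in this diff; this is an illustration, not driver code.

#include <linux/types.h>

static size_t foo_adma_table_size(bool v4_mode, bool dma_64bit)
{
	size_t cnt = 128 * 2 + 1;	/* adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1 = 257 */
	size_t desc_sz;

	if (!dma_64bit)
		desc_sz = 8;			/* SDHCI_ADMA2_32_DESC_SZ */
	else
		desc_sz = v4_mode ? 16 : 12;	/* 128-bit vs 96-bit descriptor */

	return cnt * desc_sz;	/* 2056 (32-bit), 4112 (64-bit V4) or 3084 (64-bit V3) bytes */
}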
- */ if (host->flags & SDHCI_USE_64_BIT_DMA) { - host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * - SDHCI_ADMA2_64_DESC_SZ; - host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; + host->adma_table_sz = host->adma_table_cnt * + SDHCI_ADMA2_64_DESC_SZ(host); + host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); } else { - host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * + host->adma_table_sz = host->adma_table_cnt * SDHCI_ADMA2_32_DESC_SZ; host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; } host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; - buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz + + /* + * Use zalloc to zero the reserved high 32-bits of 128-bit + * descriptors so that they never need to be written. + */ + buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + host->adma_table_sz, &dma, GFP_KERNEL); if (!buf) { pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", @@ -3708,10 +3844,13 @@ int sdhci_setup_host(struct sdhci_host *host) if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) host->flags |= SDHCI_AUTO_CMD12; - /* Auto-CMD23 stuff only works in ADMA or PIO. */ + /* + * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. + * For v4 mode, SDMA may use Auto-CMD23 as well. + */ if ((host->version >= SDHCI_SPEC_300) && ((host->flags & SDHCI_USE_ADMA) || - !(host->flags & SDHCI_USE_SDMA)) && + !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { host->flags |= SDHCI_AUTO_CMD23; DBG("Auto-CMD23 available\n"); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index f0bd36ce3817..b001cf4d3d7e 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -28,6 +28,7 @@ #define SDHCI_DMA_ADDRESS 0x00 #define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS +#define SDHCI_32BIT_BLK_CNT SDHCI_DMA_ADDRESS #define SDHCI_BLOCK_SIZE 0x04 #define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) @@ -41,6 +42,7 @@ #define SDHCI_TRNS_BLK_CNT_EN 0x02 #define SDHCI_TRNS_AUTO_CMD12 0x04 #define SDHCI_TRNS_AUTO_CMD23 0x08 +#define SDHCI_TRNS_AUTO_SEL 0x0C #define SDHCI_TRNS_READ 0x10 #define SDHCI_TRNS_MULTI 0x20 @@ -184,6 +186,9 @@ #define SDHCI_CTRL_DRV_TYPE_D 0x0030 #define SDHCI_CTRL_EXEC_TUNING 0x0040 #define SDHCI_CTRL_TUNED_CLK 0x0080 +#define SDHCI_CMD23_ENABLE 0x0800 +#define SDHCI_CTRL_V4_MODE 0x1000 +#define SDHCI_CTRL_64BIT_ADDR 0x2000 #define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000 #define SDHCI_CAPABILITIES 0x40 @@ -204,6 +209,7 @@ #define SDHCI_CAN_VDD_330 0x01000000 #define SDHCI_CAN_VDD_300 0x02000000 #define SDHCI_CAN_VDD_180 0x04000000 +#define SDHCI_CAN_64BIT_V4 0x08000000 #define SDHCI_CAN_64BIT 0x10000000 #define SDHCI_SUPPORT_SDR50 0x00000001 @@ -270,6 +276,9 @@ #define SDHCI_SPEC_100 0 #define SDHCI_SPEC_200 1 #define SDHCI_SPEC_300 2 +#define SDHCI_SPEC_400 3 +#define SDHCI_SPEC_410 4 +#define SDHCI_SPEC_420 5 /* * End of controller registers. @@ -305,8 +314,14 @@ struct sdhci_adma2_32_desc { */ #define SDHCI_ADMA2_DESC_ALIGN 8 -/* ADMA2 64-bit DMA descriptor size */ -#define SDHCI_ADMA2_64_DESC_SZ 12 +/* + * ADMA2 64-bit DMA descriptor size + * According to SD Host Controller spec v4.10, there are two kinds of + * descriptors for 64-bit addressing mode: 96-bit Descriptor and 128-bit + * Descriptor, if Host Version 4 Enable is set in the Host Control 2 + * register, 128-bit Descriptor will be selected. + */ +#define SDHCI_ADMA2_64_DESC_SZ(host) ((host)->v4_mode ? 16 : 12) /* * ADMA2 64-bit descriptor. 
Note 12-byte descriptor can't always be 8-byte @@ -450,6 +465,13 @@ struct sdhci_host { * obtainable timeout. */ #define SDHCI_QUIRK2_DISABLE_HW_TIMEOUT (1<<17) +/* + * 32-bit block count may not support eMMC where upper bits of CMD23 are used + * for other purposes. Consequently we support 16-bit block count by default. + * Otherwise, SDHCI_QUIRK2_USE_32BIT_BLK_CNT can be selected to use 32-bit + * block count. + */ +#define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ @@ -501,6 +523,7 @@ struct sdhci_host { bool preset_enabled; /* Preset is enabled */ bool pending_reset; /* Cmd/data reset is pending */ bool irq_wake_enabled; /* IRQ wakeup is enabled */ + bool v4_mode; /* Host Version 4 Enable */ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */ struct mmc_command *cmd; /* Current command */ @@ -554,6 +577,7 @@ struct sdhci_host { unsigned int tuning_count; /* Timer count for re-tuning */ unsigned int tuning_mode; /* Re-tuning mode supported by host */ + unsigned int tuning_err; /* Error code for re-tuning */ #define SDHCI_TUNING_MODE_1 0 #define SDHCI_TUNING_MODE_2 1 #define SDHCI_TUNING_MODE_3 2 @@ -563,6 +587,9 @@ struct sdhci_host { /* Host SDMA buffer boundary. */ u32 sdma_boundary; + /* Host ADMA table count */ + u32 adma_table_cnt; + u64 data_timeout; unsigned long private[0] ____cacheline_aligned; @@ -603,6 +630,8 @@ struct sdhci_ops { void (*adma_workaround)(struct sdhci_host *host, u32 intmask); void (*card_event)(struct sdhci_host *host); void (*voltage_switch)(struct sdhci_host *host); + void (*adma_write_desc)(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, unsigned int cmd); }; #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS @@ -725,6 +754,7 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd); void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, unsigned short vdd); +void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq); void sdhci_set_bus_width(struct sdhci_host *host, int width); void sdhci_reset(struct sdhci_host *host, u8 mask); void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); @@ -733,6 +763,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios); void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable); +void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, unsigned int cmd); #ifdef CONFIG_PM int sdhci_suspend_host(struct sdhci_host *host); @@ -747,6 +779,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, int *data_error); void sdhci_dumpregs(struct sdhci_host *host); +void sdhci_enable_v4_mode(struct sdhci_host *host); void sdhci_start_tuning(struct sdhci_host *host); void sdhci_end_tuning(struct sdhci_host *host); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 4c2a1f8ddbf3..81bd9afb0980 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * MMCIF eMMC driver. * * Copyright (C) 2010 Renesas Solutions Corp. * Yusuke Goda <yusuke.goda.sx@renesas.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License. 
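A sketch of a vendor ->adma_write_desc() hook built on the newly exported sdhci_adma_write_desc(); the 128 MiB boundary and the foo_* names are invented for illustration, only the hook signature and the helper come from this series.

#include <linux/kernel.h>
#include <linux/sizes.h>

#include "sdhci.h"

#define FOO_ADMA_BOUNDARY	SZ_128M

static void foo_adma_write_desc(struct sdhci_host *host, void **desc,
				dma_addr_t addr, int len, unsigned int cmd)
{
	int chunk = min_t(int, len, FOO_ADMA_BOUNDARY -
			  (addr & (FOO_ADMA_BOUNDARY - 1)));

	/* First descriptor runs up to the (hypothetical) boundary... */
	sdhci_adma_write_desc(host, desc, addr, chunk, cmd);

	/* ...and the helper advanced *desc, so a second one can follow. */
	if (len > chunk)
		sdhci_adma_write_desc(host, desc, addr + chunk, len - chunk,
				      cmd);
}

static const struct sdhci_ops foo_sdhci_ops = {
	.adma_write_desc = foo_adma_write_desc,
	/* remaining callbacks elided */
};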
*/ /* @@ -1573,6 +1570,6 @@ static struct platform_driver sh_mmcif_driver = { module_platform_driver(sh_mmcif_driver); MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>"); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 568349e1fbc2..279e326e397e 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -258,11 +258,16 @@ struct sunxi_mmc_cfg { /* Does DATA0 needs to be masked while the clock is updated */ bool mask_data0; - /* hardware only supports new timing mode */ + /* + * hardware only supports new timing mode, either due to lack of + * a mode switch in the clock controller, or the mmc controller + * is permanently configured in the new timing mode, without the + * NTSR mode switch. + */ bool needs_new_timings; - /* hardware can switch between old and new timing modes */ - bool has_timings_switch; + /* clock hardware can switch between old and new timing modes */ + bool ccu_has_timings_switch; }; struct sunxi_mmc_host { @@ -787,7 +792,7 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, clock <<= 1; } - if (host->use_new_timings && host->cfg->has_timings_switch) { + if (host->use_new_timings && host->cfg->ccu_has_timings_switch) { ret = sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true); if (ret) { dev_err(mmc_dev(mmc), @@ -822,6 +827,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, /* update card clock rate to account for internal divider */ rate /= div; + /* + * Configure the controller to use the new timing mode if needed. + * On controllers that only support the new timing mode, such as + * the eMMC controller on the A64, this register does not exist, + * and any writes to it are ignored. + */ if (host->use_new_timings) { /* Don't touch the delay bits */ rval = mmc_readl(host, REG_SD_NTSR); @@ -1145,7 +1156,7 @@ static const struct sunxi_mmc_cfg sun8i_a83t_emmc_cfg = { .idma_des_size_bits = 16, .clk_delays = sunxi_mmc_clk_delays, .can_calibrate = false, - .has_timings_switch = true, + .ccu_has_timings_switch = true, }; static const struct sunxi_mmc_cfg sun9i_a80_cfg = { @@ -1166,6 +1177,7 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = { .idma_des_size_bits = 13, .clk_delays = NULL, .can_calibrate = true, + .needs_new_timings = true, }; static const struct of_device_id sunxi_mmc_of_match[] = { @@ -1351,7 +1363,7 @@ static int sunxi_mmc_probe(struct platform_device *pdev) goto error_free_host; } - if (host->cfg->has_timings_switch) { + if (host->cfg->ccu_has_timings_switch) { /* * Supports both old and new timing modes. * Try setting the clk to new timing mode. 
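A reconstruction, for illustration only, of the probe-time decision the renamed ccu_has_timings_switch flag feeds into (the actual probe hunk is truncated in the context above); the field names come from this diff, but the body is an assumption rather than a verbatim copy of the driver.

static void foo_sunxi_pick_timing_mode(struct sunxi_mmc_host *host)
{
	if (host->cfg->ccu_has_timings_switch) {
		/* Both modes exist: ask the CCU to clock us in new mode. */
		if (!sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true))
			host->use_new_timings = true;
	} else if (host->cfg->needs_new_timings) {
		/* Controller hard-wired to new mode, e.g. the A64 eMMC. */
		host->use_new_timings = true;
	}
}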
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index a3d8380ab480..b6644ce296b2 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -336,7 +336,8 @@ static unsigned int tifm_sd_op_flags(struct mmc_command *cmd) rc |= TIFM_MMCSD_RSP_R0; break; case MMC_RSP_R1B: - rc |= TIFM_MMCSD_RSP_BUSY; // deliberate fall-through + rc |= TIFM_MMCSD_RSP_BUSY; + /* fall-through */ case MMC_RSP_R1: rc |= TIFM_MMCSD_RSP_R1; break; diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 43a2ea5cff24..93e83ad25976 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for the MMC / SD / SDIO cell found in: * @@ -7,12 +8,9 @@ * Copyright (C) 2017 Horms Solutions, Simon Horman * Copyright (C) 2007 Ian Molton * Copyright (C) 2004 Ian Molton - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ +#include <linux/delay.h> #include <linux/device.h> #include <linux/mfd/core.h> #include <linux/mfd/tmio.h> @@ -23,6 +21,76 @@ #include "tmio_mmc.h" +/* Registers specific to this variant */ +#define CTL_SDIO_REGS 0x100 +#define CTL_CLK_AND_WAIT_CTL 0x138 +#define CTL_RESET_SDIO 0x1e0 + +static void tmio_mmc_clk_start(struct tmio_mmc_host *host) +{ + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + usleep_range(10000, 11000); + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); + usleep_range(10000, 11000); +} + +static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) +{ + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); + usleep_range(10000, 11000); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + usleep_range(10000, 11000); +} + +static void tmio_mmc_set_clock(struct tmio_mmc_host *host, + unsigned int new_clock) +{ + unsigned int divisor; + u32 clk = 0; + int clk_sel; + + if (new_clock == 0) { + tmio_mmc_clk_stop(host); + return; + } + + divisor = host->pdata->hclk / new_clock; + + /* bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2 */ + clk_sel = (divisor <= 1); + clk = clk_sel ? 
0 : (roundup_pow_of_two(divisor) >> 2); + + host->pdata->set_clk_div(host->pdev, clk_sel); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); + usleep_range(10000, 11000); + + tmio_mmc_clk_start(host); +} + +static void tmio_mmc_reset(struct tmio_mmc_host *host) +{ + /* FIXME - should we set stop clock reg here */ + sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); + usleep_range(10000, 11000); + sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); + usleep_range(10000, 11000); + + if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) { + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); + } +} + #ifdef CONFIG_PM_SLEEP static int tmio_mmc_suspend(struct device *dev) { @@ -90,8 +158,6 @@ static int tmio_mmc_probe(struct platform_device *pdev) goto cell_disable; } - pdata->flags |= TMIO_MMC_HAVE_HIGH_REG; - host = tmio_mmc_host_alloc(pdev, pdata); if (IS_ERR(host)) { ret = PTR_ERR(host); @@ -100,6 +166,8 @@ static int tmio_mmc_probe(struct platform_device *pdev) /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ host->bus_shift = resource_size(res) >> 10; + host->set_clock = tmio_mmc_set_clock; + host->reset = tmio_mmc_reset; host->mmc->f_max = pdata->hclk; host->mmc->f_min = pdata->hclk / 512; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 5d141f79e175..1e317027bf53 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Driver for the MMC / SD / SDIO cell found in: * @@ -8,11 +9,6 @@ * Copyright (C) 2016-17 Horms Solutions, Simon Horman * Copyright (C) 2007 Ian Molton * Copyright (C) 2004 Ian Molton - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * */ #ifndef TMIO_MMC_H @@ -47,9 +43,6 @@ #define CTL_RESET_SD 0xe0 #define CTL_VERSION 0xe2 #define CTL_SDIF_MODE 0xe6 -#define CTL_SDIO_REGS 0x100 -#define CTL_CLK_AND_WAIT_CTL 0x138 -#define CTL_RESET_SDIO 0x1e0 /* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */ #define TMIO_STOP_STP BIT(0) @@ -133,7 +126,6 @@ struct tmio_mmc_host { /* Callbacks for clock / power control */ void (*set_pwr)(struct platform_device *host, int state); - void (*set_clk_div)(struct platform_device *host, int state); /* pio related stuff */ struct scatterlist *sg_ptr; @@ -146,7 +138,7 @@ struct tmio_mmc_host { struct tmio_mmc_data *pdata; /* DMA support */ - bool force_pio; + bool dma_on; struct dma_chan *chan_rx; struct dma_chan *chan_tx; struct tasklet_struct dma_issue; @@ -170,14 +162,14 @@ struct tmio_mmc_host { /* Mandatory callback */ int (*clk_enable)(struct tmio_mmc_host *host); + void (*set_clock)(struct tmio_mmc_host *host, unsigned int clock); /* Optional callbacks */ - unsigned int (*clk_update)(struct tmio_mmc_host *host, - unsigned int new_clock); void (*clk_disable)(struct tmio_mmc_host *host); int (*multi_io_quirk)(struct mmc_card *card, unsigned int direction, int blk_size); int (*write16_hook)(struct tmio_mmc_host *host, int addr); + void (*reset)(struct tmio_mmc_host *host); void (*hw_reset)(struct tmio_mmc_host *host); void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap); bool (*check_scc_error)(struct tmio_mmc_host *host); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 261b4d62d2b1..8d64f6196f33 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for the MMC / SD / SDIO IP found in: * @@ -10,10 +11,6 @@ * Copyright (C) 2007 Ian Molton * Copyright (C) 2004 Ian Molton * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * This driver draws mainly on scattered spec sheets, Reverse engineering * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit * support). (Further 4 bit support from a later datasheet). 
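A worked example of the divisor encoding used by the variant-specific tmio_mmc_set_clock() above, assuming pdata->hclk = 48 MHz and a requested card clock of 400 kHz; foo_tmio_div_bits and the numbers are illustrative only.

#include <linux/log2.h>
#include <linux/types.h>

static u32 foo_tmio_div_bits(unsigned int hclk, unsigned int new_clock)
{
	unsigned int divisor = hclk / new_clock;	/* 48000000 / 400000 = 120 */
	int clk_sel = (divisor <= 1);			/* 0: divider in use */

	/*
	 * roundup_pow_of_two(120) = 128, >> 2 = 0x20 (bit5), which per the
	 * bit7 = 1/512 ... bit0 = 1/4 encoding selects 1/128, i.e. an
	 * actual card clock of 375 kHz, the nearest rate not above 400 kHz.
	 */
	return clk_sel ? 0 : (roundup_pow_of_two(divisor) >> 2);
}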
@@ -160,100 +157,18 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) } } -static void tmio_mmc_clk_start(struct tmio_mmc_host *host) -{ - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - - /* HW engineers overrode docs: no sleep needed on R-Car2+ */ - if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - usleep_range(10000, 11000); - - if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); - usleep_range(10000, 11000); - } -} - -static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) -{ - if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); - usleep_range(10000, 11000); - } - - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - - /* HW engineers overrode docs: no sleep needed on R-Car2+ */ - if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - usleep_range(10000, 11000); -} - -static void tmio_mmc_set_clock(struct tmio_mmc_host *host, - unsigned int new_clock) -{ - u32 clk = 0, clock; - - if (new_clock == 0) { - tmio_mmc_clk_stop(host); - return; - } - /* - * Both HS400 and HS200/SD104 set 200MHz, but some devices need to - * set 400MHz to distinguish the CPG settings in HS400. - */ - if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && - host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400 && - new_clock == 200000000) - new_clock = 400000000; - - if (host->clk_update) - clock = host->clk_update(host, new_clock) / 512; - else - clock = host->mmc->f_min; - - for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1) - clock <<= 1; - - /* 1/1 clock is option */ - if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && - ((clk >> 22) & 0x1)) { - if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400)) - clk |= 0xff; - else - clk &= ~0xff; - } - - if (host->set_clk_div) - host->set_clk_div(host->pdev, (clk >> 22) & 1); - - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); - if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - usleep_range(10000, 11000); - - tmio_mmc_clk_start(host); -} - static void tmio_mmc_reset(struct tmio_mmc_host *host) { /* FIXME - should we set stop clock reg here */ sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); - if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) - sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); usleep_range(10000, 11000); sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); - if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) - sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); usleep_range(10000, 11000); if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) { sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); } - } static void tmio_mmc_reset_work(struct work_struct *work) @@ -294,7 +209,7 @@ static void tmio_mmc_reset_work(struct work_struct *work) spin_unlock_irqrestore(&host->lock, flags); - tmio_mmc_reset(host); + host->reset(host); /* Ready for new calls */ host->mrq = NULL; @@ -446,7 +361,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) unsigned int count; unsigned long flags; - if ((host->chan_tx || host->chan_rx) && !host->force_pio) { + if (host->dma_on) { pr_err("PIO IRQ in DMA mode!\n"); return; } else if (!data) { @@ -518,7 +433,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) */ if (data->flags & MMC_DATA_READ) { - if (host->chan_rx && 
!host->force_pio) + if (host->dma_on) tmio_mmc_check_bounce_buffer(host); dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", host->mrq); @@ -555,7 +470,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat) if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR || stat & TMIO_STAT_TXUNDERRUN) data->error = -EILSEQ; - if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) { + if (host->dma_on && (data->flags & MMC_DATA_WRITE)) { u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); bool done = false; @@ -579,7 +494,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat) tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); tmio_mmc_dataend_dma(host); } - } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { + } else if (host->dma_on && (data->flags & MMC_DATA_READ)) { tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); tmio_mmc_dataend_dma(host); } else { @@ -632,7 +547,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat) */ if (host->data && (!cmd->error || cmd->error == -EILSEQ)) { if (host->data->flags & MMC_DATA_READ) { - if (host->force_pio || !host->chan_rx) { + if (!host->dma_on) { tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); } else { tmio_mmc_disable_mmc_irqs(host, @@ -640,7 +555,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat) tasklet_schedule(&host->dma_issue); } } else { - if (host->force_pio || !host->chan_tx) { + if (!host->dma_on) { tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); } else { tmio_mmc_disable_mmc_irqs(host, @@ -770,7 +685,7 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, tmio_mmc_init_sg(host, data); host->data = data; - host->force_pio = false; + host->dma_on = false; /* Set transfer length / blocksize */ sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); @@ -919,8 +834,8 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) if (mrq->cmd->error || (mrq->data && mrq->data->error)) tmio_mmc_abort_dma(host); - if (host->check_scc_error) - host->check_scc_error(host); + if (host->check_scc_error && host->check_scc_error(host)) + mrq->cmd->error = -EILSEQ; /* If SET_BLOCK_COUNT, continue with main command */ if (host->mrq && !mrq->cmd->error) { @@ -1043,15 +958,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: tmio_mmc_power_off(host); - tmio_mmc_clk_stop(host); + host->set_clock(host, 0); break; case MMC_POWER_UP: tmio_mmc_power_on(host, ios->vdd); - tmio_mmc_set_clock(host, ios->clock); + host->set_clock(host, ios->clock); tmio_mmc_set_bus_width(host, ios->bus_width); break; case MMC_POWER_ON: - tmio_mmc_set_clock(host, ios->clock); + host->set_clock(host, ios->clock); tmio_mmc_set_bus_width(host, ios->bus_width); break; } @@ -1237,7 +1152,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) int ret; /* - * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from + * Check the sanity of mmc->f_min to prevent host->set_clock() from * looping forever... 
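A sketch of the glue-driver pattern the core now expects: set_clock is a mandatory callback and reset an optional one, wired up before tmio_mmc_host_probe(); the foo_* stubs and data are placeholders, the real in-tree users are tmio_mmc.c above and uniphier-sd.c below.

#include <linux/platform_device.h>

#include "tmio_mmc.h"

static struct tmio_mmc_data foo_tmio_data;	/* flags, ocr_mask, ... elided */

static int foo_clk_enable(struct tmio_mmc_host *host) { return 0; }
static void foo_set_clock(struct tmio_mmc_host *host, unsigned int clock) { }
static void foo_reset(struct tmio_mmc_host *host) { }

static int foo_mmc_probe(struct platform_device *pdev)
{
	struct tmio_mmc_host *host;
	int ret;

	host = tmio_mmc_host_alloc(pdev, &foo_tmio_data);
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->clk_enable = foo_clk_enable;	/* mandatory */
	host->set_clock = foo_set_clock;	/* mandatory as of this series */
	host->reset = foo_reset;		/* optional, core default otherwise */

	ret = tmio_mmc_host_probe(host);
	if (ret)
		tmio_mmc_host_free(host);

	return ret;
}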
*/ if (mmc->f_min == 0) @@ -1247,7 +1162,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) _host->write16_hook = NULL; _host->set_pwr = pdata->set_pwr; - _host->set_clk_div = pdata->set_clk_div; ret = tmio_mmc_init_ocr(_host); if (ret < 0) @@ -1290,6 +1204,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) mmc->caps & MMC_CAP_NEEDS_POLL || !mmc_card_is_removable(mmc)); + if (!_host->reset) + _host->reset = tmio_mmc_reset; + /* * On Gen2+, eMMC with NONREMOVABLE currently fails because native * hotplug gets disabled. It seems RuntimePM related yet we need further @@ -1310,8 +1227,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) if (pdata->flags & TMIO_MMC_SDIO_IRQ) _host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; - tmio_mmc_clk_stop(_host); - tmio_mmc_reset(_host); + _host->set_clock(_host, 0); + _host->reset(_host); _host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK); tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); @@ -1394,7 +1311,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev) tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); if (host->clk_cache) - tmio_mmc_clk_stop(host); + host->set_clock(host, 0); tmio_mmc_clk_disable(host); @@ -1411,11 +1328,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev) { struct tmio_mmc_host *host = dev_get_drvdata(dev); - tmio_mmc_reset(host); + host->reset(host); tmio_mmc_clk_enable(host); if (host->clk_cache) - tmio_mmc_set_clock(host, host->clk_cache); + host->set_clock(host, host->clk_cache); if (host->native_hotplug) tmio_mmc_enable_mmc_irqs(host, diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c new file mode 100644 index 000000000000..91a2be41edf6 --- /dev/null +++ b/drivers/mmc/host/uniphier-sd.c @@ -0,0 +1,698 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2017-2018 Socionext Inc. 
+// Author: Masahiro Yamada <yamada.masahiro@socionext.com> + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/mfd/tmio.h> +#include <linux/mmc/host.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include "tmio_mmc.h" + +#define UNIPHIER_SD_CLK_CTL_DIV1024 BIT(16) +#define UNIPHIER_SD_CLK_CTL_DIV1 BIT(10) +#define UNIPHIER_SD_CLKCTL_OFFEN BIT(9) // auto SDCLK stop +#define UNIPHIER_SD_CC_EXT_MODE 0x1b0 +#define UNIPHIER_SD_CC_EXT_MODE_DMA BIT(1) +#define UNIPHIER_SD_HOST_MODE 0x1c8 +#define UNIPHIER_SD_VOLT 0x1e4 +#define UNIPHIER_SD_VOLT_MASK GENMASK(1, 0) +#define UNIPHIER_SD_VOLT_OFF 0 +#define UNIPHIER_SD_VOLT_330 1 // 3.3V signal +#define UNIPHIER_SD_VOLT_180 2 // 1.8V signal +#define UNIPHIER_SD_DMA_MODE 0x410 +#define UNIPHIER_SD_DMA_MODE_DIR_MASK GENMASK(17, 16) +#define UNIPHIER_SD_DMA_MODE_DIR_TO_DEV 0 +#define UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV 1 +#define UNIPHIER_SD_DMA_MODE_WIDTH_MASK GENMASK(5, 4) +#define UNIPHIER_SD_DMA_MODE_WIDTH_8 0 +#define UNIPHIER_SD_DMA_MODE_WIDTH_16 1 +#define UNIPHIER_SD_DMA_MODE_WIDTH_32 2 +#define UNIPHIER_SD_DMA_MODE_WIDTH_64 3 +#define UNIPHIER_SD_DMA_MODE_ADDR_INC BIT(0) // 1: inc, 0: fixed +#define UNIPHIER_SD_DMA_CTL 0x414 +#define UNIPHIER_SD_DMA_CTL_START BIT(0) // start DMA (auto cleared) +#define UNIPHIER_SD_DMA_RST 0x418 +#define UNIPHIER_SD_DMA_RST_CH1 BIT(9) +#define UNIPHIER_SD_DMA_RST_CH0 BIT(8) +#define UNIPHIER_SD_DMA_ADDR_L 0x440 +#define UNIPHIER_SD_DMA_ADDR_H 0x444 + +/* + * IP is extended to support various features: built-in DMA engine, + * 1/1024 divisor, etc. + */ +#define UNIPHIER_SD_CAP_EXTENDED_IP BIT(0) +/* RX channel of the built-in DMA controller is broken (Pro5) */ +#define UNIPHIER_SD_CAP_BROKEN_DMA_RX BIT(1) + +struct uniphier_sd_priv { + struct tmio_mmc_data tmio_data; + struct pinctrl *pinctrl; + struct pinctrl_state *pinstate_default; + struct pinctrl_state *pinstate_uhs; + struct clk *clk; + struct reset_control *rst; + struct reset_control *rst_br; + struct reset_control *rst_hw; + struct dma_chan *chan; + enum dma_data_direction dma_dir; + unsigned long clk_rate; + unsigned long caps; +}; + +static void *uniphier_sd_priv(struct tmio_mmc_host *host) +{ + return container_of(host->pdata, struct uniphier_sd_priv, tmio_data); +} + +static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable) +{ + sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0); +} + +/* external DMA engine */ +static void uniphier_sd_external_dma_issue(unsigned long arg) +{ + struct tmio_mmc_host *host = (void *)arg; + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + + uniphier_sd_dma_endisable(host, 1); + dma_async_issue_pending(priv->chan); +} + +static void uniphier_sd_external_dma_callback(void *param, + const struct dmaengine_result *result) +{ + struct tmio_mmc_host *host = param; + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + unsigned long flags; + + dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len, + priv->dma_dir); + + spin_lock_irqsave(&host->lock, flags); + + if (result->result == DMA_TRANS_NOERROR) { + /* + * When the external DMA engine is enabled, strangely enough, + * the DATAEND flag can be asserted even if the DMA engine has + * not been kicked yet. 
Enable the TMIO_STAT_DATAEND irq only + * after we make sure the DMA engine finishes the transfer, + * hence, in this callback. + */ + tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); + } else { + host->data->error = -ETIMEDOUT; + tmio_mmc_do_data_irq(host); + } + + spin_unlock_irqrestore(&host->lock, flags); +} + +static void uniphier_sd_external_dma_start(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + enum dma_transfer_direction dma_tx_dir; + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; + int sg_len; + + if (!priv->chan) + goto force_pio; + + if (data->flags & MMC_DATA_READ) { + priv->dma_dir = DMA_FROM_DEVICE; + dma_tx_dir = DMA_DEV_TO_MEM; + } else { + priv->dma_dir = DMA_TO_DEVICE; + dma_tx_dir = DMA_MEM_TO_DEV; + } + + sg_len = dma_map_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len, + priv->dma_dir); + if (sg_len == 0) + goto force_pio; + + desc = dmaengine_prep_slave_sg(priv->chan, host->sg_ptr, sg_len, + dma_tx_dir, DMA_CTRL_ACK); + if (!desc) + goto unmap_sg; + + desc->callback_result = uniphier_sd_external_dma_callback; + desc->callback_param = host; + + cookie = dmaengine_submit(desc); + if (cookie < 0) + goto unmap_sg; + + host->dma_on = true; + + return; + +unmap_sg: + dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len, + priv->dma_dir); +force_pio: + uniphier_sd_dma_endisable(host, 0); +} + +static void uniphier_sd_external_dma_enable(struct tmio_mmc_host *host, + bool enable) +{ +} + +static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + struct dma_chan *chan; + + chan = dma_request_chan(mmc_dev(host->mmc), "rx-tx"); + if (IS_ERR(chan)) { + dev_warn(mmc_dev(host->mmc), + "failed to request DMA channel. 
falling back to PIO\n"); + return; /* just use PIO even for -EPROBE_DEFER */ + } + + /* this driver uses a single channel for both RX an TX */ + priv->chan = chan; + host->chan_rx = chan; + host->chan_tx = chan; + + tasklet_init(&host->dma_issue, uniphier_sd_external_dma_issue, + (unsigned long)host); +} + +static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + + if (priv->chan) + dma_release_channel(priv->chan); +} + +static void uniphier_sd_external_dma_abort(struct tmio_mmc_host *host) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + + uniphier_sd_dma_endisable(host, 0); + + if (priv->chan) + dmaengine_terminate_sync(priv->chan); +} + +static void uniphier_sd_external_dma_dataend(struct tmio_mmc_host *host) +{ + uniphier_sd_dma_endisable(host, 0); + + tmio_mmc_do_data_irq(host); +} + +static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = { + .start = uniphier_sd_external_dma_start, + .enable = uniphier_sd_external_dma_enable, + .request = uniphier_sd_external_dma_request, + .release = uniphier_sd_external_dma_release, + .abort = uniphier_sd_external_dma_abort, + .dataend = uniphier_sd_external_dma_dataend, +}; + +static void uniphier_sd_internal_dma_issue(unsigned long arg) +{ + struct tmio_mmc_host *host = (void *)arg; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); + spin_unlock_irqrestore(&host->lock, flags); + + uniphier_sd_dma_endisable(host, 1); + writel(UNIPHIER_SD_DMA_CTL_START, host->ctl + UNIPHIER_SD_DMA_CTL); +} + +static void uniphier_sd_internal_dma_start(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + struct scatterlist *sg = host->sg_ptr; + dma_addr_t dma_addr; + unsigned int dma_mode_dir; + u32 dma_mode; + int sg_len; + + if ((data->flags & MMC_DATA_READ) && !host->chan_rx) + goto force_pio; + + if (WARN_ON(host->sg_len != 1)) + goto force_pio; + + if (!IS_ALIGNED(sg->offset, 8)) + goto force_pio; + + if (data->flags & MMC_DATA_READ) { + priv->dma_dir = DMA_FROM_DEVICE; + dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV; + } else { + priv->dma_dir = DMA_TO_DEVICE; + dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_TO_DEV; + } + + sg_len = dma_map_sg(mmc_dev(host->mmc), sg, 1, priv->dma_dir); + if (sg_len == 0) + goto force_pio; + + dma_mode = FIELD_PREP(UNIPHIER_SD_DMA_MODE_DIR_MASK, dma_mode_dir); + dma_mode |= FIELD_PREP(UNIPHIER_SD_DMA_MODE_WIDTH_MASK, + UNIPHIER_SD_DMA_MODE_WIDTH_64); + dma_mode |= UNIPHIER_SD_DMA_MODE_ADDR_INC; + + writel(dma_mode, host->ctl + UNIPHIER_SD_DMA_MODE); + + dma_addr = sg_dma_address(data->sg); + writel(lower_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_L); + writel(upper_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_H); + + host->dma_on = true; + + return; +force_pio: + uniphier_sd_dma_endisable(host, 0); +} + +static void uniphier_sd_internal_dma_enable(struct tmio_mmc_host *host, + bool enable) +{ +} + +static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + + /* + * Due to a hardware bug, Pro5 cannot use DMA for RX. + * We can still use DMA for TX, but PIO for RX. 
+ */ + if (!(priv->caps & UNIPHIER_SD_CAP_BROKEN_DMA_RX)) + host->chan_rx = (void *)0xdeadbeaf; + + host->chan_tx = (void *)0xdeadbeaf; + + tasklet_init(&host->dma_issue, uniphier_sd_internal_dma_issue, + (unsigned long)host); +} + +static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host) +{ + /* Each value is set to zero to assume "disabling" each DMA */ + host->chan_rx = NULL; + host->chan_tx = NULL; +} + +static void uniphier_sd_internal_dma_abort(struct tmio_mmc_host *host) +{ + u32 tmp; + + uniphier_sd_dma_endisable(host, 0); + + tmp = readl(host->ctl + UNIPHIER_SD_DMA_RST); + tmp &= ~(UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0); + writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST); + + tmp |= UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0; + writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST); +} + +static void uniphier_sd_internal_dma_dataend(struct tmio_mmc_host *host) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + + uniphier_sd_dma_endisable(host, 0); + dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, 1, priv->dma_dir); + + tmio_mmc_do_data_irq(host); +} + +static const struct tmio_mmc_dma_ops uniphier_sd_internal_dma_ops = { + .start = uniphier_sd_internal_dma_start, + .enable = uniphier_sd_internal_dma_enable, + .request = uniphier_sd_internal_dma_request, + .release = uniphier_sd_internal_dma_release, + .abort = uniphier_sd_internal_dma_abort, + .dataend = uniphier_sd_internal_dma_dataend, +}; + +static int uniphier_sd_clk_enable(struct tmio_mmc_host *host) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + struct mmc_host *mmc = host->mmc; + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + ret = clk_set_rate(priv->clk, ULONG_MAX); + if (ret) + goto disable_clk; + + priv->clk_rate = clk_get_rate(priv->clk); + + /* If max-frequency property is set, use it. */ + if (!mmc->f_max) + mmc->f_max = priv->clk_rate; + + /* + * 1/512 is the finest divisor in the original IP. Newer versions + * also supports 1/1024 divisor. 
(UniPhier-specific extension) + */ + if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP) + mmc->f_min = priv->clk_rate / 1024; + else + mmc->f_min = priv->clk_rate / 512; + + ret = reset_control_deassert(priv->rst); + if (ret) + goto disable_clk; + + ret = reset_control_deassert(priv->rst_br); + if (ret) + goto assert_rst; + + return 0; + +assert_rst: + reset_control_assert(priv->rst); +disable_clk: + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void uniphier_sd_clk_disable(struct tmio_mmc_host *host) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + + reset_control_assert(priv->rst_br); + reset_control_assert(priv->rst); + clk_disable_unprepare(priv->clk); +} + +static void uniphier_sd_hw_reset(struct tmio_mmc_host *host) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + + reset_control_assert(priv->rst_hw); + /* For eMMC, minimum is 1us but give it 9us for good measure */ + udelay(9); + reset_control_deassert(priv->rst_hw); + /* For eMMC, minimum is 200us but give it 300us for good measure */ + usleep_range(300, 1000); +} + +static void uniphier_sd_set_clock(struct tmio_mmc_host *host, + unsigned int clock) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + unsigned long divisor; + u32 tmp; + + tmp = readl(host->ctl + (CTL_SD_CARD_CLK_CTL << 1)); + + /* stop the clock before changing its rate to avoid a glitch signal */ + tmp &= ~CLK_CTL_SCLKEN; + writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1)); + + if (clock == 0) + return; + + tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1024; + tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1; + tmp &= ~CLK_CTL_DIV_MASK; + + divisor = priv->clk_rate / clock; + + /* + * In the original IP, bit[7:0] represents the divisor. + * bit7 set: 1/512, ... bit0 set:1/4, all bits clear: 1/2 + * + * The IP does not define a way to achieve 1/1. For UniPhier variants, + * bit10 is used for 1/1. Newer versions of UniPhier variants use + * bit16 for 1/1024. + */ + if (divisor <= 1) + tmp |= UNIPHIER_SD_CLK_CTL_DIV1; + else if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP && divisor > 512) + tmp |= UNIPHIER_SD_CLK_CTL_DIV1024; + else + tmp |= roundup_pow_of_two(divisor) >> 2; + + writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1)); + + tmp |= CLK_CTL_SCLKEN; + writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1)); +} + +static void uniphier_sd_host_init(struct tmio_mmc_host *host) +{ + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + u32 val; + + /* + * Connected to 32bit AXI. + * This register holds settings for SoC-specific internal bus + * connection. What is worse, the register spec was changed, + * breaking the backward compatibility. Write an appropriate + * value depending on a flag associated with a compatible string. + */ + if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP) + val = 0x00000101; + else + val = 0x00000000; + + writel(val, host->ctl + UNIPHIER_SD_HOST_MODE); + + val = 0; + /* + * If supported, the controller can automatically + * enable/disable the clock line to the card. 
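Worked numbers for the UniPhier divisor selection above, assuming a 200 MHz source clock; foo_uniphier_div_bits is illustrative only and reuses the DIV1/DIV1024 bits defined earlier in this file.

#include <linux/log2.h>
#include <linux/types.h>

static u32 foo_uniphier_div_bits(unsigned long clk_rate, unsigned int clock,
				 bool extended_ip)
{
	unsigned long divisor = clk_rate / clock;

	if (divisor <= 1)			/* 200 MHz request: 1/1 via bit10 */
		return UNIPHIER_SD_CLK_CTL_DIV1;

	if (extended_ip && divisor > 512)	/* requests below clk_rate/512: 1/1024 via bit16 */
		return UNIPHIER_SD_CLK_CTL_DIV1024;

	/* e.g. 400 kHz: divisor 500 -> roundup 512 -> 0x80 (bit7, 1/512) */
	return roundup_pow_of_two(divisor) >> 2;
}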
+ */ + if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP) + val |= UNIPHIER_SD_CLKCTL_OFFEN; + + writel(val, host->ctl + (CTL_SD_CARD_CLK_CTL << 1)); +} + +static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + struct uniphier_sd_priv *priv = uniphier_sd_priv(host); + struct pinctrl_state *pinstate; + u32 val, tmp; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + val = UNIPHIER_SD_VOLT_330; + pinstate = priv->pinstate_default; + break; + case MMC_SIGNAL_VOLTAGE_180: + val = UNIPHIER_SD_VOLT_180; + pinstate = priv->pinstate_uhs; + break; + default: + return -ENOTSUPP; + } + + tmp = readl(host->ctl + UNIPHIER_SD_VOLT); + tmp &= ~UNIPHIER_SD_VOLT_MASK; + tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val); + writel(tmp, host->ctl + UNIPHIER_SD_VOLT); + + pinctrl_select_state(priv->pinctrl, pinstate); + + return 0; +} + +static int uniphier_sd_uhs_init(struct tmio_mmc_host *host, + struct uniphier_sd_priv *priv) +{ + priv->pinctrl = devm_pinctrl_get(mmc_dev(host->mmc)); + if (IS_ERR(priv->pinctrl)) + return PTR_ERR(priv->pinctrl); + + priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(priv->pinstate_default)) + return PTR_ERR(priv->pinstate_default); + + priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs"); + if (IS_ERR(priv->pinstate_uhs)) + return PTR_ERR(priv->pinstate_uhs); + + host->ops.start_signal_voltage_switch = + uniphier_sd_start_signal_voltage_switch; + + return 0; +} + +static int uniphier_sd_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct uniphier_sd_priv *priv; + struct tmio_mmc_data *tmio_data; + struct tmio_mmc_host *host; + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "failed to get IRQ number"); + return irq; + } + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->caps = (unsigned long)of_device_get_match_data(dev); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(priv->clk); + } + + priv->rst = devm_reset_control_get_shared(dev, "host"); + if (IS_ERR(priv->rst)) { + dev_err(dev, "failed to get host reset\n"); + return PTR_ERR(priv->rst); + } + + /* old version has one more reset */ + if (!(priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)) { + priv->rst_br = devm_reset_control_get_shared(dev, "bridge"); + if (IS_ERR(priv->rst_br)) { + dev_err(dev, "failed to get bridge reset\n"); + return PTR_ERR(priv->rst_br); + } + } + + tmio_data = &priv->tmio_data; + tmio_data->flags |= TMIO_MMC_32BIT_DATA_PORT; + + host = tmio_mmc_host_alloc(pdev, tmio_data); + if (IS_ERR(host)) + return PTR_ERR(host); + + if (host->mmc->caps & MMC_CAP_HW_RESET) { + priv->rst_hw = devm_reset_control_get_exclusive(dev, "hw"); + if (IS_ERR(priv->rst_hw)) { + dev_err(dev, "failed to get hw reset\n"); + ret = PTR_ERR(priv->rst_hw); + goto free_host; + } + host->hw_reset = uniphier_sd_hw_reset; + } + + if (host->mmc->caps & MMC_CAP_UHS) { + ret = uniphier_sd_uhs_init(host, priv); + if (ret) { + dev_warn(dev, + "failed to setup UHS (error %d). 
Disabling UHS.", + ret); + host->mmc->caps &= ~MMC_CAP_UHS; + } + } + + ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED, + dev_name(dev), host); + if (ret) + goto free_host; + + if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP) + host->dma_ops = &uniphier_sd_internal_dma_ops; + else + host->dma_ops = &uniphier_sd_external_dma_ops; + + host->bus_shift = 1; + host->clk_enable = uniphier_sd_clk_enable; + host->clk_disable = uniphier_sd_clk_disable; + host->set_clock = uniphier_sd_set_clock; + + ret = uniphier_sd_clk_enable(host); + if (ret) + goto free_host; + + uniphier_sd_host_init(host); + + tmio_data->ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34; + if (host->mmc->caps & MMC_CAP_UHS) + tmio_data->ocr_mask |= MMC_VDD_165_195; + + tmio_data->max_segs = 1; + tmio_data->max_blk_count = U16_MAX; + + ret = tmio_mmc_host_probe(host); + if (ret) + goto free_host; + + return 0; + +free_host: + tmio_mmc_host_free(host); + + return ret; +} + +static int uniphier_sd_remove(struct platform_device *pdev) +{ + struct tmio_mmc_host *host = platform_get_drvdata(pdev); + + tmio_mmc_host_remove(host); + uniphier_sd_clk_disable(host); + + return 0; +} + +static const struct of_device_id uniphier_sd_match[] = { + { + .compatible = "socionext,uniphier-sd-v2.91", + }, + { + .compatible = "socionext,uniphier-sd-v3.1", + .data = (void *)(UNIPHIER_SD_CAP_EXTENDED_IP | + UNIPHIER_SD_CAP_BROKEN_DMA_RX), + }, + { + .compatible = "socionext,uniphier-sd-v3.1.1", + .data = (void *)UNIPHIER_SD_CAP_EXTENDED_IP, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, uniphier_sd_match); + +static struct platform_driver uniphier_sd_driver = { + .probe = uniphier_sd_probe, + .remove = uniphier_sd_remove, + .driver = { + .name = "uniphier-sd", + .of_match_table = uniphier_sd_match, + }, +}; +module_platform_driver(uniphier_sd_driver); + +MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); +MODULE_DESCRIPTION("UniPhier SD/eMMC host controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index cdfeb15b6f05..cd8b1b9d4d8a 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1,10 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. */ #include <linux/clk.h> |