Diffstat (limited to 'drivers/spi')
31 files changed, 1808 insertions, 579 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3b1044ebc400..e32f6a2058ae 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -183,7 +183,7 @@ config SPI_BCM63XX config SPI_BCM63XX_HSSPI tristate "Broadcom BCM63XX HS SPI controller driver" - depends on BCM63XX || BMIPS_GENERIC || ARCH_BCM_63XX || COMPILE_TEST + depends on BCM63XX || BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST help This enables support for the High Speed SPI controller present on newer Broadcom BCM63XX SoCs. @@ -371,6 +371,13 @@ config SPI_FSL_QUADSPI This controller does not support generic SPI messages. It only supports the high-level SPI memory interface. +config SPI_GXP + tristate "GXP SPI driver" + depends on ARCH_HPE || COMPILE_TEST + help + This enables support for the driver for GXP bus attached SPI + controllers. + config SPI_HISI_KUNPENG tristate "HiSilicon SPI Controller for Kunpeng SoCs" depends on (ARM64 && ACPI) || COMPILE_TEST @@ -575,6 +582,15 @@ config SPI_MESON_SPIFC This enables master mode support for the SPIFC (SPI flash controller) available in Amlogic Meson SoCs. +config SPI_MICROCHIP_CORE + tristate "Microchip FPGA SPI controllers" + depends on SPI_MASTER + help + This enables the SPI driver for Microchip FPGA SPI controllers. + Say Y or M here if you want to use the "hard" controllers on + PolarFire SoC. + If built as a module, it will be called spi-microchip-core. + config SPI_MT65XX tristate "MediaTek SPI controller" depends on ARCH_MEDIATEK || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 0f44eb6083a5..15d2f3835e45 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o +obj-$(CONFIG_SPI_GXP) += spi-gxp.o obj-$(CONFIG_SPI_HISI_KUNPENG) += spi-hisi-kunpeng.o obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o @@ -71,6 +72,7 @@ obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o +obj-$(CONFIG_SPI_MICROCHIP_CORE) += spi-microchip-core.o obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 480c0c8c18e4..976a217e356d 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -21,6 +21,7 @@ #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/spi/spi-mem.h> /* QSPI register offsets */ @@ -285,7 +286,7 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem, /* special case not supported by hardware */ if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth && - op->dummy.nbytes == 0) + op->dummy.nbytes == 0) return false; return true; @@ -417,9 +418,13 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) if (op->addr.val + op->data.nbytes > aq->mmap_size) return -ENOTSUPP; + err = pm_runtime_resume_and_get(&aq->pdev->dev); + if (err < 0) + return err; + err = atmel_qspi_set_cfg(aq, op, &offset); if (err) - return err; + goto pm_runtime_put; /* Skip to the final steps if there is no data */ if (op->data.nbytes) { @@ -441,7 +446,7 @@ static int 
atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) /* Poll INSTRuction End status */ sr = atmel_qspi_read(aq, QSPI_SR); if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) - return err; + goto pm_runtime_put; /* Wait for INSTRuction End interrupt */ reinit_completion(&aq->cmd_completion); @@ -452,6 +457,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) err = -ETIMEDOUT; atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR); +pm_runtime_put: + pm_runtime_mark_last_busy(&aq->pdev->dev); + pm_runtime_put_autosuspend(&aq->pdev->dev); return err; } @@ -472,6 +480,7 @@ static int atmel_qspi_setup(struct spi_device *spi) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); unsigned long src_rate; u32 scbr; + int ret; if (ctrl->busy) return -EBUSY; @@ -488,9 +497,16 @@ static int atmel_qspi_setup(struct spi_device *spi) if (scbr > 0) scbr--; + ret = pm_runtime_resume_and_get(ctrl->dev.parent); + if (ret < 0) + return ret; + aq->scr = QSPI_SCR_SCBR(scbr); atmel_qspi_write(aq->scr, aq, QSPI_SCR); + pm_runtime_mark_last_busy(ctrl->dev.parent); + pm_runtime_put_autosuspend(ctrl->dev.parent); + return 0; } @@ -621,11 +637,24 @@ static int atmel_qspi_probe(struct platform_device *pdev) if (err) goto disable_qspick; + pm_runtime_set_autosuspend_delay(&pdev->dev, 500); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + atmel_qspi_init(aq); err = spi_register_controller(ctrl); - if (err) + if (err) { + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); goto disable_qspick; + } + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); return 0; @@ -641,9 +670,18 @@ static int atmel_qspi_remove(struct platform_device *pdev) { struct spi_controller *ctrl = platform_get_drvdata(pdev); struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + int ret; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) + return ret; spi_unregister_controller(ctrl); atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + clk_disable_unprepare(aq->qspick); clk_disable_unprepare(aq->pclk); return 0; @@ -653,10 +691,19 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev) { struct spi_controller *ctrl = dev_get_drvdata(dev); struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); - clk_disable_unprepare(aq->qspick); - clk_disable_unprepare(aq->pclk); + + pm_runtime_mark_last_busy(dev); + pm_runtime_force_suspend(dev); + + clk_unprepare(aq->qspick); + clk_unprepare(aq->pclk); return 0; } @@ -665,19 +712,54 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) { struct spi_controller *ctrl = dev_get_drvdata(dev); struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + int ret; - clk_prepare_enable(aq->pclk); - clk_prepare_enable(aq->qspick); + clk_prepare(aq->pclk); + clk_prepare(aq->qspick); + + ret = pm_runtime_force_resume(dev); + if (ret < 0) + return ret; atmel_qspi_init(aq); atmel_qspi_write(aq->scr, aq, QSPI_SCR); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; +} + +static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev) +{ + struct 
spi_controller *ctrl = dev_get_drvdata(dev); + struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + + clk_disable(aq->qspick); + clk_disable(aq->pclk); + return 0; } -static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend, - atmel_qspi_resume); +static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev) +{ + struct spi_controller *ctrl = dev_get_drvdata(dev); + struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + int ret; + + ret = clk_enable(aq->pclk); + if (ret) + return ret; + + return clk_enable(aq->qspick); +} + +static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume) + SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend, + atmel_qspi_runtime_resume, NULL) +}; static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {}; @@ -704,7 +786,7 @@ static struct platform_driver atmel_qspi_driver = { .driver = { .name = "atmel_qspi", .of_match_table = atmel_qspi_dt_ids, - .pm = &atmel_qspi_pm_ops, + .pm = pm_ptr(&atmel_qspi_pm_ops), }, .probe = atmel_qspi_probe, .remove = atmel_qspi_remove, diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c index ca40923258af..596e181ae136 100644 --- a/drivers/spi/spi-altera-dfl.c +++ b/drivers/spi/spi-altera-dfl.c @@ -128,9 +128,9 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev) struct spi_master *master; struct altera_spi *hw; void __iomem *base; - int err = -ENODEV; + int err; - master = spi_alloc_master(dev, sizeof(struct altera_spi)); + master = devm_spi_alloc_master(dev, sizeof(struct altera_spi)); if (!master) return -ENOMEM; @@ -159,10 +159,9 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev) altera_spi_init_master(master); err = devm_spi_register_master(dev, master); - if (err) { - dev_err(dev, "%s failed to register spi master %d\n", __func__, err); - goto exit; - } + if (err) + return dev_err_probe(dev, err, "%s failed to register spi master\n", + __func__); if (dfl_dev->revision == FME_FEATURE_REV_MAX10_SPI_N5010) strscpy(board_info.modalias, "m10-n5010", SPI_NAME_SIZE); @@ -179,9 +178,6 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev) } return 0; -exit: - spi_master_put(master); - return err; } static const struct dfl_device_id dfl_spi_altera_ids[] = { diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index efdcbe6c4c26..08df4f8d0531 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -40,14 +40,23 @@ #define AMD_SPI_XFER_TX 1 #define AMD_SPI_XFER_RX 2 +/** + * enum amd_spi_versions - SPI controller versions + * @AMD_SPI_V1: AMDI0061 hardware version + * @AMD_SPI_V2: AMDI0062 hardware version + */ enum amd_spi_versions { - AMD_SPI_V1 = 1, /* AMDI0061 */ - AMD_SPI_V2, /* AMDI0062 */ + AMD_SPI_V1 = 1, + AMD_SPI_V2, }; +/** + * struct amd_spi - SPI driver instance + * @io_remap_addr: Start address of the SPI controller registers + * @version: SPI controller hardware version + */ struct amd_spi { void __iomem *io_remap_addr; - unsigned long io_base_addr; enum amd_spi_versions version; }; @@ -281,22 +290,19 @@ static int amd_spi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct spi_master *master; struct amd_spi *amd_spi; - int err = 0; + int err; /* Allocate storage for spi_master and driver private data */ - master = spi_alloc_master(dev, sizeof(struct amd_spi)); - if (!master) { - dev_err(dev, "Error allocating SPI master\n"); - return -ENOMEM; - } + master = devm_spi_alloc_master(dev, sizeof(struct amd_spi)); + if (!master) + 
return dev_err_probe(dev, -ENOMEM, "Error allocating SPI master\n"); amd_spi = spi_master_get_devdata(master); amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(amd_spi->io_remap_addr)) { - err = PTR_ERR(amd_spi->io_remap_addr); - dev_err(dev, "error %d ioremap of SPI registers failed\n", err); - goto err_free_master; - } + if (IS_ERR(amd_spi->io_remap_addr)) + return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr), + "ioremap of SPI registers failed\n"); + dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr); amd_spi->version = (enum amd_spi_versions) device_get_match_data(dev); @@ -313,17 +319,10 @@ static int amd_spi_probe(struct platform_device *pdev) /* Register the controller with SPI framework */ err = devm_spi_register_master(dev, master); - if (err) { - dev_err(dev, "error %d registering SPI controller\n", err); - goto err_free_master; - } + if (err) + return dev_err_probe(dev, err, "error registering SPI controller\n"); return 0; - -err_free_master: - spi_master_put(master); - - return err; } #ifdef CONFIG_ACPI diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index d8cc4b270644..9df9fc40b783 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -497,7 +497,7 @@ static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi) while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) { val = *(u32 *)a3700_spi->tx_buf; - spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); + spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, cpu_to_le32(val)); a3700_spi->buf_len -= 4; a3700_spi->tx_buf += 4; } @@ -519,7 +519,7 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi) while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) { val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG); if (a3700_spi->buf_len >= 4) { - + val = le32_to_cpu(val); memcpy(a3700_spi->rx_buf, &val, 4); a3700_spi->buf_len -= 4; diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 9e300a932699..c4f22d50dba5 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -1631,7 +1631,6 @@ static int atmel_spi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM static int atmel_spi_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); @@ -1653,7 +1652,6 @@ static int atmel_spi_runtime_resume(struct device *dev) return clk_prepare_enable(as->clk); } -#ifdef CONFIG_PM_SLEEP static int atmel_spi_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); @@ -1693,17 +1691,12 @@ static int atmel_spi_resume(struct device *dev) /* Start the queue running */ return spi_master_resume(master); } -#endif static const struct dev_pm_ops atmel_spi_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) - SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend, - atmel_spi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) + RUNTIME_PM_OPS(atmel_spi_runtime_suspend, + atmel_spi_runtime_resume, NULL) }; -#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops) -#else -#define ATMEL_SPI_PM_OPS NULL -#endif static const struct of_device_id atmel_spi_dt_ids[] = { { .compatible = "atmel,at91rm9200-spi" }, @@ -1715,7 +1708,7 @@ MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids); static struct platform_driver atmel_spi_driver = { .driver = { .name = "atmel_spi", - .pm = ATMEL_SPI_PM_OPS, + .pm = pm_ptr(&atmel_spi_pm_ops), .of_match_table = atmel_spi_dt_ids, }, .probe = atmel_spi_probe, diff --git a/drivers/spi/spi-bcm2835.c 
b/drivers/spi/spi-bcm2835.c index 0933948d7df3..747e03228c48 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -372,6 +372,10 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) struct bcm2835_spi *bs = dev_id; u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); + /* Bail out early if interrupts are not enabled */ + if (!(cs & BCM2835_SPI_CS_INTR)) + return IRQ_NONE; + /* * An interrupt is signaled either if DONE is set (TX FIFO empty) * or if RXR is set (RX FIFO >= ¾ full). @@ -1369,8 +1373,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev) bcm2835_wr(bs, BCM2835_SPI_CS, BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); - err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, - dev_name(&pdev->dev), bs); + err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, + IRQF_SHARED, dev_name(&pdev->dev), bs); if (err) { dev_err(&pdev->dev, "could not request IRQ: %d\n", err); goto out_dma_release; diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index ecea471ff42c..f87d97ccd2d6 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -307,8 +307,9 @@ static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi) if (spi->mode & SPI_LOOP) cr0 |= DW_HSSI_CTRLR0_SRL; - if (dws->caps & DW_SPI_CAP_KEEMBAY_MST) - cr0 |= DW_HSSI_CTRLR0_KEEMBAY_MST; + /* CTRLR0[31] MST */ + if (dw_spi_ver_is_ge(dws, HSSI, 102A)) + cr0 |= DW_HSSI_CTRLR0_MST; } return cr0; @@ -942,7 +943,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) if (dws->dma_ops && dws->dma_ops->dma_init) { ret = dws->dma_ops->dma_init(dev, dws); - if (ret) { + if (ret == -EPROBE_DEFER) { + goto err_free_irq; + } else if (ret) { dev_warn(dev, "DMA init failed\n"); } else { master->can_dma = dws->dma_ops->can_dma; @@ -963,6 +966,7 @@ err_dma_exit: if (dws->dma_ops && dws->dma_ops->dma_exit) dws->dma_ops->dma_exit(dws); dw_spi_enable_chip(dws, 0); +err_free_irq: free_irq(dws->irq, master); err_free_master: spi_controller_put(master); diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c index 63e5260100ec..1322b8cce5b7 100644 --- a/drivers/spi/spi-dw-dma.c +++ b/drivers/spi/spi-dw-dma.c @@ -139,15 +139,20 @@ err_exit: static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws) { - dws->rxchan = dma_request_slave_channel(dev, "rx"); - if (!dws->rxchan) - return -ENODEV; + int ret; - dws->txchan = dma_request_slave_channel(dev, "tx"); - if (!dws->txchan) { - dma_release_channel(dws->rxchan); + dws->rxchan = dma_request_chan(dev, "rx"); + if (IS_ERR(dws->rxchan)) { + ret = PTR_ERR(dws->rxchan); dws->rxchan = NULL; - return -ENODEV; + goto err_exit; + } + + dws->txchan = dma_request_chan(dev, "tx"); + if (IS_ERR(dws->txchan)) { + ret = PTR_ERR(dws->txchan); + dws->txchan = NULL; + goto free_rxchan; } dws->master->dma_rx = dws->rxchan; @@ -160,6 +165,12 @@ static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws) dw_spi_dma_sg_burst_init(dws); return 0; + +free_rxchan: + dma_release_channel(dws->rxchan); + dws->rxchan = NULL; +err_exit: + return ret; } static void dw_spi_dma_exit(struct dw_spi *dws) diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 5101c4c6017b..26c40ea6dd12 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -214,11 +214,10 @@ static int dw_spi_hssi_init(struct platform_device *pdev, return 0; } -static int dw_spi_keembay_init(struct platform_device *pdev, - struct dw_spi_mmio *dwsmmio) +static int dw_spi_intel_init(struct 
platform_device *pdev, + struct dw_spi_mmio *dwsmmio) { dwsmmio->dws.ip = DW_HSSI_ID; - dwsmmio->dws.caps = DW_SPI_CAP_KEEMBAY_MST; return 0; } @@ -349,7 +348,8 @@ static const struct of_device_id dw_spi_mmio_of_match[] = { { .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init}, { .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init}, { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init}, - { .compatible = "intel,keembay-ssi", .data = dw_spi_keembay_init}, + { .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init}, + { .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init}, { .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init}, { .compatible = "canaan,k210-spi", dw_spi_canaan_k210_init}, { /* end of table */} diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index d5ee5130601e..9e8eb2b52d5c 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h @@ -23,7 +23,7 @@ ((_dws)->ip == DW_ ## _ip ## _ID) #define __dw_spi_ver_cmp(_dws, _ip, _ver, _op) \ - (dw_spi_ip_is(_dws, _ip) && (_dws)->ver _op DW_ ## _ip ## _ver) + (dw_spi_ip_is(_dws, _ip) && (_dws)->ver _op DW_ ## _ip ## _ ## _ver) #define dw_spi_ver_is(_dws, _ip, _ver) __dw_spi_ver_cmp(_dws, _ip, _ver, ==) @@ -31,8 +31,7 @@ /* DW SPI controller capabilities */ #define DW_SPI_CAP_CS_OVERRIDE BIT(0) -#define DW_SPI_CAP_KEEMBAY_MST BIT(1) -#define DW_SPI_CAP_DFS32 BIT(2) +#define DW_SPI_CAP_DFS32 BIT(1) /* Register offsets (Generic for both DWC APB SSI and DWC SSI IP-cores) */ #define DW_SPI_CTRLR0 0x00 @@ -94,13 +93,7 @@ #define DW_HSSI_CTRLR0_SCPOL BIT(9) #define DW_HSSI_CTRLR0_TMOD_MASK GENMASK(11, 10) #define DW_HSSI_CTRLR0_SRL BIT(13) - -/* - * For Keem Bay, CTRLR0[31] is used to select controller mode. - * 0: SSI is slave - * 1: SSI is master - */ -#define DW_HSSI_CTRLR0_KEEMBAY_MST BIT(31) +#define DW_HSSI_CTRLR0_MST BIT(31) /* Bit fields in CTRLR1 */ #define DW_SPI_NDF_MASK GENMASK(15, 0) diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c index 72ab066ce552..cf1e4f9ebd72 100644 --- a/drivers/spi/spi-fsi.c +++ b/drivers/spi/spi-fsi.c @@ -24,8 +24,7 @@ #define FSI2SPI_IRQ 0x20 #define SPI_FSI_BASE 0x70000 -#define SPI_FSI_INIT_TIMEOUT_MS 1000 -#define SPI_FSI_STATUS_TIMEOUT_MS 100 +#define SPI_FSI_TIMEOUT_MS 1000 #define SPI_FSI_MAX_RX_SIZE 8 #define SPI_FSI_MAX_TX_SIZE 40 @@ -299,6 +298,7 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq) static int fsi_spi_transfer_data(struct fsi_spi *ctx, struct spi_transfer *transfer) { + int loops; int rc = 0; unsigned long end; u64 status = 0ULL; @@ -317,9 +317,10 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, if (rc) return rc; - end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS); + loops = 0; + end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS); do { - if (time_after(jiffies, end)) + if (loops++ && time_after(jiffies, end)) return -ETIMEDOUT; rc = fsi_spi_status(ctx, &status, "TX"); @@ -335,9 +336,10 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, u8 *rx = transfer->rx_buf; while (transfer->len > recv) { - end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS); + loops = 0; + end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS); do { - if (time_after(jiffies, end)) + if (loops++ && time_after(jiffies, end)) return -ETIMEDOUT; rc = fsi_spi_status(ctx, &status, "RX"); @@ -359,6 +361,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, static int fsi_spi_transfer_init(struct fsi_spi *ctx) { + int loops = 0; int rc; bool reset = false; unsigned long end; @@ -369,9 
+372,9 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx) SPI_FSI_CLOCK_CFG_SCK_NO_DEL | FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19); - end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS); + end = jiffies + msecs_to_jiffies(SPI_FSI_TIMEOUT_MS); do { - if (time_after(jiffies, end)) + if (loops++ && time_after(jiffies, end)) return -ETIMEDOUT; rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status); diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c new file mode 100644 index 000000000000..9ea355f7d64f --- /dev/null +++ b/drivers/spi/spi-gxp.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0=or-later +/* Copyright (C) 2022 Hewlett-Packard Development Company, L.P. */ + +#include <linux/iopoll.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +#define GXP_SPI0_MAX_CHIPSELECT 2 +#define GXP_SPI_SLEEP_TIME 1 +#define GXP_SPI_TIMEOUT (130 * 1000000 / GXP_SPI_SLEEP_TIME) + +#define MANUAL_MODE 0 +#define DIRECT_MODE 1 +#define SPILDAT_LEN 256 + +#define OFFSET_SPIMCFG 0x0 +#define OFFSET_SPIMCTRL 0x4 +#define OFFSET_SPICMD 0x5 +#define OFFSET_SPIDCNT 0x6 +#define OFFSET_SPIADDR 0x8 +#define OFFSET_SPIINTSTS 0xc + +#define SPIMCTRL_START 0x01 +#define SPIMCTRL_BUSY 0x02 +#define SPIMCTRL_DIR 0x08 + +struct gxp_spi; + +struct gxp_spi_chip { + struct gxp_spi *spifi; + u32 cs; +}; + +struct gxp_spi_data { + u32 max_cs; + u32 mode_bits; +}; + +struct gxp_spi { + const struct gxp_spi_data *data; + void __iomem *reg_base; + void __iomem *dat_base; + void __iomem *dir_base; + struct device *dev; + struct gxp_spi_chip chips[GXP_SPI0_MAX_CHIPSELECT]; +}; + +static void gxp_spi_set_mode(struct gxp_spi *spifi, int mode) +{ + u8 value; + void __iomem *reg_base = spifi->reg_base; + + value = readb(reg_base + OFFSET_SPIMCTRL); + + if (mode == MANUAL_MODE) { + writeb(0x55, reg_base + OFFSET_SPICMD); + writeb(0xaa, reg_base + OFFSET_SPICMD); + value &= ~0x30; + } else { + value |= 0x30; + } + writeb(value, reg_base + OFFSET_SPIMCTRL); +} + +static int gxp_spi_read_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op) +{ + int ret; + struct gxp_spi *spifi = chip->spifi; + void __iomem *reg_base = spifi->reg_base; + u32 value; + + value = readl(reg_base + OFFSET_SPIMCFG); + value &= ~(1 << 24); + value |= (chip->cs << 24); + value &= ~(0x07 << 16); + value &= ~(0x1f << 19); + writel(value, reg_base + OFFSET_SPIMCFG); + + writel(0, reg_base + OFFSET_SPIADDR); + + writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD); + + writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT); + + value = readb(reg_base + OFFSET_SPIMCTRL); + value &= ~SPIMCTRL_DIR; + value |= SPIMCTRL_START; + + writeb(value, reg_base + OFFSET_SPIMCTRL); + + ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value, + !(value & SPIMCTRL_BUSY), + GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT); + if (ret) { + dev_warn(spifi->dev, "read reg busy time out\n"); + return ret; + } + + memcpy_fromio(op->data.buf.in, spifi->dat_base, op->data.nbytes); + return ret; +} + +static int gxp_spi_write_reg(struct gxp_spi_chip *chip, const struct spi_mem_op *op) +{ + int ret; + struct gxp_spi *spifi = chip->spifi; + void __iomem *reg_base = spifi->reg_base; + u32 value; + + value = readl(reg_base + OFFSET_SPIMCFG); + value &= ~(1 << 24); + value |= (chip->cs << 24); + value &= ~(0x07 << 16); + value &= ~(0x1f << 19); + writel(value, reg_base + OFFSET_SPIMCFG); + + writel(0, reg_base + OFFSET_SPIADDR); + + writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD); + 
+ memcpy_toio(spifi->dat_base, op->data.buf.in, op->data.nbytes); + + writew(op->data.nbytes, reg_base + OFFSET_SPIDCNT); + + value = readb(reg_base + OFFSET_SPIMCTRL); + value |= SPIMCTRL_DIR; + value |= SPIMCTRL_START; + + writeb(value, reg_base + OFFSET_SPIMCTRL); + + ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value, + !(value & SPIMCTRL_BUSY), + GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT); + if (ret) + dev_warn(spifi->dev, "write reg busy time out\n"); + + return ret; +} + +static ssize_t gxp_spi_read(struct gxp_spi_chip *chip, const struct spi_mem_op *op) +{ + struct gxp_spi *spifi = chip->spifi; + u32 offset = op->addr.val; + + if (chip->cs == 0) + offset += 0x4000000; + + memcpy_fromio(op->data.buf.in, spifi->dir_base + offset, op->data.nbytes); + + return 0; +} + +static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op *op) +{ + struct gxp_spi *spifi = chip->spifi; + void __iomem *reg_base = spifi->reg_base; + u32 write_len; + u32 value; + int ret; + + write_len = op->data.nbytes; + if (write_len > SPILDAT_LEN) + write_len = SPILDAT_LEN; + + value = readl(reg_base + OFFSET_SPIMCFG); + value &= ~(1 << 24); + value |= (chip->cs << 24); + value &= ~(0x07 << 16); + value |= (op->addr.nbytes << 16); + value &= ~(0x1f << 19); + writel(value, reg_base + OFFSET_SPIMCFG); + + writel(op->addr.val, reg_base + OFFSET_SPIADDR); + + writeb(op->cmd.opcode, reg_base + OFFSET_SPICMD); + + writew(write_len, reg_base + OFFSET_SPIDCNT); + + memcpy_toio(spifi->dat_base, op->data.buf.in, write_len); + + value = readb(reg_base + OFFSET_SPIMCTRL); + value |= SPIMCTRL_DIR; + value |= SPIMCTRL_START; + + writeb(value, reg_base + OFFSET_SPIMCTRL); + + ret = readb_poll_timeout(reg_base + OFFSET_SPIMCTRL, value, + !(value & SPIMCTRL_BUSY), + GXP_SPI_SLEEP_TIME, GXP_SPI_TIMEOUT); + if (ret) { + dev_warn(spifi->dev, "write busy time out\n"); + return ret; + } + + return write_len; +} + +static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct gxp_spi *spifi = spi_controller_get_devdata(mem->spi->master); + struct gxp_spi_chip *chip = &spifi->chips[mem->spi->chip_select]; + int ret; + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (!op->addr.nbytes) + ret = gxp_spi_read_reg(chip, op); + else + ret = gxp_spi_read(chip, op); + } else { + if (!op->addr.nbytes) + ret = gxp_spi_write_reg(chip, op); + else + ret = gxp_spi_write(chip, op); + } + + return ret; +} + +static int gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + int ret; + + ret = do_gxp_exec_mem_op(mem, op); + if (ret) + dev_err(&mem->spi->dev, "operation failed: %d", ret); + + return ret; +} + +static const struct spi_controller_mem_ops gxp_spi_mem_ops = { + .exec_op = gxp_exec_mem_op, +}; + +static int gxp_spi_setup(struct spi_device *spi) +{ + struct gxp_spi *spifi = spi_controller_get_devdata(spi->master); + unsigned int cs = spi->chip_select; + struct gxp_spi_chip *chip = &spifi->chips[cs]; + + chip->spifi = spifi; + chip->cs = cs; + + gxp_spi_set_mode(spifi, MANUAL_MODE); + + return 0; +} + +static int gxp_spifi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct gxp_spi_data *data; + struct spi_controller *ctlr; + struct gxp_spi *spifi; + struct resource *res; + int ret; + + data = of_device_get_match_data(&pdev->dev); + + ctlr = devm_spi_alloc_master(dev, sizeof(*spifi)); + if (!ctlr) + return -ENOMEM; + + spifi = spi_controller_get_devdata(ctlr); + + platform_set_drvdata(pdev, spifi); + spifi->data = data; + spifi->dev = dev; + + 
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + spifi->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spifi->reg_base)) + return PTR_ERR(spifi->reg_base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + spifi->dat_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spifi->dat_base)) + return PTR_ERR(spifi->dat_base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + spifi->dir_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spifi->dir_base)) + return PTR_ERR(spifi->dir_base); + + ctlr->mode_bits = data->mode_bits; + ctlr->bus_num = pdev->id; + ctlr->mem_ops = &gxp_spi_mem_ops; + ctlr->setup = gxp_spi_setup; + ctlr->num_chipselect = data->max_cs; + ctlr->dev.of_node = dev->of_node; + + ret = devm_spi_register_controller(dev, ctlr); + if (ret) { + return dev_err_probe(&pdev->dev, ret, + "failed to register spi controller\n"); + } + + return 0; +} + +static const struct gxp_spi_data gxp_spifi_data = { + .max_cs = 2, + .mode_bits = 0, +}; + +static const struct of_device_id gxp_spifi_match[] = { + {.compatible = "hpe,gxp-spifi", .data = &gxp_spifi_data }, + { /* null */ } +}; +MODULE_DEVICE_TABLE(of, gxp_spifi_match); + +static struct platform_driver gxp_spifi_driver = { + .probe = gxp_spifi_probe, + .driver = { + .name = "gxp-spifi", + .of_match_table = gxp_spifi_match, + }, +}; +module_platform_driver(gxp_spifi_driver); + +MODULE_DESCRIPTION("HPE GXP SPI Flash Interface driver"); +MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index f6eec7a869b6..f0d532ea40e8 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -74,6 +74,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index 50f42983b950..66063687ae27 100644 --- a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -1236,8 +1236,8 @@ static int intel_spi_populate_chip(struct intel_spi *ispi) return -ENOMEM; pdata->nr_parts = 1; - pdata->parts = devm_kcalloc(ispi->dev, sizeof(*pdata->parts), - pdata->nr_parts, GFP_KERNEL); + pdata->parts = devm_kcalloc(ispi->dev, pdata->nr_parts, + sizeof(*pdata->parts), GFP_KERNEL); if (!pdata->parts) return -ENOMEM; diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c new file mode 100644 index 000000000000..ce4385330b19 --- /dev/null +++ b/drivers/spi/spi-microchip-core.c @@ -0,0 +1,617 @@ +// SPDX-License-Identifier: (GPL-2.0) +/* + * Microchip CoreSPI SPI controller driver + * + * Copyright (c) 2018-2022 Microchip Technology Inc. 
and its subsidiaries + * + * Author: Daire McNamara <daire.mcnamara@microchip.com> + * Author: Conor Dooley <conor.dooley@microchip.com> + * + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> + +#define MAX_LEN (0xffff) +#define MAX_CS (8) +#define DEFAULT_FRAMESIZE (8) +#define FIFO_DEPTH (32) +#define CLK_GEN_MODE1_MAX (255) +#define CLK_GEN_MODE0_MAX (15) +#define CLK_GEN_MIN (0) +#define MODE_X_MASK_SHIFT (24) + +#define CONTROL_ENABLE BIT(0) +#define CONTROL_MASTER BIT(1) +#define CONTROL_RX_DATA_INT BIT(4) +#define CONTROL_TX_DATA_INT BIT(5) +#define CONTROL_RX_OVER_INT BIT(6) +#define CONTROL_TX_UNDER_INT BIT(7) +#define CONTROL_SPO BIT(24) +#define CONTROL_SPH BIT(25) +#define CONTROL_SPS BIT(26) +#define CONTROL_FRAMEURUN BIT(27) +#define CONTROL_CLKMODE BIT(28) +#define CONTROL_BIGFIFO BIT(29) +#define CONTROL_OENOFF BIT(30) +#define CONTROL_RESET BIT(31) + +#define CONTROL_MODE_MASK GENMASK(3, 2) +#define MOTOROLA_MODE (0) +#define CONTROL_FRAMECNT_MASK GENMASK(23, 8) +#define CONTROL_FRAMECNT_SHIFT (8) + +#define STATUS_ACTIVE BIT(14) +#define STATUS_SSEL BIT(13) +#define STATUS_FRAMESTART BIT(12) +#define STATUS_TXFIFO_EMPTY_NEXT_READ BIT(11) +#define STATUS_TXFIFO_EMPTY BIT(10) +#define STATUS_TXFIFO_FULL_NEXT_WRITE BIT(9) +#define STATUS_TXFIFO_FULL BIT(8) +#define STATUS_RXFIFO_EMPTY_NEXT_READ BIT(7) +#define STATUS_RXFIFO_EMPTY BIT(6) +#define STATUS_RXFIFO_FULL_NEXT_WRITE BIT(5) +#define STATUS_RXFIFO_FULL BIT(4) +#define STATUS_TX_UNDERRUN BIT(3) +#define STATUS_RX_OVERFLOW BIT(2) +#define STATUS_RXDAT_RXED BIT(1) +#define STATUS_TXDAT_SENT BIT(0) + +#define INT_TXDONE BIT(0) +#define INT_RXRDY BIT(1) +#define INT_RX_CHANNEL_OVERFLOW BIT(2) +#define INT_TX_CHANNEL_UNDERRUN BIT(3) + +#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \ + CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT) + +#define REG_CONTROL (0x00) +#define REG_FRAME_SIZE (0x04) +#define REG_STATUS (0x08) +#define REG_INT_CLEAR (0x0c) +#define REG_RX_DATA (0x10) +#define REG_TX_DATA (0x14) +#define REG_CLK_GEN (0x18) +#define REG_SLAVE_SELECT (0x1c) +#define SSEL_MASK GENMASK(7, 0) +#define SSEL_DIRECT BIT(8) +#define SSELOUT_SHIFT 9 +#define SSELOUT BIT(SSELOUT_SHIFT) +#define REG_MIS (0x20) +#define REG_RIS (0x24) +#define REG_CONTROL2 (0x28) +#define REG_COMMAND (0x2c) +#define REG_PKTSIZE (0x30) +#define REG_CMD_SIZE (0x34) +#define REG_HWSTATUS (0x38) +#define REG_STAT8 (0x3c) +#define REG_CTRL2 (0x48) +#define REG_FRAMESUP (0x50) + +struct mchp_corespi { + void __iomem *regs; + struct clk *clk; + const u8 *tx_buf; + u8 *rx_buf; + u32 clk_gen; /* divider for spi output clock generated by the controller */ + u32 clk_mode; + int irq; + int tx_len; + int rx_len; + int pending; +}; + +static inline u32 mchp_corespi_read(struct mchp_corespi *spi, unsigned int reg) +{ + return readl(spi->regs + reg); +} + +static inline void mchp_corespi_write(struct mchp_corespi *spi, unsigned int reg, u32 val) +{ + writel(val, spi->regs + reg); +} + +static inline void mchp_corespi_enable(struct mchp_corespi *spi) +{ + u32 control = mchp_corespi_read(spi, REG_CONTROL); + + control |= CONTROL_ENABLE; + + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_disable(struct mchp_corespi *spi) +{ + u32 control = mchp_corespi_read(spi, REG_CONTROL); + + control &= 
~CONTROL_ENABLE; + + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi) +{ + u8 data; + int fifo_max, i = 0; + + fifo_max = min(spi->rx_len, FIFO_DEPTH); + + while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) { + data = mchp_corespi_read(spi, REG_RX_DATA); + + if (spi->rx_buf) + *spi->rx_buf++ = data; + i++; + } + spi->rx_len -= i; + spi->pending -= i; +} + +static void mchp_corespi_enable_ints(struct mchp_corespi *spi) +{ + u32 control, mask = INT_ENABLE_MASK; + + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + + control |= mask; + mchp_corespi_write(spi, REG_CONTROL, control); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static void mchp_corespi_disable_ints(struct mchp_corespi *spi) +{ + u32 control, mask = INT_ENABLE_MASK; + + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~mask; + mchp_corespi_write(spi, REG_CONTROL, control); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len) +{ + u32 control; + u16 lenpart; + + /* + * Disable the SPI controller. Writes to transfer length have + * no effect when the controller is enabled. + */ + mchp_corespi_disable(spi); + + /* + * The lower 16 bits of the frame count are stored in the control reg + * for legacy reasons, but the upper 16 written to a different register: + * FRAMESUP. While both the upper and lower bits can be *READ* from the + * FRAMESUP register, writing to the lower 16 bits is a NOP + */ + lenpart = len & 0xffff; + + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~CONTROL_FRAMECNT_MASK; + control |= lenpart << CONTROL_FRAMECNT_SHIFT; + mchp_corespi_write(spi, REG_CONTROL, control); + + lenpart = len & 0xffff0000; + mchp_corespi_write(spi, REG_FRAMESUP, lenpart); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) +{ + u8 byte; + int fifo_max, i = 0; + + fifo_max = min(spi->tx_len, FIFO_DEPTH); + mchp_corespi_set_xfer_size(spi, fifo_max); + + while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) { + byte = spi->tx_buf ? *spi->tx_buf++ : 0xaa; + mchp_corespi_write(spi, REG_TX_DATA, byte); + i++; + } + + spi->tx_len -= i; + spi->pending += i; +} + +static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt) +{ + u32 control; + + /* + * Disable the SPI controller. Writes to the frame size have + * no effect when the controller is enabled. 
+ */ + mchp_corespi_disable(spi); + + mchp_corespi_write(spi, REG_FRAME_SIZE, bt); + + control = mchp_corespi_read(spi, REG_CONTROL); + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static void mchp_corespi_set_cs(struct spi_device *spi, bool disable) +{ + u32 reg; + struct mchp_corespi *corespi = spi_master_get_devdata(spi->master); + + reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); + reg &= ~BIT(spi->chip_select); + reg |= !disable << spi->chip_select; + + mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); +} + +static int mchp_corespi_setup(struct spi_device *spi) +{ + struct mchp_corespi *corespi = spi_master_get_devdata(spi->master); + u32 reg; + + /* + * Active high slaves need to be specifically set to their inactive + * states during probe by adding them to the "control group" & thus + * driving their select line low. + */ + if (spi->mode & SPI_CS_HIGH) { + reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); + reg |= BIT(spi->chip_select); + mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); + } + return 0; +} + +static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *spi) +{ + unsigned long clk_hz; + u32 control = mchp_corespi_read(spi, REG_CONTROL); + + control |= CONTROL_MASTER; + + control &= ~CONTROL_MODE_MASK; + control |= MOTOROLA_MODE; + + mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); + + /* max. possible spi clock rate is the apb clock rate */ + clk_hz = clk_get_rate(spi->clk); + master->max_speed_hz = clk_hz; + + /* + * The controller must be configured so that it doesn't remove Chip + * Select until the entire message has been transferred, even if at + * some points TX FIFO becomes empty. + * + * BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames + * for the 8 bit transfers that this driver uses. + */ + control = mchp_corespi_read(spi, REG_CONTROL); + control |= CONTROL_SPS | CONTROL_BIGFIFO; + + mchp_corespi_write(spi, REG_CONTROL, control); + + mchp_corespi_enable_ints(spi); + + /* + * It is required to enable direct mode, otherwise control over the chip + * select is relinquished to the hardware. SSELOUT is enabled too so we + * can deal with active high slaves. + */ + mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT); + + control = mchp_corespi_read(spi, REG_CONTROL); + + control &= ~CONTROL_RESET; + control |= CONTROL_ENABLE; + + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi) +{ + u32 control; + + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + if (spi->clk_mode) + control |= CONTROL_CLKMODE; + else + control &= ~CONTROL_CLKMODE; + + mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen); + mchp_corespi_write(spi, REG_CONTROL, control); + mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE); +} + +static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode) +{ + u32 control, mode_val; + + switch (mode & SPI_MODE_X_MASK) { + case SPI_MODE_0: + mode_val = 0; + break; + case SPI_MODE_1: + mode_val = CONTROL_SPH; + break; + case SPI_MODE_2: + mode_val = CONTROL_SPO; + break; + case SPI_MODE_3: + mode_val = CONTROL_SPH | CONTROL_SPO; + break; + } + + /* + * Disable the SPI controller. Writes to the frame size have + * no effect when the controller is enabled. 
+ */ + mchp_corespi_disable(spi); + + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT); + control |= mode_val; + + mchp_corespi_write(spi, REG_CONTROL, control); + + control |= CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); +} + +static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct mchp_corespi *spi = spi_master_get_devdata(master); + u32 intfield = mchp_corespi_read(spi, REG_MIS) & 0xf; + bool finalise = false; + + /* Interrupt line may be shared and not for us at all */ + if (intfield == 0) + return IRQ_NONE; + + if (intfield & INT_TXDONE) { + mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE); + + if (spi->rx_len) + mchp_corespi_read_fifo(spi); + + if (spi->tx_len) + mchp_corespi_write_fifo(spi); + + if (!spi->rx_len) + finalise = true; + } + + if (intfield & INT_RXRDY) + mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY); + + if (intfield & INT_RX_CHANNEL_OVERFLOW) { + mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW); + finalise = true; + dev_err(&master->dev, + "%s: RX OVERFLOW: rxlen: %d, txlen: %d\n", __func__, + spi->rx_len, spi->tx_len); + } + + if (intfield & INT_TX_CHANNEL_UNDERRUN) { + mchp_corespi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN); + finalise = true; + dev_err(&master->dev, + "%s: TX UNDERFLOW: rxlen: %d, txlen: %d\n", __func__, + spi->rx_len, spi->tx_len); + } + + if (finalise) + spi_finalize_current_transfer(master); + + return IRQ_HANDLED; +} + +static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi, + unsigned long target_hz) +{ + unsigned long clk_hz, spi_hz, clk_gen; + + clk_hz = clk_get_rate(spi->clk); + if (!clk_hz) + return -EINVAL; + spi_hz = min(target_hz, clk_hz); + + /* + * There are two possible clock modes for the controller generated + * clock's division ratio: + * CLK_MODE = 0: 1 / (2^(CLK_GEN + 1)) where CLK_GEN = 0 to 15. + * CLK_MODE = 1: 1 / (2 * CLK_GEN + 1) where CLK_GEN = 0 to 255. + * First try mode 1, fall back to 0 and if we have tried both modes and + * we /still/ can't get a good setting, we then throw the toys out of + * the pram and give up + * clk_gen is the register name for the clock divider on MPFS. + */ + clk_gen = DIV_ROUND_UP(clk_hz, 2 * spi_hz) - 1; + if (clk_gen > CLK_GEN_MODE1_MAX || clk_gen <= CLK_GEN_MIN) { + clk_gen = DIV_ROUND_UP(clk_hz, spi_hz); + clk_gen = fls(clk_gen) - 1; + + if (clk_gen > CLK_GEN_MODE0_MAX) + return -EINVAL; + + spi->clk_mode = 0; + } else { + spi->clk_mode = 1; + } + + spi->clk_gen = clk_gen; + return 0; +} + +static int mchp_corespi_transfer_one(struct spi_master *master, + struct spi_device *spi_dev, + struct spi_transfer *xfer) +{ + struct mchp_corespi *spi = spi_master_get_devdata(master); + int ret; + + ret = mchp_corespi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz); + if (ret) { + dev_err(&master->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz); + return ret; + } + + mchp_corespi_set_clk_gen(spi); + + spi->tx_buf = xfer->tx_buf; + spi->rx_buf = xfer->rx_buf; + spi->tx_len = xfer->len; + spi->rx_len = xfer->len; + spi->pending = 0; + + mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH) + ? 
FIFO_DEPTH : spi->tx_len); + + if (spi->tx_len) + mchp_corespi_write_fifo(spi); + return 1; +} + +static int mchp_corespi_prepare_message(struct spi_master *master, + struct spi_message *msg) +{ + struct spi_device *spi_dev = msg->spi; + struct mchp_corespi *spi = spi_master_get_devdata(master); + + mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); + mchp_corespi_set_mode(spi, spi_dev->mode); + + return 0; +} + +static int mchp_corespi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct mchp_corespi *spi; + struct resource *res; + u32 num_cs; + int ret = 0; + + master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi)); + if (!master) + return dev_err_probe(&pdev->dev, -ENOMEM, + "unable to allocate master for SPI controller\n"); + + platform_set_drvdata(pdev, master); + + if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs)) + num_cs = MAX_CS; + + master->num_chipselect = num_cs; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->setup = mchp_corespi_setup; + master->bits_per_word_mask = SPI_BPW_MASK(8); + master->transfer_one = mchp_corespi_transfer_one; + master->prepare_message = mchp_corespi_prepare_message; + master->set_cs = mchp_corespi_set_cs; + master->dev.of_node = pdev->dev.of_node; + + spi = spi_master_get_devdata(master); + + spi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(spi->regs)) + return PTR_ERR(spi->regs); + + spi->irq = platform_get_irq(pdev, 0); + if (spi->irq <= 0) + return dev_err_probe(&pdev->dev, -ENXIO, + "invalid IRQ %d for SPI controller\n", + spi->irq); + + ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt, + IRQF_SHARED, dev_name(&pdev->dev), master); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "could not request irq: %d\n", ret); + + spi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(spi->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk), + "could not get clk: %d\n", ret); + + ret = clk_prepare_enable(spi->clk); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "failed to enable clock\n"); + + mchp_corespi_init(master, spi); + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret) { + mchp_corespi_disable(spi); + clk_disable_unprepare(spi->clk); + return dev_err_probe(&pdev->dev, ret, + "unable to register master for SPI controller\n"); + } + + dev_info(&pdev->dev, "Registered SPI controller %d\n", master->bus_num); + + return 0; +} + +static int mchp_corespi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct mchp_corespi *spi = spi_master_get_devdata(master); + + mchp_corespi_disable_ints(spi); + clk_disable_unprepare(spi->clk); + mchp_corespi_disable(spi); + + return 0; +} + +#define MICROCHIP_SPI_PM_OPS (NULL) + +/* + * Platform driver data structure + */ + +#if defined(CONFIG_OF) +static const struct of_device_id mchp_corespi_dt_ids[] = { + { .compatible = "microchip,mpfs-spi" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mchp_corespi_dt_ids); +#endif + +static struct platform_driver mchp_corespi_driver = { + .probe = mchp_corespi_probe, + .driver = { + .name = "microchip-corespi", + .pm = MICROCHIP_SPI_PM_OPS, + .of_match_table = of_match_ptr(mchp_corespi_dt_ids), + }, + .remove = mchp_corespi_remove, +}; +module_platform_driver(mchp_corespi_driver); +MODULE_DESCRIPTION("Microchip coreSPI SPI controller driver"); +MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>"); +MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>"); 
+MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c index 7654736c2c0e..609311231e64 100644 --- a/drivers/spi/spi-mpc52xx-psc.c +++ b/drivers/spi/spi-mpc52xx-psc.c @@ -37,12 +37,6 @@ struct mpc52xx_psc_spi { struct mpc52xx_psc_fifo __iomem *fifo; unsigned int irq; u8 bits_per_word; - u8 busy; - - struct work_struct work; - - struct list_head queue; - spinlock_t lock; struct completion done; }; @@ -198,69 +192,53 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, return 0; } -static void mpc52xx_psc_spi_work(struct work_struct *work) +int mpc52xx_psc_spi_transfer_one_message(struct spi_controller *ctlr, + struct spi_message *m) { - struct mpc52xx_psc_spi *mps = - container_of(work, struct mpc52xx_psc_spi, work); - - spin_lock_irq(&mps->lock); - mps->busy = 1; - while (!list_empty(&mps->queue)) { - struct spi_message *m; - struct spi_device *spi; - struct spi_transfer *t = NULL; - unsigned cs_change; - int status; - - m = container_of(mps->queue.next, struct spi_message, queue); - list_del_init(&m->queue); - spin_unlock_irq(&mps->lock); - - spi = m->spi; - cs_change = 1; - status = 0; - list_for_each_entry (t, &m->transfers, transfer_list) { - if (t->bits_per_word || t->speed_hz) { - status = mpc52xx_psc_spi_transfer_setup(spi, t); - if (status < 0) - break; - } - - if (cs_change) - mpc52xx_psc_spi_activate_cs(spi); - cs_change = t->cs_change; - - status = mpc52xx_psc_spi_transfer_rxtx(spi, t); - if (status) + struct spi_device *spi; + struct spi_transfer *t = NULL; + unsigned cs_change; + int status; + + spi = m->spi; + cs_change = 1; + status = 0; + list_for_each_entry (t, &m->transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + status = mpc52xx_psc_spi_transfer_setup(spi, t); + if (status < 0) break; - m->actual_length += t->len; + } - spi_transfer_delay_exec(t); + if (cs_change) + mpc52xx_psc_spi_activate_cs(spi); + cs_change = t->cs_change; - if (cs_change) - mpc52xx_psc_spi_deactivate_cs(spi); - } + status = mpc52xx_psc_spi_transfer_rxtx(spi, t); + if (status) + break; + m->actual_length += t->len; - m->status = status; - if (m->complete) - m->complete(m->context); + spi_transfer_delay_exec(t); - if (status || !cs_change) + if (cs_change) mpc52xx_psc_spi_deactivate_cs(spi); + } - mpc52xx_psc_spi_transfer_setup(spi, NULL); + m->status = status; + if (status || !cs_change) + mpc52xx_psc_spi_deactivate_cs(spi); - spin_lock_irq(&mps->lock); - } - mps->busy = 0; - spin_unlock_irq(&mps->lock); + mpc52xx_psc_spi_transfer_setup(spi, NULL); + + spi_finalize_current_message(ctlr); + + return 0; } static int mpc52xx_psc_spi_setup(struct spi_device *spi) { - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); struct mpc52xx_psc_spi_cs *cs = spi->controller_state; - unsigned long flags; if (spi->bits_per_word%8) return -EINVAL; @@ -275,28 +253,6 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi) cs->bits_per_word = spi->bits_per_word; cs->speed_hz = spi->max_speed_hz; - spin_lock_irqsave(&mps->lock, flags); - if (!mps->busy) - mpc52xx_psc_spi_deactivate_cs(spi); - spin_unlock_irqrestore(&mps->lock, flags); - - return 0; -} - -static int mpc52xx_psc_spi_transfer(struct spi_device *spi, - struct spi_message *m) -{ - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); - unsigned long flags; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - spin_lock_irqsave(&mps->lock, flags); - list_add_tail(&m->queue, &mps->queue); - schedule_work(&mps->work); - 
spin_unlock_irqrestore(&mps->lock, flags); - return 0; } @@ -391,7 +347,7 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, master->num_chipselect = pdata->max_chipselect; } master->setup = mpc52xx_psc_spi_setup; - master->transfer = mpc52xx_psc_spi_transfer; + master->transfer_one_message = mpc52xx_psc_spi_transfer_one_message; master->cleanup = mpc52xx_psc_spi_cleanup; master->dev.of_node = dev->of_node; @@ -415,10 +371,7 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, goto free_irq; } - spin_lock_init(&mps->lock); init_completion(&mps->done); - INIT_WORK(&mps->work, mpc52xx_psc_spi_work); - INIT_LIST_HEAD(&mps->queue); ret = spi_register_master(master); if (ret < 0) @@ -470,7 +423,6 @@ static int mpc52xx_psc_spi_of_remove(struct platform_device *op) struct spi_master *master = spi_master_get(platform_get_drvdata(op)); struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); - flush_work(&mps->work); spi_unregister_master(master); free_irq(mps->irq, mps); if (mps->psc) diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c index ba67dbed9fb8..49f6424e35af 100644 --- a/drivers/spi/spi-npcm-fiu.c +++ b/drivers/spi/spi-npcm-fiu.c @@ -36,6 +36,7 @@ #define NPCM_FIU_UMA_DR1 0x34 #define NPCM_FIU_UMA_DR2 0x38 #define NPCM_FIU_UMA_DR3 0x3C +#define NPCM_FIU_CFG 0x78 #define NPCM_FIU_MAX_REG_LIMIT 0x80 /* FIU Direct Read Configuration Register */ @@ -151,6 +152,9 @@ #define NPCM_FIU_UMA_DR3_RB13 GENMASK(15, 8) #define NPCM_FIU_UMA_DR3_RB12 GENMASK(7, 0) +/* FIU Configuration Register */ +#define NPCM_FIU_CFG_FIU_FIX BIT(31) + /* FIU Read Mode */ enum { DRD_SINGLE_WIRE_MODE = 0, @@ -187,6 +191,7 @@ enum { FIU0 = 0, FIU3, FIUX, + FIU1, }; struct npcm_fiu_info { @@ -214,6 +219,21 @@ static const struct fiu_data npcm7xx_fiu_data = { .fiu_max = 3, }; +static const struct npcm_fiu_info npxm8xx_fiu_info[] = { + {.name = "FIU0", .fiu_id = FIU0, + .max_map_size = MAP_SIZE_128MB, .max_cs = 2}, + {.name = "FIU3", .fiu_id = FIU3, + .max_map_size = MAP_SIZE_128MB, .max_cs = 4}, + {.name = "FIUX", .fiu_id = FIUX, + .max_map_size = MAP_SIZE_16MB, .max_cs = 2}, + {.name = "FIU1", .fiu_id = FIU1, + .max_map_size = MAP_SIZE_16MB, .max_cs = 4} }; + +static const struct fiu_data npxm8xx_fiu_data = { + .npcm_fiu_data_info = npxm8xx_fiu_info, + .fiu_max = 4, +}; + struct npcm_fiu_spi; struct npcm_fiu_chip { @@ -252,8 +272,7 @@ static void npcm_fiu_set_drd(struct npcm_fiu_spi *fiu, fiu->drd_op.addr.buswidth = op->addr.buswidth; regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG, NPCM_FIU_DRD_CFG_DBW, - ((op->dummy.nbytes * ilog2(op->addr.buswidth)) / BITS_PER_BYTE) - << NPCM_FIU_DRD_DBW_SHIFT); + op->dummy.nbytes << NPCM_FIU_DRD_DBW_SHIFT); fiu->drd_op.dummy.nbytes = op->dummy.nbytes; regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG, NPCM_FIU_DRD_CFG_RDCMD, op->cmd.opcode); @@ -625,6 +644,10 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc) regmap_update_bits(gcr_regmap, NPCM7XX_INTCR3_OFFSET, NPCM7XX_INTCR3_FIU_FIX, NPCM7XX_INTCR3_FIU_FIX); + } else { + regmap_update_bits(fiu->regmap, NPCM_FIU_CFG, + NPCM_FIU_CFG_FIU_FIX, + NPCM_FIU_CFG_FIU_FIX); } if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) { @@ -665,6 +688,7 @@ static const struct spi_controller_mem_ops npcm_fiu_mem_ops = { static const struct of_device_id npcm_fiu_dt_ids[] = { { .compatible = "nuvoton,npcm750-fiu", .data = &npcm7xx_fiu_data }, + { .compatible = "nuvoton,npcm845-fiu", .data = &npxm8xx_fiu_data }, { /* sentinel */ } }; diff --git a/drivers/spi/spi-pxa2xx.c 
b/drivers/spi/spi-pxa2xx.c index edb42d08857d..838d12e65144 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1404,6 +1404,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { { PCI_VDEVICE(INTEL, 0x7aab), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x7af9), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x7afb), LPSS_CNL_SSP }, + /* MTL-P */ + { PCI_VDEVICE(INTEL, 0x7e27), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x7e30), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x7e46), LPSS_CNL_SSP }, /* CNL-LP */ { PCI_VDEVICE(INTEL, 0x9daa), LPSS_CNL_SSP }, { PCI_VDEVICE(INTEL, 0x9dab), LPSS_CNL_SSP }, diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index c26440e9058d..7f346866614a 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -18,7 +18,7 @@ #include <linux/platform_data/spi-s3c64xx.h> -#define MAX_SPI_PORTS 6 +#define MAX_SPI_PORTS 12 #define S3C64XX_SPI_QUIRK_POLL (1 << 0) #define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1) #define AUTOSUSPEND_TIMEOUT 2000 @@ -59,6 +59,7 @@ #define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17) #define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17) #define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17) +#define S3C64XX_SPI_MODE_SELF_LOOPBACK (1<<3) #define S3C64XX_SPI_MODE_RXDMA_ON (1<<2) #define S3C64XX_SPI_MODE_TXDMA_ON (1<<1) #define S3C64XX_SPI_MODE_4BURST (1<<0) @@ -130,11 +131,13 @@ struct s3c64xx_spi_dma_data { * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register. * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS regiter. * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS regiter. + * @clk_div: Internal clock divider * @quirks: Bitmask of known quirks * @high_speed: True, if the controller supports HIGH_SPEED_EN bit. * @clk_from_cmu: True, if the controller does not include a clock mux and * prescaler unit. 
* @clk_ioclk: True if clock is present on this device + * @has_loopback: True if loopback mode can be supported * * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but * differ in some aspects such as the size of the fifo and spi bus clock @@ -146,9 +149,11 @@ struct s3c64xx_spi_port_config { int rx_lvl_offset; int tx_st_done; int quirks; + int clk_div; bool high_speed; bool clk_from_cmu; bool clk_ioclk; + bool has_loopback; }; /** @@ -350,19 +355,59 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) if (is_polling(sdd)) return 0; + /* Requests DMA channels */ + sdd->rx_dma.ch = dma_request_chan(&sdd->pdev->dev, "rx"); + if (IS_ERR(sdd->rx_dma.ch)) { + dev_err(&sdd->pdev->dev, "Failed to get RX DMA channel\n"); + sdd->rx_dma.ch = NULL; + return 0; + } + + sdd->tx_dma.ch = dma_request_chan(&sdd->pdev->dev, "tx"); + if (IS_ERR(sdd->tx_dma.ch)) { + dev_err(&sdd->pdev->dev, "Failed to get TX DMA channel\n"); + dma_release_channel(sdd->rx_dma.ch); + sdd->tx_dma.ch = NULL; + sdd->rx_dma.ch = NULL; + return 0; + } + spi->dma_rx = sdd->rx_dma.ch; spi->dma_tx = sdd->tx_dma.ch; return 0; } +static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi) +{ + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); + + if (is_polling(sdd)) + return 0; + + /* Releases DMA channels if they are allocated */ + if (sdd->rx_dma.ch && sdd->tx_dma.ch) { + dma_release_channel(sdd->rx_dma.ch); + dma_release_channel(sdd->tx_dma.ch); + sdd->rx_dma.ch = 0; + sdd->tx_dma.ch = 0; + } + + return 0; +} + static bool s3c64xx_spi_can_dma(struct spi_master *master, struct spi_device *spi, struct spi_transfer *xfer) { struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); - return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; + if (sdd->rx_dma.ch && sdd->tx_dma.ch) { + return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; + } else { + return false; + } + } static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, @@ -577,6 +622,7 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) void __iomem *regs = sdd->regs; int ret; u32 val; + int div = sdd->port_conf->clk_div; /* Disable Clock */ if (!sdd->port_conf->clk_from_cmu) { @@ -619,19 +665,21 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) break; } + if ((sdd->cur_mode & SPI_LOOP) && sdd->port_conf->has_loopback) + val |= S3C64XX_SPI_MODE_SELF_LOOPBACK; + writel(val, regs + S3C64XX_SPI_MODE_CFG); if (sdd->port_conf->clk_from_cmu) { - /* The src_clk clock is divided internally by 2 */ - ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); + ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * div); if (ret) return ret; - sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2; + sdd->cur_speed = clk_get_rate(sdd->src_clk) / div; } else { /* Configure Clock */ val = readl(regs + S3C64XX_SPI_CLK_CFG); val &= ~S3C64XX_SPI_PSR_MASK; - val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) + val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / div - 1) & S3C64XX_SPI_PSR_MASK); writel(val, regs + S3C64XX_SPI_CLK_CFG); @@ -697,7 +745,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, sdd->rx_dma.ch && sdd->tx_dma.ch) { use_dma = 1; - } else if (is_polling(sdd) && xfer->len > fifo_len) { + } else if (xfer->len > fifo_len) { tx_buf = xfer->tx_buf; rx_buf = xfer->rx_buf; origin_len = xfer->len; @@ -825,6 +873,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) struct s3c64xx_spi_csinfo *cs = spi->controller_data; struct s3c64xx_spi_driver_data *sdd; int 
err; + int div; sdd = spi_master_get_devdata(spi->master); if (spi->dev.of_node) { @@ -843,22 +892,24 @@ static int s3c64xx_spi_setup(struct spi_device *spi) pm_runtime_get_sync(&sdd->pdev->dev); + div = sdd->port_conf->clk_div; + /* Check if we can provide the requested rate */ if (!sdd->port_conf->clk_from_cmu) { u32 psr, speed; /* Max possible */ - speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); + speed = clk_get_rate(sdd->src_clk) / div / (0 + 1); if (spi->max_speed_hz > speed) spi->max_speed_hz = speed; - psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; + psr = clk_get_rate(sdd->src_clk) / div / spi->max_speed_hz - 1; psr &= S3C64XX_SPI_PSR_MASK; if (psr == S3C64XX_SPI_PSR_MASK) psr--; - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + speed = clk_get_rate(sdd->src_clk) / div / (psr + 1); if (spi->max_speed_hz < speed) { if (psr+1 < S3C64XX_SPI_PSR_MASK) { psr++; @@ -868,7 +919,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) } } - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + speed = clk_get_rate(sdd->src_clk) / div / (psr + 1); if (spi->max_speed_hz >= speed) { spi->max_speed_hz = speed; } else { @@ -1098,6 +1149,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) master->setup = s3c64xx_spi_setup; master->cleanup = s3c64xx_spi_cleanup; master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer; + master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer; master->prepare_message = s3c64xx_spi_prepare_message; master->transfer_one = s3c64xx_spi_transfer_one; master->num_chipselect = sci->num_cs; @@ -1107,6 +1159,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) SPI_BPW_MASK(8); /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + if (sdd->port_conf->has_loopback) + master->mode_bits |= SPI_LOOP; master->auto_runtime_pm = true; if (!is_polling(sdd)) master->can_dma = s3c64xx_spi_can_dma; @@ -1167,22 +1221,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) } } - if (!is_polling(sdd)) { - /* Acquire DMA channels */ - sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx"); - if (IS_ERR(sdd->rx_dma.ch)) { - dev_err(&pdev->dev, "Failed to get RX DMA channel\n"); - ret = PTR_ERR(sdd->rx_dma.ch); - goto err_disable_io_clk; - } - sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx"); - if (IS_ERR(sdd->tx_dma.ch)) { - dev_err(&pdev->dev, "Failed to get TX DMA channel\n"); - ret = PTR_ERR(sdd->tx_dma.ch); - goto err_release_rx_dma; - } - } - pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_active(&pdev->dev); @@ -1228,12 +1266,6 @@ err_pm_put: pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); - if (!is_polling(sdd)) - dma_release_channel(sdd->tx_dma.ch); -err_release_rx_dma: - if (!is_polling(sdd)) - dma_release_channel(sdd->rx_dma.ch); -err_disable_io_clk: clk_disable_unprepare(sdd->ioclk); err_disable_src_clk: clk_disable_unprepare(sdd->src_clk); @@ -1369,6 +1401,7 @@ static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = { .fifo_lvl_mask = { 0x7f }, .rx_lvl_offset = 13, .tx_st_done = 21, + .clk_div = 2, .high_speed = true, }; @@ -1376,12 +1409,14 @@ static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7F }, .rx_lvl_offset = 13, .tx_st_done = 21, + .clk_div = 2, }; static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F }, .rx_lvl_offset = 15, 
.tx_st_done = 25, + .clk_div = 2, .high_speed = true, }; @@ -1389,6 +1424,7 @@ static const struct s3c64xx_spi_port_config exynos4_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, + .clk_div = 2, .high_speed = true, .clk_from_cmu = true, .quirks = S3C64XX_SPI_QUIRK_CS_AUTO, @@ -1398,6 +1434,7 @@ static const struct s3c64xx_spi_port_config exynos7_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff}, .rx_lvl_offset = 15, .tx_st_done = 25, + .clk_div = 2, .high_speed = true, .clk_from_cmu = true, .quirks = S3C64XX_SPI_QUIRK_CS_AUTO, @@ -1407,16 +1444,31 @@ static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff}, .rx_lvl_offset = 15, .tx_st_done = 25, + .clk_div = 2, + .high_speed = true, + .clk_from_cmu = true, + .clk_ioclk = true, + .quirks = S3C64XX_SPI_QUIRK_CS_AUTO, +}; + +static const struct s3c64xx_spi_port_config exynosautov9_spi_port_config = { + .fifo_lvl_mask = { 0x1ff, 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f}, + .rx_lvl_offset = 15, + .tx_st_done = 25, + .clk_div = 4, .high_speed = true, .clk_from_cmu = true, .clk_ioclk = true, + .has_loopback = true, .quirks = S3C64XX_SPI_QUIRK_CS_AUTO, }; -static struct s3c64xx_spi_port_config fsd_spi_port_config = { +static const struct s3c64xx_spi_port_config fsd_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f}, .rx_lvl_offset = 15, .tx_st_done = 25, + .clk_div = 2, .high_speed = true, .clk_from_cmu = true, .clk_ioclk = false, @@ -1453,6 +1505,9 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = { { .compatible = "samsung,exynos5433-spi", .data = (void *)&exynos5433_spi_port_config, }, + { .compatible = "samsung,exynosautov9-spi", + .data = (void *)&exynosautov9_spi_port_config, + }, { .compatible = "tesla,fsd-spi", .data = (void *)&fsd_spi_port_config, }, diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c index 45f304935332..3e72fad99adf 100644 --- a/drivers/spi/spi-sh.c +++ b/drivers/spi/spi-sh.c @@ -73,11 +73,8 @@ struct spi_sh_data { void __iomem *addr; int irq; struct spi_master *master; - struct list_head queue; - struct work_struct ws; unsigned long cr1; wait_queue_head_t wait; - spinlock_t lock; int width; }; @@ -271,47 +268,39 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg, return 0; } -static void spi_sh_work(struct work_struct *work) +static int spi_sh_transfer_one_message(struct spi_controller *ctlr, + struct spi_message *mesg) { - struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws); - struct spi_message *mesg; + struct spi_sh_data *ss = spi_controller_get_devdata(ctlr); struct spi_transfer *t; - unsigned long flags; int ret; pr_debug("%s: enter\n", __func__); - spin_lock_irqsave(&ss->lock, flags); - while (!list_empty(&ss->queue)) { - mesg = list_entry(ss->queue.next, struct spi_message, queue); - list_del_init(&mesg->queue); - - spin_unlock_irqrestore(&ss->lock, flags); - list_for_each_entry(t, &mesg->transfers, transfer_list) { - pr_debug("tx_buf = %p, rx_buf = %p\n", - t->tx_buf, t->rx_buf); - pr_debug("len = %d, delay.value = %d\n", - t->len, t->delay.value); - - if (t->tx_buf) { - ret = spi_sh_send(ss, mesg, t); - if (ret < 0) - goto error; - } - if (t->rx_buf) { - ret = spi_sh_receive(ss, mesg, t); - if (ret < 0) - goto error; - } - mesg->actual_length += t->len; - } - spin_lock_irqsave(&ss->lock, flags); + spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); - mesg->status 
= 0; - if (mesg->complete) - mesg->complete(mesg->context); + list_for_each_entry(t, &mesg->transfers, transfer_list) { + pr_debug("tx_buf = %p, rx_buf = %p\n", + t->tx_buf, t->rx_buf); + pr_debug("len = %d, delay.value = %d\n", + t->len, t->delay.value); + + if (t->tx_buf) { + ret = spi_sh_send(ss, mesg, t); + if (ret < 0) + goto error; + } + if (t->rx_buf) { + ret = spi_sh_receive(ss, mesg, t); + if (ret < 0) + goto error; + } + mesg->actual_length += t->len; } + mesg->status = 0; + spi_finalize_current_message(ctlr); + clear_fifo(ss); spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1); udelay(100); @@ -321,12 +310,11 @@ static void spi_sh_work(struct work_struct *work) clear_fifo(ss); - spin_unlock_irqrestore(&ss->lock, flags); - - return; + return 0; error: mesg->status = ret; + spi_finalize_current_message(ctlr); if (mesg->complete) mesg->complete(mesg->context); @@ -334,6 +322,7 @@ static void spi_sh_work(struct work_struct *work) SPI_SH_CR1); clear_fifo(ss); + return ret; } static int spi_sh_setup(struct spi_device *spi) @@ -355,29 +344,6 @@ static int spi_sh_setup(struct spi_device *spi) return 0; } -static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg) -{ - struct spi_sh_data *ss = spi_master_get_devdata(spi->master); - unsigned long flags; - - pr_debug("%s: enter\n", __func__); - pr_debug("\tmode = %02x\n", spi->mode); - - spin_lock_irqsave(&ss->lock, flags); - - mesg->actual_length = 0; - mesg->status = -EINPROGRESS; - - spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); - - list_add_tail(&mesg->queue, &ss->queue); - schedule_work(&ss->ws); - - spin_unlock_irqrestore(&ss->lock, flags); - - return 0; -} - static void spi_sh_cleanup(struct spi_device *spi) { struct spi_sh_data *ss = spi_master_get_devdata(spi->master); @@ -416,7 +382,6 @@ static int spi_sh_remove(struct platform_device *pdev) struct spi_sh_data *ss = platform_get_drvdata(pdev); spi_unregister_master(ss->master); - flush_work(&ss->ws); free_irq(ss->irq, ss); return 0; @@ -467,9 +432,6 @@ static int spi_sh_probe(struct platform_device *pdev) dev_err(&pdev->dev, "ioremap error.\n"); return -ENOMEM; } - INIT_LIST_HEAD(&ss->queue); - spin_lock_init(&ss->lock); - INIT_WORK(&ss->ws, spi_sh_work); init_waitqueue_head(&ss->wait); ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss); @@ -481,7 +443,7 @@ static int spi_sh_probe(struct platform_device *pdev) master->num_chipselect = 2; master->bus_num = pdev->id; master->setup = spi_sh_setup; - master->transfer = spi_sh_transfer; + master->transfer_one_message = spi_sh_transfer_one_message; master->cleanup = spi_sh_cleanup; ret = spi_register_master(master); diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c index f7c1e20432e0..e29e85cee88a 100644 --- a/drivers/spi/spi-sifive.c +++ b/drivers/spi/spi-sifive.c @@ -427,6 +427,44 @@ static int sifive_spi_remove(struct platform_device *pdev) return 0; } +static int sifive_spi_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct sifive_spi *spi = spi_master_get_devdata(master); + int ret; + + ret = spi_master_suspend(master); + if (ret) + return ret; + + /* Disable all the interrupts just in case */ + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + + clk_disable_unprepare(spi->clk); + + return ret; +} + +static int sifive_spi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct sifive_spi *spi = spi_master_get_devdata(master); + int ret; + + ret = clk_prepare_enable(spi->clk); + if (ret) + return ret; + ret = 
spi_master_resume(master); + if (ret) + clk_disable_unprepare(spi->clk); + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(sifive_spi_pm_ops, + sifive_spi_suspend, sifive_spi_resume); + + static const struct of_device_id sifive_spi_of_match[] = { { .compatible = "sifive,spi0", }, {} @@ -438,6 +476,7 @@ static struct platform_driver sifive_spi_driver = { .remove = sifive_spi_remove, .driver = { .name = SIFIVE_SPI_DRIVER_NAME, + .pm = &sifive_spi_pm_ops, .of_match_table = sifive_spi_of_match, }, }; diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index c0239e405c39..f3fe92300639 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -299,8 +299,7 @@ static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi) STM32_BUSY_TIMEOUT_US); } -static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi, - const struct spi_mem_op *op) +static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi) { u32 cr, sr; int err = 0; @@ -331,8 +330,7 @@ out: return err; } -static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi, - const struct spi_mem_op *op) +static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi) { u32 cr; @@ -349,7 +347,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi, return 0; } -static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth) +static int stm32_qspi_get_mode(u8 buswidth) { if (buswidth == 4) return CCR_BUSWIDTH_4; @@ -382,11 +380,11 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op) ccr = qspi->fmode; ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode); ccr |= FIELD_PREP(CCR_IMODE_MASK, - stm32_qspi_get_mode(qspi, op->cmd.buswidth)); + stm32_qspi_get_mode(op->cmd.buswidth)); if (op->addr.nbytes) { ccr |= FIELD_PREP(CCR_ADMODE_MASK, - stm32_qspi_get_mode(qspi, op->addr.buswidth)); + stm32_qspi_get_mode(op->addr.buswidth)); ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1); } @@ -396,7 +394,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op) if (op->data.nbytes) { ccr |= FIELD_PREP(CCR_DMODE_MASK, - stm32_qspi_get_mode(qspi, op->data.buswidth)); + stm32_qspi_get_mode(op->data.buswidth)); } writel_relaxed(ccr, qspi->io_base + QSPI_CCR); @@ -405,7 +403,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op) writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR); if (qspi->fmode == CCR_FMODE_APM) - err_poll_status = stm32_qspi_wait_poll_status(qspi, op); + err_poll_status = stm32_qspi_wait_poll_status(qspi); err = stm32_qspi_tx(qspi, op); @@ -420,7 +418,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op) goto abort; /* wait end of tx in indirect mode */ - err = stm32_qspi_wait_cmd(qspi, op); + err = stm32_qspi_wait_cmd(qspi); if (err) goto abort; diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c index ea706d9629cb..47cbe73137c2 100644 --- a/drivers/spi/spi-synquacer.c +++ b/drivers/spi/spi-synquacer.c @@ -783,6 +783,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev) ret = synquacer_spi_enable(master); if (ret) { + clk_disable_unprepare(sspi->clk); dev_err(dev, "failed to enable spi (%d)\n", ret); return ret; } diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 38360434d6e9..148043d0c2b8 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1136,7 +1136,7 @@ exit_free_master: static int tegra_slink_remove(struct platform_device *pdev) { - struct spi_master *master = 
platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct tegra_slink_data *tspi = spi_master_get_devdata(master); spi_unregister_master(master); @@ -1151,6 +1151,7 @@ static int tegra_slink_remove(struct platform_device *pdev) if (tspi->rx_dma_chan) tegra_slink_deinit_dma_param(tspi, true); + spi_master_put(master); return 0; } diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 66f647f32876..c89592b21ffc 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -37,6 +37,16 @@ #define QSPI_RX_EN BIT(12) #define QSPI_CS_SW_VAL BIT(20) #define QSPI_CS_SW_HW BIT(21) + +#define QSPI_CS_POL_INACTIVE(n) (1 << (22 + (n))) +#define QSPI_CS_POL_INACTIVE_MASK (0xF << 22) +#define QSPI_CS_SEL_0 (0 << 26) +#define QSPI_CS_SEL_1 (1 << 26) +#define QSPI_CS_SEL_2 (2 << 26) +#define QSPI_CS_SEL_3 (3 << 26) +#define QSPI_CS_SEL_MASK (3 << 26) +#define QSPI_CS_SEL(x) (((x) & 0x3) << 26) + #define QSPI_CONTROL_MODE_0 (0 << 28) #define QSPI_CONTROL_MODE_3 (3 << 28) #define QSPI_CONTROL_MODE_MASK (3 << 28) @@ -154,6 +164,7 @@ struct tegra_qspi_soc_data { bool has_dma; bool cmb_xfer_capable; + unsigned int cs_count; }; struct tegra_qspi_client_data { @@ -812,6 +823,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran tegra_qspi_mask_clear_irq(tqspi); command1 = tqspi->def_command1_reg; + command1 |= QSPI_CS_SEL(spi->chip_select); command1 |= QSPI_BIT_LENGTH(bits_per_word - 1); command1 &= ~QSPI_CONTROL_MODE_MASK; @@ -941,10 +953,11 @@ static int tegra_qspi_setup(struct spi_device *spi) /* keep default cs state to inactive */ val = tqspi->def_command1_reg; + val |= QSPI_CS_SEL(spi->chip_select); if (spi->mode & SPI_CS_HIGH) - val &= ~QSPI_CS_SW_VAL; + val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select); else - val |= QSPI_CS_SW_VAL; + val |= QSPI_CS_POL_INACTIVE(spi->chip_select); tqspi->def_command1_reg = val; tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1); @@ -1425,16 +1438,25 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data) static struct tegra_qspi_soc_data tegra210_qspi_soc_data = { .has_dma = true, .cmb_xfer_capable = false, + .cs_count = 1, }; static struct tegra_qspi_soc_data tegra186_qspi_soc_data = { .has_dma = true, .cmb_xfer_capable = true, + .cs_count = 1, }; static struct tegra_qspi_soc_data tegra234_qspi_soc_data = { .has_dma = false, .cmb_xfer_capable = true, + .cs_count = 1, +}; + +static struct tegra_qspi_soc_data tegra241_qspi_soc_data = { + .has_dma = false, + .cmb_xfer_capable = true, + .cs_count = 4, }; static const struct of_device_id tegra_qspi_of_match[] = { @@ -1450,6 +1472,9 @@ static const struct of_device_id tegra_qspi_of_match[] = { }, { .compatible = "nvidia,tegra234-qspi", .data = &tegra234_qspi_soc_data, + }, { + .compatible = "nvidia,tegra241-qspi", + .data = &tegra241_qspi_soc_data, }, {} }; @@ -1467,6 +1492,9 @@ static const struct acpi_device_id tegra_qspi_acpi_match[] = { }, { .id = "NVDA1413", .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data, + }, { + .id = "NVDA1513", + .driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data, }, {} }; @@ -1506,6 +1534,7 @@ static int tegra_qspi_probe(struct platform_device *pdev) spin_lock_init(&tqspi->lock); tqspi->soc_data = device_get_match_data(&pdev->dev); + master->num_chipselect = tqspi->soc_data->cs_count; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); tqspi->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(tqspi->base)) diff --git 
a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index b5b65d882d7a..60086869bcae 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -57,7 +57,6 @@ struct ti_qspi { void *rx_bb_addr; struct dma_chan *rx_chan; - u32 spi_max_frequency; u32 cmd; u32 dc; @@ -140,37 +139,19 @@ static inline void ti_qspi_write(struct ti_qspi *qspi, static int ti_qspi_setup(struct spi_device *spi) { struct ti_qspi *qspi = spi_master_get_devdata(spi->master); - struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; - int clk_div = 0, ret; - u32 clk_ctrl_reg, clk_rate, clk_mask; + int ret; if (spi->master->busy) { dev_dbg(qspi->dev, "master busy doing other transfers\n"); return -EBUSY; } - if (!qspi->spi_max_frequency) { + if (!qspi->master->max_speed_hz) { dev_err(qspi->dev, "spi max frequency not defined\n"); return -EINVAL; } - clk_rate = clk_get_rate(qspi->fclk); - - clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1; - - if (clk_div < 0) { - dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n"); - return -EINVAL; - } - - if (clk_div > QSPI_CLK_DIV_MAX) { - dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n", - QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1); - return -EINVAL; - } - - dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", - qspi->spi_max_frequency, clk_div); + spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz); ret = pm_runtime_resume_and_get(qspi->dev); if (ret < 0) { @@ -178,18 +159,6 @@ static int ti_qspi_setup(struct spi_device *spi) return ret; } - clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG); - - clk_ctrl_reg &= ~QSPI_CLK_EN; - - /* disable SCLK */ - ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG); - - /* enable SCLK */ - clk_mask = QSPI_CLK_EN | clk_div; - ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG); - ctx_reg->clkctrl = clk_mask; - pm_runtime_mark_last_busy(qspi->dev); ret = pm_runtime_put_autosuspend(qspi->dev); if (ret < 0) { @@ -200,6 +169,37 @@ static int ti_qspi_setup(struct spi_device *spi) return 0; } +static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz) +{ + struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; + int clk_div; + u32 clk_ctrl_reg, clk_rate, clk_ctrl_new; + + clk_rate = clk_get_rate(qspi->fclk); + clk_div = DIV_ROUND_UP(clk_rate, speed_hz) - 1; + clk_div = clamp(clk_div, 0, QSPI_CLK_DIV_MAX); + dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", speed_hz, clk_div); + + pm_runtime_resume_and_get(qspi->dev); + + clk_ctrl_new = QSPI_CLK_EN | clk_div; + if (ctx_reg->clkctrl != clk_ctrl_new) { + clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG); + + clk_ctrl_reg &= ~QSPI_CLK_EN; + + /* disable SCLK */ + ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG); + + /* enable SCLK */ + ti_qspi_write(qspi, clk_ctrl_new, QSPI_SPI_CLOCK_CNTRL_REG); + ctx_reg->clkctrl = clk_ctrl_new; + } + + pm_runtime_mark_last_busy(qspi->dev); + pm_runtime_put_autosuspend(qspi->dev); +} + static void ti_qspi_restore_ctx(struct ti_qspi *qspi) { struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; @@ -623,8 +623,10 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, mutex_lock(&qspi->list_lock); - if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) + if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) { + ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz); ti_qspi_enable_memory_map(mem->spi); + } ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth, op->addr.nbytes, op->dummy.nbytes); @@ -701,6 +703,7 @@ static int 
ti_qspi_start_transfer_one(struct spi_master *master, wlen = t->bits_per_word >> 3; transfer_len_words = min(t->len / wlen, frame_len_words); + ti_qspi_setup_clk(qspi, t->speed_hz); ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen); if (ret) { dev_dbg(qspi->dev, "transfer message failed\n"); @@ -851,7 +854,7 @@ static int ti_qspi_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (!of_property_read_u32(np, "spi-max-frequency", &max_freq)) - qspi->spi_max_frequency = max_freq; + master->max_speed_hz = max_freq; dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index dfaa1d79a78b..cbb60198a7f0 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -455,35 +455,10 @@ static void pch_spi_reset(struct spi_master *master) static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) { - - struct spi_transfer *transfer; struct pch_spi_data *data = spi_master_get_devdata(pspi->master); int retval; unsigned long flags; - spin_lock_irqsave(&data->lock, flags); - /* validate Tx/Rx buffers and Transfer length */ - list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { - if (!transfer->tx_buf && !transfer->rx_buf) { - dev_err(&pspi->dev, - "%s Tx and Rx buffer NULL\n", __func__); - retval = -EINVAL; - goto err_return_spinlock; - } - - if (!transfer->len) { - dev_err(&pspi->dev, "%s Transfer length invalid\n", - __func__); - retval = -EINVAL; - goto err_return_spinlock; - } - - dev_dbg(&pspi->dev, - "%s Tx/Rx buffer valid. Transfer length valid\n", - __func__); - } - spin_unlock_irqrestore(&data->lock, flags); - /* We won't process any messages if we have been asked to terminate */ if (data->status == STATUS_EXITING) { dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); @@ -518,10 +493,6 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) err_out: dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); return retval; -err_return_spinlock: - dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); - spin_unlock_irqrestore(&data->lock, flags); - return retval; } static inline void pch_spi_select_chip(struct pch_spi_data *data, @@ -1365,6 +1336,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->max_speed_hz = PCH_MAX_BAUDRATE; + master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; data->board_dat = board_dat; data->plat_dev = plat_dev; diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 2b5afae8ff7f..c760aac070e5 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -134,6 +134,8 @@ #define GQSPI_DMA_UNALIGN 0x3 #define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */ +#define GQSPI_MAX_NUM_CS 2 /* Maximum number of chip selects */ + #define SPI_AUTOSUSPEND_TIMEOUT 3000 enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA}; @@ -363,8 +365,13 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high) genfifoentry |= GQSPI_GENFIFO_MODE_SPI; if (!is_high) { - xqspi->genfifobus = GQSPI_GENFIFO_BUS_LOWER; - xqspi->genfifocs = GQSPI_GENFIFO_CS_LOWER; + if (!qspi->chip_select) { + xqspi->genfifobus = GQSPI_GENFIFO_BUS_LOWER; + xqspi->genfifocs = GQSPI_GENFIFO_CS_LOWER; + } else { + xqspi->genfifobus = GQSPI_GENFIFO_BUS_UPPER; + xqspi->genfifocs = GQSPI_GENFIFO_CS_UPPER; + } 
genfifoentry |= xqspi->genfifobus; genfifoentry |= xqspi->genfifocs; genfifoentry |= GQSPI_GENFIFO_CS_SETUP; @@ -1099,6 +1106,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) struct zynqmp_qspi *xqspi; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + u32 num_cs; ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi)); if (!ctlr) @@ -1176,8 +1184,19 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) if (ret) goto clk_dis_all; + ret = of_property_read_u32(np, "num-cs", &num_cs); + if (ret < 0) { + ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS; + } else if (num_cs > GQSPI_MAX_NUM_CS) { + ret = -EINVAL; + dev_err(&pdev->dev, "only %d chip selects are available\n", + GQSPI_MAX_NUM_CS); + goto clk_dis_all; + } else { + ctlr->num_chipselect = num_cs; + } + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); - ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS; ctlr->mem_ops = &zynqmp_qspi_mem_ops; ctlr->setup = zynqmp_qspi_setup_op; ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ea09d1b42bf6..1c14d682ffed 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -33,6 +33,7 @@ #include <linux/idr.h> #include <linux/platform_data/x86/apple.h> #include <linux/ptp_clock_kernel.h> +#include <linux/percpu.h> #define CREATE_TRACE_POINTS #include <trace/events/spi.h> @@ -49,6 +50,7 @@ static void spidev_release(struct device *dev) spi_controller_put(spi->controller); kfree(spi->driver_override); + free_percpu(spi->pcpu_statistics); kfree(spi); } @@ -93,6 +95,47 @@ static ssize_t driver_override_show(struct device *dev, } static DEVICE_ATTR_RW(driver_override); +static struct spi_statistics *spi_alloc_pcpu_stats(struct device *dev) +{ + struct spi_statistics __percpu *pcpu_stats; + + if (dev) + pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics); + else + pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL); + + if (pcpu_stats) { + int cpu; + + for_each_possible_cpu(cpu) { + struct spi_statistics *stat; + + stat = per_cpu_ptr(pcpu_stats, cpu); + u64_stats_init(&stat->syncp); + } + } + return pcpu_stats; +} + +#define spi_pcpu_stats_totalize(ret, in, field) \ +do { \ + int i; \ + ret = 0; \ + for_each_possible_cpu(i) { \ + const struct spi_statistics *pcpu_stats; \ + u64 inc; \ + unsigned int start; \ + pcpu_stats = per_cpu_ptr(in, i); \ + do { \ + start = u64_stats_fetch_begin_irq( \ + &pcpu_stats->syncp); \ + inc = u64_stats_read(&pcpu_stats->field); \ + } while (u64_stats_fetch_retry_irq( \ + &pcpu_stats->syncp, start)); \ + ret += inc; \ + } \ +} while (0) + #define SPI_STATISTICS_ATTRS(field, file) \ static ssize_t spi_controller_##field##_show(struct device *dev, \ struct device_attribute *attr, \ @@ -100,7 +143,7 @@ static ssize_t spi_controller_##field##_show(struct device *dev, \ { \ struct spi_controller *ctlr = container_of(dev, \ struct spi_controller, dev); \ - return spi_statistics_##field##_show(&ctlr->statistics, buf); \ + return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \ } \ static struct device_attribute dev_attr_spi_controller_##field = { \ .attr = { .name = file, .mode = 0444 }, \ @@ -111,47 +154,46 @@ static ssize_t spi_device_##field##_show(struct device *dev, \ char *buf) \ { \ struct spi_device *spi = to_spi_device(dev); \ - return spi_statistics_##field##_show(&spi->statistics, buf); \ + return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \ } \ static struct device_attribute dev_attr_spi_device_##field = { \ .attr = { .name = file, .mode = 
0444 }, \ .show = spi_device_##field##_show, \ } -#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ +#define SPI_STATISTICS_SHOW_NAME(name, file, field) \ static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ char *buf) \ { \ - unsigned long flags; \ ssize_t len; \ - spin_lock_irqsave(&stat->lock, flags); \ - len = sysfs_emit(buf, format_string "\n", stat->field); \ - spin_unlock_irqrestore(&stat->lock, flags); \ + u64 val; \ + spi_pcpu_stats_totalize(val, stat, field); \ + len = sysfs_emit(buf, "%llu\n", val); \ return len; \ } \ SPI_STATISTICS_ATTRS(name, file) -#define SPI_STATISTICS_SHOW(field, format_string) \ +#define SPI_STATISTICS_SHOW(field) \ SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ - field, format_string) + field) -SPI_STATISTICS_SHOW(messages, "%lu"); -SPI_STATISTICS_SHOW(transfers, "%lu"); -SPI_STATISTICS_SHOW(errors, "%lu"); -SPI_STATISTICS_SHOW(timedout, "%lu"); +SPI_STATISTICS_SHOW(messages); +SPI_STATISTICS_SHOW(transfers); +SPI_STATISTICS_SHOW(errors); +SPI_STATISTICS_SHOW(timedout); -SPI_STATISTICS_SHOW(spi_sync, "%lu"); -SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); -SPI_STATISTICS_SHOW(spi_async, "%lu"); +SPI_STATISTICS_SHOW(spi_sync); +SPI_STATISTICS_SHOW(spi_sync_immediate); +SPI_STATISTICS_SHOW(spi_async); -SPI_STATISTICS_SHOW(bytes, "%llu"); -SPI_STATISTICS_SHOW(bytes_rx, "%llu"); -SPI_STATISTICS_SHOW(bytes_tx, "%llu"); +SPI_STATISTICS_SHOW(bytes); +SPI_STATISTICS_SHOW(bytes_rx); +SPI_STATISTICS_SHOW(bytes_tx); #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ "transfer_bytes_histo_" number, \ - transfer_bytes_histo[index], "%lu") + transfer_bytes_histo[index]) SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); @@ -170,7 +212,7 @@ SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); -SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); +SPI_STATISTICS_SHOW(transfers_split_maxsize); static struct attribute *spi_dev_attrs[] = { &dev_attr_modalias.attr, @@ -267,30 +309,33 @@ static const struct attribute_group *spi_master_groups[] = { NULL, }; -static void spi_statistics_add_transfer_stats(struct spi_statistics *stats, +static void spi_statistics_add_transfer_stats(struct spi_statistics *pcpu_stats, struct spi_transfer *xfer, struct spi_controller *ctlr) { - unsigned long flags; int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; + struct spi_statistics *stats; if (l2len < 0) l2len = 0; - spin_lock_irqsave(&stats->lock, flags); + get_cpu(); + stats = this_cpu_ptr(pcpu_stats); + u64_stats_update_begin(&stats->syncp); - stats->transfers++; - stats->transfer_bytes_histo[l2len]++; + u64_stats_inc(&stats->transfers); + u64_stats_inc(&stats->transfer_bytes_histo[l2len]); - stats->bytes += xfer->len; + u64_stats_add(&stats->bytes, xfer->len); if ((xfer->tx_buf) && (xfer->tx_buf != ctlr->dummy_tx)) - stats->bytes_tx += xfer->len; + u64_stats_add(&stats->bytes_tx, xfer->len); if ((xfer->rx_buf) && (xfer->rx_buf != ctlr->dummy_rx)) - stats->bytes_rx += xfer->len; + u64_stats_add(&stats->bytes_rx, xfer->len); - spin_unlock_irqrestore(&stats->lock, flags); + u64_stats_update_end(&stats->syncp); + put_cpu(); } /* @@ -519,14 +564,19 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr) return NULL; } + spi->pcpu_statistics = 
spi_alloc_pcpu_stats(NULL); + if (!spi->pcpu_statistics) { + kfree(spi); + spi_controller_put(ctlr); + return NULL; + } + spi->master = spi->controller = ctlr; spi->dev.parent = &ctlr->dev; spi->dev.bus = &spi_bus_type; spi->dev.release = spidev_release; spi->mode = ctlr->buswidth_override_bits; - spin_lock_init(&spi->statistics.lock); - device_initialize(&spi->dev); return spi; } @@ -1225,8 +1275,8 @@ static int spi_transfer_wait(struct spi_controller *ctlr, struct spi_message *msg, struct spi_transfer *xfer) { - struct spi_statistics *statm = &ctlr->statistics; - struct spi_statistics *stats = &msg->spi->statistics; + struct spi_statistics *statm = ctlr->pcpu_statistics; + struct spi_statistics *stats = msg->spi->pcpu_statistics; u32 speed_hz = xfer->speed_hz; unsigned long long ms; @@ -1304,7 +1354,7 @@ int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer) /* Nothing to do here */ break; case SPI_DELAY_UNIT_SCK: - /* clock cycles need to be obtained from spi_transfer */ + /* Clock cycles need to be obtained from spi_transfer */ if (!xfer) return -EINVAL; /* @@ -1353,7 +1403,7 @@ static void _spi_transfer_cs_change_delay(struct spi_message *msg, u32 unit = xfer->cs_change_delay.unit; int ret; - /* return early on "fast" mode - for everything but USECS */ + /* Return early on "fast" mode - for everything but USECS */ if (!delay) { if (unit == SPI_DELAY_UNIT_USECS) _spi_transfer_delay_ns(default_delay_ns); @@ -1382,8 +1432,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, struct spi_transfer *xfer; bool keep_cs = false; int ret = 0; - struct spi_statistics *statm = &ctlr->statistics; - struct spi_statistics *stats = &msg->spi->statistics; + struct spi_statistics *statm = ctlr->pcpu_statistics; + struct spi_statistics *stats = msg->spi->pcpu_statistics; spi_set_cs(msg->spi, true, false); @@ -1499,6 +1549,103 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr) } } +static int __spi_pump_transfer_message(struct spi_controller *ctlr, + struct spi_message *msg, bool was_busy) +{ + struct spi_transfer *xfer; + int ret; + + if (!was_busy && ctlr->auto_runtime_pm) { + ret = pm_runtime_get_sync(ctlr->dev.parent); + if (ret < 0) { + pm_runtime_put_noidle(ctlr->dev.parent); + dev_err(&ctlr->dev, "Failed to power device: %d\n", + ret); + return ret; + } + } + + if (!was_busy) + trace_spi_controller_busy(ctlr); + + if (!was_busy && ctlr->prepare_transfer_hardware) { + ret = ctlr->prepare_transfer_hardware(ctlr); + if (ret) { + dev_err(&ctlr->dev, + "failed to prepare transfer hardware: %d\n", + ret); + + if (ctlr->auto_runtime_pm) + pm_runtime_put(ctlr->dev.parent); + + msg->status = ret; + spi_finalize_current_message(ctlr); + + return ret; + } + } + + trace_spi_message_start(msg); + + if (ctlr->prepare_message) { + ret = ctlr->prepare_message(ctlr, msg); + if (ret) { + dev_err(&ctlr->dev, "failed to prepare message: %d\n", + ret); + msg->status = ret; + spi_finalize_current_message(ctlr); + return ret; + } + msg->prepared = true; + } + + ret = spi_map_msg(ctlr, msg); + if (ret) { + msg->status = ret; + spi_finalize_current_message(ctlr); + return ret; + } + + if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + xfer->ptp_sts_word_pre = 0; + ptp_read_system_prets(xfer->ptp_sts); + } + } + + /* + * Drivers implementation of transfer_one_message() must arrange for + * spi_finalize_current_message() to get called. Most drivers will do + * this in the calling context, but some don't. 
For those cases, a + * completion is used to guarantee that this function does not return + * until spi_finalize_current_message() is done accessing + * ctlr->cur_msg. + * Use of the following two flags enable to opportunistically skip the + * use of the completion since its use involves expensive spin locks. + * In case of a race with the context that calls + * spi_finalize_current_message() the completion will always be used, + * due to strict ordering of these flags using barriers. + */ + WRITE_ONCE(ctlr->cur_msg_incomplete, true); + WRITE_ONCE(ctlr->cur_msg_need_completion, false); + reinit_completion(&ctlr->cur_msg_completion); + smp_wmb(); /* Make these available to spi_finalize_current_message() */ + + ret = ctlr->transfer_one_message(ctlr, msg); + if (ret) { + dev_err(&ctlr->dev, + "failed to transfer one message from queue\n"); + return ret; + } + + WRITE_ONCE(ctlr->cur_msg_need_completion, true); + smp_mb(); /* See spi_finalize_current_message()... */ + if (READ_ONCE(ctlr->cur_msg_incomplete)) + wait_for_completion(&ctlr->cur_msg_completion); + + return 0; +} + /** * __spi_pump_messages - function which processes spi message queue * @ctlr: controller to process queue for @@ -1514,34 +1661,25 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr) */ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) { - struct spi_transfer *xfer; struct spi_message *msg; bool was_busy = false; unsigned long flags; int ret; + /* Take the IO mutex */ + mutex_lock(&ctlr->io_mutex); + /* Lock queue */ spin_lock_irqsave(&ctlr->queue_lock, flags); /* Make sure we are not already running a message */ - if (ctlr->cur_msg) { - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; - } - - /* If another context is idling the device then defer */ - if (ctlr->idling) { - kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; - } + if (ctlr->cur_msg) + goto out_unlock; /* Check if the queue is idle */ if (list_empty(&ctlr->queue) || !ctlr->running) { - if (!ctlr->busy) { - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; - } + if (!ctlr->busy) + goto out_unlock; /* Defer any non-atomic teardown to the thread */ if (!in_kthread) { @@ -1549,17 +1687,16 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) !ctlr->unprepare_transfer_hardware) { spi_idle_runtime_pm(ctlr); ctlr->busy = false; + ctlr->queue_empty = true; trace_spi_controller_idle(ctlr); } else { kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); } - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; + goto out_unlock; } ctlr->busy = false; - ctlr->idling = true; spin_unlock_irqrestore(&ctlr->queue_lock, flags); kfree(ctlr->dummy_rx); @@ -1574,9 +1711,8 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) trace_spi_controller_idle(ctlr); spin_lock_irqsave(&ctlr->queue_lock, flags); - ctlr->idling = false; - spin_unlock_irqrestore(&ctlr->queue_lock, flags); - return; + ctlr->queue_empty = true; + goto out_unlock; } /* Extract head of queue */ @@ -1590,81 +1726,23 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) ctlr->busy = true; spin_unlock_irqrestore(&ctlr->queue_lock, flags); - mutex_lock(&ctlr->io_mutex); - - if (!was_busy && ctlr->auto_runtime_pm) { - ret = pm_runtime_resume_and_get(ctlr->dev.parent); - if (ret < 0) { - dev_err(&ctlr->dev, "Failed to power device: %d\n", - ret); - mutex_unlock(&ctlr->io_mutex); - return; - } - } - - 
if (!was_busy) - trace_spi_controller_busy(ctlr); - - if (!was_busy && ctlr->prepare_transfer_hardware) { - ret = ctlr->prepare_transfer_hardware(ctlr); - if (ret) { - dev_err(&ctlr->dev, - "failed to prepare transfer hardware: %d\n", - ret); - - if (ctlr->auto_runtime_pm) - pm_runtime_put(ctlr->dev.parent); - - msg->status = ret; - spi_finalize_current_message(ctlr); - - mutex_unlock(&ctlr->io_mutex); - return; - } - } - - trace_spi_message_start(msg); - - if (ctlr->prepare_message) { - ret = ctlr->prepare_message(ctlr, msg); - if (ret) { - dev_err(&ctlr->dev, "failed to prepare message: %d\n", - ret); - msg->status = ret; - spi_finalize_current_message(ctlr); - goto out; - } - ctlr->cur_msg_prepared = true; - } - - ret = spi_map_msg(ctlr, msg); - if (ret) { - msg->status = ret; - spi_finalize_current_message(ctlr); - goto out; - } - - if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - xfer->ptp_sts_word_pre = 0; - ptp_read_system_prets(xfer->ptp_sts); - } - } + ret = __spi_pump_transfer_message(ctlr, msg, was_busy); + if (!ret) + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); - ret = ctlr->transfer_one_message(ctlr, msg); - if (ret) { - dev_err(&ctlr->dev, - "failed to transfer one message from queue: %d\n", - ret); - goto out; - } + ctlr->cur_msg = NULL; + ctlr->fallback = false; -out: mutex_unlock(&ctlr->io_mutex); /* Prod the scheduler in case transfer_one() was busy waiting */ if (!ret) cond_resched(); + return; + +out_unlock: + spin_unlock_irqrestore(&ctlr->queue_lock, flags); + mutex_unlock(&ctlr->io_mutex); } /** @@ -1789,6 +1867,7 @@ static int spi_init_queue(struct spi_controller *ctlr) { ctlr->running = false; ctlr->busy = false; + ctlr->queue_empty = true; ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev)); if (IS_ERR(ctlr->kworker)) { @@ -1826,7 +1905,7 @@ struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) struct spi_message *next; unsigned long flags; - /* get a pointer to the next message, if any */ + /* Get a pointer to the next message, if any */ spin_lock_irqsave(&ctlr->queue_lock, flags); next = list_first_entry_or_null(&ctlr->queue, struct spi_message, queue); @@ -1847,12 +1926,9 @@ void spi_finalize_current_message(struct spi_controller *ctlr) { struct spi_transfer *xfer; struct spi_message *mesg; - unsigned long flags; int ret; - spin_lock_irqsave(&ctlr->queue_lock, flags); mesg = ctlr->cur_msg; - spin_unlock_irqrestore(&ctlr->queue_lock, flags); if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { list_for_each_entry(xfer, &mesg->transfers, transfer_list) { @@ -1876,7 +1952,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr) */ spi_res_release(ctlr, mesg); - if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { + if (mesg->prepared && ctlr->unprepare_message) { ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { dev_err(&ctlr->dev, "failed to unprepare message: %d\n", @@ -1884,12 +1960,12 @@ void spi_finalize_current_message(struct spi_controller *ctlr) } } - spin_lock_irqsave(&ctlr->queue_lock, flags); - ctlr->cur_msg = NULL; - ctlr->cur_msg_prepared = false; - ctlr->fallback = false; - kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); - spin_unlock_irqrestore(&ctlr->queue_lock, flags); + mesg->prepared = false; + + WRITE_ONCE(ctlr->cur_msg_incomplete, false); + smp_mb(); /* See __spi_pump_transfer_message()... 
*/ + if (READ_ONCE(ctlr->cur_msg_need_completion)) + complete(&ctlr->cur_msg_completion); trace_spi_message_done(mesg); @@ -1992,6 +2068,7 @@ static int __spi_queued_transfer(struct spi_device *spi, msg->status = -EINPROGRESS; list_add_tail(&msg->queue, &ctlr->queue); + ctlr->queue_empty = false; if (!ctlr->busy && need_pump) kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); @@ -2376,9 +2453,6 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) if (lookup->index != -1 && lookup->n++ != lookup->index) return 1; - if (lookup->index == -1 && !ctlr) - return -ENODEV; - status = acpi_get_handle(NULL, sb->resource_source.string_ptr, &parent_handle); @@ -2398,7 +2472,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) ctlr = acpi_spi_find_controller_by_adev(adev); if (!ctlr) - return -ENODEV; + return -EPROBE_DEFER; lookup->ctlr = ctlr; } @@ -2481,8 +2555,8 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, acpi_dev_free_resource_list(&resource_list); if (ret < 0) - /* found SPI in _CRS but it points to another controller */ - return ERR_PTR(-ENODEV); + /* Found SPI in _CRS but it points to another controller */ + return ERR_PTR(ret); if (!lookup.max_speed_hz && ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && @@ -2937,7 +3011,7 @@ int spi_register_controller(struct spi_controller *ctlr) return status; if (ctlr->bus_num >= 0) { - /* devices with a fixed bus num must check-in with the num */ + /* Devices with a fixed bus num must check-in with the num */ mutex_lock(&board_lock); id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, ctlr->bus_num + 1, GFP_KERNEL); @@ -2946,7 +3020,7 @@ int spi_register_controller(struct spi_controller *ctlr) return id == -ENOSPC ? -EBUSY : id; ctlr->bus_num = id; } else if (ctlr->dev.of_node) { - /* allocate dynamic bus number using Linux idr */ + /* Allocate dynamic bus number using Linux idr */ id = of_alias_get_id(ctlr->dev.of_node, "spi"); if (id >= 0) { ctlr->bus_num = id; @@ -2975,6 +3049,7 @@ int spi_register_controller(struct spi_controller *ctlr) } ctlr->bus_lock_flag = 0; init_completion(&ctlr->xfer_completion); + init_completion(&ctlr->cur_msg_completion); if (!ctlr->max_dma_len) ctlr->max_dma_len = INT_MAX; @@ -3004,7 +3079,7 @@ int spi_register_controller(struct spi_controller *ctlr) goto free_bus_id; } - /* setting last_cs to -1 means no chip selected */ + /* Setting last_cs to -1 means no chip selected */ ctlr->last_cs = -1; status = device_add(&ctlr->dev); @@ -3028,8 +3103,13 @@ int spi_register_controller(struct spi_controller *ctlr) goto free_bus_id; } } - /* add statistics */ - spin_lock_init(&ctlr->statistics.lock); + /* Add statistics */ + ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev); + if (!ctlr->pcpu_statistics) { + dev_err(dev, "Error allocating per-cpu statistics\n"); + status = -ENOMEM; + goto destroy_queue; + } mutex_lock(&board_lock); list_add_tail(&ctlr->list, &spi_controller_list); @@ -3042,6 +3122,8 @@ int spi_register_controller(struct spi_controller *ctlr) acpi_register_spi_devices(ctlr); return status; +destroy_queue: + spi_destroy_queue(ctlr); free_bus_id: mutex_lock(&board_lock); idr_remove(&spi_master_idr, ctlr->bus_num); @@ -3050,9 +3132,9 @@ free_bus_id: } EXPORT_SYMBOL_GPL(spi_register_controller); -static void devm_spi_unregister(void *ctlr) +static void devm_spi_unregister(struct device *dev, void *res) { - spi_unregister_controller(ctlr); + spi_unregister_controller(*(struct spi_controller **)res); } /** @@ -3071,13 +3153,22 @@ 
static void devm_spi_unregister(void *ctlr) int devm_spi_register_controller(struct device *dev, struct spi_controller *ctlr) { + struct spi_controller **ptr; int ret; + ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + ret = spi_register_controller(ctlr); - if (ret) - return ret; + if (!ret) { + *ptr = ctlr; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } - return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr); + return ret; } EXPORT_SYMBOL_GPL(devm_spi_register_controller); @@ -3124,7 +3215,7 @@ void spi_unregister_controller(struct spi_controller *ctlr) device_del(&ctlr->dev); - /* free bus id */ + /* Free bus id */ mutex_lock(&board_lock); if (found == ctlr) idr_remove(&spi_master_idr, id); @@ -3183,14 +3274,14 @@ static void __spi_replace_transfers_release(struct spi_controller *ctlr, struct spi_replaced_transfers *rxfer = res; size_t i; - /* call extra callback if requested */ + /* Call extra callback if requested */ if (rxfer->release) rxfer->release(ctlr, msg, res); - /* insert replaced transfers back into the message */ + /* Insert replaced transfers back into the message */ list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); - /* remove the formerly inserted entries */ + /* Remove the formerly inserted entries */ for (i = 0; i < rxfer->inserted; i++) list_del(&rxfer->inserted_transfers[i].transfer_list); } @@ -3223,7 +3314,7 @@ static struct spi_replaced_transfers *spi_replace_transfers( struct spi_transfer *xfer; size_t i; - /* allocate the structure using spi_res */ + /* Allocate the structure using spi_res */ rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, struct_size(rxfer, inserted_transfers, insert) + extradatasize, @@ -3231,15 +3322,15 @@ static struct spi_replaced_transfers *spi_replace_transfers( if (!rxfer) return ERR_PTR(-ENOMEM); - /* the release code to invoke before running the generic release */ + /* The release code to invoke before running the generic release */ rxfer->release = release; - /* assign extradata */ + /* Assign extradata */ if (extradatasize) rxfer->extradata = &rxfer->inserted_transfers[insert]; - /* init the replaced_transfers list */ + /* Init the replaced_transfers list */ INIT_LIST_HEAD(&rxfer->replaced_transfers); /* @@ -3248,7 +3339,7 @@ static struct spi_replaced_transfers *spi_replace_transfers( */ rxfer->replaced_after = xfer_first->transfer_list.prev; - /* remove the requested number of transfers */ + /* Remove the requested number of transfers */ for (i = 0; i < remove; i++) { /* * If the entry after replaced_after it is msg->transfers @@ -3258,14 +3349,14 @@ static struct spi_replaced_transfers *spi_replace_transfers( if (rxfer->replaced_after->next == &msg->transfers) { dev_err(&msg->spi->dev, "requested to remove more spi_transfers than are available\n"); - /* insert replaced transfers back into the message */ + /* Insert replaced transfers back into the message */ list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); - /* free the spi_replace_transfer structure */ + /* Free the spi_replace_transfer structure... */ spi_res_free(rxfer); - /* and return with an error */ + /* ...and return with an error */ return ERR_PTR(-EINVAL); } @@ -3282,26 +3373,26 @@ static struct spi_replaced_transfers *spi_replace_transfers( * based on the first transfer to get removed. 
*/ for (i = 0; i < insert; i++) { - /* we need to run in reverse order */ + /* We need to run in reverse order */ xfer = &rxfer->inserted_transfers[insert - 1 - i]; - /* copy all spi_transfer data */ + /* Copy all spi_transfer data */ memcpy(xfer, xfer_first, sizeof(*xfer)); - /* add to list */ + /* Add to list */ list_add(&xfer->transfer_list, rxfer->replaced_after); - /* clear cs_change and delay for all but the last */ + /* Clear cs_change and delay for all but the last */ if (i) { xfer->cs_change = false; xfer->delay.value = 0; } } - /* set up inserted */ + /* Set up inserted... */ rxfer->inserted = insert; - /* and register it with spi_res/spi_message */ + /* ...and register it with spi_res/spi_message */ spi_res_add(msg, rxfer); return rxfer; @@ -3318,10 +3409,10 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, size_t offset; size_t count, i; - /* calculate how many we have to replace */ + /* Calculate how many we have to replace */ count = DIV_ROUND_UP(xfer->len, maxsize); - /* create replacement */ + /* Create replacement */ srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); if (IS_ERR(srt)) return PTR_ERR(srt); @@ -3344,9 +3435,9 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, */ xfers[0].len = min_t(size_t, maxsize, xfer[0].len); - /* all the others need rx_buf/tx_buf also set */ + /* All the others need rx_buf/tx_buf also set */ for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { - /* update rx_buf, tx_buf and dma */ + /* Update rx_buf, tx_buf and dma */ if (xfers[i].rx_buf) xfers[i].rx_buf += offset; if (xfers[i].rx_dma) @@ -3356,7 +3447,7 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, if (xfers[i].tx_dma) xfers[i].tx_dma += offset; - /* update length */ + /* Update length */ xfers[i].len = min(maxsize, xfers[i].len - offset); } @@ -3366,10 +3457,10 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, */ *xferp = &xfers[count - 1]; - /* increment statistics counters */ - SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, + /* Increment statistics counters */ + SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, transfers_split_maxsize); - SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, + SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics, transfers_split_maxsize); return 0; @@ -3628,7 +3719,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) return ret; list_for_each_entry(xfer, &message->transfers, transfer_list) { - /* don't change cs_change on the last entry in the list */ + /* Don't change cs_change on the last entry in the list */ if (list_is_last(&xfer->transfer_list, &message->transfers)) break; xfer->cs_change = 1; @@ -3721,7 +3812,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) !(spi->mode & SPI_TX_QUAD)) return -EINVAL; } - /* check transfer rx_nbits */ + /* Check transfer rx_nbits */ if (xfer->rx_buf) { if (spi->mode & SPI_NO_RX) return -EINVAL; @@ -3760,8 +3851,8 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) message->spi = spi; - SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); - SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); + SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async); + SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async); trace_spi_message_submit(message); @@ -3880,6 +3971,39 @@ static int spi_async_locked(struct spi_device *spi, struct spi_message *message) } +static void 
__spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg) +{ + bool was_busy; + int ret; + + mutex_lock(&ctlr->io_mutex); + + was_busy = ctlr->busy; + + ctlr->cur_msg = msg; + ret = __spi_pump_transfer_message(ctlr, msg, was_busy); + if (ret) + goto out; + + ctlr->cur_msg = NULL; + ctlr->fallback = false; + + if (!was_busy) { + kfree(ctlr->dummy_rx); + ctlr->dummy_rx = NULL; + kfree(ctlr->dummy_tx); + ctlr->dummy_tx = NULL; + if (ctlr->unprepare_transfer_hardware && + ctlr->unprepare_transfer_hardware(ctlr)) + dev_err(&ctlr->dev, + "failed to unprepare transfer hardware\n"); + spi_idle_runtime_pm(ctlr); + } + +out: + mutex_unlock(&ctlr->io_mutex); +} + /*-------------------------------------------------------------------------*/ /* @@ -3898,51 +4022,51 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message) DECLARE_COMPLETION_ONSTACK(done); int status; struct spi_controller *ctlr = spi->controller; - unsigned long flags; status = __spi_validate(spi, message); if (status != 0) return status; - message->complete = spi_complete; - message->context = &done; message->spi = spi; - SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); - SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); + SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync); + SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync); /* - * If we're not using the legacy transfer method then we will - * try to transfer in the calling context so special case. - * This code would be less tricky if we could remove the - * support for driver implemented message queues. + * Checking queue_empty here only guarantees async/sync message + * ordering when coming from the same context. It does not need to + * guard against reentrancy from a different context. The io_mutex + * will catch those cases. */ - if (ctlr->transfer == spi_queued_transfer) { - spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); + if (READ_ONCE(ctlr->queue_empty)) { + message->actual_length = 0; + message->status = -EINPROGRESS; trace_spi_message_submit(message); - status = __spi_queued_transfer(spi, message, false); + SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate); + SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate); - spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); - } else { - status = spi_async_locked(spi, message); + __spi_transfer_message_noqueue(ctlr, message); + + return message->status; } + /* + * There are messages in the async queue that could have originated + * from the same context, so we need to preserve ordering. + * Therefore we send the message to the async queue and wait until they + * are completed.
+ */ + message->complete = spi_complete; + message->context = &done; + status = spi_async_locked(spi, message); if (status == 0) { - /* Push out the messages in the calling context if we can */ - if (ctlr->transfer == spi_queued_transfer) { - SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, - spi_sync_immediate); - SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, - spi_sync_immediate); - __spi_pump_messages(ctlr, false); - } - wait_for_completion(&done); status = message->status; } message->context = NULL; + return status; } @@ -4026,7 +4150,7 @@ int spi_bus_lock(struct spi_controller *ctlr) ctlr->bus_lock_flag = 1; spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); - /* mutex remains locked until spi_bus_unlock is called */ + /* Mutex remains locked until spi_bus_unlock() is called */ return 0; } @@ -4055,7 +4179,7 @@ int spi_bus_unlock(struct spi_controller *ctlr) } EXPORT_SYMBOL_GPL(spi_bus_unlock); -/* portable code must never pass more than 32 bytes */ +/* Portable code must never pass more than 32 bytes */ #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) static u8 *buf; @@ -4121,7 +4245,7 @@ int spi_write_then_read(struct spi_device *spi, x[0].tx_buf = local_buf; x[1].rx_buf = local_buf + n_tx; - /* do the i/o */ + /* Do the i/o */ status = spi_sync(spi, &message); if (status == 0) memcpy(rxbuf, x[1].rx_buf, n_rx); @@ -4138,7 +4262,7 @@ EXPORT_SYMBOL_GPL(spi_write_then_read); /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_OF_DYNAMIC) -/* must call put_device() when done with returned spi_device device */ +/* Must call put_device() when done with returned spi_device device */ static struct spi_device *of_find_spi_device_by_node(struct device_node *node) { struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); @@ -4146,7 +4270,7 @@ static struct spi_device *of_find_spi_device_by_node(struct device_node *node) return dev ? to_spi_device(dev) : NULL; } -/* the spi controllers are not using spi_bus, so we find it with another way */ +/* The spi controllers are not using spi_bus, so we find it with another way */ static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) { struct device *dev; @@ -4157,7 +4281,7 @@ static struct spi_controller *of_find_spi_controller_by_node(struct device_node if (!dev) return NULL; - /* reference got in class_find_device */ + /* Reference got in class_find_device */ return container_of(dev, struct spi_controller, dev); } @@ -4172,7 +4296,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action, case OF_RECONFIG_CHANGE_ADD: ctlr = of_find_spi_controller_by_node(rd->dn->parent); if (ctlr == NULL) - return NOTIFY_OK; /* not for us */ + return NOTIFY_OK; /* Not for us */ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { put_device(&ctlr->dev); @@ -4191,19 +4315,19 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action, break; case OF_RECONFIG_CHANGE_REMOVE: - /* already depopulated? */ + /* Already depopulated? */ if (!of_node_check_flag(rd->dn, OF_POPULATED)) return NOTIFY_OK; - /* find our device by node */ + /* Find our device by node */ spi = of_find_spi_device_by_node(rd->dn); if (spi == NULL) - return NOTIFY_OK; /* no? not meant for us */ + return NOTIFY_OK; /* No? not meant for us */ - /* unregister takes one ref away */ + /* Unregister takes one ref away */ spi_unregister_device(spi); - /* and put the reference of the find */ + /* And put the reference of the find */ put_device(&spi->dev); break; } |