Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig              |   22
-rw-r--r--  drivers/spi/Makefile             |    1
-rw-r--r--  drivers/spi/spi-ath79.c          |   34
-rw-r--r--  drivers/spi/spi-atmel.c          |  310
-rw-r--r--  drivers/spi/spi-bcm2835.c        |  702
-rw-r--r--  drivers/spi/spi-bcm53xx.c        |    4
-rw-r--r--  drivers/spi/spi-bfin5xx.c        |    3
-rw-r--r--  drivers/spi/spi-bitbang-txrx.h   |   18
-rw-r--r--  drivers/spi/spi-bitbang.c        |   17
-rw-r--r--  drivers/spi/spi-davinci.c        |    2
-rw-r--r--  drivers/spi/spi-dw-mid.c         |  179
-rw-r--r--  drivers/spi/spi-dw-pci.c         |    4
-rw-r--r--  drivers/spi/spi-dw.c             |  310
-rw-r--r--  drivers/spi/spi-dw.h             |   70
-rw-r--r--  drivers/spi/spi-fsl-cpm.c        |   40
-rw-r--r--  drivers/spi/spi-fsl-dspi.c       |  404
-rw-r--r--  drivers/spi/spi-fsl-espi.c       |   51
-rw-r--r--  drivers/spi/spi-img-spfi.c       |  196
-rw-r--r--  drivers/spi/spi-imx.c            |   10
-rw-r--r--  drivers/spi/spi-mpc512x-psc.c    |    2
-rw-r--r--  drivers/spi/spi-octeon.c         |    2
-rw-r--r--  drivers/spi/spi-omap-100k.c      |  101
-rw-r--r--  drivers/spi/spi-omap-uwire.c     |    1
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c    |  270
-rw-r--r--  drivers/spi/spi-orion.c          |   70
-rw-r--r--  drivers/spi/spi-pl022.c          |   16
-rw-r--r--  drivers/spi/spi-pxa2xx-pci.c     |    8
-rw-r--r--  drivers/spi/spi-pxa2xx-pxadma.c  |  487
-rw-r--r--  drivers/spi/spi-pxa2xx.c         |  347
-rw-r--r--  drivers/spi/spi-pxa2xx.h         |    6
-rw-r--r--  drivers/spi/spi-qup.c            |  345
-rw-r--r--  drivers/spi/spi-rockchip.c       |   38
-rw-r--r--  drivers/spi/spi-rspi.c           |  122
-rw-r--r--  drivers/spi/spi-s3c64xx.c        |    6
-rw-r--r--  drivers/spi/spi-sc18is602.c      |    2
-rw-r--r--  drivers/spi/spi-sh-msiof.c       |    3
-rw-r--r--  drivers/spi/spi-st-ssc4.c        |    2
-rw-r--r--  drivers/spi/spi-ti-qspi.c        |   22
-rw-r--r--  drivers/spi/spi.c                |  158
-rw-r--r--  drivers/spi/spidev.c             |   55
40 files changed, 2676 insertions, 1764 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8b1beaf6c9e5..ec40a27a4b76 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -77,7 +77,9 @@ config SPI_ATMEL
config SPI_BCM2835
tristate "BCM2835 SPI controller"
+ depends on GPIOLIB
depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on GPIOLIB
help
This selects a driver for the Broadcom BCM2835 SPI master.
@@ -159,10 +161,9 @@ config SPI_BUTTERFLY
config SPI_CADENCE
tristate "Cadence SPI controller"
- depends on ARM
help
This selects the Cadence SPI controller master driver
- used by Xilinx Zynq.
+ used by Xilinx Zynq and ZynqMP.
config SPI_CLPS711X
tristate "CLPS711X host SPI controller"
@@ -221,7 +222,7 @@ config SPI_FALCON
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
select SPI_BITBANG
help
This simple GPIO bitbanging SPI master uses the arch-neutral GPIO
@@ -303,7 +304,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
- depends on SOC_VF610 || COMPILE_TEST
+ depends on SOC_VF610 || SOC_LS1021A || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
@@ -327,7 +328,7 @@ config SPI_MESON_SPIFC
config SPI_OC_TINY
tristate "OpenCores tiny SPI"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
select SPI_BITBANG
help
This is the driver for OpenCores tiny SPI master controller.
@@ -394,16 +395,9 @@ config SPI_PPC4xx
help
This selects a driver for the PPC4xx SPI Controller.
-config SPI_PXA2XX_PXADMA
- bool "PXA2xx SSP legacy PXA DMA API support"
- depends on SPI_PXA2XX && ARCH_PXA
- help
- Enable PXA private legacy DMA API support. Note that this is
- deprecated in favor of generic DMA engine API.
-
config SPI_PXA2XX_DMA
def_bool y
- depends on SPI_PXA2XX && !SPI_PXA2XX_PXADMA
+ depends on SPI_PXA2XX
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
@@ -638,7 +632,7 @@ config SPI_DW_PCI
config SPI_DW_MID_DMA
bool "DMA support for DW SPI controller on Intel MID platform"
- depends on SPI_DW_PCI && INTEL_MID_DMAC
+ depends on SPI_DW_PCI && DW_DMAC_PCI
config SPI_DW_MMIO
tristate "Memory-mapped io interface driver for DW SPI core"
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 0218f39de7d2..2e7089fbc799 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -60,7 +60,6 @@ obj-$(CONFIG_SPI_ORION) += spi-orion.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
spi-pxa2xx-platform-objs := spi-pxa2xx.o
-spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index b02eb4ac0218..bf1f9b32c597 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -79,10 +79,8 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
}
if (spi->chip_select) {
- struct ath79_spi_controller_data *cdata = spi->controller_data;
-
/* SPI is normally active-low */
- gpio_set_value(cdata->gpio, cs_high);
+ gpio_set_value(spi->cs_gpio, cs_high);
} else {
if (cs_high)
sp->ioc_base |= AR71XX_SPI_IOC_CS0;
@@ -117,11 +115,10 @@ static void ath79_spi_disable(struct ath79_spi *sp)
static int ath79_spi_setup_cs(struct spi_device *spi)
{
- struct ath79_spi_controller_data *cdata;
+ struct ath79_spi *sp = ath79_spidev_to_sp(spi);
int status;
- cdata = spi->controller_data;
- if (spi->chip_select && !cdata)
+ if (spi->chip_select && !gpio_is_valid(spi->cs_gpio))
return -EINVAL;
status = 0;
@@ -134,8 +131,15 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
else
flags |= GPIOF_INIT_HIGH;
- status = gpio_request_one(cdata->gpio, flags,
+ status = gpio_request_one(spi->cs_gpio, flags,
dev_name(&spi->dev));
+ } else {
+ if (spi->mode & SPI_CS_HIGH)
+ sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+ else
+ sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
return status;
@@ -144,8 +148,7 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
static void ath79_spi_cleanup_cs(struct spi_device *spi)
{
if (spi->chip_select) {
- struct ath79_spi_controller_data *cdata = spi->controller_data;
- gpio_free(cdata->gpio);
+ gpio_free(spi->cs_gpio);
}
}
@@ -217,6 +220,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
}
sp = spi_master_get_devdata(master);
+ master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, sp);
pdata = dev_get_platdata(&pdev->dev);
@@ -253,7 +257,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
- ret = clk_enable(sp->clk);
+ ret = clk_prepare_enable(sp->clk);
if (ret)
goto err_put_master;
@@ -277,7 +281,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
err_disable:
ath79_spi_disable(sp);
err_clk_disable:
- clk_disable(sp->clk);
+ clk_disable_unprepare(sp->clk);
err_put_master:
spi_master_put(sp->bitbang.master);
@@ -290,7 +294,7 @@ static int ath79_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&sp->bitbang);
ath79_spi_disable(sp);
- clk_disable(sp->clk);
+ clk_disable_unprepare(sp->clk);
spi_master_put(sp->bitbang.master);
return 0;
@@ -301,12 +305,18 @@ static void ath79_spi_shutdown(struct platform_device *pdev)
ath79_spi_remove(pdev);
}
+static const struct of_device_id ath79_spi_of_match[] = {
+ { .compatible = "qca,ar7100-spi", },
+ { },
+};
+
static struct platform_driver ath79_spi_driver = {
.probe = ath79_spi_probe,
.remove = ath79_spi_remove,
.shutdown = ath79_spi_shutdown,
.driver = {
.name = DRV_NAME,
+ .of_match_table = ath79_spi_of_match,
},
};
module_platform_driver(ath79_spi_driver);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 9af7841f2e8c..c9eca347787d 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -41,6 +41,8 @@
#define SPI_CSR1 0x0034
#define SPI_CSR2 0x0038
#define SPI_CSR3 0x003c
+#define SPI_FMR 0x0040
+#define SPI_FLR 0x0044
#define SPI_VERSION 0x00fc
#define SPI_RPR 0x0100
#define SPI_RCR 0x0104
@@ -62,6 +64,14 @@
#define SPI_SWRST_SIZE 1
#define SPI_LASTXFER_OFFSET 24
#define SPI_LASTXFER_SIZE 1
+#define SPI_TXFCLR_OFFSET 16
+#define SPI_TXFCLR_SIZE 1
+#define SPI_RXFCLR_OFFSET 17
+#define SPI_RXFCLR_SIZE 1
+#define SPI_FIFOEN_OFFSET 30
+#define SPI_FIFOEN_SIZE 1
+#define SPI_FIFODIS_OFFSET 31
+#define SPI_FIFODIS_SIZE 1
/* Bitfields in MR */
#define SPI_MSTR_OFFSET 0
@@ -114,6 +124,22 @@
#define SPI_TXEMPTY_SIZE 1
#define SPI_SPIENS_OFFSET 16
#define SPI_SPIENS_SIZE 1
+#define SPI_TXFEF_OFFSET 24
+#define SPI_TXFEF_SIZE 1
+#define SPI_TXFFF_OFFSET 25
+#define SPI_TXFFF_SIZE 1
+#define SPI_TXFTHF_OFFSET 26
+#define SPI_TXFTHF_SIZE 1
+#define SPI_RXFEF_OFFSET 27
+#define SPI_RXFEF_SIZE 1
+#define SPI_RXFFF_OFFSET 28
+#define SPI_RXFFF_SIZE 1
+#define SPI_RXFTHF_OFFSET 29
+#define SPI_RXFTHF_SIZE 1
+#define SPI_TXFPTEF_OFFSET 30
+#define SPI_TXFPTEF_SIZE 1
+#define SPI_RXFPTEF_OFFSET 31
+#define SPI_RXFPTEF_SIZE 1
/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET 0
@@ -157,6 +183,22 @@
#define SPI_TXTDIS_OFFSET 9
#define SPI_TXTDIS_SIZE 1
+/* Bitfields in FMR */
+#define SPI_TXRDYM_OFFSET 0
+#define SPI_TXRDYM_SIZE 2
+#define SPI_RXRDYM_OFFSET 4
+#define SPI_RXRDYM_SIZE 2
+#define SPI_TXFTHRES_OFFSET 16
+#define SPI_TXFTHRES_SIZE 6
+#define SPI_RXFTHRES_OFFSET 24
+#define SPI_RXFTHRES_SIZE 6
+
+/* Bitfields in FLR */
+#define SPI_TXFL_OFFSET 0
+#define SPI_TXFL_SIZE 6
+#define SPI_RXFL_OFFSET 16
+#define SPI_RXFL_SIZE 6
+
/* Constants for BITS */
#define SPI_BITS_8_BPT 0
#define SPI_BITS_9_BPT 1
@@ -167,6 +209,9 @@
#define SPI_BITS_14_BPT 6
#define SPI_BITS_15_BPT 7
#define SPI_BITS_16_BPT 8
+#define SPI_ONE_DATA 0
+#define SPI_TWO_DATA 1
+#define SPI_FOUR_DATA 2
/* Bit manipulation macros */
#define SPI_BIT(name) \
@@ -180,11 +225,37 @@
| SPI_BF(name, value))
/* Register access macros */
+#ifdef CONFIG_AVR32
#define spi_readl(port, reg) \
__raw_readl((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
__raw_writel((value), (port)->regs + SPI_##reg)
+#define spi_readw(port, reg) \
+ __raw_readw((port)->regs + SPI_##reg)
+#define spi_writew(port, reg, value) \
+ __raw_writew((value), (port)->regs + SPI_##reg)
+
+#define spi_readb(port, reg) \
+ __raw_readb((port)->regs + SPI_##reg)
+#define spi_writeb(port, reg, value) \
+ __raw_writeb((value), (port)->regs + SPI_##reg)
+#else
+#define spi_readl(port, reg) \
+ readl_relaxed((port)->regs + SPI_##reg)
+#define spi_writel(port, reg, value) \
+ writel_relaxed((value), (port)->regs + SPI_##reg)
+
+#define spi_readw(port, reg) \
+ readw_relaxed((port)->regs + SPI_##reg)
+#define spi_writew(port, reg, value) \
+ writew_relaxed((value), (port)->regs + SPI_##reg)
+
+#define spi_readb(port, reg) \
+ readb_relaxed((port)->regs + SPI_##reg)
+#define spi_writeb(port, reg, value) \
+ writeb_relaxed((value), (port)->regs + SPI_##reg)
+#endif
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
* cache operations; better heuristics consider wordsize and bitrate.
*/
@@ -240,11 +311,14 @@ struct atmel_spi {
bool use_dma;
bool use_pdc;
+ bool use_cs_gpios;
/* dmaengine data */
struct atmel_spi_dma dma;
bool keep_cs;
bool cs_active;
+
+ u32 fifo_size;
};
/* Controller-specific per-slave state */
@@ -315,7 +389,8 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
}
mr = spi_readl(as, MR);
- gpio_set_value(asd->npcs_pin, active);
+ if (as->use_cs_gpios)
+ gpio_set_value(asd->npcs_pin, active);
} else {
u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
int i;
@@ -331,7 +406,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
mr = spi_readl(as, MR);
mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
- if (spi->chip_select != 0)
+ if (as->use_cs_gpios && spi->chip_select != 0)
gpio_set_value(asd->npcs_pin, active);
spi_writel(as, MR, mr);
}
@@ -360,7 +435,9 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
asd->npcs_pin, active ? " (low)" : "",
mr);
- if (atmel_spi_is_v2(as) || spi->chip_select != 0)
+ if (!as->use_cs_gpios)
+ spi_writel(as, CR, SPI_BIT(LASTXFER));
+ else if (atmel_spi_is_v2(as) || spi->chip_select != 0)
gpio_set_value(asd->npcs_pin, !active);
}
@@ -400,6 +477,20 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
slave_config->dst_maxburst = 1;
slave_config->device_fc = false;
+ /*
+ * This driver uses fixed peripheral select mode (PS bit set to '0' in
+ * the Mode Register).
+ * So according to the datasheet, when FIFOs are available (and
+ * enabled), the Transmit FIFO operates in Multiple Data Mode.
+ * In this mode, up to 2 data, not 4, can be written into the Transmit
+ * Data Register in a single access.
+ * However, the first data has to be written into the lowest 16 bits and
+ * the second data into the highest 16 bits of the Transmit
+ * Data Register. For 8bit data (the most frequent case), it would
+ * require reworking tx_buf so each data would actually fit 16 bits.
+ * So we'd rather write only one data at a time. Hence the transmit
+ * path works the same whether FIFOs are available (and enabled) or not.
+ */
slave_config->direction = DMA_MEM_TO_DEV;
if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
dev_err(&as->pdev->dev,
@@ -407,6 +498,14 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
err = -EINVAL;
}
+ /*
+ * This driver configures the spi controller for master mode (MSTR bit
+ * set to '1' in the Mode Register).
+ * So according to the datasheet, when FIFOs are available (and
+ * enabled), the Receive FIFO operates in Single Data Mode.
+ * So the receive path works the same whether FIFOs are available (and
+ * enabled) or not.
+ */
slave_config->direction = DMA_DEV_TO_MEM;
if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
dev_err(&as->pdev->dev,
@@ -496,10 +595,10 @@ static void dma_callback(void *data)
}
/*
- * Next transfer using PIO.
+ * Next transfer using PIO without FIFO.
*/
-static void atmel_spi_next_xfer_pio(struct spi_master *master,
- struct spi_transfer *xfer)
+static void atmel_spi_next_xfer_single(struct spi_master *master,
+ struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_master_get_devdata(master);
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
@@ -532,6 +631,99 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
}
/*
+ * Next transfer using PIO with FIFO.
+ */
+static void atmel_spi_next_xfer_fifo(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 current_remaining_data, num_data;
+ u32 offset = xfer->len - as->current_remaining_bytes;
+ const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
+ const u8 *bytes = (const u8 *)((u8 *)xfer->tx_buf + offset);
+ u16 td0, td1;
+ u32 fifomr;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_fifo\n");
+
+ /* Compute the number of data to transfer in the current iteration */
+ current_remaining_data = ((xfer->bits_per_word > 8) ?
+ ((u32)as->current_remaining_bytes >> 1) :
+ (u32)as->current_remaining_bytes);
+ num_data = min(current_remaining_data, as->fifo_size);
+
+ /* Flush RX and TX FIFOs */
+ spi_writel(as, CR, SPI_BIT(RXFCLR) | SPI_BIT(TXFCLR));
+ while (spi_readl(as, FLR))
+ cpu_relax();
+
+ /* Set RX FIFO Threshold to the number of data to transfer */
+ fifomr = spi_readl(as, FMR);
+ spi_writel(as, FMR, SPI_BFINS(RXFTHRES, num_data, fifomr));
+
+ /* Clear FIFO flags in the Status Register, especially RXFTHF */
+ (void)spi_readl(as, SR);
+
+ /* Fill TX FIFO */
+ while (num_data >= 2) {
+ if (xfer->tx_buf) {
+ if (xfer->bits_per_word > 8) {
+ td0 = *words++;
+ td1 = *words++;
+ } else {
+ td0 = *bytes++;
+ td1 = *bytes++;
+ }
+ } else {
+ td0 = 0;
+ td1 = 0;
+ }
+
+ spi_writel(as, TDR, (td1 << 16) | td0);
+ num_data -= 2;
+ }
+
+ if (num_data) {
+ if (xfer->tx_buf) {
+ if (xfer->bits_per_word > 8)
+ td0 = *words++;
+ else
+ td0 = *bytes++;
+ } else {
+ td0 = 0;
+ }
+
+ spi_writew(as, TDR, td0);
+ num_data--;
+ }
+
+ dev_dbg(master->dev.parent,
+ " start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
+ xfer->bits_per_word);
+
+ /*
+ * Enable RX FIFO Threshold Flag interrupt to be notified about
+ * transfer completion.
+ */
+ spi_writel(as, IER, SPI_BIT(RXFTHF) | SPI_BIT(OVRES));
+}
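A minimal sketch of the Multiple Data Mode packing described in the comment earlier in this file (the helper name is illustrative, not part of the patch): two data words go into one 32-bit TDR access, the first in the low half-word and the second in the high half-word, exactly as the fill loop above does with (td1 << 16) | td0.

/* illustrative sketch, not driver code: pack two TX data into one TDR write */
static inline u32 atmel_spi_pack_tdr(u16 td0, u16 td1)
{
	/* first datum -> bits [15:0], second datum -> bits [31:16] */
	return ((u32)td1 << 16) | td0;
}
/* e.g. 8-bit data 0xAA then 0x55 become a single 0x005500AA write to TDR */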
+
+/*
+ * Next transfer using PIO.
+ */
+static void atmel_spi_next_xfer_pio(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ if (as->fifo_size)
+ atmel_spi_next_xfer_fifo(master, xfer);
+ else
+ atmel_spi_next_xfer_single(master, xfer);
+}
+
+/*
* Submit next transfer for DMA.
*/
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
@@ -764,17 +956,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
(unsigned long long)xfer->rx_dma);
}
- /* REVISIT: We're waiting for ENDRX before we start the next
+ /* REVISIT: We're waiting for RXBUFF before we start the next
* transfer because we need to handle some difficult timing
- * issues otherwise. If we wait for ENDTX in one transfer and
- * then starts waiting for ENDRX in the next, it's difficult
- * to tell the difference between the ENDRX interrupt we're
- * actually waiting for and the ENDRX interrupt of the
+ * issues otherwise. If we wait for TXBUFE in one transfer and
+ * then starts waiting for RXBUFF in the next, it's difficult
+ * to tell the difference between the RXBUFF interrupt we're
+ * actually waiting for and the RXBUFF interrupt of the
* previous transfer.
*
* It should be doable, though. Just not now...
*/
- spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
+ spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
@@ -833,13 +1025,8 @@ static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}
-/* Called from IRQ
- *
- * Must update "current_remaining_bytes" to keep track of data
- * to transfer.
- */
static void
-atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
+atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
u8 *rxp;
u16 *rxp16;
@@ -866,6 +1053,57 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
}
}
+static void
+atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ u32 fifolr = spi_readl(as, FLR);
+ u32 num_bytes, num_data = SPI_BFEXT(RXFL, fifolr);
+ u32 offset = xfer->len - as->current_remaining_bytes;
+ u16 *words = (u16 *)((u8 *)xfer->rx_buf + offset);
+ u8 *bytes = (u8 *)((u8 *)xfer->rx_buf + offset);
+ u16 rd; /* RD field is the lowest 16 bits of RDR */
+
+ /* Update the number of remaining bytes to transfer */
+ num_bytes = ((xfer->bits_per_word > 8) ?
+ (num_data << 1) :
+ num_data);
+
+ if (as->current_remaining_bytes > num_bytes)
+ as->current_remaining_bytes -= num_bytes;
+ else
+ as->current_remaining_bytes = 0;
+
+ /* Handle odd number of bytes when data are more than 8bit width */
+ if (xfer->bits_per_word > 8)
+ as->current_remaining_bytes &= ~0x1;
+
+ /* Read data */
+ while (num_data) {
+ rd = spi_readl(as, RDR);
+ if (xfer->rx_buf) {
+ if (xfer->bits_per_word > 8)
+ *words++ = rd;
+ else
+ *bytes++ = rd;
+ }
+ num_data--;
+ }
+}
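The byte accounting above can be restated as a small helper (hypothetical name, same semantics as the code): each entry reported by RXFL is one data word, occupying two bytes when bits_per_word is greater than 8 and one byte otherwise.

/* sketch: bytes drained from the RX FIFO for a given RXFL count */
static inline u32 atmel_spi_rxfl_to_bytes(u32 num_data, u8 bits_per_word)
{
	return (bits_per_word > 8) ? (num_data << 1) : num_data;
}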
+
+/* Called from IRQ
+ *
+ * Must update "current_remaining_bytes" to keep track of data
+ * to transfer.
+ */
+static void
+atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ if (as->fifo_size)
+ atmel_spi_pump_fifo_data(as, xfer);
+ else
+ atmel_spi_pump_single_data(as, xfer);
+}
+
/* Interrupt
*
* No need for locking in this Interrupt handler: done_status is the
@@ -906,7 +1144,7 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
complete(&as->xfer_completion);
- } else if (pending & SPI_BIT(RDRF)) {
+ } else if (pending & (SPI_BIT(RDRF) | SPI_BIT(RXFTHF))) {
atmel_spi_lock(as);
if (as->current_remaining_bytes) {
@@ -990,6 +1228,8 @@ static int atmel_spi_setup(struct spi_device *spi)
csr |= SPI_BIT(CPOL);
if (!(spi->mode & SPI_CPHA))
csr |= SPI_BIT(NCPHA);
+ if (!as->use_cs_gpios)
+ csr |= SPI_BIT(CSAAT);
/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
*
@@ -1003,7 +1243,9 @@ static int atmel_spi_setup(struct spi_device *spi)
/* chipselect must have been muxed as GPIO (e.g. in board setup) */
npcs_pin = (unsigned long)spi->controller_data;
- if (gpio_is_valid(spi->cs_gpio))
+ if (!as->use_cs_gpios)
+ npcs_pin = spi->chip_select;
+ else if (gpio_is_valid(spi->cs_gpio))
npcs_pin = spi->cs_gpio;
asd = spi->controller_state;
@@ -1012,15 +1254,19 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!asd)
return -ENOMEM;
- ret = gpio_request(npcs_pin, dev_name(&spi->dev));
- if (ret) {
- kfree(asd);
- return ret;
+ if (as->use_cs_gpios) {
+ ret = gpio_request(npcs_pin, dev_name(&spi->dev));
+ if (ret) {
+ kfree(asd);
+ return ret;
+ }
+
+ gpio_direction_output(npcs_pin,
+ !(spi->mode & SPI_CS_HIGH));
}
asd->npcs_pin = npcs_pin;
spi->controller_state = asd;
- gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
}
asd->csr = csr;
@@ -1332,6 +1578,13 @@ static int atmel_spi_probe(struct platform_device *pdev)
atmel_get_caps(as);
+ as->use_cs_gpios = true;
+ if (atmel_spi_is_v2(as) &&
+ !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
+ as->use_cs_gpios = false;
+ master->num_chipselect = 4;
+ }
+
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
@@ -1374,6 +1627,13 @@ static int atmel_spi_probe(struct platform_device *pdev)
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
spi_writel(as, CR, SPI_BIT(SPIEN));
+ as->fifo_size = 0;
+ if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
+ &as->fifo_size)) {
+ dev_info(&pdev->dev, "Using FIFO (%u data)\n", as->fifo_size);
+ spi_writel(as, CR, SPI_BIT(FIFOEN));
+ }
+
/* go! */
dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
(unsigned long)regs->start, irq);
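As an illustration of the new probe branch above (an assumed device-tree fragment, not taken from the patch):

/*
 * A controller node carrying
 *     atmel,fifo-size = <16>;
 * makes of_property_read_u32() succeed, so the driver logs
 * "Using FIFO (16 data)" and sets SPI_BIT(FIFOEN) in CR; without the
 * property, fifo_size stays 0 and the FIFO transfer paths are never taken.
 */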
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 419a782ab6d5..59705ab23577 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2012 Chris Boot
* Copyright (C) 2013 Stephen Warren
+ * Copyright (C) 2015 Martin Sperl
*
* This driver is inspired by:
* spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
@@ -19,17 +20,22 @@
* GNU General Public License for more details.
*/
+#include <asm/page.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/spi/spi.h>
/* SPI register offsets */
@@ -66,8 +72,11 @@
#define BCM2835_SPI_CS_CS_10 0x00000002
#define BCM2835_SPI_CS_CS_01 0x00000001
-#define BCM2835_SPI_TIMEOUT_MS 30000
-#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS)
+#define BCM2835_SPI_POLLING_LIMIT_US 30
+#define BCM2835_SPI_POLLING_JIFFIES 2
+#define BCM2835_SPI_DMA_MIN_LENGTH 96
+#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+ | SPI_NO_CS | SPI_3WIRE)
#define DRV_NAME "spi-bcm2835"
@@ -75,10 +84,11 @@ struct bcm2835_spi {
void __iomem *regs;
struct clk *clk;
int irq;
- struct completion done;
const u8 *tx_buf;
u8 *rx_buf;
- int len;
+ int tx_len;
+ int rx_len;
+ bool dma_pending;
};
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
@@ -91,205 +101,618 @@ static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val)
writel(val, bs->regs + reg);
}
-static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs, int len)
+static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
{
u8 byte;
- while (len--) {
+ while ((bs->rx_len) &&
+ (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
if (bs->rx_buf)
*bs->rx_buf++ = byte;
+ bs->rx_len--;
}
}
-static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs, int len)
+static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
{
u8 byte;
- if (len > bs->len)
- len = bs->len;
-
- while (len--) {
+ while ((bs->tx_len) &&
+ (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
byte = bs->tx_buf ? *bs->tx_buf++ : 0;
bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
- bs->len--;
+ bs->tx_len--;
}
}
+static void bcm2835_spi_reset_hw(struct spi_master *master)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /* Disable SPI interrupts and transfer */
+ cs &= ~(BCM2835_SPI_CS_INTR |
+ BCM2835_SPI_CS_INTD |
+ BCM2835_SPI_CS_DMAEN |
+ BCM2835_SPI_CS_TA);
+ /* and reset RX/TX FIFOS */
+ cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
+
+ /* and reset the SPI_HW */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+ /* as well as DLEN */
+ bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
+}
+
static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
- u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
- /*
- * RXR - RX needs Reading. This means 12 (or more) bytes have been
- * transmitted and hence 12 (or more) bytes have been received.
- *
- * The FIFO is 16-bytes deep. We check for this interrupt to keep the
- * FIFO full; we have a 4-byte-time buffer for IRQ latency. We check
- * this before DONE (TX empty) just in case we delayed processing this
- * interrupt for some reason.
- *
- * We only check for this case if we have more bytes to TX; at the end
- * of the transfer, we ignore this pipelining optimization, and let
- * bcm2835_spi_finish_transfer() drain the RX FIFO.
+ /* Read as many bytes as possible from FIFO */
+ bcm2835_rd_fifo(bs);
+ /* Write as many bytes as possible to FIFO */
+ bcm2835_wr_fifo(bs);
+
+ /* based on flags decide if we can finish the transfer */
+ if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+ /* Transfer complete - reset SPI HW */
+ bcm2835_spi_reset_hw(master);
+ /* wake up the framework */
+ complete(&master->xfer_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr,
+ u32 cs)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+
+ /* fill in fifo if we have gpio-cs
+ * note that there have been rare events where the native-CS
+ * flapped for <1us which may change the behaviour
+ * with gpio-cs this does not happen, so it is implemented
+ * only for this case
*/
- if (bs->len && (cs & BCM2835_SPI_CS_RXR)) {
- /* Read 12 bytes of data */
- bcm2835_rd_fifo(bs, 12);
-
- /* Write up to 12 bytes */
- bcm2835_wr_fifo(bs, 12);
-
- /*
- * We must have written something to the TX FIFO due to the
- * bs->len check above, so cannot be DONE. Hence, return
- * early. Note that DONE could also be set if we serviced an
- * RXR interrupt really late.
+ if (gpio_is_valid(spi->cs_gpio)) {
+ /* enable HW block, but without interrupts enabled;
+ * enabling them here would trigger an immediate interrupt
*/
- return IRQ_HANDLED;
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ cs | BCM2835_SPI_CS_TA);
+ /* fill in tx fifo as much as possible */
+ bcm2835_wr_fifo(bs);
}
/*
- * DONE - TX empty. This occurs when we first enable the transfer
- * since we do not pre-fill the TX FIFO. At any other time, given that
- * we refill the TX FIFO above based on RXR, and hence ignore DONE if
- * RXR is set, DONE really does mean end-of-transfer.
+ * Enable the HW block. This will immediately trigger a DONE (TX
+ * empty) interrupt, upon which we will fill the TX FIFO with the
+ * first TX bytes. Pre-filling the TX FIFO here to avoid the
+ * interrupt doesn't work:-(
*/
- if (cs & BCM2835_SPI_CS_DONE) {
- if (bs->len) { /* First interrupt in a transfer */
- bcm2835_wr_fifo(bs, 16);
- } else { /* Transfer complete */
- /* Disable SPI interrupts */
- cs &= ~(BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD);
- bcm2835_wr(bs, BCM2835_SPI_CS, cs);
-
- /*
- * Wake up bcm2835_spi_transfer_one(), which will call
- * bcm2835_spi_finish_transfer(), to drain the RX FIFO.
- */
- complete(&bs->done);
- }
+ cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+ /* signal that we need to wait for completion */
+ return 1;
+}
+
+/*
+ * DMA support
+ *
+ * this implementation currently has a few issues insofar as it does
+ * not work around limitations of the HW.
+ *
+ * the main one being that DMA transfers are limited to 16 bit
+ * (so 0 to 65535 bytes) by the SPI HW due to BCM2835_SPI_DLEN
+ *
+ * also we currently assume that the scatter-gather fragments are
+ * all a multiple of 4 (except the last) - otherwise we would need
+ * to reset the FIFO before subsequent transfers...
+ * this also means that tx/rx transfers sg's need to be of equal size!
+ *
+ * there may be a few more border-cases we may need to address as well
+ * but unfortunately this would mean splitting up the scatter-gather
+ * list, making it slightly impractical...
+ */
+static void bcm2835_spi_dma_done(void *data)
+{
+ struct spi_master *master = data;
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+
+ /* reset fifo and HW */
+ bcm2835_spi_reset_hw(master);
+
+ /* and terminate tx-dma as we do not have an irq for it
+ * because by the time the rx dma terminates and this callback
+ * is called, the tx-dma must already have finished - we can't get
+ * here otherwise...
+ */
+ dmaengine_terminate_all(master->dma_tx);
+
+ /* mark as no longer pending */
+ bs->dma_pending = 0;
+
+ /* and mark as completed */
+ complete(&master->xfer_completion);
+}
+
+static int bcm2835_spi_prepare_sg(struct spi_master *master,
+ struct spi_transfer *tfr,
+ bool is_tx)
+{
+ struct dma_chan *chan;
+ struct scatterlist *sgl;
+ unsigned int nents;
+ enum dma_transfer_direction dir;
+ unsigned long flags;
+
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+
+ if (is_tx) {
+ dir = DMA_MEM_TO_DEV;
+ chan = master->dma_tx;
+ nents = tfr->tx_sg.nents;
+ sgl = tfr->tx_sg.sgl;
+ flags = 0 /* no tx interrupt */;
+
+ } else {
+ dir = DMA_DEV_TO_MEM;
+ chan = master->dma_rx;
+ nents = tfr->rx_sg.nents;
+ sgl = tfr->rx_sg.sgl;
+ flags = DMA_PREP_INTERRUPT;
+ }
+ /* prepare the channel */
+ desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
+ if (!desc)
+ return -EINVAL;
+
+ /* set callback for rx */
+ if (!is_tx) {
+ desc->callback = bcm2835_spi_dma_done;
+ desc->callback_param = master;
+ }
+
+ /* submit it to DMA-engine */
+ cookie = dmaengine_submit(desc);
+
+ return dma_submit_error(cookie);
+}
+
+static inline int bcm2835_check_sg_length(struct sg_table *sgt)
+{
+ int i;
+ struct scatterlist *sgl;
+
+ /* check that the sg entries are word-sized (except for last) */
+ for_each_sg(sgt->sgl, sgl, (int)sgt->nents - 1, i) {
+ if (sg_dma_len(sgl) % 4)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr,
+ u32 cs)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ /* check that the scatter gather segments are all a multiple of 4 */
+ if (bcm2835_check_sg_length(&tfr->tx_sg) ||
+ bcm2835_check_sg_length(&tfr->rx_sg)) {
+ dev_warn_once(&spi->dev,
+ "scatter gather segment length is not a multiple of 4 - falling back to interrupt mode\n");
+ return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
+ }
+
+ /* setup tx-DMA */
+ ret = bcm2835_spi_prepare_sg(master, tfr, true);
+ if (ret)
+ return ret;
+
+ /* start TX early */
+ dma_async_issue_pending(master->dma_tx);
+
+ /* mark as dma pending */
+ bs->dma_pending = 1;
+
+ /* set the DMA length */
+ bcm2835_wr(bs, BCM2835_SPI_DLEN, tfr->len);
+
+ /* start the HW */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
+
+ /* set up rx-DMA late so that the transfer already runs while
+ * the rx buffers are still being mapped;
+ * this saves 10us or more.
+ */
+ ret = bcm2835_spi_prepare_sg(master, tfr, false);
+ if (ret) {
+ /* need to reset on errors */
+ dmaengine_terminate_all(master->dma_tx);
+ bcm2835_spi_reset_hw(master);
+ return ret;
+ }
+
+ /* start rx dma late */
+ dma_async_issue_pending(master->dma_rx);
+
+ /* wait for wakeup in framework */
+ return 1;
+}
+
+static bool bcm2835_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ /* only run for gpio_cs */
+ if (!gpio_is_valid(spi->cs_gpio))
+ return false;
+
+ /* we start DMA efforts only on bigger transfers */
+ if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
+ return false;
+
+ /* BCM2835_SPI_DLEN has defined a max transfer size as
+ * 16 bit, so max is 65535
+ * we can revisit this by using an alternative transfer
+ * method - ideally this would get done without any more
+ * interaction...
+ */
+ if (tfr->len > 65535) {
+ dev_warn_once(&spi->dev,
+ "transfer size of %d too big for dma-transfer\n",
+ tfr->len);
+ return false;
+ }
+
+ /* if we run rx/tx_buf with word aligned addresses then we are OK */
+ if ((((size_t)tfr->rx_buf & 3) == 0) &&
+ (((size_t)tfr->tx_buf & 3) == 0))
+ return true;
+
+ /* otherwise we only allow transfers within the same page
+ * to avoid wasting time on dma_mapping when it is not practical
+ */
+ if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
+ dev_warn_once(&spi->dev,
+ "Unaligned spi tx-transfer bridging page\n");
+ return false;
+ }
+ if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
+ dev_warn_once(&spi->dev,
+ "Unaligned spi rx-transfer bridging page\n");
+ return false;
+ }
- return IRQ_HANDLED;
+ /* return OK */
+ return true;
+}
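A sketch of the intended per-buffer condition checked above (written with offset_in_page() for clarity; the helper is illustrative and not part of the patch): a buffer qualifies when it is word aligned, or when it does not cross a page boundary.

/* sketch: may a single rx/tx buffer be handed to the DMA path? */
static bool bcm2835_buf_ok_for_dma(const void *buf, size_t len)
{
	if (((size_t)buf & 3) == 0)
		return true;			/* word aligned */
	/* unaligned buffers must stay within one page */
	return offset_in_page(buf) + len <= PAGE_SIZE;
}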
+
+static void bcm2835_dma_release(struct spi_master *master)
+{
+ if (master->dma_tx) {
+ dmaengine_terminate_all(master->dma_tx);
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+ if (master->dma_rx) {
+ dmaengine_terminate_all(master->dma_rx);
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+}
+
+static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
+{
+ struct dma_slave_config slave_config;
+ const __be32 *addr;
+ dma_addr_t dma_reg_base;
+ int ret;
+
+ /* base address in dma-space */
+ addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
+ if (!addr) {
+ dev_err(dev, "could not get DMA-register address - not using dma mode\n");
+ goto err;
+ }
+ dma_reg_base = be32_to_cpup(addr);
+
+ /* get tx/rx dma */
+ master->dma_tx = dma_request_slave_channel(dev, "tx");
+ if (!master->dma_tx) {
+ dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
+ goto err;
+ }
+ master->dma_rx = dma_request_slave_channel(dev, "rx");
+ if (!master->dma_rx) {
+ dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
+ goto err_release;
}
- return IRQ_NONE;
+ /* configure DMAs */
+ slave_config.direction = DMA_MEM_TO_DEV;
+ slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ ret = dmaengine_slave_config(master->dma_tx, &slave_config);
+ if (ret)
+ goto err_config;
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ ret = dmaengine_slave_config(master->dma_rx, &slave_config);
+ if (ret)
+ goto err_config;
+
+ /* all went well, so set can_dma */
+ master->can_dma = bcm2835_spi_can_dma;
+ master->max_dma_len = 65535; /* limitation by BCM2835_SPI_DLEN */
+ /* need to do TX AND RX DMA, so we need dummy buffers */
+ master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+
+ return;
+
+err_config:
+ dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
+ ret);
+err_release:
+ bcm2835_dma_release(master);
+err:
+ return;
}
-static int bcm2835_spi_start_transfer(struct spi_device *spi,
- struct spi_transfer *tfr)
+static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr,
+ u32 cs,
+ unsigned long xfer_time_us)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(spi->master);
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ unsigned long timeout;
+
+ /* enable HW block without interrupts */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
+
+ /* fill in the fifo before timeout calculations
+ * if we are interrupted here, then the data is
+ * getting transferred by the HW while we are interrupted
+ */
+ bcm2835_wr_fifo(bs);
+
+ /* set the timeout */
+ timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES;
+
+ /* loop until finished the transfer */
+ while (bs->rx_len) {
+ /* fill in tx fifo with remaining data */
+ bcm2835_wr_fifo(bs);
+
+ /* read from fifo as much as possible */
+ bcm2835_rd_fifo(bs);
+
+ /* if there is still data pending to read
+ * then check the timeout
+ */
+ if (bs->rx_len && time_after(jiffies, timeout)) {
+ dev_dbg_ratelimited(&spi->dev,
+ "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
+ jiffies - timeout,
+ bs->tx_len, bs->rx_len);
+ /* fall back to interrupt mode */
+ return bcm2835_spi_transfer_one_irq(master, spi,
+ tfr, cs);
+ }
+ }
+
+ /* Transfer complete - reset SPI HW */
+ bcm2835_spi_reset_hw(master);
+ /* and return without waiting for completion */
+ return 0;
+}
+
+static int bcm2835_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
unsigned long spi_hz, clk_hz, cdiv;
- u32 cs = BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
+ unsigned long spi_used_hz, xfer_time_us;
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ /* set clock */
spi_hz = tfr->speed_hz;
clk_hz = clk_get_rate(bs->clk);
if (spi_hz >= clk_hz / 2) {
cdiv = 2; /* clk_hz/2 is the fastest we can go */
} else if (spi_hz) {
- /* CDIV must be a power of two */
- cdiv = roundup_pow_of_two(DIV_ROUND_UP(clk_hz, spi_hz));
+ /* CDIV must be a multiple of two */
+ cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
+ cdiv += (cdiv % 2);
if (cdiv >= 65536)
cdiv = 0; /* 0 is the slowest we can go */
- } else
+ } else {
cdiv = 0; /* 0 is the slowest we can go */
+ }
+ spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
+ bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
+ /* handle all the modes */
+ if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+ cs |= BCM2835_SPI_CS_REN;
if (spi->mode & SPI_CPOL)
cs |= BCM2835_SPI_CS_CPOL;
if (spi->mode & SPI_CPHA)
cs |= BCM2835_SPI_CS_CPHA;
- if (!(spi->mode & SPI_NO_CS)) {
- if (spi->mode & SPI_CS_HIGH) {
- cs |= BCM2835_SPI_CS_CSPOL;
- cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select;
- }
-
- cs |= spi->chip_select;
- }
+ /* for gpio_cs set dummy CS so that no HW-CS get changed
+ * we can not run this in bcm2835_spi_set_cs, as it does
+ * not get called for cs_gpio cases, so we need to do it here
+ */
+ if (gpio_is_valid(spi->cs_gpio) || (spi->mode & SPI_NO_CS))
+ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
- reinit_completion(&bs->done);
+ /* set transmit buffers and length */
bs->tx_buf = tfr->tx_buf;
bs->rx_buf = tfr->rx_buf;
- bs->len = tfr->len;
+ bs->tx_len = tfr->len;
+ bs->rx_len = tfr->len;
- bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
- /*
- * Enable the HW block. This will immediately trigger a DONE (TX
- * empty) interrupt, upon which we will fill the TX FIFO with the
- * first TX bytes. Pre-filling the TX FIFO here to avoid the
- * interrupt doesn't work:-(
- */
- bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+ /* calculate the estimated time in us the transfer runs */
+ xfer_time_us = tfr->len
+ * 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */
+ * 1000000 / spi_used_hz;
- return 0;
+ /* for short requests run polling*/
+ if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US)
+ return bcm2835_spi_transfer_one_poll(master, spi, tfr,
+ cs, xfer_time_us);
+
+ /* run in dma mode if conditions are right */
+ if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr))
+ return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
+
+ /* run in interrupt-mode */
+ return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
}
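A worked example of the divider and transfer-time estimate above (illustrative numbers, not from the patch):

/*
 * clk_hz = 250 MHz, tfr->speed_hz = 10 MHz:
 *   cdiv = DIV_ROUND_UP(250000000, 10000000) = 25 -> 26 (rounded up to even)
 *   spi_used_hz = 250000000 / 26 ~= 9.6 MHz
 * a 12 byte transfer then costs xfer_time_us ~= 12 * 9 * 1000000 / 9615384
 * ~= 11 us, which is below BCM2835_SPI_POLLING_LIMIT_US (30), so the
 * polling path is taken rather than DMA or interrupt mode.
 */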
-static int bcm2835_spi_finish_transfer(struct spi_device *spi,
- struct spi_transfer *tfr, bool cs_change)
+static void bcm2835_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(spi->master);
- u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
- /* Drain RX FIFO */
- while (cs & BCM2835_SPI_CS_RXD) {
- bcm2835_rd_fifo(bs, 1);
- cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ /* if an error occurred and we have an active dma, then terminate */
+ if (bs->dma_pending) {
+ dmaengine_terminate_all(master->dma_tx);
+ dmaengine_terminate_all(master->dma_rx);
+ bs->dma_pending = 0;
}
-
- if (tfr->delay_usecs)
- udelay(tfr->delay_usecs);
-
- if (cs_change)
- /* Clear TA flag */
- bcm2835_wr(bs, BCM2835_SPI_CS, cs & ~BCM2835_SPI_CS_TA);
-
- return 0;
+ /* and reset */
+ bcm2835_spi_reset_hw(master);
}
-static int bcm2835_spi_transfer_one(struct spi_master *master,
- struct spi_message *mesg)
+static void bcm2835_spi_set_cs(struct spi_device *spi, bool gpio_level)
{
+ /*
+ * we can assume that we are "native" as per spi_set_cs
+ * calling us ONLY when cs_gpio is not set
+ * we can also assume that we are CS < 3, as the error handling in
+ * bcm2835_spi_setup would otherwise have prevented us from being called.
+ * the level passed is the electrical level not enabled/disabled
+ * so it has to get translated back to enable/disable
+ * see spi_set_cs in spi.c for the implementation
+ */
+
+ struct spi_master *master = spi->master;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
- struct spi_transfer *tfr;
- struct spi_device *spi = mesg->spi;
- int err = 0;
- unsigned int timeout;
- bool cs_change;
-
- list_for_each_entry(tfr, &mesg->transfers, transfer_list) {
- err = bcm2835_spi_start_transfer(spi, tfr);
- if (err)
- goto out;
-
- timeout = wait_for_completion_timeout(&bs->done,
- msecs_to_jiffies(BCM2835_SPI_TIMEOUT_MS));
- if (!timeout) {
- err = -ETIMEDOUT;
- goto out;
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ bool enable;
+
+ /* calculate the enable flag from the passed gpio_level */
+ enable = (spi->mode & SPI_CS_HIGH) ? gpio_level : !gpio_level;
+
+ /* set flags for "reverse" polarity in the registers */
+ if (spi->mode & SPI_CS_HIGH) {
+ /* set the correct CS-bits */
+ cs |= BCM2835_SPI_CS_CSPOL;
+ cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select;
+ } else {
+ /* clean the CS-bits */
+ cs &= ~BCM2835_SPI_CS_CSPOL;
+ cs &= ~(BCM2835_SPI_CS_CSPOL0 << spi->chip_select);
+ }
+
+ /* select the correct chip_select depending on disabled/enabled */
+ if (enable) {
+ /* set cs correctly */
+ if (spi->mode & SPI_NO_CS) {
+ /* use the "undefined" chip-select */
+ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+ } else {
+ /* set the chip select */
+ cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01);
+ cs |= spi->chip_select;
}
+ } else {
+ /* disable CSPOL which puts HW-CS into deselected state */
+ cs &= ~BCM2835_SPI_CS_CSPOL;
+ /* use the "undefined" chip-select as precaution */
+ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+ }
- cs_change = tfr->cs_change ||
- list_is_last(&tfr->transfer_list, &mesg->transfers);
+ /* finally set the calculated flags in SPI_CS */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+}
- err = bcm2835_spi_finish_transfer(spi, tfr, cs_change);
- if (err)
- goto out;
+static int chip_match_name(struct gpio_chip *chip, void *data)
+{
+ return !strcmp(chip->label, data);
+}
- mesg->actual_length += (tfr->len - bs->len);
+static int bcm2835_spi_setup(struct spi_device *spi)
+{
+ int err;
+ struct gpio_chip *chip;
+ /*
+ * sanity checking the native-chipselects
+ */
+ if (spi->mode & SPI_NO_CS)
+ return 0;
+ if (gpio_is_valid(spi->cs_gpio))
+ return 0;
+ if (spi->chip_select > 1) {
+ /* error in the case of native CS requested with CS > 1
+ * officially there is a CS2, but it is not documented
+ * which GPIO is connected with that...
+ */
+ dev_err(&spi->dev,
+ "setup: only two native chip-selects are supported\n");
+ return -EINVAL;
}
+ /* now translate native cs to GPIO */
-out:
- /* Clear FIFOs, and disable the HW block */
- bcm2835_wr(bs, BCM2835_SPI_CS,
- BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
- mesg->status = err;
- spi_finalize_current_message(master);
+ /* get the gpio chip for the base */
+ chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
+ if (!chip)
+ return 0;
+
+ /* and calculate the real CS */
+ spi->cs_gpio = chip->base + 8 - spi->chip_select;
+
+ /* and set up the "mode" and level */
+ dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n",
+ spi->chip_select, spi->cs_gpio);
+
+ /* set up GPIO as output and pull to the correct level */
+ err = gpio_direction_output(spi->cs_gpio,
+ (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+ if (err) {
+ dev_err(&spi->dev,
+ "could not set CS%i gpio %i as output: %i",
+ spi->chip_select, spi->cs_gpio, err);
+ return err;
+ }
+ /* the implementation of pinctrl-bcm2835 currently does not
+ * set the GPIO value when using gpio_direction_output
+ * so we are setting it here explicitly
+ */
+ gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
return 0;
}
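The translation performed above maps the two usable native chip-selects onto the SoC's SPI0 pads (assuming pinctrl-bcm2835 registers its gpio_chip with base 0, the usual case):

/*
 * cs_gpio = chip->base + 8 - chip_select
 *   native CS0 -> GPIO 8 (SPI0_CE0_N)
 *   native CS1 -> GPIO 7 (SPI0_CE1_N)
 */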
@@ -312,13 +735,14 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
master->mode_bits = BCM2835_SPI_MODE_BITS;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->num_chipselect = 3;
- master->transfer_one_message = bcm2835_spi_transfer_one;
+ master->setup = bcm2835_spi_setup;
+ master->set_cs = bcm2835_spi_set_cs;
+ master->transfer_one = bcm2835_spi_transfer_one;
+ master->handle_err = bcm2835_spi_handle_err;
master->dev.of_node = pdev->dev.of_node;
bs = spi_master_get_devdata(master);
- init_completion(&bs->done);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bs->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(bs->regs)) {
@@ -343,13 +767,15 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
clk_prepare_enable(bs->clk);
err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
- dev_name(&pdev->dev), master);
+ dev_name(&pdev->dev), master);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_clk_disable;
}
- /* initialise the hardware */
+ bcm2835_dma_init(master, &pdev->dev);
+
+ /* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
@@ -379,6 +805,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(bs->clk);
+ bcm2835_dma_release(master);
+
return 0;
}
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index 3fb91c81015a..1520554978a3 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -44,7 +44,7 @@ static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms)
u32 tmp;
/* SPE bit has to be 0 before we read MSPI STATUS */
- deadline = jiffies + BCM53XXSPI_SPE_TIMEOUT_MS * HZ / 1000;
+ deadline = jiffies + msecs_to_jiffies(BCM53XXSPI_SPE_TIMEOUT_MS);
do {
tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
if (!(tmp & B53SPI_MSPI_SPCR2_SPE))
@@ -56,7 +56,7 @@ static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms)
goto spi_timeout;
/* Check status */
- deadline = jiffies + timeout_ms * HZ / 1000;
+ deadline = jiffies + msecs_to_jiffies(timeout_ms);
do {
tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_MSPI_STATUS);
if (tmp & B53SPI_MSPI_MSPI_STATUS_SPIF) {
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 37079937d2f7..a3d65b4f4944 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -559,7 +559,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
struct spi_transfer *previous = NULL;
struct bfin_spi_slave_data *chip = NULL;
unsigned int bits_per_word;
- u16 cr, cr_width, dma_width, dma_config;
+ u16 cr, cr_width = 0, dma_width, dma_config;
u32 tranf_success = 1;
u8 full_duplex = 0;
@@ -648,7 +648,6 @@ static void bfin_spi_pump_transfers(unsigned long data)
} else if (bits_per_word == 8) {
drv_data->n_bytes = bits_per_word/8;
drv_data->len = transfer->len;
- cr_width = 0;
drv_data->ops = &bfin_bfin_spi_transfer_ops_u8;
}
cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index c616e41521be..06b34e5bcfa3 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -49,12 +49,17 @@ bitbang_txrx_be_cpha0(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+ bool oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (word <<= (32 - bits); likely(bits); bits--) {
/* setup MSB (to slave) on trailing edge */
- if ((flags & SPI_MASTER_NO_TX) == 0)
- setmosi(spi, word & (1 << 31));
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & (1 << 31)) != oldbit) {
+ setmosi(spi, word & (1 << 31));
+ oldbit = word & (1 << 31);
+ }
+ }
spidelay(nsecs); /* T(setup) */
setsck(spi, !cpol);
@@ -76,13 +81,18 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+ bool oldbit = !(word & (1 << 31));
/* clock starts at inactive polarity */
for (word <<= (32 - bits); likely(bits); bits--) {
/* setup MSB (to slave) on leading edge */
setsck(spi, !cpol);
- if ((flags & SPI_MASTER_NO_TX) == 0)
- setmosi(spi, word & (1 << 31));
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & (1 << 31)) != oldbit) {
+ setmosi(spi, word & (1 << 31));
+ oldbit = word & (1 << 31);
+ }
+ }
spidelay(nsecs); /* T(setup) */
setsck(spi, cpol);
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 5ef6638d5e8a..840a4984d365 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -180,7 +180,6 @@ int spi_bitbang_setup(struct spi_device *spi)
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
- int retval;
unsigned long flags;
bitbang = spi_master_get_devdata(spi->master);
@@ -197,9 +196,11 @@ int spi_bitbang_setup(struct spi_device *spi)
if (!cs->txrx_word)
return -EINVAL;
- retval = bitbang->setup_transfer(spi, NULL);
- if (retval < 0)
- return retval;
+ if (bitbang->setup_transfer) {
+ int retval = bitbang->setup_transfer(spi, NULL);
+ if (retval < 0)
+ return retval;
+ }
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
@@ -295,9 +296,11 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
/* init (-1) or override (1) transfer params */
if (do_setup != 0) {
- status = bitbang->setup_transfer(spi, t);
- if (status < 0)
- break;
+ if (bitbang->setup_transfer) {
+ status = bitbang->setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ }
if (do_setup == -1)
do_setup = 0;
}
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 5e991065f5b0..987afebea093 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -265,7 +265,7 @@ static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);
- if (ret < 3 || ret > 256)
+ if (ret < 1 || ret > 256)
return -EINVAL;
return ret - 1;
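A worked example of the relaxed lower bound above (illustrative numbers):

/*
 * clk = 150 MHz, max_speed_hz = 75 MHz:
 *   ret = DIV_ROUND_UP(150000000, 75000000) = 2
 * the old "ret < 3" test rejected this with -EINVAL; with "ret < 1" the
 * prescale field ret - 1 = 1 is returned and the bus clocks at clk / 2.
 */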
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index a0197fd4e95c..bb1052e748f2 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -23,29 +23,31 @@
#include "spi-dw.h"
#ifdef CONFIG_SPI_DW_MID_DMA
-#include <linux/intel_mid_dma.h>
#include <linux/pci.h>
+#include <linux/platform_data/dma-dw.h>
#define RX_BUSY 0
#define TX_BUSY 1
-struct mid_dma {
- struct intel_mid_dma_slave dmas_tx;
- struct intel_mid_dma_slave dmas_rx;
-};
+static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 };
+static struct dw_dma_slave mid_dma_rx = { .src_id = 0 };
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
- struct dw_spi *dws = param;
+ struct dw_dma_slave *s = param;
+
+ if (s->dma_dev != chan->device->dev)
+ return false;
- return dws->dma_dev == chan->device->dev;
+ chan->private = s;
+ return true;
}
static int mid_spi_dma_init(struct dw_spi *dws)
{
- struct mid_dma *dw_dma = dws->dma_priv;
struct pci_dev *dma_dev;
- struct intel_mid_dma_slave *rxs, *txs;
+ struct dw_dma_slave *tx = dws->dma_tx;
+ struct dw_dma_slave *rx = dws->dma_rx;
dma_cap_mask_t mask;
/*
@@ -56,28 +58,22 @@ static int mid_spi_dma_init(struct dw_spi *dws)
if (!dma_dev)
return -ENODEV;
- dws->dma_dev = &dma_dev->dev;
-
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* 1. Init rx channel */
- dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
+ rx->dma_dev = &dma_dev->dev;
+ dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx);
if (!dws->rxchan)
goto err_exit;
- rxs = &dw_dma->dmas_rx;
- rxs->hs_mode = LNW_DMA_HW_HS;
- rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
- dws->rxchan->private = rxs;
+ dws->master->dma_rx = dws->rxchan;
/* 2. Init tx channel */
- dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
+ tx->dma_dev = &dma_dev->dev;
+ dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx);
if (!dws->txchan)
goto free_rxchan;
- txs = &dw_dma->dmas_tx;
- txs->hs_mode = LNW_DMA_HW_HS;
- txs->cfg_mode = LNW_DMA_MEM_TO_PER;
- dws->txchan->private = txs;
+ dws->master->dma_tx = dws->txchan;
dws->dma_inited = 1;
return 0;
@@ -100,6 +96,42 @@ static void mid_spi_dma_exit(struct dw_spi *dws)
dma_release_channel(dws->rxchan);
}
+static irqreturn_t dma_transfer(struct dw_spi *dws)
+{
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR);
+
+ if (!irq_status)
+ return IRQ_NONE;
+
+ dw_readl(dws, DW_SPI_ICR);
+ spi_reset_chip(dws);
+
+ dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
+ dws->master->cur_msg->status = -EIO;
+ spi_finalize_current_transfer(dws->master);
+ return IRQ_HANDLED;
+}
+
+static bool mid_spi_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct dw_spi *dws = spi_master_get_devdata(master);
+
+ if (!dws->dma_inited)
+ return false;
+
+ return xfer->len > dws->fifo_len;
+}
+
+static enum dma_slave_buswidth convert_dma_width(u32 dma_width) {
+ if (dma_width == 1)
+ return DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (dma_width == 2)
+ return DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+ return DMA_SLAVE_BUSWIDTH_UNDEFINED;
+}
+
/*
* dws->dma_chan_busy is set before the dma transfer starts, callback for tx
* channel will clear a corresponding bit.
@@ -108,37 +140,38 @@ static void dw_spi_dma_tx_done(void *arg)
{
struct dw_spi *dws = arg;
- if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
+ clear_bit(TX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(RX_BUSY, &dws->dma_chan_busy))
return;
- dw_spi_xfer_done(dws);
+ spi_finalize_current_transfer(dws->master);
}
-static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
+ struct spi_transfer *xfer)
{
struct dma_slave_config txconf;
struct dma_async_tx_descriptor *txdesc;
- if (!dws->tx_dma)
+ if (!xfer->tx_buf)
return NULL;
txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
- txconf.dst_maxburst = LNW_DMA_MSIZE_16;
+ txconf.dst_maxburst = 16;
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- txconf.dst_addr_width = dws->dma_width;
+ txconf.dst_addr_width = convert_dma_width(dws->dma_width);
txconf.device_fc = false;
dmaengine_slave_config(dws->txchan, &txconf);
- memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
- dws->tx_sgl.dma_address = dws->tx_dma;
- dws->tx_sgl.length = dws->len;
-
txdesc = dmaengine_prep_slave_sg(dws->txchan,
- &dws->tx_sgl,
- 1,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ return NULL;
+
txdesc->callback = dw_spi_dma_tx_done;
txdesc->callback_param = dws;
@@ -153,74 +186,74 @@ static void dw_spi_dma_rx_done(void *arg)
{
struct dw_spi *dws = arg;
- if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
+ clear_bit(RX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(TX_BUSY, &dws->dma_chan_busy))
return;
- dw_spi_xfer_done(dws);
+ spi_finalize_current_transfer(dws->master);
}
-static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
+ struct spi_transfer *xfer)
{
struct dma_slave_config rxconf;
struct dma_async_tx_descriptor *rxdesc;
- if (!dws->rx_dma)
+ if (!xfer->rx_buf)
return NULL;
rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
- rxconf.src_maxburst = LNW_DMA_MSIZE_16;
+ rxconf.src_maxburst = 16;
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- rxconf.src_addr_width = dws->dma_width;
+ rxconf.src_addr_width = convert_dma_width(dws->dma_width);
rxconf.device_fc = false;
dmaengine_slave_config(dws->rxchan, &rxconf);
- memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
- dws->rx_sgl.dma_address = dws->rx_dma;
- dws->rx_sgl.length = dws->len;
-
rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
- &dws->rx_sgl,
- 1,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ return NULL;
+
rxdesc->callback = dw_spi_dma_rx_done;
rxdesc->callback_param = dws;
return rxdesc;
}
-static void dw_spi_dma_setup(struct dw_spi *dws)
+static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
u16 dma_ctrl = 0;
- spi_enable_chip(dws, 0);
-
- dw_writew(dws, DW_SPI_DMARDLR, 0xf);
- dw_writew(dws, DW_SPI_DMATDLR, 0x10);
+ dw_writel(dws, DW_SPI_DMARDLR, 0xf);
+ dw_writel(dws, DW_SPI_DMATDLR, 0x10);
- if (dws->tx_dma)
+ if (xfer->tx_buf)
dma_ctrl |= SPI_DMA_TDMAE;
- if (dws->rx_dma)
+ if (xfer->rx_buf)
dma_ctrl |= SPI_DMA_RDMAE;
- dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
+ dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
+
+ /* Set the interrupt mask */
+ spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);
+
+ dws->transfer_handler = dma_transfer;
- spi_enable_chip(dws, 1);
+ return 0;
}
-static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
struct dma_async_tx_descriptor *txdesc, *rxdesc;
- /* 1. setup DMA related registers */
- if (cs_change)
- dw_spi_dma_setup(dws);
-
- /* 2. Prepare the TX dma transfer */
- txdesc = dw_spi_dma_prepare_tx(dws);
+ /* Prepare the TX dma transfer */
+ txdesc = dw_spi_dma_prepare_tx(dws, xfer);
- /* 3. Prepare the RX dma transfer */
- rxdesc = dw_spi_dma_prepare_rx(dws);
+ /* Prepare the RX dma transfer */
+ rxdesc = dw_spi_dma_prepare_rx(dws, xfer);
/* rx must be started before tx due to spi instinct */
if (rxdesc) {
@@ -238,10 +271,25 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
return 0;
}
+static void mid_spi_dma_stop(struct dw_spi *dws)
+{
+ if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
+ dmaengine_terminate_all(dws->txchan);
+ clear_bit(TX_BUSY, &dws->dma_chan_busy);
+ }
+ if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
+ dmaengine_terminate_all(dws->rxchan);
+ clear_bit(RX_BUSY, &dws->dma_chan_busy);
+ }
+}
+
static struct dw_spi_dma_ops mid_dma_ops = {
.dma_init = mid_spi_dma_init,
.dma_exit = mid_spi_dma_exit,
+ .dma_setup = mid_spi_dma_setup,
+ .can_dma = mid_spi_can_dma,
.dma_transfer = mid_spi_dma_transfer,
+ .dma_stop = mid_spi_dma_stop,
};
#endif
@@ -274,9 +322,8 @@ int dw_spi_mid_init(struct dw_spi *dws)
iounmap(clk_reg);
#ifdef CONFIG_SPI_DW_MID_DMA
- dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
- if (!dws->dma_priv)
- return -ENOMEM;
+ dws->dma_tx = &mid_dma_tx;
+ dws->dma_rx = &mid_dma_rx;
dws->dma_ops = &mid_dma_ops;
#endif
return 0;
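The hunks above replace the driver-private scatterlist bookkeeping with the generic hooks the SPI core offers. As a reading aid, here is a minimal sketch (not part of the patch; names taken from the hunks above, error handling trimmed) of the order in which the new callbacks run for one transfer:

/*
 * Illustrative only: how the callbacks registered in mid_dma_ops fit
 * together once dw_spi_add_host() has wired up dws->dma_ops.
 */
static int dw_spi_mid_dma_flow_sketch(struct spi_master *master,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_master_get_devdata(master);
	int ret;

	/* mid_spi_can_dma(): only transfers larger than the FIFO use DMA */
	if (!master->can_dma(master, spi, xfer))
		return -EAGAIN;		/* caller falls back to the PIO path */

	/*
	 * mid_spi_dma_setup(): program DMARDLR/DMATDLR and DMACR, unmask
	 * the FIFO error interrupts and install dma_transfer() as the
	 * interrupt handler for this transfer.
	 */
	ret = dws->dma_ops->dma_setup(dws, xfer);
	if (ret < 0)
		return ret;

	/* mid_spi_dma_transfer(): submit the RX descriptor first, then TX */
	return dws->dma_ops->dma_transfer(dws, xfer);
}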
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 5ba331047cbe..6d331e0db331 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -36,13 +36,13 @@ struct spi_pci_desc {
static struct spi_pci_desc spi_pci_mid_desc_1 = {
.setup = dw_spi_mid_init,
- .num_cs = 32,
+ .num_cs = 5,
.bus_num = 0,
};
static struct spi_pci_desc spi_pci_mid_desc_2 = {
.setup = dw_spi_mid_init,
- .num_cs = 4,
+ .num_cs = 2,
.bus_num = 1,
};
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 5a97a62b298a..8d67d03c71eb 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -28,11 +28,6 @@
#include <linux/debugfs.h>
#endif
-#define START_STATE ((void *)0)
-#define RUNNING_STATE ((void *)1)
-#define DONE_STATE ((void *)2)
-#define ERROR_STATE ((void *)-1)
-
/* Slave spi_dev related */
struct chip_data {
u16 cr0;
@@ -143,13 +138,26 @@ static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
}
#endif /* CONFIG_DEBUG_FS */
+static void dw_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ /* Chip select logic is inverted from spi_set_cs() */
+ if (chip && chip->cs_control)
+ chip->cs_control(!enable);
+
+ if (!enable)
+ dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
+}
+
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
u32 tx_left, tx_room, rxtx_gap;
tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
- tx_room = dws->fifo_len - dw_readw(dws, DW_SPI_TXFLR);
+ tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);
/*
* Another concern is about the tx/rx mismatch, we
@@ -170,7 +178,7 @@ static inline u32 rx_max(struct dw_spi *dws)
{
u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
- return min_t(u32, rx_left, dw_readw(dws, DW_SPI_RXFLR));
+ return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
}
static void dw_writer(struct dw_spi *dws)
@@ -186,7 +194,7 @@ static void dw_writer(struct dw_spi *dws)
else
txw = *(u16 *)(dws->tx);
}
- dw_writew(dws, DW_SPI_DR, txw);
+ dw_writel(dws, DW_SPI_DR, txw);
dws->tx += dws->n_bytes;
}
}
@@ -197,7 +205,7 @@ static void dw_reader(struct dw_spi *dws)
u16 rxw;
while (max--) {
- rxw = dw_readw(dws, DW_SPI_DR);
+ rxw = dw_readl(dws, DW_SPI_DR);
/* Care rx only if the transfer's original "rx" is not null */
if (dws->rx_end - dws->len) {
if (dws->n_bytes == 1)
@@ -209,103 +217,22 @@ static void dw_reader(struct dw_spi *dws)
}
}
-static void *next_transfer(struct dw_spi *dws)
-{
- struct spi_message *msg = dws->cur_msg;
- struct spi_transfer *trans = dws->cur_transfer;
-
- /* Move to next transfer */
- if (trans->transfer_list.next != &msg->transfers) {
- dws->cur_transfer =
- list_entry(trans->transfer_list.next,
- struct spi_transfer,
- transfer_list);
- return RUNNING_STATE;
- }
-
- return DONE_STATE;
-}
-
-/*
- * Note: first step is the protocol driver prepares
- * a dma-capable memory, and this func just need translate
- * the virt addr to physical
- */
-static int map_dma_buffers(struct dw_spi *dws)
-{
- if (!dws->cur_msg->is_dma_mapped
- || !dws->dma_inited
- || !dws->cur_chip->enable_dma
- || !dws->dma_ops)
- return 0;
-
- if (dws->cur_transfer->tx_dma)
- dws->tx_dma = dws->cur_transfer->tx_dma;
-
- if (dws->cur_transfer->rx_dma)
- dws->rx_dma = dws->cur_transfer->rx_dma;
-
- return 1;
-}
-
-/* Caller already set message->status; dma and pio irqs are blocked */
-static void giveback(struct dw_spi *dws)
-{
- struct spi_transfer *last_transfer;
- struct spi_message *msg;
-
- msg = dws->cur_msg;
- dws->cur_msg = NULL;
- dws->cur_transfer = NULL;
- dws->prev_chip = dws->cur_chip;
- dws->cur_chip = NULL;
- dws->dma_mapped = 0;
-
- last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
- transfer_list);
-
- if (!last_transfer->cs_change)
- spi_chip_sel(dws, msg->spi, 0);
-
- spi_finalize_current_message(dws->master);
-}
-
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
- /* Stop the hw */
- spi_enable_chip(dws, 0);
+ spi_reset_chip(dws);
dev_err(&dws->master->dev, "%s\n", msg);
- dws->cur_msg->state = ERROR_STATE;
- tasklet_schedule(&dws->pump_transfers);
+ dws->master->cur_msg->status = -EIO;
+ spi_finalize_current_transfer(dws->master);
}
-void dw_spi_xfer_done(struct dw_spi *dws)
-{
- /* Update total byte transferred return count actual bytes read */
- dws->cur_msg->actual_length += dws->len;
-
- /* Move to next transfer */
- dws->cur_msg->state = next_transfer(dws);
-
- /* Handle end of message */
- if (dws->cur_msg->state == DONE_STATE) {
- dws->cur_msg->status = 0;
- giveback(dws);
- } else
- tasklet_schedule(&dws->pump_transfers);
-}
-EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
-
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
- u16 irq_status = dw_readw(dws, DW_SPI_ISR);
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR);
/* Error handling */
if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
- dw_readw(dws, DW_SPI_TXOICR);
- dw_readw(dws, DW_SPI_RXOICR);
- dw_readw(dws, DW_SPI_RXUICR);
+ dw_readl(dws, DW_SPI_ICR);
int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
return IRQ_HANDLED;
}
@@ -313,7 +240,7 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
dw_reader(dws);
if (dws->rx_end == dws->rx) {
spi_mask_intr(dws, SPI_INT_TXEI);
- dw_spi_xfer_done(dws);
+ spi_finalize_current_transfer(dws->master);
return IRQ_HANDLED;
}
if (irq_status & SPI_INT_TXEI) {
@@ -328,13 +255,14 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
- struct dw_spi *dws = dev_id;
- u16 irq_status = dw_readw(dws, DW_SPI_ISR) & 0x3f;
+ struct spi_master *master = dev_id;
+ struct dw_spi *dws = spi_master_get_devdata(master);
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;
if (!irq_status)
return IRQ_NONE;
- if (!dws->cur_msg) {
+ if (!master->cur_msg) {
spi_mask_intr(dws, SPI_INT_TXEI);
return IRQ_HANDLED;
}
@@ -343,7 +271,7 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id)
}
/* Must be called inside pump_transfers() */
-static void poll_transfer(struct dw_spi *dws)
+static int poll_transfer(struct dw_spi *dws)
{
do {
dw_writer(dws);
@@ -351,64 +279,32 @@ static void poll_transfer(struct dw_spi *dws)
cpu_relax();
} while (dws->rx_end > dws->rx);
- dw_spi_xfer_done(dws);
+ return 0;
}
-static void pump_transfers(unsigned long data)
+static int dw_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi, struct spi_transfer *transfer)
{
- struct dw_spi *dws = (struct dw_spi *)data;
- struct spi_message *message = NULL;
- struct spi_transfer *transfer = NULL;
- struct spi_transfer *previous = NULL;
- struct spi_device *spi = NULL;
- struct chip_data *chip = NULL;
- u8 bits = 0;
+ struct dw_spi *dws = spi_master_get_devdata(master);
+ struct chip_data *chip = spi_get_ctldata(spi);
u8 imask = 0;
- u8 cs_change = 0;
- u16 txint_level = 0;
+ u16 txlevel = 0;
u16 clk_div = 0;
u32 speed = 0;
u32 cr0 = 0;
+ int ret;
- /* Get current state information */
- message = dws->cur_msg;
- transfer = dws->cur_transfer;
- chip = dws->cur_chip;
- spi = message->spi;
-
- if (message->state == ERROR_STATE) {
- message->status = -EIO;
- goto early_exit;
- }
-
- /* Handle end of message */
- if (message->state == DONE_STATE) {
- message->status = 0;
- goto early_exit;
- }
-
- /* Delay if requested at end of transfer */
- if (message->state == RUNNING_STATE) {
- previous = list_entry(transfer->transfer_list.prev,
- struct spi_transfer,
- transfer_list);
- if (previous->delay_usecs)
- udelay(previous->delay_usecs);
- }
-
+ dws->dma_mapped = 0;
dws->n_bytes = chip->n_bytes;
dws->dma_width = chip->dma_width;
- dws->cs_control = chip->cs_control;
- dws->rx_dma = transfer->rx_dma;
- dws->tx_dma = transfer->tx_dma;
dws->tx = (void *)transfer->tx_buf;
dws->tx_end = dws->tx + transfer->len;
dws->rx = transfer->rx_buf;
dws->rx_end = dws->rx + transfer->len;
- dws->len = dws->cur_transfer->len;
- if (chip != dws->prev_chip)
- cs_change = 1;
+ dws->len = transfer->len;
+
+ spi_enable_chip(dws, 0);
cr0 = chip->cr0;
@@ -416,32 +312,37 @@ static void pump_transfers(unsigned long data)
if (transfer->speed_hz) {
speed = chip->speed_hz;
- if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
+ if ((transfer->speed_hz != speed) || !chip->clk_div) {
speed = transfer->speed_hz;
/* clk_div doesn't support odd numbers */
- clk_div = dws->max_freq / speed;
- clk_div = (clk_div + 1) & 0xfffe;
+ clk_div = (dws->max_freq / speed + 1) & 0xfffe;
chip->speed_hz = speed;
chip->clk_div = clk_div;
+
+ spi_set_clk(dws, chip->clk_div);
}
}
if (transfer->bits_per_word) {
- bits = transfer->bits_per_word;
- dws->n_bytes = dws->dma_width = bits >> 3;
- cr0 = (bits - 1)
+ if (transfer->bits_per_word == 8) {
+ dws->n_bytes = 1;
+ dws->dma_width = 1;
+ } else if (transfer->bits_per_word == 16) {
+ dws->n_bytes = 2;
+ dws->dma_width = 2;
+ }
+ cr0 = (transfer->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
| (spi->mode << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
}
- message->state = RUNNING_STATE;
/*
* Adjust transfer mode if necessary. Requires platform dependent
* chipselect mechanism.
*/
- if (dws->cs_control) {
+ if (chip->cs_control) {
if (dws->rx && dws->tx)
chip->tmode = SPI_TMOD_TR;
else if (dws->rx)
@@ -453,80 +354,60 @@ static void pump_transfers(unsigned long data)
cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
}
+ dw_writel(dws, DW_SPI_CTRL0, cr0);
+
/* Check if current transfer is a DMA transaction */
- dws->dma_mapped = map_dma_buffers(dws);
+ if (master->can_dma && master->can_dma(master, spi, transfer))
+ dws->dma_mapped = master->cur_msg_mapped;
+
+ /* For poll mode just disable all interrupts */
+ spi_mask_intr(dws, 0xff);
/*
* Interrupt mode
* we only need to set the TXEI IRQ, as TX/RX always happen synchronously
*/
- if (!dws->dma_mapped && !chip->poll_mode) {
- int templen = dws->len / dws->n_bytes;
-
- txint_level = dws->fifo_len / 2;
- txint_level = (templen > txint_level) ? txint_level : templen;
+ if (dws->dma_mapped) {
+ ret = dws->dma_ops->dma_setup(dws, transfer);
+ if (ret < 0) {
+ spi_enable_chip(dws, 1);
+ return ret;
+ }
+ } else if (!chip->poll_mode) {
+ txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
+ dw_writel(dws, DW_SPI_TXFLTR, txlevel);
+ /* Set the interrupt mask */
imask |= SPI_INT_TXEI | SPI_INT_TXOI |
SPI_INT_RXUI | SPI_INT_RXOI;
+ spi_umask_intr(dws, imask);
+
dws->transfer_handler = interrupt_transfer;
}
- /*
- * Reprogram registers only if
- * 1. chip select changes
- * 2. clk_div is changed
- * 3. control value changes
- */
- if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) {
- spi_enable_chip(dws, 0);
-
- if (dw_readw(dws, DW_SPI_CTRL0) != cr0)
- dw_writew(dws, DW_SPI_CTRL0, cr0);
-
- spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
- spi_chip_sel(dws, spi, 1);
-
- /* Set the interrupt mask, for poll mode just disable all int */
- spi_mask_intr(dws, 0xff);
- if (imask)
- spi_umask_intr(dws, imask);
- if (txint_level)
- dw_writew(dws, DW_SPI_TXFLTR, txint_level);
+ spi_enable_chip(dws, 1);
- spi_enable_chip(dws, 1);
- if (cs_change)
- dws->prev_chip = chip;
+ if (dws->dma_mapped) {
+ ret = dws->dma_ops->dma_transfer(dws, transfer);
+ if (ret < 0)
+ return ret;
}
- if (dws->dma_mapped)
- dws->dma_ops->dma_transfer(dws, cs_change);
-
if (chip->poll_mode)
- poll_transfer(dws);
-
- return;
+ return poll_transfer(dws);
-early_exit:
- giveback(dws);
+ return 1;
}
-static int dw_spi_transfer_one_message(struct spi_master *master,
+static void dw_spi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
struct dw_spi *dws = spi_master_get_devdata(master);
- dws->cur_msg = msg;
- /* Initial message state */
- dws->cur_msg->state = START_STATE;
- dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
- struct spi_transfer,
- transfer_list);
- dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
-
- /* Launch transfers */
- tasklet_schedule(&dws->pump_transfers);
+ if (dws->dma_mapped)
+ dws->dma_ops->dma_stop(dws);
- return 0;
+ spi_reset_chip(dws);
}
/* This may be called twice for each spi dev */
@@ -561,8 +442,6 @@ static int dw_spi_setup(struct spi_device *spi)
chip->rx_threshold = 0;
chip->tx_threshold = 0;
-
- chip->enable_dma = chip_info->enable_dma;
}
if (spi->bits_per_word == 8) {
@@ -610,9 +489,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
- spi_enable_chip(dws, 0);
- spi_mask_intr(dws, 0xff);
- spi_enable_chip(dws, 1);
+ spi_reset_chip(dws);
/*
* Try to detect the FIFO depth if not set by interface driver,
@@ -621,14 +498,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
if (!dws->fifo_len) {
u32 fifo;
- for (fifo = 2; fifo <= 256; fifo++) {
- dw_writew(dws, DW_SPI_TXFLTR, fifo);
- if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
+ for (fifo = 1; fifo < 256; fifo++) {
+ dw_writel(dws, DW_SPI_TXFLTR, fifo);
+ if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
break;
}
- dw_writew(dws, DW_SPI_TXFLTR, 0);
+ dw_writel(dws, DW_SPI_TXFLTR, 0);
- dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
+ dws->fifo_len = (fifo == 1) ? 0 : fifo;
dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
}
}
@@ -646,13 +523,12 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->master = master;
dws->type = SSI_MOTO_SPI;
- dws->prev_chip = NULL;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED,
- dws->name, dws);
+ dws->name, master);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
goto err_free_master;
@@ -664,7 +540,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->num_chipselect = dws->num_cs;
master->setup = dw_spi_setup;
master->cleanup = dw_spi_cleanup;
- master->transfer_one_message = dw_spi_transfer_one_message;
+ master->set_cs = dw_spi_set_cs;
+ master->transfer_one = dw_spi_transfer_one;
+ master->handle_err = dw_spi_handle_err;
master->max_speed_hz = dws->max_freq;
master->dev.of_node = dev->of_node;
@@ -676,11 +554,11 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
if (ret) {
dev_warn(dev, "DMA init failed\n");
dws->dma_inited = 0;
+ } else {
+ master->can_dma = dws->dma_ops->can_dma;
}
}
- tasklet_init(&dws->pump_transfers, pump_transfers, (unsigned long)dws);
-
spi_master_set_devdata(master, dws);
ret = devm_spi_register_master(dev, master);
if (ret) {
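One detail worth calling out in the transfer_one() hunk above is the clock divider: BAUDR only takes even dividers, so the quotient is forced even. A worked sketch of that arithmetic, using the same expression as the patch (the frequencies below are only an example):

#include <linux/types.h>

/* Minimal sketch, same arithmetic as dw_spi_transfer_one() above. */
static u16 dw_spi_even_clk_div(u32 max_freq, u32 speed_hz)
{
	/* force an even divider; odd quotients round up to the next even */
	return (max_freq / speed_hz + 1) & 0xfffe;
}

/*
 * With max_freq = 100 MHz:
 *   speed 25 MHz -> 100/25 = 4  -> (4 + 1)  & 0xfffe = 4   -> SCLK 25 MHz
 *   speed 30 MHz -> 100/30 = 3  -> (3 + 1)  & 0xfffe = 4   -> SCLK 25 MHz
 *   speed 10 MHz -> 100/10 = 10 -> (10 + 1) & 0xfffe = 10  -> SCLK 10 MHz
 */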
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 3d32be68c142..6c91391c1a4f 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -91,12 +91,15 @@ struct dw_spi;
struct dw_spi_dma_ops {
int (*dma_init)(struct dw_spi *dws);
void (*dma_exit)(struct dw_spi *dws);
- int (*dma_transfer)(struct dw_spi *dws, int cs_change);
+ int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer);
+ bool (*can_dma)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer);
+ int (*dma_transfer)(struct dw_spi *dws, struct spi_transfer *xfer);
+ void (*dma_stop)(struct dw_spi *dws);
};
struct dw_spi {
struct spi_master *master;
- struct spi_device *cur_dev;
enum dw_ssi_type type;
char name[16];
@@ -109,41 +112,26 @@ struct dw_spi {
u16 bus_num;
u16 num_cs; /* supported slave numbers */
- /* Message Transfer pump */
- struct tasklet_struct pump_transfers;
-
/* Current message transfer state info */
- struct spi_message *cur_msg;
- struct spi_transfer *cur_transfer;
- struct chip_data *cur_chip;
- struct chip_data *prev_chip;
size_t len;
void *tx;
void *tx_end;
void *rx;
void *rx_end;
int dma_mapped;
- dma_addr_t rx_dma;
- dma_addr_t tx_dma;
- size_t rx_map_len;
- size_t tx_map_len;
u8 n_bytes; /* current is a 1/2 bytes op */
- u8 max_bits_per_word; /* maxim is 16b */
u32 dma_width;
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
- void (*cs_control)(u32 command);
- /* Dma info */
+ /* DMA info */
int dma_inited;
struct dma_chan *txchan;
- struct scatterlist tx_sgl;
struct dma_chan *rxchan;
- struct scatterlist rx_sgl;
unsigned long dma_chan_busy;
- struct device *dma_dev;
dma_addr_t dma_addr; /* phy address of the Data register */
struct dw_spi_dma_ops *dma_ops;
- void *dma_priv; /* platform relate info */
+ void *dma_tx;
+ void *dma_rx;
/* Bus interface info */
void *priv;
@@ -162,16 +150,6 @@ static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
__raw_writel(val, dws->regs + offset);
}
-static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
-{
- return __raw_readw(dws->regs + offset);
-}
-
-static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
-{
- __raw_writew(val, dws->regs + offset);
-}
-
static inline void spi_enable_chip(struct dw_spi *dws, int enable)
{
dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
@@ -182,22 +160,6 @@ static inline void spi_set_clk(struct dw_spi *dws, u16 div)
dw_writel(dws, DW_SPI_BAUDR, div);
}
-static inline void spi_chip_sel(struct dw_spi *dws, struct spi_device *spi,
- int active)
-{
- u16 cs = spi->chip_select;
- int gpio_val = active ? (spi->mode & SPI_CS_HIGH) :
- !(spi->mode & SPI_CS_HIGH);
-
- if (dws->cs_control)
- dws->cs_control(active);
- if (gpio_is_valid(spi->cs_gpio))
- gpio_set_value(spi->cs_gpio, gpio_val);
-
- if (active)
- dw_writel(dws, DW_SPI_SER, 1 << cs);
-}
-
/* Disable IRQ bits */
static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
{
@@ -217,15 +179,26 @@ static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
}
/*
+ * This disables the SPI controller, masks all interrupts and re-enables
+ * the controller. Transmit and receive FIFO buffers are cleared when the
+ * device is disabled.
+ */
+static inline void spi_reset_chip(struct dw_spi *dws)
+{
+ spi_enable_chip(dws, 0);
+ spi_mask_intr(dws, 0xff);
+ spi_enable_chip(dws, 1);
+}
+
+/*
* Each SPI slave device to work with dw_api controller should
- * has such a structure claiming its working mode (PIO/DMA etc),
+ * have such a structure claiming its working mode (poll or PIO/DMA),
* which can be saved in the "controller_data" member of the
* struct spi_device.
*/
struct dw_spi_chip {
u8 poll_mode; /* 1 for controller polling mode */
u8 type; /* SPI/SSP/MicroWire */
- u8 enable_dma;
void (*cs_control)(u32 command);
};
@@ -233,7 +206,6 @@ extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
-extern void dw_spi_xfer_done(struct dw_spi *dws);
/* platform related setup */
extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */
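With enable_dma gone from struct dw_spi_chip, the per-slave knobs left are poll_mode, the frame type and an optional cs_control hook. A hypothetical board-file sketch (slave name and rate are made up, and it assumes the board code can see the definitions from spi-dw.h) of passing such a structure through spi_board_info.controller_data, as the comment above describes:

#include <linux/init.h>
#include <linux/spi/spi.h>
#include "spi-dw.h"	/* struct dw_spi_chip, SSI_MOTO_SPI (assumption) */

static struct dw_spi_chip example_dw_chip = {
	.poll_mode = 1,			/* busy-poll instead of TXEI interrupts */
	.type	   = SSI_MOTO_SPI,	/* Motorola SPI frame format */
	/* .cs_control left NULL: the controller's native SER line is used */
};

static struct spi_board_info example_board_info __initdata = {
	.modalias	 = "spidev",	/* hypothetical slave driver */
	.max_speed_hz	 = 10000000,
	.bus_num	 = 0,
	.chip_select	 = 0,
	.mode		 = SPI_MODE_0,
	.controller_data = &example_dw_chip,
};

Board setup code would then register it early with spi_register_board_info(&example_board_info, 1).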
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 9c46a3058743..896add8cfd3b 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -24,6 +24,7 @@
#include <linux/of_address.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
+#include <linux/platform_device.h>
#include "spi-fsl-cpm.h"
#include "spi-fsl-lib.h"
@@ -269,17 +270,6 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
if (mspi->flags & SPI_CPM2) {
pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
out_be16(spi_base, pram_ofs);
- } else {
- struct spi_pram __iomem *pram = spi_base;
- u16 rpbase = in_be16(&pram->rpbase);
-
- /* Microcode relocation patch applied? */
- if (rpbase) {
- pram_ofs = rpbase;
- } else {
- pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- out_be16(spi_base, pram_ofs);
- }
}
iounmap(spi_base);
@@ -292,7 +282,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
- unsigned long pram_ofs;
unsigned long bds_ofs;
if (!(mspi->flags & SPI_CPM_MODE))
@@ -319,8 +308,26 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
}
}
- pram_ofs = fsl_spi_cpm_get_pram(mspi);
- if (IS_ERR_VALUE(pram_ofs)) {
+ if (mspi->flags & SPI_CPM1) {
+ struct resource *res;
+ void *pram;
+
+ res = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM, 1);
+ pram = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pram))
+ mspi->pram = NULL;
+ else
+ mspi->pram = pram;
+ } else {
+ unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);
+
+ if (IS_ERR_VALUE(pram_ofs))
+ mspi->pram = NULL;
+ else
+ mspi->pram = cpm_muram_addr(pram_ofs);
+ }
+ if (mspi->pram == NULL) {
dev_err(dev, "can't allocate spi parameter ram\n");
goto err_pram;
}
@@ -346,8 +353,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
goto err_dummy_rx;
}
- mspi->pram = cpm_muram_addr(pram_ofs);
-
mspi->tx_bd = cpm_muram_addr(bds_ofs);
mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
@@ -375,7 +380,8 @@ err_dummy_rx:
err_dummy_tx:
cpm_muram_free(bds_ofs);
err_bds:
- cpm_muram_free(pram_ofs);
+ if (!(mspi->flags & SPI_CPM1))
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
err_pram:
fsl_spi_free_dummy_rx();
return -ENOMEM;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index d1a39249704a..86bcdd68c1fe 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -20,15 +20,18 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
+#include <linux/time.h>
#define DRIVER_NAME "fsl-dspi"
@@ -45,13 +48,14 @@
#define SPI_MCR_CLR_RXF (1 << 10)
#define SPI_TCR 0x08
+#define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)
#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
#define SPI_CTAR_CPOL(x) ((x) << 26)
#define SPI_CTAR_CPHA(x) ((x) << 25)
#define SPI_CTAR_LSBFE(x) ((x) << 24)
-#define SPI_CTAR_PCSSCR(x) (((x) & 0x00000003) << 22)
+#define SPI_CTAR_PCSSCK(x) (((x) & 0x00000003) << 22)
#define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20)
#define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18)
#define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16)
@@ -59,14 +63,17 @@
#define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8)
#define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4)
#define SPI_CTAR_BR(x) ((x) & 0x0000000f)
+#define SPI_CTAR_SCALE_BITS 0xf
#define SPI_CTAR0_SLAVE 0x0c
#define SPI_SR 0x2c
#define SPI_SR_EOQF 0x10000000
+#define SPI_SR_TCFQF 0x80000000
#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
+#define SPI_RSER_TCFQE 0x80000000
#define SPI_PUSHR 0x34
#define SPI_PUSHR_CONT (1 << 31)
@@ -99,12 +106,35 @@
#define SPI_CS_ASSERT 0x02
#define SPI_CS_DROP 0x04
+#define SPI_TCR_TCNT_MAX 0x10000
+
struct chip_data {
u32 mcr_val;
u32 ctar_val;
u16 void_write_data;
};
+enum dspi_trans_mode {
+ DSPI_EOQ_MODE = 0,
+ DSPI_TCFQ_MODE,
+};
+
+struct fsl_dspi_devtype_data {
+ enum dspi_trans_mode trans_mode;
+};
+
+static const struct fsl_dspi_devtype_data vf610_data = {
+ .trans_mode = DSPI_EOQ_MODE,
+};
+
+static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
+ .trans_mode = DSPI_TCFQ_MODE,
+};
+
+static const struct fsl_dspi_devtype_data ls2085a_data = {
+ .trans_mode = DSPI_TCFQ_MODE,
+};
+
struct fsl_dspi {
struct spi_master *master;
struct platform_device *pdev;
@@ -125,9 +155,12 @@ struct fsl_dspi {
u8 cs;
u16 void_write_data;
u32 cs_change;
+ struct fsl_dspi_devtype_data *devtype_data;
wait_queue_head_t waitq;
u32 waitflags;
+
+ u32 spi_tcnt;
};
static inline int is_double_byte_mode(struct fsl_dspi *dspi)
@@ -148,82 +181,122 @@ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
16, 32, 64, 128,
256, 512, 1024, 2048,
4096, 8192, 16384, 32768 };
- int temp, i = 0, j = 0;
+ int scale_needed, scale, minscale = INT_MAX;
+ int i, j;
+
+ scale_needed = clkrate / speed_hz;
+ if (clkrate % speed_hz)
+ scale_needed++;
+
+ for (i = 0; i < ARRAY_SIZE(brs); i++)
+ for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
+ scale = brs[i] * pbr_tbl[j];
+ if (scale >= scale_needed) {
+ if (scale < minscale) {
+ minscale = scale;
+ *br = i;
+ *pbr = j;
+ }
+ break;
+ }
+ }
- temp = clkrate / 2 / speed_hz;
+ if (minscale == INT_MAX) {
+ pr_warn("Cannot find valid baud rate, speed_hz is %d, clkrate is %ld; using the max prescaler value.\n",
+ speed_hz, clkrate);
+ *pbr = ARRAY_SIZE(pbr_tbl) - 1;
+ *br = ARRAY_SIZE(brs) - 1;
+ }
+}
- for (i = 0; i < ARRAY_SIZE(pbr_tbl); i++)
- for (j = 0; j < ARRAY_SIZE(brs); j++) {
- if (pbr_tbl[i] * brs[j] >= temp) {
- *pbr = i;
- *br = j;
- return;
+static void ns_delay_scale(char *psc, char *sc, int delay_ns,
+ unsigned long clkrate)
+{
+ int pscale_tbl[4] = {1, 3, 5, 7};
+ int scale_needed, scale, minscale = INT_MAX;
+ int i, j;
+ u32 remainder;
+
+ scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
+ &remainder);
+ if (remainder)
+ scale_needed++;
+
+ for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
+ for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
+ scale = pscale_tbl[i] * (2 << j);
+ if (scale >= scale_needed) {
+ if (scale < minscale) {
+ minscale = scale;
+ *psc = i;
+ *sc = j;
+ }
+ break;
}
}
- pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld\
- ,we use the max prescaler value.\n", speed_hz, clkrate);
- *pbr = ARRAY_SIZE(pbr_tbl) - 1;
- *br = ARRAY_SIZE(brs) - 1;
+ if (minscale == INT_MAX) {
+ pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
+ delay_ns, clkrate);
+ *psc = ARRAY_SIZE(pscale_tbl) - 1;
+ *sc = SPI_CTAR_SCALE_BITS;
+ }
}
-static int dspi_transfer_write(struct fsl_dspi *dspi)
+static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word)
{
- int tx_count = 0;
- int tx_word;
u16 d16;
- u8 d8;
- u32 dspi_pushr = 0;
- int first = 1;
- tx_word = is_double_byte_mode(dspi);
+ if (!(dspi->dataflags & TRAN_STATE_TX_VOID))
+ d16 = tx_word ? *(u16 *)dspi->tx : *(u8 *)dspi->tx;
+ else
+ d16 = dspi->void_write_data;
- /* If we are in word mode, but only have a single byte to transfer
- * then switch to byte mode temporarily. Will switch back at the
- * end of the transfer.
- */
- if (tx_word && (dspi->len == 1)) {
- dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
- SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
- tx_word = 0;
- }
+ dspi->tx += tx_word + 1;
+ dspi->len -= tx_word + 1;
- while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) {
- if (tx_word) {
- if (dspi->len == 1)
- break;
+ return SPI_PUSHR_TXDATA(d16) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(dspi->cs) |
+ SPI_PUSHR_CONT;
+}
- if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) {
- d16 = *(u16 *)dspi->tx;
- dspi->tx += 2;
- } else {
- d16 = dspi->void_write_data;
- }
+static void dspi_data_from_popr(struct fsl_dspi *dspi, int rx_word)
+{
+ u16 d;
+ unsigned int val;
- dspi_pushr = SPI_PUSHR_TXDATA(d16) |
- SPI_PUSHR_PCS(dspi->cs) |
- SPI_PUSHR_CTAS(dspi->cs) |
- SPI_PUSHR_CONT;
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
- dspi->len -= 2;
- } else {
- if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) {
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
+ rx_word ? (*(u16 *)dspi->rx = d) : (*(u8 *)dspi->rx = d);
- d8 = *(u8 *)dspi->tx;
- dspi->tx++;
- } else {
- d8 = (u8)dspi->void_write_data;
- }
+ dspi->rx += rx_word + 1;
+}
- dspi_pushr = SPI_PUSHR_TXDATA(d8) |
- SPI_PUSHR_PCS(dspi->cs) |
- SPI_PUSHR_CTAS(dspi->cs) |
- SPI_PUSHR_CONT;
+static int dspi_eoq_write(struct fsl_dspi *dspi)
+{
+ int tx_count = 0;
+ int tx_word;
+ u32 dspi_pushr = 0;
+
+ tx_word = is_double_byte_mode(dspi);
- dspi->len--;
+ while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) {
+ /* If we are in word mode but only have a single byte to transfer,
+ * switch to byte mode temporarily. Will switch back at the
+ * end of the transfer.
+ */
+ if (tx_word && (dspi->len == 1)) {
+ dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
+ tx_word = 0;
}
+ dspi_pushr = dspi_data_to_pushr(dspi, tx_word);
+
if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
/* last transfer in the transfer */
dspi_pushr |= SPI_PUSHR_EOQ;
@@ -232,11 +305,6 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
} else if (tx_word && (dspi->len == 1))
dspi_pushr |= SPI_PUSHR_EOQ;
- if (first) {
- first = 0;
- dspi_pushr |= SPI_PUSHR_CTCNT; /* clear counter */
- }
-
regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
tx_count++;
@@ -245,40 +313,55 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
return tx_count * (tx_word + 1);
}
-static int dspi_transfer_read(struct fsl_dspi *dspi)
+static int dspi_eoq_read(struct fsl_dspi *dspi)
{
int rx_count = 0;
int rx_word = is_double_byte_mode(dspi);
- u16 d;
while ((dspi->rx < dspi->rx_end)
&& (rx_count < DSPI_FIFO_SIZE)) {
- if (rx_word) {
- unsigned int val;
+ if (rx_word && (dspi->rx_end - dspi->rx) == 1)
+ rx_word = 0;
- if ((dspi->rx_end - dspi->rx) == 1)
- break;
+ dspi_data_from_popr(dspi, rx_word);
+ rx_count++;
+ }
- regmap_read(dspi->regmap, SPI_POPR, &val);
- d = SPI_POPR_RXDATA(val);
+ return rx_count;
+}
- if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
- *(u16 *)dspi->rx = d;
- dspi->rx += 2;
+static int dspi_tcfq_write(struct fsl_dspi *dspi)
+{
+ int tx_word;
+ u32 dspi_pushr = 0;
- } else {
- unsigned int val;
+ tx_word = is_double_byte_mode(dspi);
- regmap_read(dspi->regmap, SPI_POPR, &val);
- d = SPI_POPR_RXDATA(val);
- if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
- *(u8 *)dspi->rx = d;
- dspi->rx++;
- }
- rx_count++;
+ if (tx_word && (dspi->len == 1)) {
+ dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
+ tx_word = 0;
}
- return rx_count;
+ dspi_pushr = dspi_data_to_pushr(dspi, tx_word);
+
+ if ((dspi->cs_change) && (!dspi->len))
+ dspi_pushr &= ~SPI_PUSHR_CONT;
+
+ regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
+
+ return tx_word + 1;
+}
+
+static void dspi_tcfq_read(struct fsl_dspi *dspi)
+{
+ int rx_word = is_double_byte_mode(dspi);
+
+ if (rx_word && (dspi->rx_end - dspi->rx) == 1)
+ rx_word = 0;
+
+ dspi_data_from_popr(dspi, rx_word);
}
static int dspi_transfer_one_message(struct spi_master *master,
@@ -288,6 +371,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
struct spi_device *spi = message->spi;
struct spi_transfer *transfer;
int status = 0;
+ enum dspi_trans_mode trans_mode;
+ u32 spi_tcr;
+
+ regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
+ dspi->spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
+
message->actual_length = 0;
list_for_each_entry(transfer, &message->transfers, transfer_list) {
@@ -295,10 +384,10 @@ static int dspi_transfer_one_message(struct spi_master *master,
dspi->cur_msg = message;
dspi->cur_chip = spi_get_ctldata(spi);
dspi->cs = spi->chip_select;
+ dspi->cs_change = 0;
if (dspi->cur_transfer->transfer_list.next
== &dspi->cur_msg->transfers)
- transfer->cs_change = 1;
- dspi->cs_change = transfer->cs_change;
+ dspi->cs_change = 1;
dspi->void_write_data = dspi->cur_chip->void_write_data;
dspi->dataflags = 0;
@@ -324,8 +413,22 @@ static int dspi_transfer_one_message(struct spi_master *master,
regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
dspi->cur_chip->ctar_val);
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
- message->actual_length += dspi_transfer_write(dspi);
+ trans_mode = dspi->devtype_data->trans_mode;
+ switch (trans_mode) {
+ case DSPI_EOQ_MODE:
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
+ dspi_eoq_write(dspi);
+ break;
+ case DSPI_TCFQ_MODE:
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
+ dspi_tcfq_write(dspi);
+ break;
+ default:
+ dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
+ trans_mode);
+ status = -EINVAL;
+ goto out;
+ }
if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
@@ -335,6 +438,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
udelay(transfer->delay_usecs);
}
+out:
message->status = status;
spi_finalize_current_message(master);
@@ -345,7 +449,10 @@ static int dspi_setup(struct spi_device *spi)
{
struct chip_data *chip;
struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
- unsigned char br = 0, pbr = 0, fmsz = 0;
+ u32 cs_sck_delay = 0, sck_cs_delay = 0;
+ unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
+ unsigned char pasc = 0, asc = 0, fmsz = 0;
+ unsigned long clkrate;
if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
fmsz = spi->bits_per_word - 1;
@@ -362,18 +469,34 @@ static int dspi_setup(struct spi_device *spi)
return -ENOMEM;
}
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
+
chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
chip->void_write_data = 0;
- hz_to_spi_baud(&pbr, &br,
- spi->max_speed_hz, clk_get_rate(dspi->clk));
+ clkrate = clk_get_rate(dspi->clk);
+ hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
+
+ /* Set PCS to SCK delay scale values */
+ ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);
+
+ /* Set After SCK delay scale values */
+ ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
chip->ctar_val = SPI_CTAR_FMSZ(fmsz)
| SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
+ | SPI_CTAR_PCSSCK(pcssck)
+ | SPI_CTAR_CSSCK(cssck)
+ | SPI_CTAR_PASC(pasc)
+ | SPI_CTAR_ASC(asc)
| SPI_CTAR_PBR(pbr)
| SPI_CTAR_BR(br);
@@ -395,27 +518,89 @@ static void dspi_cleanup(struct spi_device *spi)
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
-
struct spi_message *msg = dspi->cur_msg;
+ enum dspi_trans_mode trans_mode;
+ u32 spi_sr, spi_tcr;
+ u32 spi_tcnt, tcnt_diff;
+ int tx_word;
- regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
- dspi_transfer_read(dspi);
-
- if (!dspi->len) {
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+ regmap_write(dspi->regmap, SPI_SR, spi_sr);
+
+
+ if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
+ tx_word = is_double_byte_mode(dspi);
+
+ regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
+ spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
+ /*
+ * The width of the SPI Transfer Counter in SPI_TCR is 16 bits,
+ * so the max counter value is 65535. When the counter reaches
+ * 65535 it wraps around and resets to zero.
+ * spi_tcnt may be less than dspi->spi_tcnt, which means the
+ * counter has already wrapped around.
+ * The SPI Transfer Counter counts transmitted frames; the size
+ * of a frame may be two bytes.
+ */
+ tcnt_diff = ((spi_tcnt + SPI_TCR_TCNT_MAX) - dspi->spi_tcnt)
+ % SPI_TCR_TCNT_MAX;
+ tcnt_diff *= (tx_word + 1);
if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
- regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
- SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
+ tcnt_diff--;
+
+ msg->actual_length += tcnt_diff;
+
+ dspi->spi_tcnt = spi_tcnt;
+
+ trans_mode = dspi->devtype_data->trans_mode;
+ switch (trans_mode) {
+ case DSPI_EOQ_MODE:
+ dspi_eoq_read(dspi);
+ break;
+ case DSPI_TCFQ_MODE:
+ dspi_tcfq_read(dspi);
+ break;
+ default:
+ dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
+ trans_mode);
+ return IRQ_HANDLED;
+ }
- dspi->waitflags = 1;
- wake_up_interruptible(&dspi->waitq);
- } else
- msg->actual_length += dspi_transfer_write(dspi);
+ if (!dspi->len) {
+ if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) {
+ regmap_update_bits(dspi->regmap,
+ SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK,
+ SPI_FRAME_BITS(16));
+ dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
+ }
+
+ dspi->waitflags = 1;
+ wake_up_interruptible(&dspi->waitq);
+ } else {
+ switch (trans_mode) {
+ case DSPI_EOQ_MODE:
+ dspi_eoq_write(dspi);
+ break;
+ case DSPI_TCFQ_MODE:
+ dspi_tcfq_write(dspi);
+ break;
+ default:
+ dev_err(&dspi->pdev->dev,
+ "unsupported trans_mode %u\n",
+ trans_mode);
+ }
+ }
+ }
return IRQ_HANDLED;
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
- { .compatible = "fsl,vf610-dspi", .data = NULL, },
+ { .compatible = "fsl,vf610-dspi", .data = (void *)&vf610_data, },
+ { .compatible = "fsl,ls1021a-v1.0-dspi",
+ .data = (void *)&ls1021a_v1_data, },
+ { .compatible = "fsl,ls2085a-dspi", .data = (void *)&ls2085a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
@@ -429,6 +614,8 @@ static int dspi_suspend(struct device *dev)
spi_master_suspend(master);
clk_disable_unprepare(dspi->clk);
+ pinctrl_pm_select_sleep_state(dev);
+
return 0;
}
@@ -437,6 +624,8 @@ static int dspi_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ pinctrl_pm_select_default_state(dev);
+
clk_prepare_enable(dspi->clk);
spi_master_resume(master);
@@ -461,6 +650,8 @@ static int dspi_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
int ret = 0, cs_num, bus_num;
+ const struct of_device_id *of_id =
+ of_match_device(fsl_dspi_dt_ids, &pdev->dev);
master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
if (!master)
@@ -494,6 +685,13 @@ static int dspi_probe(struct platform_device *pdev)
}
master->bus_num = bus_num;
+ dspi->devtype_data = (struct fsl_dspi_devtype_data *)of_id->data;
+ if (!dspi->devtype_data) {
+ dev_err(&pdev->dev, "can't get devtype_data\n");
+ ret = -EFAULT;
+ goto out_master_put;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
@@ -501,7 +699,7 @@ static int dspi_probe(struct platform_device *pdev)
goto out_master_put;
}
- dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base,
+ dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
&dspi_regmap_config);
if (IS_ERR(dspi->regmap)) {
dev_err(&pdev->dev, "failed to init regmap: %ld\n",
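The reworked interrupt handler above derives message->actual_length from the 16-bit transfer counter in SPI_TCR instead of from the write routine's return value, so a counter wrap has to be folded in. A small sketch of that modular arithmetic (illustrative only, mirroring the expressions in dspi_interrupt()):

/*
 * tcnt delta as computed in dspi_interrupt(); SPI_TCR_TCNT_MAX is 0x10000
 * because the hardware counter is 16 bits wide and wraps to zero.
 */
static u32 dspi_tcnt_delta_sketch(u32 prev_tcnt, u32 cur_tcnt, int tx_word)
{
	u32 frames = (cur_tcnt + SPI_TCR_TCNT_MAX - prev_tcnt) % SPI_TCR_TCNT_MAX;

	return frames * (tx_word + 1);	/* one or two bytes per frame */
}

/*
 * Example: prev_tcnt = 65530, cur_tcnt = 6 after a wrap:
 *   (6 + 65536 - 65530) % 65536 = 12 frames
 *   12 * 2 = 24 bytes in 16-bit word mode (tx_word == 1)
 */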
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index d0a73a09a9bd..d3f05a0525a4 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -359,14 +359,16 @@ static void fsl_espi_rw_trans(struct spi_message *m,
struct fsl_espi_transfer *trans, u8 *rx_buff)
{
struct fsl_espi_transfer *espi_trans = trans;
- unsigned int n_tx = espi_trans->n_tx;
- unsigned int n_rx = espi_trans->n_rx;
+ unsigned int total_len = espi_trans->len;
struct spi_transfer *t;
u8 *local_buf;
u8 *rx_buf = rx_buff;
unsigned int trans_len;
unsigned int addr;
- int i, pos, loop;
+ unsigned int tx_only;
+ unsigned int rx_pos = 0;
+ unsigned int pos;
+ int i, loop;
local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
if (!local_buf) {
@@ -374,36 +376,48 @@ static void fsl_espi_rw_trans(struct spi_message *m,
return;
}
- for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) {
- trans_len = n_rx - pos;
- if (trans_len > SPCOM_TRANLEN_MAX - n_tx)
- trans_len = SPCOM_TRANLEN_MAX - n_tx;
+ for (pos = 0, loop = 0; pos < total_len; pos += trans_len, loop++) {
+ trans_len = total_len - pos;
i = 0;
+ tx_only = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf) {
memcpy(local_buf + i, t->tx_buf, t->len);
i += t->len;
+ if (!t->rx_buf)
+ tx_only += t->len;
}
}
+ /* Add additional TX bytes to compensate for SPCOM_TRANLEN_MAX */
+ if (loop > 0)
+ trans_len += tx_only;
+
+ if (trans_len > SPCOM_TRANLEN_MAX)
+ trans_len = SPCOM_TRANLEN_MAX;
+
+ /* Update device offset */
if (pos > 0) {
addr = fsl_espi_cmd2addr(local_buf);
- addr += pos;
+ addr += rx_pos;
fsl_espi_addr2cmd(addr, local_buf);
}
- espi_trans->n_tx = n_tx;
- espi_trans->n_rx = trans_len;
- espi_trans->len = trans_len + n_tx;
+ espi_trans->len = trans_len;
espi_trans->tx_buf = local_buf;
espi_trans->rx_buf = local_buf;
fsl_espi_do_trans(m, espi_trans);
- memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);
+ /* If there is at least one RX byte then copy it to rx_buf */
+ if (tx_only < SPCOM_TRANLEN_MAX)
+ memcpy(rx_buf + rx_pos, espi_trans->rx_buf + tx_only,
+ trans_len - tx_only);
+
+ rx_pos += trans_len - tx_only;
if (loop > 0)
- espi_trans->actual_length += espi_trans->len - n_tx;
+ espi_trans->actual_length += espi_trans->len - tx_only;
else
espi_trans->actual_length += espi_trans->len;
}
@@ -418,6 +432,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
u8 *rx_buf = NULL;
unsigned int n_tx = 0;
unsigned int n_rx = 0;
+ unsigned int xfer_len = 0;
struct fsl_espi_transfer espi_trans;
list_for_each_entry(t, &m->transfers, transfer_list) {
@@ -427,11 +442,13 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
n_rx += t->len;
rx_buf = t->rx_buf;
}
+ if ((t->tx_buf) || (t->rx_buf))
+ xfer_len += t->len;
}
espi_trans.n_tx = n_tx;
espi_trans.n_rx = n_rx;
- espi_trans.len = n_tx + n_rx;
+ espi_trans.len = xfer_len;
espi_trans.actual_length = 0;
espi_trans.status = 0;
@@ -544,9 +561,13 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
/* spin until TX is done */
ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg(
- &reg_base->event)) & SPIE_NF) == 0, 1000, 0);
+ &reg_base->event)) & SPIE_NF), 1000, 0);
if (!ret) {
dev_err(mspi->dev, "tired waiting for SPIE_NF\n");
+
+ /* Clear the SPIE bits */
+ mpc8xxx_spi_write_reg(&reg_base->event, events);
+ complete(&mspi->done);
return;
}
}
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index c01567d53581..788e2b176a4f 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -122,36 +123,31 @@ static inline void spfi_start(struct img_spfi *spfi)
spfi_writel(spfi, val, SPFI_CONTROL);
}
-static inline void spfi_stop(struct img_spfi *spfi)
-{
- u32 val;
-
- val = spfi_readl(spfi, SPFI_CONTROL);
- val &= ~SPFI_CONTROL_SPFI_EN;
- spfi_writel(spfi, val, SPFI_CONTROL);
-}
-
static inline void spfi_reset(struct img_spfi *spfi)
{
spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
- udelay(1);
spfi_writel(spfi, 0, SPFI_CONTROL);
}
-static void spfi_flush_tx_fifo(struct img_spfi *spfi)
+static int spfi_wait_all_done(struct img_spfi *spfi)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
+ unsigned long timeout = jiffies + msecs_to_jiffies(50);
- spfi_writel(spfi, SPFI_INTERRUPT_SDE, SPFI_INTERRUPT_CLEAR);
while (time_before(jiffies, timeout)) {
- if (spfi_readl(spfi, SPFI_INTERRUPT_STATUS) &
- SPFI_INTERRUPT_SDE)
- return;
+ u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+
+ if (status & SPFI_INTERRUPT_ALLDONETRIG) {
+ spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
+ SPFI_INTERRUPT_CLEAR);
+ return 0;
+ }
cpu_relax();
}
- dev_err(spfi->dev, "Timed out waiting for FIFO to drain\n");
+ dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
spfi_reset(spfi);
+
+ return -ETIMEDOUT;
}
static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
@@ -237,6 +233,7 @@ static int img_spfi_start_pio(struct spi_master *master,
const void *tx_buf = xfer->tx_buf;
void *rx_buf = xfer->rx_buf;
unsigned long timeout;
+ int ret;
if (tx_buf)
tx_bytes = xfer->len;
@@ -269,16 +266,15 @@ static int img_spfi_start_pio(struct spi_master *master,
cpu_relax();
}
+ ret = spfi_wait_all_done(spfi);
+ if (ret < 0)
+ return ret;
+
if (rx_bytes > 0 || tx_bytes > 0) {
dev_err(spfi->dev, "PIO transfer timed out\n");
- spfi_reset(spfi);
return -ETIMEDOUT;
}
- if (tx_buf)
- spfi_flush_tx_fifo(spfi);
- spfi_stop(spfi);
-
return 0;
}
@@ -287,14 +283,12 @@ static void img_spfi_dma_rx_cb(void *data)
struct img_spfi *spfi = data;
unsigned long flags;
- spin_lock_irqsave(&spfi->lock, flags);
+ spfi_wait_all_done(spfi);
+ spin_lock_irqsave(&spfi->lock, flags);
spfi->rx_dma_busy = false;
- if (!spfi->tx_dma_busy) {
- spfi_stop(spfi);
+ if (!spfi->tx_dma_busy)
spi_finalize_current_transfer(spfi->master);
- }
-
spin_unlock_irqrestore(&spfi->lock, flags);
}
@@ -303,16 +297,12 @@ static void img_spfi_dma_tx_cb(void *data)
struct img_spfi *spfi = data;
unsigned long flags;
- spfi_flush_tx_fifo(spfi);
+ spfi_wait_all_done(spfi);
spin_lock_irqsave(&spfi->lock, flags);
-
spfi->tx_dma_busy = false;
- if (!spfi->rx_dma_busy) {
- spfi_stop(spfi);
+ if (!spfi->rx_dma_busy)
spi_finalize_current_transfer(spfi->master);
- }
-
spin_unlock_irqrestore(&spfi->lock, flags);
}
@@ -397,6 +387,75 @@ stop_dma:
return -EIO;
}
+static void img_spfi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ unsigned long flags;
+
+ /*
+ * Stop all DMA and reset the controller if the previous transaction
+ * timed out and never completed its DMA.
+ */
+ spin_lock_irqsave(&spfi->lock, flags);
+ if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
+ spfi->tx_dma_busy = false;
+ spfi->rx_dma_busy = false;
+
+ dmaengine_terminate_all(spfi->tx_ch);
+ dmaengine_terminate_all(spfi->rx_ch);
+ }
+ spin_unlock_irqrestore(&spfi->lock, flags);
+}
+
+static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ u32 val;
+
+ val = spfi_readl(spfi, SPFI_PORT_STATE);
+ if (msg->spi->mode & SPI_CPHA)
+ val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
+ else
+ val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
+ if (msg->spi->mode & SPI_CPOL)
+ val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
+ else
+ val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
+ spfi_writel(spfi, val, SPFI_PORT_STATE);
+
+ return 0;
+}
+
+static int img_spfi_unprepare(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+
+ spfi_reset(spfi);
+
+ return 0;
+}
+
+static int img_spfi_setup(struct spi_device *spi)
+{
+ int ret;
+
+ ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (ret)
+ dev_err(&spi->dev, "can't request chipselect gpio %d\n",
+ spi->cs_gpio);
+
+ return ret;
+}
+
+static void img_spfi_cleanup(struct spi_device *spi)
+{
+ gpio_free(spi->cs_gpio);
+}
+
static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *xfer)
{
@@ -405,10 +464,10 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
/*
* output = spfi_clk * (BITCLK / 512), where BITCLK must be a
- * power of 2 up to 256 (where 255 == 256 since BITCLK is 8 bits)
+ * power of 2 up to 128
*/
- div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz);
- div = clamp(512 / (1 << get_count_order(div)), 1, 255);
+ div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
+ div = clamp(512 / (1 << get_count_order(div)), 1, 128);
val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
@@ -416,6 +475,9 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));
+ spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
+ SPFI_TRANSACTION);
+
val = spfi_readl(spfi, SPFI_CONTROL);
val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
if (xfer->tx_buf)
@@ -429,25 +491,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
xfer->rx_nbits == SPI_NBITS_QUAD)
val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
- val &= ~SPFI_CONTROL_CONTINUE;
- if (!xfer->cs_change && !list_is_last(&xfer->transfer_list,
- &master->cur_msg->transfers))
- val |= SPFI_CONTROL_CONTINUE;
spfi_writel(spfi, val, SPFI_CONTROL);
-
- val = spfi_readl(spfi, SPFI_PORT_STATE);
- if (spi->mode & SPI_CPHA)
- val |= SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
- else
- val &= ~SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
- if (spi->mode & SPI_CPOL)
- val |= SPFI_PORT_STATE_CK_POL(spi->chip_select);
- else
- val &= ~SPFI_PORT_STATE_CK_POL(spi->chip_select);
- spfi_writel(spfi, val, SPFI_PORT_STATE);
-
- spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
- SPFI_TRANSACTION);
}
static int img_spfi_transfer_one(struct spi_master *master,
@@ -455,25 +499,13 @@ static int img_spfi_transfer_one(struct spi_master *master,
struct spi_transfer *xfer)
{
struct img_spfi *spfi = spi_master_get_devdata(spi->master);
- bool dma_reset = false;
- unsigned long flags;
int ret;
- /*
- * Stop all DMA and reset the controller if the previous transaction
- * timed-out and never completed it's DMA.
- */
- spin_lock_irqsave(&spfi->lock, flags);
- if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
- dev_err(spfi->dev, "SPI DMA still busy\n");
- dma_reset = true;
- }
- spin_unlock_irqrestore(&spfi->lock, flags);
-
- if (dma_reset) {
- dmaengine_terminate_all(spfi->tx_ch);
- dmaengine_terminate_all(spfi->rx_ch);
- spfi_reset(spfi);
+ if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
+ dev_err(spfi->dev,
+ "Transfer length (%d) is greater than the max supported (%d)",
+ xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
+ return -EINVAL;
}
img_spfi_config(master, spi, xfer);
@@ -485,17 +517,6 @@ static int img_spfi_transfer_one(struct spi_master *master,
return ret;
}
-static void img_spfi_set_cs(struct spi_device *spi, bool enable)
-{
- struct img_spfi *spfi = spi_master_get_devdata(spi->master);
- u32 val;
-
- val = spfi_readl(spfi, SPFI_PORT_STATE);
- val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK << SPFI_PORT_STATE_DEV_SEL_SHIFT);
- val |= spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
- spfi_writel(spfi, val, SPFI_PORT_STATE);
-}
-
static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *xfer)
{
@@ -584,14 +605,17 @@ static int img_spfi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
- master->num_chipselect = 5;
master->dev.of_node = pdev->dev.of_node;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
- master->max_speed_hz = clk_get_rate(spfi->spfi_clk);
- master->min_speed_hz = master->max_speed_hz / 512;
+ master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
+ master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;
- master->set_cs = img_spfi_set_cs;
+ master->setup = img_spfi_setup;
+ master->cleanup = img_spfi_cleanup;
master->transfer_one = img_spfi_transfer_one;
+ master->prepare_message = img_spfi_prepare;
+ master->unprepare_message = img_spfi_unprepare;
+ master->handle_err = img_spfi_handle_err;
spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
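The clock handling above also rewrites the per-transfer divider: img_spfi_config() now derives BITCLK from the SPFI input clock directly and caps it at 128, which is why max_speed_hz becomes spfi_clk / 4. A worked sketch of that selection, assuming the same kernel helpers the driver uses (DIV_ROUND_UP, get_count_order, clamp):

#include <linux/kernel.h>
#include <linux/log2.h>

/* output = spfi_clk * (BITCLK / 512), BITCLK a power of two capped at 128 */
static unsigned int img_spfi_bitclk_sketch(unsigned long spfi_clk_hz,
					   unsigned long speed_hz)
{
	unsigned int div = DIV_ROUND_UP(spfi_clk_hz, speed_hz);

	return clamp(512 / (1 << get_count_order(div)), 1, 128);
}

/*
 * Example: spfi_clk = 128 MHz, requested speed = 10 MHz:
 *   div     = DIV_ROUND_UP(128 MHz, 10 MHz) = 13
 *   rounded = 1 << get_count_order(13)      = 16
 *   BITCLK  = clamp(512 / 16, 1, 128)       = 32
 *   output  = 128 MHz * 32 / 512            = 8 MHz (<= 10 MHz requested)
 */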
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 6fea4af51c41..eb7d3a6fb14c 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -370,8 +370,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
if (spi_imx->dma_is_inited) {
dma = readl(spi_imx->base + MX51_ECSPI_DMA);
- spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
- spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
@@ -676,7 +674,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.devtype = IMX51_ECSPI,
};
-static struct platform_device_id spi_imx_devtype[] = {
+static const struct platform_device_id spi_imx_devtype[] = {
{
.name = "imx1-cspi",
.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
@@ -868,6 +866,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
master->max_dma_len = MAX_SDMA_BD_BYTES;
spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
SPI_MASTER_MUST_TX;
+ spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
spi_imx->dma_is_inited = 1;
return 0;
@@ -903,7 +903,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
if (tx) {
desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
- tx->sgl, tx->nents, DMA_TO_DEVICE,
+ tx->sgl, tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx)
goto no_dma;
@@ -915,7 +915,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
if (rx) {
desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
- rx->sgl, rx->nents, DMA_FROM_DEVICE,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
goto no_dma;
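The spi-imx hunks also swap the enum dma_data_direction constants for the enum dma_transfer_direction ones that the dmaengine slave API expects (the old values happen to share the same numeric values for the TX/RX cases, which is why this worked at runtime). A short generic sketch of the intended pairing, not specific to this driver:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * dma_map_sg()/dma_unmap_sg() take enum dma_data_direction
 * (DMA_TO_DEVICE/DMA_FROM_DEVICE), while dmaengine_prep_slave_sg() takes
 * enum dma_transfer_direction (DMA_MEM_TO_DEV/DMA_DEV_TO_MEM).
 */
static struct dma_async_tx_descriptor *
spi_prep_tx_sketch(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int nents)
{
	/* memory -> device, i.e. the TX half of a SPI transfer */
	return dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}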
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index ecae0d4e2945..965d2bdcfdcc 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -588,7 +588,7 @@ static int mpc512x_psc_spi_of_remove(struct platform_device *op)
return mpc512x_psc_spi_do_remove(&op->dev);
}
-static struct of_device_id mpc512x_psc_spi_of_match[] = {
+static const struct of_device_id mpc512x_psc_spi_of_match[] = {
{ .compatible = "fsl,mpc5121-psc-spi", },
{},
};
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index b283d537d16a..e99d6a93d394 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -238,7 +238,7 @@ static int octeon_spi_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id octeon_spi_match[] = {
+static const struct of_device_id octeon_spi_match[] = {
{ .compatible = "cavium,octeon-3010-spi", },
{},
};
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index d890d309dff9..35b332dacb13 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -24,6 +24,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -294,16 +295,6 @@ static int omap1_spi100k_setup(struct spi_device *spi)
return ret;
}
-static int omap1_spi100k_prepare_hardware(struct spi_master *master)
-{
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
-
- clk_prepare_enable(spi100k->ick);
- clk_prepare_enable(spi100k->fck);
-
- return 0;
-}
-
static int omap1_spi100k_transfer_one_message(struct spi_master *master,
struct spi_message *m)
{
@@ -372,16 +363,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
return status;
}
-static int omap1_spi100k_unprepare_hardware(struct spi_master *master)
-{
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
-
- clk_disable_unprepare(spi100k->ick);
- clk_disable_unprepare(spi100k->fck);
-
- return 0;
-}
-
static int omap1_spi100k_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -402,14 +383,12 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
master->setup = omap1_spi100k_setup;
master->transfer_one_message = omap1_spi100k_transfer_one_message;
- master->prepare_transfer_hardware = omap1_spi100k_prepare_hardware;
- master->unprepare_transfer_hardware = omap1_spi100k_unprepare_hardware;
- master->cleanup = NULL;
master->num_chipselect = 2;
master->mode_bits = MODEBITS;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16);
master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ;
+ master->auto_runtime_pm = true;
spi100k = spi_master_get_devdata(master);
@@ -434,22 +413,96 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
goto err;
}
+ status = clk_prepare_enable(spi100k->ick);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to enable ick: %d\n", status);
+ goto err;
+ }
+
+ status = clk_prepare_enable(spi100k->fck);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to enable fck: %d\n", status);
+ goto err_ick;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+
status = devm_spi_register_master(&pdev->dev, master);
if (status < 0)
- goto err;
+ goto err_fck;
return status;
+err_fck:
+ clk_disable_unprepare(spi100k->fck);
+err_ick:
+ clk_disable_unprepare(spi100k->ick);
err:
spi_master_put(master);
return status;
}
+static int omap1_spi100k_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(spi100k->fck);
+ clk_disable_unprepare(spi100k->ick);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int omap1_spi100k_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spi100k->ick);
+ clk_disable_unprepare(spi100k->fck);
+
+ return 0;
+}
+
+static int omap1_spi100k_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(spi100k->ick);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable ick: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(spi100k->fck);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable fck: %d\n", ret);
+ clk_disable_unprepare(spi100k->ick);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap1_spi100k_pm = {
+ SET_RUNTIME_PM_OPS(omap1_spi100k_runtime_suspend,
+ omap1_spi100k_runtime_resume, NULL)
+};
+
static struct platform_driver omap1_spi100k_driver = {
.driver = {
.name = "omap1_spi100k",
+ .pm = &omap1_spi100k_pm,
},
.probe = omap1_spi100k_probe,
+ .remove = omap1_spi100k_remove,
};
module_platform_driver(omap1_spi100k_driver);
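
The probe path now enables both clocks itself and registers runtime PM
callbacks that gate those same clocks, while master->auto_runtime_pm tells
the SPI core to manage the runtime PM reference around message traffic.
Roughly, under the assumption that the core's message pump behaves as in this
era's drivers/spi/spi.c (simplified, not verbatim):

    /* Simplified view of what auto_runtime_pm buys the driver */
    if (master->auto_runtime_pm)
    	ret = pm_runtime_get_sync(master->dev.parent);	/* resume: clocks on */

    /* ... queued messages are pumped here ... */

    if (master->auto_runtime_pm) {
    	pm_runtime_mark_last_busy(master->dev.parent);
    	pm_runtime_put_autosuspend(master->dev.parent);	/* idle: clocks off */
    }

so omap1_spi100k_runtime_suspend()/_resume() only run when the bus is
actually idle or about to be used.
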
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 3c0844457c07..55576db31549 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -44,7 +44,6 @@
#include <linux/module.h>
#include <linux/io.h>
-#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 4df8942058de..58673841286c 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -35,6 +35,7 @@
#include <linux/gcd.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
@@ -242,17 +243,27 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
-static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
+static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
{
u32 l;
- l = mcspi_cached_chconf0(spi);
- if (cs_active)
- l |= OMAP2_MCSPI_CHCONF_FORCE;
- else
- l &= ~OMAP2_MCSPI_CHCONF_FORCE;
+ /* The controller handles the inverted chip selects
+	 * using the OMAP2_MCSPI_CHCONF_EPOL bit, so undo
+	 * the inversion applied by the core spi_set_cs function.
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
- mcspi_write_chconf0(spi, l);
+ if (spi->controller_state) {
+ l = mcspi_cached_chconf0(spi);
+
+ if (enable)
+ l &= ~OMAP2_MCSPI_CHCONF_FORCE;
+ else
+ l |= OMAP2_MCSPI_CHCONF_FORCE;
+
+ mcspi_write_chconf0(spi, l);
+ }
}
static void omap2_mcspi_set_master_mode(struct spi_master *master)
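
The new omap2_mcspi_set_cs() receives enable with the SPI_CS_HIGH inversion
already applied by the core; since this controller handles chip-select
polarity itself through OMAP2_MCSPI_CHCONF_EPOL, the callback flips the value
back before programming the FORCE bit. For reference, a simplified view of
the core-side helper this compensates for (an assumption based on this era's
drivers/spi/spi.c, not a verbatim copy):

    /* Simplified sketch of the core's spi_set_cs() */
    static void spi_set_cs_sketch(struct spi_device *spi, bool enable)
    {
    	if (spi->mode & SPI_CS_HIGH)
    		enable = !enable;		/* pre-invert for CS-high devices */

    	if (gpio_is_valid(spi->cs_gpio))
    		gpio_set_value(spi->cs_gpio, !enable);
    	else if (spi->master->set_cs)
    		spi->master->set_cs(spi, !enable);
    }
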
@@ -1011,6 +1022,15 @@ static int omap2_mcspi_setup(struct spi_device *spi)
return ret;
}
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "failed to request gpio\n");
+ return ret;
+ }
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ }
+
ret = pm_runtime_get_sync(mcspi->dev);
if (ret < 0)
return ret;
@@ -1050,9 +1070,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
mcspi_dma->dma_tx = NULL;
}
}
+
+ if (gpio_is_valid(spi->cs_gpio))
+ gpio_free(spi->cs_gpio);
}
-static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
+static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
+ struct spi_device *spi, struct spi_transfer *t)
{
/* We only enable one channel at a time -- the one whose message is
@@ -1062,18 +1086,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
* chipselect with the FORCE bit ... CS != channel enable.
*/
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
struct spi_master *master;
struct omap2_mcspi_dma *mcspi_dma;
- int cs_active = 0;
struct omap2_mcspi_cs *cs;
struct omap2_mcspi_device_config *cd;
int par_override = 0;
int status = 0;
u32 chconf;
- spi = m->spi;
master = spi->master;
mcspi_dma = mcspi->dma_channels + spi->chip_select;
cs = spi->controller_state;
@@ -1090,103 +1110,84 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
par_override = 1;
omap2_mcspi_set_enable(spi, 0);
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
- status = -EINVAL;
- break;
- }
- if (par_override ||
- (t->speed_hz != spi->max_speed_hz) ||
- (t->bits_per_word != spi->bits_per_word)) {
- par_override = 1;
- status = omap2_mcspi_setup_transfer(spi, t);
- if (status < 0)
- break;
- if (t->speed_hz == spi->max_speed_hz &&
- t->bits_per_word == spi->bits_per_word)
- par_override = 0;
- }
- if (cd && cd->cs_per_word) {
- chconf = mcspi->ctx.modulctrl;
- chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
- mcspi->ctx.modulctrl =
- mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
- }
+ if (gpio_is_valid(spi->cs_gpio))
+ omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);
- if (!cs_active) {
- omap2_mcspi_force_cs(spi, 1);
- cs_active = 1;
- }
-
- chconf = mcspi_cached_chconf0(spi);
- chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
- chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+ if (par_override ||
+ (t->speed_hz != spi->max_speed_hz) ||
+ (t->bits_per_word != spi->bits_per_word)) {
+ par_override = 1;
+ status = omap2_mcspi_setup_transfer(spi, t);
+ if (status < 0)
+ goto out;
+ if (t->speed_hz == spi->max_speed_hz &&
+ t->bits_per_word == spi->bits_per_word)
+ par_override = 0;
+ }
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
- if (t->tx_buf == NULL)
- chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
- else if (t->rx_buf == NULL)
- chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
-
- if (cd && cd->turbo_mode && t->tx_buf == NULL) {
- /* Turbo mode is for more than one word */
- if (t->len > ((cs->word_len + 7) >> 3))
- chconf |= OMAP2_MCSPI_CHCONF_TURBO;
- }
+ chconf = mcspi_cached_chconf0(spi);
+ chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+ chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+
+ if (t->tx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
+ else if (t->rx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
+
+ if (cd && cd->turbo_mode && t->tx_buf == NULL) {
+ /* Turbo mode is for more than one word */
+ if (t->len > ((cs->word_len + 7) >> 3))
+ chconf |= OMAP2_MCSPI_CHCONF_TURBO;
+ }
- mcspi_write_chconf0(spi, chconf);
+ mcspi_write_chconf0(spi, chconf);
- if (t->len) {
- unsigned count;
+ if (t->len) {
+ unsigned count;
- if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
- (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
- omap2_mcspi_set_fifo(spi, t, 1);
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+ (t->len >= DMA_MIN_BYTES))
+ omap2_mcspi_set_fifo(spi, t, 1);
- omap2_mcspi_set_enable(spi, 1);
+ omap2_mcspi_set_enable(spi, 1);
- /* RX_ONLY mode needs dummy data in TX reg */
- if (t->tx_buf == NULL)
- writel_relaxed(0, cs->base
- + OMAP2_MCSPI_TX0);
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (t->tx_buf == NULL)
+ writel_relaxed(0, cs->base
+ + OMAP2_MCSPI_TX0);
- if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
- (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
- count = omap2_mcspi_txrx_dma(spi, t);
- else
- count = omap2_mcspi_txrx_pio(spi, t);
- m->actual_length += count;
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+ (t->len >= DMA_MIN_BYTES))
+ count = omap2_mcspi_txrx_dma(spi, t);
+ else
+ count = omap2_mcspi_txrx_pio(spi, t);
- if (count != t->len) {
- status = -EIO;
- break;
- }
+ if (count != t->len) {
+ status = -EIO;
+ goto out;
}
+ }
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- /* ignore the "leave it on after last xfer" hint */
- if (t->cs_change) {
- omap2_mcspi_force_cs(spi, 0);
- cs_active = 0;
- }
+ omap2_mcspi_set_enable(spi, 0);
- omap2_mcspi_set_enable(spi, 0);
+ if (mcspi->fifo_depth > 0)
+ omap2_mcspi_set_fifo(spi, t, 0);
- if (mcspi->fifo_depth > 0)
- omap2_mcspi_set_fifo(spi, t, 0);
- }
+out:
/* Restore defaults if they were overridden */
if (par_override) {
par_override = 0;
status = omap2_mcspi_setup_transfer(spi, NULL);
}
- if (cs_active)
- omap2_mcspi_force_cs(spi, 0);
-
if (cd && cd->cs_per_word) {
chconf = mcspi->ctx.modulctrl;
chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
@@ -1197,70 +1198,64 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
omap2_mcspi_set_enable(spi, 0);
+ if (gpio_is_valid(spi->cs_gpio))
+ omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
+
if (mcspi->fifo_depth > 0 && t)
omap2_mcspi_set_fifo(spi, t, 0);
- m->status = status;
+ return status;
}
-static int omap2_mcspi_transfer_one_message(struct spi_master *master,
- struct spi_message *m)
+static int omap2_mcspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi, struct spi_transfer *t)
{
- struct spi_device *spi;
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
- struct spi_transfer *t;
+ const void *tx_buf = t->tx_buf;
+ void *rx_buf = t->rx_buf;
+ unsigned len = t->len;
- spi = m->spi;
mcspi = spi_master_get_devdata(master);
mcspi_dma = mcspi->dma_channels + spi->chip_select;
- m->actual_length = 0;
- m->status = 0;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- const void *tx_buf = t->tx_buf;
- void *rx_buf = t->rx_buf;
- unsigned len = t->len;
-
- if ((len && !(rx_buf || tx_buf))) {
- dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
- t->speed_hz,
- len,
- tx_buf ? "tx" : "",
- rx_buf ? "rx" : "",
- t->bits_per_word);
- return -EINVAL;
- }
- if (m->is_dma_mapped || len < DMA_MIN_BYTES)
- continue;
+ if ((len && !(rx_buf || tx_buf))) {
+ dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
+ t->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ t->bits_per_word);
+ return -EINVAL;
+ }
- if (mcspi_dma->dma_tx && tx_buf != NULL) {
- t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
- len, DMA_TO_DEVICE);
- if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
- dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
- 'T', len);
- return -EINVAL;
- }
+ if (len < DMA_MIN_BYTES)
+ goto skip_dma_map;
+
+ if (mcspi_dma->dma_tx && tx_buf != NULL) {
+ t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
+ dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
+ 'T', len);
+ return -EINVAL;
}
- if (mcspi_dma->dma_rx && rx_buf != NULL) {
- t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
- dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
- 'R', len);
- if (tx_buf != NULL)
- dma_unmap_single(mcspi->dev, t->tx_dma,
- len, DMA_TO_DEVICE);
- return -EINVAL;
- }
+ }
+ if (mcspi_dma->dma_rx && rx_buf != NULL) {
+ t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
+ dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
+ 'R', len);
+ if (tx_buf != NULL)
+ dma_unmap_single(mcspi->dev, t->tx_dma,
+ len, DMA_TO_DEVICE);
+ return -EINVAL;
}
}
- omap2_mcspi_work(mcspi, m);
- spi_finalize_current_message(master);
- return 0;
+skip_dma_map:
+ return omap2_mcspi_work_one(mcspi, spi, t);
}
static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
@@ -1339,7 +1334,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = omap2_mcspi_setup;
master->auto_runtime_pm = true;
- master->transfer_one_message = omap2_mcspi_transfer_one_message;
+ master->transfer_one = omap2_mcspi_transfer_one;
+ master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
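
Replacing transfer_one_message with transfer_one hands the per-message loop
back to the SPI core: walking msg->transfers, toggling chip select through
->set_cs (including cs_change handling), applying delay_usecs, accumulating
actual_length and finalizing the message, while the driver only executes one
spi_transfer at a time. A very rough sketch of the core loop this relies on
(an illustrative assumption; the real spi_transfer_one_message() also handles
completion waiting, statistics and error unwinding):

    static int core_loop_sketch(struct spi_master *master,
    			    struct spi_message *msg)
    {
    	struct spi_transfer *xfer;
    	int ret = 0;

    	spi_set_cs(msg->spi, true);

    	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
    		ret = master->transfer_one(master, msg->spi, xfer);
    		if (ret < 0)
    			break;
    		msg->actual_length += xfer->len;

    		if (xfer->delay_usecs)
    			udelay(xfer->delay_usecs);
    		if (xfer->cs_change) {
    			spi_set_cs(msg->spi, false);
    			spi_set_cs(msg->spi, true);
    		}
    	}

    	spi_set_cs(msg->spi, false);
    	msg->status = ret;
    	spi_finalize_current_message(master);
    	return ret;
    }
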
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 861664776672..8cad107a5b3f 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -61,6 +61,12 @@ enum orion_spi_type {
struct orion_spi_dev {
enum orion_spi_type typ;
+ /*
+	 * min_divisor and max_hz should be mutually exclusive; the only
+	 * case where we can have both is to manage the armada-370-spi
+	 * case with an old device tree
+ */
+ unsigned long max_hz;
unsigned int min_divisor;
unsigned int max_divisor;
u32 prescale_mask;
@@ -385,16 +391,54 @@ static const struct orion_spi_dev orion_spi_dev_data = {
.prescale_mask = ORION_SPI_CLK_PRESCALE_MASK,
};
-static const struct orion_spi_dev armada_spi_dev_data = {
+static const struct orion_spi_dev armada_370_spi_dev_data = {
.typ = ARMADA_SPI,
- .min_divisor = 1,
+ .min_divisor = 4,
+ .max_divisor = 1920,
+ .max_hz = 50000000,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct orion_spi_dev armada_xp_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .max_hz = 50000000,
+ .max_divisor = 1920,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct orion_spi_dev armada_375_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .min_divisor = 15,
.max_divisor = 1920,
.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
};
static const struct of_device_id orion_spi_of_match_table[] = {
- { .compatible = "marvell,orion-spi", .data = &orion_spi_dev_data, },
- { .compatible = "marvell,armada-370-spi", .data = &armada_spi_dev_data, },
+ {
+ .compatible = "marvell,orion-spi",
+ .data = &orion_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-370-spi",
+ .data = &armada_370_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-375-spi",
+ .data = &armada_375_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-380-spi",
+ .data = &armada_xp_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-390-spi",
+ .data = &armada_xp_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-xp-spi",
+ .data = &armada_xp_spi_dev_data,
+ },
+
{}
};
MODULE_DEVICE_TABLE(of, orion_spi_of_match_table);
@@ -454,7 +498,23 @@ static int orion_spi_probe(struct platform_device *pdev)
goto out;
tclk_hz = clk_get_rate(spi->clk);
- master->max_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
+
+ /*
+	 * With an old device tree, armada-370-spi could be used with
+	 * Armada XP; however, for that SoC the maximum frequency is
+ * 50MHz instead of tclk/4. On Armada 370, tclk cannot be
+ * higher than 200MHz. So, in order to be able to handle both
+ * SoCs, we can take the minimum of 50MHz and tclk/4.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-370-spi"))
+ master->max_speed_hz = min(devdata->max_hz,
+ DIV_ROUND_UP(tclk_hz, devdata->min_divisor));
+ else if (devdata->min_divisor)
+ master->max_speed_hz =
+ DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
+ else
+ master->max_speed_hz = devdata->max_hz;
master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
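
Worked numbers for the clamping above: on Armada 370, tclk is at most 200MHz,
so tclk/4 is at most 50MHz and the min() changes nothing; on an Armada XP
probed through an old "marvell,armada-370-spi" compatible with, say,
tclk = 250MHz (an example value only), tclk/4 would be 62.5MHz and the min()
clamps max_speed_hz to the SoC's 50MHz limit:

    Armada 370:          tclk <= 200MHz  ->  min(50MHz, tclk/4) = tclk/4
    Armada XP (old DT):  tclk  = 250MHz  ->  min(50MHz, 62.5MHz) = 50MHz
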
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 89ca162801da..94af80676684 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -285,7 +285,12 @@
*/
#define DEFAULT_SSP_REG_IMSC 0x0UL
#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
-#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
+#define ENABLE_ALL_INTERRUPTS ( \
+ SSP_IMSC_MASK_RORIM | \
+ SSP_IMSC_MASK_RTIM | \
+ SSP_IMSC_MASK_RXIM | \
+ SSP_IMSC_MASK_TXIM \
+)
#define CLEAR_ALL_INTERRUPTS 0x3
@@ -534,12 +539,12 @@ static void giveback(struct pl022 *pl022)
pl022->cur_msg = NULL;
pl022->cur_transfer = NULL;
pl022->cur_chip = NULL;
- spi_finalize_current_message(pl022->master);
/* disable the SPI/SSP operation */
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+ spi_finalize_current_message(pl022->master);
}
/**
@@ -1251,7 +1256,6 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
struct pl022 *pl022 = dev_id;
struct spi_message *msg = pl022->cur_msg;
u16 irq_status = 0;
- u16 flag = 0;
if (unlikely(!msg)) {
dev_err(&pl022->adev->dev,
@@ -1280,9 +1284,6 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
dev_err(&pl022->adev->dev,
"RXFIFO is full\n");
- if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
- dev_err(&pl022->adev->dev,
- "TXFIFO is full\n");
/*
* Disable and clear interrupts, disable SSP,
@@ -1303,8 +1304,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
readwriter(pl022);
- if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
- flag = 1;
+ if (pl022->tx == pl022->tx_end) {
/* Disable Transmit interrupt, enable receive interrupt */
writew((readw(SSP_IMSC(pl022->virtbase)) &
~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index fa7399e84bbb..3cfd4357489a 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -62,7 +62,7 @@ static struct pxa_spi_info spi_info_configs[] = {
.max_clk_rate = 3686400,
},
[PORT_BYT] = {
- .type = LPSS_SSP,
+ .type = LPSS_BYT_SSP,
.port_id = 0,
.num_chipselect = 1,
.max_clk_rate = 50000000,
@@ -70,7 +70,7 @@ static struct pxa_spi_info spi_info_configs[] = {
.rx_param = &byt_rx_param,
},
[PORT_BSW0] = {
- .type = LPSS_SSP,
+ .type = LPSS_BYT_SSP,
.port_id = 0,
.num_chipselect = 1,
.max_clk_rate = 50000000,
@@ -78,7 +78,7 @@ static struct pxa_spi_info spi_info_configs[] = {
.rx_param = &bsw0_rx_param,
},
[PORT_BSW1] = {
- .type = LPSS_SSP,
+ .type = LPSS_BYT_SSP,
.port_id = 1,
.num_chipselect = 1,
.max_clk_rate = 50000000,
@@ -86,7 +86,7 @@ static struct pxa_spi_info spi_info_configs[] = {
.rx_param = &bsw1_rx_param,
},
[PORT_BSW2] = {
- .type = LPSS_SSP,
+ .type = LPSS_BYT_SSP,
.port_id = 2,
.num_chipselect = 1,
.max_clk_rate = 50000000,
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
deleted file mode 100644
index 2e0796a0003f..000000000000
--- a/drivers/spi/spi-pxa2xx-pxadma.c
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * PXA2xx SPI private DMA support.
- *
- * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/pxa2xx_ssp.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/pxa2xx_spi.h>
-
-#include <mach/dma.h>
-#include "spi-pxa2xx.h"
-
-#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
-#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
-
-bool pxa2xx_spi_dma_is_possible(size_t len)
-{
- /* Try to map dma buffer and do a dma transfer if successful, but
- * only if the length is non-zero and less than MAX_DMA_LEN.
- *
- * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
- * of PIO instead. Care is needed above because the transfer may
- * have have been passed with buffers that are already dma mapped.
- * A zero-length transfer in PIO mode will not try to write/read
- * to/from the buffers
- *
- * REVISIT large transfers are exactly where we most want to be
- * using DMA. If this happens much, split those transfers into
- * multiple DMA segments rather than forcing PIO.
- */
- return len > 0 && len <= MAX_DMA_LEN;
-}
-
-int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
-{
- struct spi_message *msg = drv_data->cur_msg;
- struct device *dev = &msg->spi->dev;
-
- if (!drv_data->cur_chip->enable_dma)
- return 0;
-
- if (msg->is_dma_mapped)
- return drv_data->rx_dma && drv_data->tx_dma;
-
- if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
- return 0;
-
- /* Modify setup if rx buffer is null */
- if (drv_data->rx == NULL) {
- *drv_data->null_dma_buf = 0;
- drv_data->rx = drv_data->null_dma_buf;
- drv_data->rx_map_len = 4;
- } else
- drv_data->rx_map_len = drv_data->len;
-
-
- /* Modify setup if tx buffer is null */
- if (drv_data->tx == NULL) {
- *drv_data->null_dma_buf = 0;
- drv_data->tx = drv_data->null_dma_buf;
- drv_data->tx_map_len = 4;
- } else
- drv_data->tx_map_len = drv_data->len;
-
- /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
- * so we flush the cache *before* invalidating it, in case
- * the tx and rx buffers overlap.
- */
- drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
- drv_data->tx_map_len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, drv_data->tx_dma))
- return 0;
-
- /* Stream map the rx buffer */
- drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
- drv_data->rx_map_len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, drv_data->rx_dma)) {
- dma_unmap_single(dev, drv_data->tx_dma,
- drv_data->tx_map_len, DMA_TO_DEVICE);
- return 0;
- }
-
- return 1;
-}
-
-static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
-{
- struct device *dev;
-
- if (!drv_data->dma_mapped)
- return;
-
- if (!drv_data->cur_msg->is_dma_mapped) {
- dev = &drv_data->cur_msg->spi->dev;
- dma_unmap_single(dev, drv_data->rx_dma,
- drv_data->rx_map_len, DMA_FROM_DEVICE);
- dma_unmap_single(dev, drv_data->tx_dma,
- drv_data->tx_map_len, DMA_TO_DEVICE);
- }
-
- drv_data->dma_mapped = 0;
-}
-
-static int wait_ssp_rx_stall(struct driver_data *drv_data)
-{
- unsigned long limit = loops_per_jiffy << 1;
-
- while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
- cpu_relax();
-
- return limit;
-}
-
-static int wait_dma_channel_stop(int channel)
-{
- unsigned long limit = loops_per_jiffy << 1;
-
- while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
- cpu_relax();
-
- return limit;
-}
-
-static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
- const char *msg)
-{
- /* Stop and reset */
- DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
- DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- pxa2xx_spi_write(drv_data, SSCR1,
- pxa2xx_spi_read(drv_data, SSCR1)
- & ~drv_data->dma_cr1);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, 0);
- pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
-
- pxa2xx_spi_unmap_dma_buffers(drv_data);
-
- dev_err(&drv_data->pdev->dev, "%s\n", msg);
-
- drv_data->cur_msg->state = ERROR_STATE;
- tasklet_schedule(&drv_data->pump_transfers);
-}
-
-static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
-{
- struct spi_message *msg = drv_data->cur_msg;
-
- /* Clear and disable interrupts on SSP and DMA channels*/
- pxa2xx_spi_write(drv_data, SSCR1,
- pxa2xx_spi_read(drv_data, SSCR1)
- & ~drv_data->dma_cr1);
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
- DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
-
- if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_handler: dma rx channel stop failed\n");
-
- if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_transfer: ssp rx stall failed\n");
-
- pxa2xx_spi_unmap_dma_buffers(drv_data);
-
- /* update the buffer pointer for the amount completed in dma */
- drv_data->rx += drv_data->len -
- (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
-
- /* read trailing data from fifo, it does not matter how many
- * bytes are in the fifo just read until buffer is full
- * or fifo is empty, which ever occurs first */
- drv_data->read(drv_data);
-
- /* return count of what was actually read */
- msg->actual_length += drv_data->len -
- (drv_data->rx_end - drv_data->rx);
-
- /* Transfer delays and chip select release are
- * handled in pump_transfers or giveback
- */
-
- /* Move to next transfer */
- msg->state = pxa2xx_spi_next_transfer(drv_data);
-
- /* Schedule transfer tasklet */
- tasklet_schedule(&drv_data->pump_transfers);
-}
-
-void pxa2xx_spi_dma_handler(int channel, void *data)
-{
- struct driver_data *drv_data = data;
- u32 irq_status = DCSR(channel) & DMA_INT_MASK;
-
- if (irq_status & DCSR_BUSERR) {
-
- if (channel == drv_data->tx_channel)
- pxa2xx_spi_dma_error_stop(drv_data,
- "dma_handler: bad bus address on tx channel");
- else
- pxa2xx_spi_dma_error_stop(drv_data,
- "dma_handler: bad bus address on rx channel");
- return;
- }
-
- /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
- if ((channel == drv_data->tx_channel)
- && (irq_status & DCSR_ENDINTR)
- && (drv_data->ssp_type == PXA25x_SSP)) {
-
- /* Wait for rx to stall */
- if (wait_ssp_rx_stall(drv_data) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_handler: ssp rx stall failed\n");
-
- /* finish this transfer, start the next */
- pxa2xx_spi_dma_transfer_complete(drv_data);
- }
-}
-
-irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
-{
- u32 irq_status;
-
- irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
- if (irq_status & SSSR_ROR) {
- pxa2xx_spi_dma_error_stop(drv_data,
- "dma_transfer: fifo overrun");
- return IRQ_HANDLED;
- }
-
- /* Check for false positive timeout */
- if ((irq_status & SSSR_TINT)
- && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
- pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
- return IRQ_HANDLED;
- }
-
- if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
-
- /* Clear and disable timeout interrupt, do the rest in
- * dma_transfer_complete */
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, 0);
-
- /* finish this transfer, start the next */
- pxa2xx_spi_dma_transfer_complete(drv_data);
-
- return IRQ_HANDLED;
- }
-
- /* Opps problem detected */
- return IRQ_NONE;
-}
-
-int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
-{
- u32 dma_width;
-
- switch (drv_data->n_bytes) {
- case 1:
- dma_width = DCMD_WIDTH1;
- break;
- case 2:
- dma_width = DCMD_WIDTH2;
- break;
- default:
- dma_width = DCMD_WIDTH4;
- break;
- }
-
- /* Setup rx DMA Channel */
- DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
- DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
- DTADR(drv_data->rx_channel) = drv_data->rx_dma;
- if (drv_data->rx == drv_data->null_dma_buf)
- /* No target address increment */
- DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
- | dma_width
- | dma_burst
- | drv_data->len;
- else
- DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
- | DCMD_FLOWSRC
- | dma_width
- | dma_burst
- | drv_data->len;
-
- /* Setup tx DMA Channel */
- DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
- DSADR(drv_data->tx_channel) = drv_data->tx_dma;
- DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
- if (drv_data->tx == drv_data->null_dma_buf)
- /* No source address increment */
- DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
- | dma_width
- | dma_burst
- | drv_data->len;
- else
- DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
- | DCMD_FLOWTRG
- | dma_width
- | dma_burst
- | drv_data->len;
-
- /* Enable dma end irqs on SSP to detect end of transfer */
- if (drv_data->ssp_type == PXA25x_SSP)
- DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
-
- return 0;
-}
-
-void pxa2xx_spi_dma_start(struct driver_data *drv_data)
-{
- DCSR(drv_data->rx_channel) |= DCSR_RUN;
- DCSR(drv_data->tx_channel) |= DCSR_RUN;
-}
-
-int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
-{
- struct device *dev = &drv_data->pdev->dev;
- struct ssp_device *ssp = drv_data->ssp;
-
- /* Get two DMA channels (rx and tx) */
- drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
- DMA_PRIO_HIGH,
- pxa2xx_spi_dma_handler,
- drv_data);
- if (drv_data->rx_channel < 0) {
- dev_err(dev, "problem (%d) requesting rx channel\n",
- drv_data->rx_channel);
- return -ENODEV;
- }
- drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
- DMA_PRIO_MEDIUM,
- pxa2xx_spi_dma_handler,
- drv_data);
- if (drv_data->tx_channel < 0) {
- dev_err(dev, "problem (%d) requesting tx channel\n",
- drv_data->tx_channel);
- pxa_free_dma(drv_data->rx_channel);
- return -ENODEV;
- }
-
- DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
- DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
-
- return 0;
-}
-
-void pxa2xx_spi_dma_release(struct driver_data *drv_data)
-{
- struct ssp_device *ssp = drv_data->ssp;
-
- DRCMR(ssp->drcmr_rx) = 0;
- DRCMR(ssp->drcmr_tx) = 0;
-
- if (drv_data->tx_channel != 0)
- pxa_free_dma(drv_data->tx_channel);
- if (drv_data->rx_channel != 0)
- pxa_free_dma(drv_data->rx_channel);
-}
-
-void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
-{
- if (drv_data->rx_channel != -1)
- DRCMR(drv_data->ssp->drcmr_rx) =
- DRCMR_MAPVLD | drv_data->rx_channel;
- if (drv_data->tx_channel != -1)
- DRCMR(drv_data->ssp->drcmr_tx) =
- DRCMR_MAPVLD | drv_data->tx_channel;
-}
-
-int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
- struct spi_device *spi,
- u8 bits_per_word, u32 *burst_code,
- u32 *threshold)
-{
- struct pxa2xx_spi_chip *chip_info =
- (struct pxa2xx_spi_chip *)spi->controller_data;
- int bytes_per_word;
- int burst_bytes;
- int thresh_words;
- int req_burst_size;
- int retval = 0;
-
- /* Set the threshold (in registers) to equal the same amount of data
- * as represented by burst size (in bytes). The computation below
- * is (burst_size rounded up to nearest 8 byte, word or long word)
- * divided by (bytes/register); the tx threshold is the inverse of
- * the rx, so that there will always be enough data in the rx fifo
- * to satisfy a burst, and there will always be enough space in the
- * tx fifo to accept a burst (a tx burst will overwrite the fifo if
- * there is not enough space), there must always remain enough empty
- * space in the rx fifo for any data loaded to the tx fifo.
- * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
- * will be 8, or half the fifo;
- * The threshold can only be set to 2, 4 or 8, but not 16, because
- * to burst 16 to the tx fifo, the fifo would have to be empty;
- * however, the minimum fifo trigger level is 1, and the tx will
- * request service when the fifo is at this level, with only 15 spaces.
- */
-
- /* find bytes/word */
- if (bits_per_word <= 8)
- bytes_per_word = 1;
- else if (bits_per_word <= 16)
- bytes_per_word = 2;
- else
- bytes_per_word = 4;
-
- /* use struct pxa2xx_spi_chip->dma_burst_size if available */
- if (chip_info)
- req_burst_size = chip_info->dma_burst_size;
- else {
- switch (chip->dma_burst_size) {
- default:
- /* if the default burst size is not set,
- * do it now */
- chip->dma_burst_size = DCMD_BURST8;
- case DCMD_BURST8:
- req_burst_size = 8;
- break;
- case DCMD_BURST16:
- req_burst_size = 16;
- break;
- case DCMD_BURST32:
- req_burst_size = 32;
- break;
- }
- }
- if (req_burst_size <= 8) {
- *burst_code = DCMD_BURST8;
- burst_bytes = 8;
- } else if (req_burst_size <= 16) {
- if (bytes_per_word == 1) {
- /* don't burst more than 1/2 the fifo */
- *burst_code = DCMD_BURST8;
- burst_bytes = 8;
- retval = 1;
- } else {
- *burst_code = DCMD_BURST16;
- burst_bytes = 16;
- }
- } else {
- if (bytes_per_word == 1) {
- /* don't burst more than 1/2 the fifo */
- *burst_code = DCMD_BURST8;
- burst_bytes = 8;
- retval = 1;
- } else if (bytes_per_word == 2) {
- /* don't burst more than 1/2 the fifo */
- *burst_code = DCMD_BURST16;
- burst_bytes = 16;
- retval = 1;
- } else {
- *burst_code = DCMD_BURST32;
- burst_bytes = 32;
- }
- }
-
- thresh_words = burst_bytes / bytes_per_word;
-
- /* thresh_words will be between 2 and 8 */
- *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
- | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
-
- return retval;
-}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 6f72ad01e041..7293d6d875c5 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
@@ -30,10 +31,6 @@
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/delay.h>
-
#include "spi-pxa2xx.h"
MODULE_AUTHOR("Stephen Street");
@@ -63,69 +60,60 @@ MODULE_ALIAS("platform:pxa2xx-spi");
| QUARK_X1000_SSCR1_TFT \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
-#define LPSS_RX_THRESH_DFLT 64
-#define LPSS_TX_LOTHRESH_DFLT 160
-#define LPSS_TX_HITHRESH_DFLT 224
+#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
+#define SPI_CS_CONTROL_SW_MODE BIT(0)
+#define SPI_CS_CONTROL_CS_HIGH BIT(1)
-struct quark_spi_rate {
- u32 bitrate;
- u32 dds_clk_rate;
- u32 clk_div;
+struct lpss_config {
+ /* LPSS offset from drv_data->ioaddr */
+ unsigned offset;
+ /* Register offsets from drv_data->lpss_base or -1 */
+ int reg_general;
+ int reg_ssp;
+ int reg_cs_ctrl;
+ /* FIFO thresholds */
+ u32 rx_threshold;
+ u32 tx_threshold_lo;
+ u32 tx_threshold_hi;
};
-/*
- * 'rate', 'dds', 'clk_div' lookup table, which is defined in
- * the Quark SPI datasheet.
- */
-static const struct quark_spi_rate quark_spi_rate_table[] = {
-/* bitrate, dds_clk_rate, clk_div */
- {50000000, 0x800000, 0},
- {40000000, 0x666666, 0},
- {25000000, 0x400000, 0},
- {20000000, 0x666666, 1},
- {16667000, 0x800000, 2},
- {13333000, 0x666666, 2},
- {12500000, 0x200000, 0},
- {10000000, 0x800000, 4},
- {8000000, 0x666666, 4},
- {6250000, 0x400000, 3},
- {5000000, 0x400000, 4},
- {4000000, 0x666666, 9},
- {3125000, 0x80000, 0},
- {2500000, 0x400000, 9},
- {2000000, 0x666666, 19},
- {1563000, 0x40000, 0},
- {1250000, 0x200000, 9},
- {1000000, 0x400000, 24},
- {800000, 0x666666, 49},
- {781250, 0x20000, 0},
- {625000, 0x200000, 19},
- {500000, 0x400000, 49},
- {400000, 0x666666, 99},
- {390625, 0x10000, 0},
- {250000, 0x400000, 99},
- {200000, 0x666666, 199},
- {195313, 0x8000, 0},
- {125000, 0x100000, 49},
- {100000, 0x200000, 124},
- {50000, 0x100000, 124},
- {25000, 0x80000, 124},
- {10016, 0x20000, 77},
- {5040, 0x20000, 154},
- {1002, 0x8000, 194},
+/* Keep these sorted with enum pxa_ssp_type */
+static const struct lpss_config lpss_platforms[] = {
+ { /* LPSS_LPT_SSP */
+ .offset = 0x800,
+ .reg_general = 0x08,
+ .reg_ssp = 0x0c,
+ .reg_cs_ctrl = 0x18,
+ .rx_threshold = 64,
+ .tx_threshold_lo = 160,
+ .tx_threshold_hi = 224,
+ },
+ { /* LPSS_BYT_SSP */
+ .offset = 0x400,
+ .reg_general = 0x08,
+ .reg_ssp = 0x0c,
+ .reg_cs_ctrl = 0x18,
+ .rx_threshold = 64,
+ .tx_threshold_lo = 160,
+ .tx_threshold_hi = 224,
+ },
};
-/* Offset from drv_data->lpss_base */
-#define GENERAL_REG 0x08
-#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
-#define SSP_REG 0x0c
-#define SPI_CS_CONTROL 0x18
-#define SPI_CS_CONTROL_SW_MODE BIT(0)
-#define SPI_CS_CONTROL_CS_HIGH BIT(1)
+static inline const struct lpss_config
+*lpss_get_config(const struct driver_data *drv_data)
+{
+ return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
+}
static bool is_lpss_ssp(const struct driver_data *drv_data)
{
- return drv_data->ssp_type == LPSS_SSP;
+ switch (drv_data->ssp_type) {
+ case LPSS_LPT_SSP:
+ case LPSS_BYT_SSP:
+ return true;
+ default:
+ return false;
+ }
}
static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
@@ -243,63 +231,43 @@ static void __lpss_ssp_write_priv(struct driver_data *drv_data,
*/
static void lpss_ssp_setup(struct driver_data *drv_data)
{
- unsigned offset = 0x400;
- u32 value, orig;
-
- /*
- * Perform auto-detection of the LPSS SSP private registers. They
- * can be either at 1k or 2k offset from the base address.
- */
- orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
-
- /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
- value = orig | SPI_CS_CONTROL_SW_MODE;
- writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
- value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
- if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
- offset = 0x800;
- goto detection_done;
- }
-
- orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
-
- /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
- value = orig & ~SPI_CS_CONTROL_SW_MODE;
- writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
- value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
- if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
- offset = 0x800;
- goto detection_done;
- }
+ const struct lpss_config *config;
+ u32 value;
-detection_done:
- /* Now set the LPSS base */
- drv_data->lpss_base = drv_data->ioaddr + offset;
+ config = lpss_get_config(drv_data);
+ drv_data->lpss_base = drv_data->ioaddr + config->offset;
/* Enable software chip select control */
value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
- __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+ __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
/* Enable multiblock DMA transfers */
if (drv_data->master_info->enable_dma) {
- __lpss_ssp_write_priv(drv_data, SSP_REG, 1);
-
- value = __lpss_ssp_read_priv(drv_data, GENERAL_REG);
- value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
- __lpss_ssp_write_priv(drv_data, GENERAL_REG, value);
+ __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
+
+ if (config->reg_general >= 0) {
+ value = __lpss_ssp_read_priv(drv_data,
+ config->reg_general);
+ value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+ __lpss_ssp_write_priv(drv_data,
+ config->reg_general, value);
+ }
}
}
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
+ const struct lpss_config *config;
u32 value;
- value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
+ config = lpss_get_config(drv_data);
+
+ value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
if (enable)
value &= ~SPI_CS_CONTROL_CS_HIGH;
else
value |= SPI_CS_CONTROL_CS_HIGH;
- __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+ __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
}
static void cs_assert(struct driver_data *drv_data)
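
With the per-type lpss_config table, the LPSS private-register layout is no
longer probed at run time: lpss_get_config() indexes lpss_platforms[] by
ssp_type relative to LPSS_LPT_SSP, so the table order has to match the enum
(hence the "Keep these sorted" comment). Supporting a further LPSS variant
then amounts to a new enum value plus one table entry; the reg_general >= 0
test in lpss_ssp_setup() already tolerates variants without a GENERAL
register. A hypothetical entry, with made-up offsets for illustration only,
would look like:

    	{ /* hypothetical LPSS_FOO_SSP -- offsets are invented */
    		.offset = 0x400,
    		.reg_general = -1,	/* no GENERAL register on this variant */
    		.reg_ssp = 0x20,
    		.reg_cs_ctrl = 0x24,
    		.rx_threshold = 64,
    		.tx_threshold_lo = 160,
    		.tx_threshold_hi = 224,
    	},
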
@@ -701,25 +669,124 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
}
/*
- * The Quark SPI data sheet gives a table, and for the given 'rate',
- * the 'dds' and 'clk_div' can be found in the table.
+ * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
+ * input frequency by fractions of 2^24. It also has a divider by 5.
+ *
+ * There are formulas to get baud rate value for given input frequency and
+ * divider parameters, such as DDS_CLK_RATE and SCR:
+ *
+ * Fsys = 200MHz
+ *
+ * Fssp = Fsys * DDS_CLK_RATE / 2^24 (1)
+ * Baud rate = Fsclk = Fssp / (2 * (SCR + 1)) (2)
+ *
+ * DDS_CLK_RATE is either 2^n or 2^n / 5.
+ * SCR is in range 0 .. 255
+ *
+ * Divisor = 5^i * 2^j * 2 * k
+ * i = [0, 1] i = 1 iff j = 0 or j > 3
+ * j = [0, 23] j = 0 iff i = 1
+ * k = [1, 256]
+ * Special case: j = 0, i = 1: Divisor = 2 / 5
+ *
+ * According to the specification, the recommended values for DDS_CLK_RATE
+ * are:
+ * Case 1: 2^n, n = [0, 23]
+ * Case 2: 2^24 * 2 / 5 (0x666666)
+ * Case 3: less than or equal to 2^24 / 5 / 16 (0x33333)
+ *
+ * In all cases the lowest possible value is better.
+ *
+ * The function calculates parameters for all cases and chooses the one closest
+ * to the requested baud rate.
*/
-static u32 quark_x1000_set_clk_regvals(u32 rate, u32 *dds, u32 *clk_div)
+static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(quark_spi_rate_table); i++) {
- if (rate >= quark_spi_rate_table[i].bitrate) {
- *dds = quark_spi_rate_table[i].dds_clk_rate;
- *clk_div = quark_spi_rate_table[i].clk_div;
- return quark_spi_rate_table[i].bitrate;
+ unsigned long xtal = 200000000;
+ unsigned long fref = xtal / 2; /* mandatory division by 2,
+ see (2) */
+ /* case 3 */
+ unsigned long fref1 = fref / 2; /* case 1 */
+ unsigned long fref2 = fref * 2 / 5; /* case 2 */
+ unsigned long scale;
+ unsigned long q, q1, q2;
+ long r, r1, r2;
+ u32 mul;
+
+ /* Case 1 */
+
+ /* Set initial value for DDS_CLK_RATE */
+ mul = (1 << 24) >> 1;
+
+ /* Calculate initial quot */
+ q1 = DIV_ROUND_CLOSEST(fref1, rate);
+
+ /* Scale q1 if it's too big */
+ if (q1 > 256) {
+ /* Scale q1 to range [1, 512] */
+ scale = fls_long(q1 - 1);
+ if (scale > 9) {
+ q1 >>= scale - 9;
+ mul >>= scale - 9;
}
+
+ /* Round the result if we have a remainder */
+ q1 += q1 & 1;
}
- *dds = quark_spi_rate_table[i-1].dds_clk_rate;
- *clk_div = quark_spi_rate_table[i-1].clk_div;
+ /* Decrease DDS_CLK_RATE as much as we can without loss in precision */
+ scale = __ffs(q1);
+ q1 >>= scale;
+ mul >>= scale;
+
+ /* Get the remainder */
+ r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);
+
+ /* Case 2 */
- return quark_spi_rate_table[i-1].bitrate;
+ q2 = DIV_ROUND_CLOSEST(fref2, rate);
+ r2 = abs(fref2 / q2 - rate);
+
+ /*
+	 * Choose the better of the two: the smaller the remainder, the better.
+	 * We can't use case 2 if q2 is greater than 256 since the SCR register can
+ * hold only values 0 .. 255.
+ */
+ if (r2 >= r1 || q2 > 256) {
+ /* case 1 is better */
+ r = r1;
+ q = q1;
+ } else {
+ /* case 2 is better */
+ r = r2;
+ q = q2;
+ mul = (1 << 24) * 2 / 5;
+ }
+
+	/* Check case 3 only if the divisor is big enough */
+ if (fref / rate >= 80) {
+ u64 fssp;
+ u32 m;
+
+ /* Calculate initial quot */
+ q1 = DIV_ROUND_CLOSEST(fref, rate);
+ m = (1 << 24) / q1;
+
+ /* Get the remainder */
+ fssp = (u64)fref * m;
+ do_div(fssp, 1 << 24);
+ r1 = abs(fssp - rate);
+
+ /* Choose this one if it suits better */
+ if (r1 < r) {
+ /* case 3 is better */
+ q = 1;
+ mul = m;
+ }
+ }
+
+ *dds = mul;
+ return q - 1;
}
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
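
A worked example of the new algorithm for a requested rate of 25MHz, which
reproduces the old lookup-table entry {25000000, 0x400000, 0}:

    fref  = 200MHz / 2 = 100MHz,  fref1 = 50MHz,  fref2 = 40MHz
    case 1: q1 = round(50MHz / 25MHz) = 2, mul = 2^23
            __ffs(2) = 1  ->  q1 = 1, mul = 2^22 = 0x400000, r1 = 0
    case 2: q2 = round(40MHz / 25MHz) = 2, r2 = |20MHz - 25MHz| = 5MHz (worse)
    case 3: skipped, since fref / rate = 4 < 80

    Result: DDS_CLK_RATE = 0x400000, SCR = q - 1 = 0
    Check with (1) and (2): Fssp = 200MHz * 0x400000 / 2^24 = 50MHz,
    baud rate = 50MHz / (2 * (0 + 1)) = 25MHz
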
@@ -730,23 +797,25 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
rate = min_t(int, ssp_clk, rate);
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
- return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
+ return (ssp_clk / (2 * rate) - 1) & 0xff;
else
- return ((ssp_clk / rate - 1) & 0xfff) << 8;
+ return (ssp_clk / rate - 1) & 0xfff;
}
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
struct chip_data *chip, int rate)
{
- u32 clk_div;
+ unsigned int clk_div;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
- quark_x1000_set_clk_regvals(rate, &chip->dds_rate, &clk_div);
- return clk_div << 8;
+ clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
+ break;
default:
- return ssp_get_clk_div(drv_data, rate);
+ clk_div = ssp_get_clk_div(drv_data, rate);
+ break;
}
+ return clk_div << 8;
}
static void pump_transfers(unsigned long data)
@@ -1025,6 +1094,7 @@ static int setup(struct spi_device *spi)
{
struct pxa2xx_spi_chip *chip_info = NULL;
struct chip_data *chip;
+ const struct lpss_config *config;
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
unsigned int clk_div;
uint tx_thres, tx_hi_thres, rx_thres;
@@ -1035,10 +1105,12 @@ static int setup(struct spi_device *spi)
tx_hi_thres = 0;
rx_thres = RX_THRESH_QUARK_X1000_DFLT;
break;
- case LPSS_SSP:
- tx_thres = LPSS_TX_LOTHRESH_DFLT;
- tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
- rx_thres = LPSS_RX_THRESH_DFLT;
+ case LPSS_LPT_SSP:
+ case LPSS_BYT_SSP:
+ config = lpss_get_config(drv_data);
+ tx_thres = config->tx_threshold_lo;
+ tx_hi_thres = config->tx_threshold_hi;
+ rx_thres = config->rx_threshold;
break;
default:
tx_thres = TX_THRESH_DFLT;
@@ -1192,6 +1264,18 @@ static void cleanup(struct spi_device *spi)
}
#ifdef CONFIG_ACPI
+
+static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ { "INT33C0", LPSS_LPT_SSP },
+ { "INT33C1", LPSS_LPT_SSP },
+ { "INT3430", LPSS_LPT_SSP },
+ { "INT3431", LPSS_LPT_SSP },
+ { "80860F0E", LPSS_BYT_SSP },
+ { "8086228E", LPSS_BYT_SSP },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+
static struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
@@ -1199,12 +1283,19 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
struct acpi_device *adev;
struct ssp_device *ssp;
struct resource *res;
- int devid;
+ const struct acpi_device_id *id;
+ int devid, type;
if (!ACPI_HANDLE(&pdev->dev) ||
acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
return NULL;
+ id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+ if (id)
+ type = (int)id->driver_data;
+ else
+ return NULL;
+
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
@@ -1222,7 +1313,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
ssp->clk = devm_clk_get(&pdev->dev, NULL);
ssp->irq = platform_get_irq(pdev, 0);
- ssp->type = LPSS_SSP;
+ ssp->type = type;
ssp->pdev = pdev;
ssp->port_id = -1;
@@ -1235,16 +1326,6 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
return pdata;
}
-static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
- { "INT33C0", 0 },
- { "INT33C1", 0 },
- { "INT3430", 0 },
- { "INT3431", 0 },
- { "80860F0E", 0 },
- { "8086228E", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
#else
static inline struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
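
Moving the ACPI ID table ahead of the probe helpers lets acpi_match_device()
recover the LPSS variant from the matched entry's driver_data, so ssp->type
becomes per-platform instead of the single LPSS_SSP value. A generic sketch
of that pattern, assuming a driver whose .acpi_match_table carries the type
in driver_data:

    static int lpss_type_sketch(struct platform_device *pdev)
    {
    	const struct acpi_device_id *id;

    	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
    	if (!id)
    		return -ENODEV;

    	return (int)id->driver_data;	/* e.g. LPSS_LPT_SSP or LPSS_BYT_SSP */
    }
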
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 85a58c906869..9f01e9c9aa75 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -162,11 +162,7 @@ extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
/*
* Select the right DMA implementation.
*/
-#if defined(CONFIG_SPI_PXA2XX_PXADMA)
-#define SPI_PXA2XX_USE_DMA 1
-#define MAX_DMA_LEN 8191
-#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE)
-#elif defined(CONFIG_SPI_PXA2XX_DMA)
+#if defined(CONFIG_SPI_PXA2XX_DMA)
#define SPI_PXA2XX_USE_DMA 1
#define MAX_DMA_LEN SZ_64K
#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ff9cdbdb6672..810a7fae3479 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -22,6 +22,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#define QUP_CONFIG 0x0000
#define QUP_STATE 0x0004
@@ -116,6 +118,8 @@
#define SPI_NUM_CHIPSELECTS 4
+#define SPI_MAX_DMA_XFER (SZ_64K - 64)
+
/* high speed mode is when bus rate is greater then 26MHz */
#define SPI_HS_MIN_RATE 26000000
#define SPI_MAX_RATE 50000000
@@ -140,9 +144,14 @@ struct spi_qup {
struct completion done;
int error;
int w_size; /* bytes per SPI word */
+ int n_words;
int tx_bytes;
int rx_bytes;
int qup_v1;
+
+ int use_dma;
+ struct dma_slave_config rx_conf;
+ struct dma_slave_config tx_conf;
};
@@ -198,7 +207,6 @@ static int spi_qup_set_state(struct spi_qup *controller, u32 state)
return 0;
}
-
static void spi_qup_fifo_read(struct spi_qup *controller,
struct spi_transfer *xfer)
{
@@ -266,6 +274,107 @@ static void spi_qup_fifo_write(struct spi_qup *controller,
}
}
+static void spi_qup_dma_done(void *data)
+{
+ struct spi_qup *qup = data;
+
+ complete(&qup->done);
+}
+
+static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
+ enum dma_transfer_direction dir,
+ dma_async_tx_callback callback)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+ unsigned int nents;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ chan = master->dma_tx;
+ nents = xfer->tx_sg.nents;
+ sgl = xfer->tx_sg.sgl;
+ } else {
+ chan = master->dma_rx;
+ nents = xfer->rx_sg.nents;
+ sgl = xfer->rx_sg.sgl;
+ }
+
+ desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
+ if (!desc)
+ return -EINVAL;
+
+ desc->callback = callback;
+ desc->callback_param = qup;
+
+ cookie = dmaengine_submit(desc);
+
+ return dma_submit_error(cookie);
+}
+
+static void spi_qup_dma_terminate(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ if (xfer->tx_buf)
+ dmaengine_terminate_all(master->dma_tx);
+ if (xfer->rx_buf)
+ dmaengine_terminate_all(master->dma_rx);
+}
+
+static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
+{
+ dma_async_tx_callback rx_done = NULL, tx_done = NULL;
+ int ret;
+
+ if (xfer->rx_buf)
+ rx_done = spi_qup_dma_done;
+ else if (xfer->tx_buf)
+ tx_done = spi_qup_dma_done;
+
+ if (xfer->rx_buf) {
+ ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
+ if (ret)
+ return ret;
+
+ dma_async_issue_pending(master->dma_rx);
+ }
+
+ if (xfer->tx_buf) {
+ ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
+ if (ret)
+ return ret;
+
+ dma_async_issue_pending(master->dma_tx);
+ }
+
+ return 0;
+}
+
+static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+ if (ret) {
+ dev_warn(qup->dev, "cannot set RUN state\n");
+ return ret;
+ }
+
+ ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
+ if (ret) {
+ dev_warn(qup->dev, "cannot set PAUSE state\n");
+ return ret;
+ }
+
+ spi_qup_fifo_write(qup, xfer);
+
+ return 0;
+}
+
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
struct spi_qup *controller = dev_id;
@@ -315,11 +424,13 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
error = -EIO;
}
- if (opflags & QUP_OP_IN_SERVICE_FLAG)
- spi_qup_fifo_read(controller, xfer);
+ if (!controller->use_dma) {
+ if (opflags & QUP_OP_IN_SERVICE_FLAG)
+ spi_qup_fifo_read(controller, xfer);
- if (opflags & QUP_OP_OUT_SERVICE_FLAG)
- spi_qup_fifo_write(controller, xfer);
+ if (opflags & QUP_OP_OUT_SERVICE_FLAG)
+ spi_qup_fifo_write(controller, xfer);
+ }
spin_lock_irqsave(&controller->lock, flags);
controller->error = error;
@@ -332,13 +443,35 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static u32
+spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ u32 mode;
+
+ qup->w_size = 4;
+
+ if (xfer->bits_per_word <= 8)
+ qup->w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ qup->w_size = 2;
+
+ qup->n_words = xfer->len / qup->w_size;
+
+ if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
+ mode = QUP_IO_M_MODE_FIFO;
+ else
+ mode = QUP_IO_M_MODE_BLOCK;
+
+ return mode;
+}
/* set clock freq ... bits per word */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
struct spi_qup *controller = spi_master_get_devdata(spi->master);
u32 config, iomode, mode, control;
- int ret, n_words, w_size;
+ int ret, n_words;
if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
dev_err(controller->dev, "too big size for loopback %d > %d\n",
@@ -358,35 +491,54 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
return -EIO;
}
- w_size = 4;
- if (xfer->bits_per_word <= 8)
- w_size = 1;
- else if (xfer->bits_per_word <= 16)
- w_size = 2;
+ mode = spi_qup_get_mode(spi->master, xfer);
+ n_words = controller->n_words;
- n_words = xfer->len / w_size;
- controller->w_size = w_size;
-
- if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
- mode = QUP_IO_M_MODE_FIFO;
+ if (mode == QUP_IO_M_MODE_FIFO) {
writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
/* must be zero for FIFO */
writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
- } else {
- mode = QUP_IO_M_MODE_BLOCK;
+ } else if (!controller->use_dma) {
writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
/* must be zero for BLOCK and BAM */
writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+ } else {
+ mode = QUP_IO_M_MODE_BAM;
+ writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+
+ if (!controller->qup_v1) {
+ void __iomem *input_cnt;
+
+ input_cnt = controller->base + QUP_MX_INPUT_CNT;
+ /*
+ * for DMA transfers, both QUP_MX_INPUT_CNT and
+			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
+			 * That case is a non-balanced transfer when there is
+			 * only an rx_buf.
+ */
+ if (xfer->tx_buf)
+ writel_relaxed(0, input_cnt);
+ else
+ writel_relaxed(n_words, input_cnt);
+
+ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+ }
}
iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
/* Set input and output transfer mode */
iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
- iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+
+ if (!controller->use_dma)
+ iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+ else
+ iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
+
iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
@@ -428,11 +580,31 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
config |= xfer->bits_per_word - 1;
config |= QUP_CONFIG_SPI_MODE;
+
+ if (controller->use_dma) {
+ if (!xfer->tx_buf)
+ config |= QUP_CONFIG_NO_OUTPUT;
+ if (!xfer->rx_buf)
+ config |= QUP_CONFIG_NO_INPUT;
+ }
+
writel_relaxed(config, controller->base + QUP_CONFIG);
/* only write to OPERATIONAL_MASK when register is present */
- if (!controller->qup_v1)
- writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
+ if (!controller->qup_v1) {
+ u32 mask = 0;
+
+ /*
+ * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
+ * status change in BAM mode
+ */
+
+ if (mode == QUP_IO_M_MODE_BAM)
+ mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
+
+ writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
+ }
+
return 0;
}
@@ -461,17 +633,13 @@ static int spi_qup_transfer_one(struct spi_master *master,
controller->tx_bytes = 0;
spin_unlock_irqrestore(&controller->lock, flags);
- if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
- dev_warn(controller->dev, "cannot set RUN state\n");
- goto exit;
- }
+ if (controller->use_dma)
+ ret = spi_qup_do_dma(master, xfer);
+ else
+ ret = spi_qup_do_pio(master, xfer);
- if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
- dev_warn(controller->dev, "cannot set PAUSE state\n");
+ if (ret)
goto exit;
- }
-
- spi_qup_fifo_write(controller, xfer);
if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
dev_warn(controller->dev, "cannot set EXECUTE state\n");
@@ -480,6 +648,7 @@ static int spi_qup_transfer_one(struct spi_master *master,
if (!wait_for_completion_timeout(&controller->done, timeout))
ret = -ETIMEDOUT;
+
exit:
spi_qup_set_state(controller, QUP_STATE_RESET);
spin_lock_irqsave(&controller->lock, flags);
@@ -487,6 +656,97 @@ exit:
if (!ret)
ret = controller->error;
spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (ret && controller->use_dma)
+ spi_qup_dma_terminate(master, xfer);
+
+ return ret;
+}
+
+static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ size_t dma_align = dma_get_cache_alignment();
+ u32 mode;
+
+ qup->use_dma = 0;
+
+ if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
+ IS_ERR_OR_NULL(master->dma_rx) ||
+ !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
+ return false;
+
+ if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
+ IS_ERR_OR_NULL(master->dma_tx) ||
+ !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
+ return false;
+
+ mode = spi_qup_get_mode(master, xfer);
+ if (mode == QUP_IO_M_MODE_FIFO)
+ return false;
+
+ qup->use_dma = 1;
+
+ return true;
+}
+
+static void spi_qup_release_dma(struct spi_master *master)
+{
+ if (!IS_ERR_OR_NULL(master->dma_rx))
+ dma_release_channel(master->dma_rx);
+ if (!IS_ERR_OR_NULL(master->dma_tx))
+ dma_release_channel(master->dma_tx);
+}
+
+static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
+{
+ struct spi_qup *spi = spi_master_get_devdata(master);
+ struct dma_slave_config *rx_conf = &spi->rx_conf,
+ *tx_conf = &spi->tx_conf;
+ struct device *dev = spi->dev;
+ int ret;
+
+ /* allocate dma resources, if available */
+ master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(master->dma_rx))
+ return PTR_ERR(master->dma_rx);
+
+ master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(master->dma_tx)) {
+ ret = PTR_ERR(master->dma_tx);
+ goto err_tx;
+ }
+
+ /* set DMA parameters */
+ rx_conf->direction = DMA_DEV_TO_MEM;
+ rx_conf->device_fc = 1;
+ rx_conf->src_addr = base + QUP_INPUT_FIFO;
+ rx_conf->src_maxburst = spi->in_blk_sz;
+
+ tx_conf->direction = DMA_MEM_TO_DEV;
+ tx_conf->device_fc = 1;
+ tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
+ tx_conf->dst_maxburst = spi->out_blk_sz;
+
+ ret = dmaengine_slave_config(master->dma_rx, rx_conf);
+ if (ret) {
+ dev_err(dev, "failed to configure RX channel\n");
+ goto err;
+ }
+
+ ret = dmaengine_slave_config(master->dma_tx, tx_conf);
+ if (ret) {
+ dev_err(dev, "failed to configure TX channel\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dma_release_channel(master->dma_tx);
+err_tx:
+ dma_release_channel(master->dma_rx);
return ret;
}
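As a rough sketch of the DMA-eligibility rule enforced by spi_qup_can_dma() above (block size and alignment values are assumed for illustration, not taken from the patch): a transfer may take the BAM DMA path only when its length is a whole number of FIFO blocks and the buffer satisfies the cache alignment reported by dma_get_cache_alignment().

/* Illustrative helper with assumed values; mirrors the checks in
 * spi_qup_can_dma() but is not part of the patch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool xfer_may_use_dma(const void *buf, size_t len,
			     size_t blk_sz, size_t dma_align)
{
	if (len % blk_sz)			/* partial block -> PIO */
		return false;
	return ((uintptr_t)buf % dma_align) == 0;	/* alignment check */
}

/* e.g. blk_sz = 16, dma_align = 64: a 96-byte transfer from an aligned
 * buffer qualifies for DMA, while a 100-byte transfer falls back to PIO. */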
@@ -498,7 +758,7 @@ static int spi_qup_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev;
void __iomem *base;
- u32 max_freq, iomode;
+ u32 max_freq, iomode, num_cs;
int ret, irq, size;
dev = &pdev->dev;
@@ -550,10 +810,11 @@ static int spi_qup_probe(struct platform_device *pdev)
}
	/* use the num-cs property unless it is absent or out of range */
- if (of_property_read_u16(dev->of_node, "num-cs",
- &master->num_chipselect) ||
- (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+ if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
+ num_cs > SPI_NUM_CHIPSELECTS)
master->num_chipselect = SPI_NUM_CHIPSELECTS;
+ else
+ master->num_chipselect = num_cs;
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
@@ -562,6 +823,8 @@ static int spi_qup_probe(struct platform_device *pdev)
master->transfer_one = spi_qup_transfer_one;
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
+ master->dma_alignment = dma_get_cache_alignment();
+ master->max_dma_len = SPI_MAX_DMA_XFER;
platform_set_drvdata(pdev, master);
@@ -573,6 +836,12 @@ static int spi_qup_probe(struct platform_device *pdev)
controller->cclk = cclk;
controller->irq = irq;
+ ret = spi_qup_init_dma(master, res->start);
+ if (ret == -EPROBE_DEFER)
+ goto error;
+ else if (!ret)
+ master->can_dma = spi_qup_can_dma;
+
/* set v1 flag if device is version 1 */
if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
controller->qup_v1 = 1;
@@ -609,7 +878,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret) {
dev_err(dev, "cannot set RESET state\n");
- goto error;
+ goto error_dma;
}
writel_relaxed(0, base + QUP_OPERATIONAL);
@@ -633,7 +902,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
IRQF_TRIGGER_HIGH, pdev->name, controller);
if (ret)
- goto error;
+ goto error_dma;
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(dev);
@@ -648,6 +917,8 @@ static int spi_qup_probe(struct platform_device *pdev)
disable_pm:
pm_runtime_disable(&pdev->dev);
+error_dma:
+ spi_qup_release_dma(master);
error:
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
@@ -739,6 +1010,8 @@ static int spi_qup_remove(struct platform_device *pdev)
if (ret)
return ret;
+ spi_qup_release_dma(master);
+
clk_disable_unprepare(controller->cclk);
clk_disable_unprepare(controller->iclk);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 1a777dc261d6..68e7efeb9a27 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -179,6 +179,7 @@ struct rockchip_spi {
u8 tmode;
u8 bpw;
u8 n_bytes;
+ u8 rsd_nsecs;
unsigned len;
u32 speed;
@@ -302,8 +303,8 @@ static int rockchip_spi_prepare_message(struct spi_master *master,
return 0;
}
-static int rockchip_spi_unprepare_message(struct spi_master *master,
- struct spi_message *msg)
+static void rockchip_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
{
unsigned long flags;
struct rockchip_spi *rs = spi_master_get_devdata(master);
@@ -313,8 +314,8 @@ static int rockchip_spi_unprepare_message(struct spi_master *master,
/*
	 * For DMA mode, we need to terminate the DMA channel and flush
	 * the fifo for the next transfer if the DMA transfer times out.
- * unprepare_message() was called by core if transfer complete
- * or timeout. Maybe it is reasonable for error handling here.
+	 * handle_err() is called by the core when a transfer fails,
+	 * so this is a reasonable place for the error handling.
*/
if (rs->use_dma) {
if (rs->state & RXBUSY) {
@@ -327,6 +328,12 @@ static int rockchip_spi_unprepare_message(struct spi_master *master,
}
spin_unlock_irqrestore(&rs->lock, flags);
+}
+
+static int rockchip_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
spi_enable_chip(rs, 0);
@@ -493,6 +500,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
{
u32 div = 0;
u32 dmacr = 0;
+ int rsd = 0;
u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
| (CR0_SSD_ONE << CR0_SSD_OFFSET);
@@ -519,9 +527,23 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
}
/* div doesn't support odd number */
- div = max_t(u32, rs->max_freq / rs->speed, 1);
+ div = DIV_ROUND_UP(rs->max_freq, rs->speed);
div = (div + 1) & 0xfffe;
+ /* Rx sample delay is expressed in parent clock cycles (max 3) */
+ rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
+ 1000000000 >> 8);
+ if (!rsd && rs->rsd_nsecs) {
+ pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n",
+ rs->max_freq, rs->rsd_nsecs);
+ } else if (rsd > 3) {
+ rsd = 3;
+ pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n",
+ rs->max_freq, rs->rsd_nsecs,
+ rsd * 1000000000U / rs->max_freq);
+ }
+ cr0 |= rsd << CR0_RSD_OFFSET;
+
writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
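A worked example of the Rx sample-delay rounding above, with assumed numbers (99 MHz spiclk parent, rx-sample-delay-ns = 10); DIV_ROUND_CLOSEST() is the helper from <linux/kernel.h>:

/* Illustrative values only -- not part of the patch. */
unsigned int max_freq  = 99000000;		/* e.g. clk_get_rate(spiclk) */
unsigned int rsd_nsecs = 10;			/* e.g. rx-sample-delay-ns   */
unsigned int rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (max_freq >> 8),
				     1000000000 >> 8);
/* = (10 * 386718 + 3906250 / 2) / 3906250 = 1 parent-clock cycle,
 * i.e. roughly 10.1 ns at 99 MHz, well within the maximum of 3 cycles. */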
@@ -614,6 +636,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
struct rockchip_spi *rs;
struct spi_master *master;
struct resource *mem;
+ u32 rsd_nsecs;
master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
if (!master)
@@ -665,6 +688,10 @@ static int rockchip_spi_probe(struct platform_device *pdev)
rs->dev = &pdev->dev;
rs->max_freq = clk_get_rate(rs->spiclk);
+ if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
+ &rsd_nsecs))
+ rs->rsd_nsecs = rsd_nsecs;
+
rs->fifo_len = get_fifo_len(rs);
if (!rs->fifo_len) {
dev_err(&pdev->dev, "Failed to get fifo length\n");
@@ -688,6 +715,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
master->prepare_message = rockchip_spi_prepare_message;
master->unprepare_message = rockchip_spi_unprepare_message;
master->transfer_one = rockchip_spi_transfer_one;
+ master->handle_err = rockchip_spi_handle_err;
rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
if (!rs->dma_tx.ch)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 46ce47076e63..f9189a0c8cec 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -177,6 +177,13 @@
#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
+/* QSPI on R-Car Gen2 */
+#define SPBFCR_TXTRG_1B 0x00 /* 31 bytes (1 byte available) */
+#define SPBFCR_TXTRG_32B 0x30 /* 0 byte (32 bytes available) */
+#define SPBFCR_RXTRG_1B 0x00 /* 1 byte (31 bytes available) */
+#define SPBFCR_RXTRG_32B 0x07 /* 32 bytes (0 byte available) */
+
+#define QSPI_BUFFER_SIZE 32u
struct rspi_data {
void __iomem *addr;
@@ -366,6 +373,52 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
return 0;
}
+static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
+{
+ u8 data;
+
+ data = rspi_read8(rspi, reg);
+ data &= ~mask;
+ data |= (val & mask);
+ rspi_write8(rspi, data, reg);
+}
+
+static int qspi_set_send_trigger(struct rspi_data *rspi, unsigned int len)
+{
+ unsigned int n;
+
+ n = min(len, QSPI_BUFFER_SIZE);
+
+ if (len >= QSPI_BUFFER_SIZE) {
+ /* sets triggering number to 32 bytes */
+ qspi_update(rspi, SPBFCR_TXTRG_MASK,
+ SPBFCR_TXTRG_32B, QSPI_SPBFCR);
+ } else {
+ /* sets triggering number to 1 byte */
+ qspi_update(rspi, SPBFCR_TXTRG_MASK,
+ SPBFCR_TXTRG_1B, QSPI_SPBFCR);
+ }
+
+ return n;
+}
+
+static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
+{
+ unsigned int n;
+
+ n = min(len, QSPI_BUFFER_SIZE);
+
+ if (len >= QSPI_BUFFER_SIZE) {
+ /* sets triggering number to 32 bytes */
+ qspi_update(rspi, SPBFCR_RXTRG_MASK,
+ SPBFCR_RXTRG_32B, QSPI_SPBFCR);
+ } else {
+ /* sets triggering number to 1 byte */
+ qspi_update(rspi, SPBFCR_RXTRG_MASK,
+ SPBFCR_RXTRG_1B, QSPI_SPBFCR);
+ }
+}
+
#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
@@ -609,18 +662,25 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
return __rspi_can_dma(rspi, xfer);
}
+static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer))
+ return -EAGAIN;
+
+ /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
+ return rspi_dma_transfer(rspi, &xfer->tx_sg,
+ xfer->rx_buf ? &xfer->rx_sg : NULL);
+}
+
static int rspi_common_transfer(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
int ret;
- if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
- /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
- ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
- xfer->rx_buf ? &xfer->rx_sg : NULL);
- if (ret != -EAGAIN)
- return ret;
- }
+ ret = rspi_dma_check_then_transfer(rspi, xfer);
+ if (ret != -EAGAIN)
+ return ret;
ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
if (ret < 0)
@@ -661,12 +721,55 @@ static int rspi_rz_transfer_one(struct spi_master *master,
return rspi_common_transfer(rspi, xfer);
}
+static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
+ u8 *rx, unsigned int len)
+{
+ int i, n, ret;
+ int error;
+
+ while (len > 0) {
+ n = qspi_set_send_trigger(rspi, len);
+ qspi_set_receive_trigger(rspi, len);
+ if (n == QSPI_BUFFER_SIZE) {
+ error = rspi_wait_for_tx_empty(rspi);
+ if (error < 0) {
+ dev_err(&rspi->master->dev, "transmit timeout\n");
+ return error;
+ }
+ for (i = 0; i < n; i++)
+ rspi_write_data(rspi, *tx++);
+
+ error = rspi_wait_for_rx_full(rspi);
+ if (error < 0) {
+ dev_err(&rspi->master->dev, "receive timeout\n");
+ return error;
+ }
+ for (i = 0; i < n; i++)
+ *rx++ = rspi_read_data(rspi);
+ } else {
+ ret = rspi_pio_transfer(rspi, tx, rx, n);
+ if (ret < 0)
+ return ret;
+ }
+ len -= n;
+ }
+
+ return 0;
+}
+
static int qspi_transfer_out_in(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
+ int ret;
+
qspi_receive_init(rspi);
- return rspi_common_transfer(rspi, xfer);
+ ret = rspi_dma_check_then_transfer(rspi, xfer);
+ if (ret != -EAGAIN)
+ return ret;
+
+ return qspi_trigger_transfer_out_in(rspi, xfer->tx_buf,
+ xfer->rx_buf, xfer->len);
}
static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
@@ -913,7 +1016,6 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
}
memset(&cfg, 0, sizeof(cfg));
- cfg.slave_id = id;
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
@@ -1191,7 +1293,7 @@ error1:
return ret;
}
-static struct platform_device_id spi_driver_ids[] = {
+static const struct platform_device_id spi_driver_ids[] = {
{ "rspi", (kernel_ulong_t)&rspi_ops },
{ "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
{ "qspi", (kernel_ulong_t)&qspi_ops },
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 9231c34b5a5c..2a8c513c4d07 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -324,7 +324,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
/* Acquire DMA channels */
sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- (void *)sdd->rx_dma.dmach, dev, "rx");
+ (void *)(long)sdd->rx_dma.dmach, dev, "rx");
if (!sdd->rx_dma.ch) {
dev_err(dev, "Failed to get RX DMA channel\n");
ret = -EBUSY;
@@ -333,7 +333,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
spi->dma_rx = sdd->rx_dma.ch;
sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- (void *)sdd->tx_dma.dmach, dev, "tx");
+ (void *)(long)sdd->tx_dma.dmach, dev, "tx");
if (!sdd->tx_dma.ch) {
dev_err(dev, "Failed to get TX DMA channel\n");
ret = -EBUSY;
@@ -1347,7 +1347,7 @@ static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
-static struct platform_device_id s3c64xx_spi_driver_ids[] = {
+static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
.driver_data = (kernel_ulong_t)&s3c2443_spi_port_config,
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 5a56acf8a43e..36af4d48a700 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -286,7 +286,7 @@ static int sc18is602_probe(struct i2c_client *client,
hw->freq = SC18IS602_CLOCK;
break;
}
- master->bus_num = client->adapter->nr;
+ master->bus_num = np ? -1 : client->adapter->nr;
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->setup = sc18is602_setup;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index e57eec0b2f46..d3370a612d84 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1030,7 +1030,6 @@ static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
}
memset(&cfg, 0, sizeof(cfg));
- cfg.slave_id = id;
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
@@ -1264,7 +1263,7 @@ static int sh_msiof_spi_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device_id spi_driver_ids[] = {
+static const struct platform_device_id spi_driver_ids[] = {
{ "spi_sh_msiof", (kernel_ulong_t)&sh_data },
{ "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
{ "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 2faeaa7b57a8..f17c0abe299f 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -482,7 +482,7 @@ static const struct dev_pm_ops spi_st_pm = {
SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
};
-static struct of_device_id stm_spi_match[] = {
+static const struct of_device_id stm_spi_match[] = {
{ .compatible = "st,comms-ssc4-spi", },
{},
};
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 884a716e50cb..5c0616870358 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -101,6 +101,7 @@ struct ti_qspi {
#define QSPI_FLEN(n) ((n - 1) << 0)
/* STATUS REGISTER */
+#define BUSY 0x01
#define WC 0x02
/* INTERRUPT REGISTER */
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
}
+static inline u32 qspi_is_busy(struct ti_qspi *qspi)
+{
+ u32 stat;
+ unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
+
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ while ((stat & BUSY) && time_after(timeout, jiffies)) {
+ cpu_relax();
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ }
+
+ WARN(stat & BUSY, "qspi busy\n");
+ return stat & BUSY;
+}
+
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
{
int wlen, count;
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
wlen = t->bits_per_word >> 3; /* in bytes */
while (count) {
+ if (qspi_is_busy(qspi))
+ return -EBUSY;
+
switch (wlen) {
case 1:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
+ if (qspi_is_busy(qspi))
+ return -EBUSY;
+
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
if (!wait_for_completion_timeout(&qspi->transfer_complete,
QSPI_COMPLETION_TIMEOUT)) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c64a3e59fce3..cf8b91b23a76 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -16,7 +16,6 @@
*/
#include <linux/kernel.h>
-#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
@@ -129,125 +128,11 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int spi_legacy_suspend(struct device *dev, pm_message_t message)
-{
- int value = 0;
- struct spi_driver *drv = to_spi_driver(dev->driver);
-
- /* suspend will stop irqs and dma; no more i/o */
- if (drv) {
- if (drv->suspend)
- value = drv->suspend(to_spi_device(dev), message);
- else
- dev_dbg(dev, "... can't suspend\n");
- }
- return value;
-}
-
-static int spi_legacy_resume(struct device *dev)
-{
- int value = 0;
- struct spi_driver *drv = to_spi_driver(dev->driver);
-
- /* resume may restart the i/o queue */
- if (drv) {
- if (drv->resume)
- value = drv->resume(to_spi_device(dev));
- else
- dev_dbg(dev, "... can't resume\n");
- }
- return value;
-}
-
-static int spi_pm_suspend(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- if (pm)
- return pm_generic_suspend(dev);
- else
- return spi_legacy_suspend(dev, PMSG_SUSPEND);
-}
-
-static int spi_pm_resume(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- if (pm)
- return pm_generic_resume(dev);
- else
- return spi_legacy_resume(dev);
-}
-
-static int spi_pm_freeze(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- if (pm)
- return pm_generic_freeze(dev);
- else
- return spi_legacy_suspend(dev, PMSG_FREEZE);
-}
-
-static int spi_pm_thaw(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- if (pm)
- return pm_generic_thaw(dev);
- else
- return spi_legacy_resume(dev);
-}
-
-static int spi_pm_poweroff(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- if (pm)
- return pm_generic_poweroff(dev);
- else
- return spi_legacy_suspend(dev, PMSG_HIBERNATE);
-}
-
-static int spi_pm_restore(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- if (pm)
- return pm_generic_restore(dev);
- else
- return spi_legacy_resume(dev);
-}
-#else
-#define spi_pm_suspend NULL
-#define spi_pm_resume NULL
-#define spi_pm_freeze NULL
-#define spi_pm_thaw NULL
-#define spi_pm_poweroff NULL
-#define spi_pm_restore NULL
-#endif
-
-static const struct dev_pm_ops spi_pm = {
- .suspend = spi_pm_suspend,
- .resume = spi_pm_resume,
- .freeze = spi_pm_freeze,
- .thaw = spi_pm_thaw,
- .poweroff = spi_pm_poweroff,
- .restore = spi_pm_restore,
- SET_RUNTIME_PM_OPS(
- pm_generic_runtime_suspend,
- pm_generic_runtime_resume,
- NULL
- )
-};
-
struct bus_type spi_bus_type = {
.name = "spi",
.dev_groups = spi_dev_groups,
.match = spi_match_device,
.uevent = spi_uevent,
- .pm = &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
@@ -686,7 +571,7 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
return 0;
}
-static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
struct spi_transfer *xfer;
struct device *tx_dev, *rx_dev;
@@ -714,13 +599,32 @@ static inline int __spi_map_msg(struct spi_master *master,
return 0;
}
-static inline int spi_unmap_msg(struct spi_master *master,
- struct spi_message *msg)
+static inline int __spi_unmap_msg(struct spi_master *master,
+ struct spi_message *msg)
{
return 0;
}
#endif /* !CONFIG_HAS_DMA */
+static inline int spi_unmap_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /*
+		 * Restore tx_buf or rx_buf to their original NULL value if
+		 * they were replaced with the dummy buffers during mapping.
+ */
+ if (xfer->tx_buf == master->dummy_tx)
+ xfer->tx_buf = NULL;
+ if (xfer->rx_buf == master->dummy_rx)
+ xfer->rx_buf = NULL;
+ }
+
+ return __spi_unmap_msg(master, msg);
+}
+
static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
struct spi_transfer *xfer;
@@ -851,6 +755,9 @@ out:
if (msg->status == -EINPROGRESS)
msg->status = ret;
+ if (msg->status && master->handle_err)
+ master->handle_err(master, msg);
+
spi_finalize_current_message(master);
return ret;
@@ -1091,9 +998,6 @@ void spi_finalize_current_message(struct spi_master *master)
spin_lock_irqsave(&master->queue_lock, flags);
mesg = master->cur_msg;
- master->cur_msg = NULL;
-
- queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
spi_unmap_msg(master, mesg);
@@ -1105,13 +1009,18 @@ void spi_finalize_current_message(struct spi_master *master)
"failed to unprepare message: %d\n", ret);
}
}
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+ master->cur_msg = NULL;
master->cur_msg_prepared = false;
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ trace_spi_message_done(mesg);
mesg->state = NULL;
if (mesg->complete)
mesg->complete(mesg->context);
-
- trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
@@ -1359,7 +1268,6 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
spi->dev.of_node = nc;
/* Register the new device */
- request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
rc = spi_add_device(spi);
if (rc) {
dev_err(&master->dev, "spi_device register error %s\n",
@@ -1893,6 +1801,8 @@ int spi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
spi->max_speed_hz = spi->master->max_speed_hz;
+ spi_set_cs(spi, false);
+
if (spi->master->setup)
status = spi->master->setup(spi);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 4eb7a980e670..92c909eed6b5 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -223,7 +223,7 @@ static int spidev_message(struct spidev_data *spidev,
struct spi_transfer *k_xfers;
struct spi_transfer *k_tmp;
struct spi_ioc_transfer *u_tmp;
- unsigned n, total;
+ unsigned n, total, tx_total, rx_total;
u8 *tx_buf, *rx_buf;
int status = -EFAULT;
@@ -239,33 +239,52 @@ static int spidev_message(struct spidev_data *spidev,
tx_buf = spidev->tx_buffer;
rx_buf = spidev->rx_buffer;
total = 0;
+ tx_total = 0;
+ rx_total = 0;
for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
n;
n--, k_tmp++, u_tmp++) {
k_tmp->len = u_tmp->len;
total += k_tmp->len;
- if (total > bufsiz) {
+ /* Since the function returns the total length of transfers
+ * on success, restrict the total to positive int values to
+ * avoid the return value looking like an error. Also check
+ * each transfer length to avoid arithmetic overflow.
+ */
+ if (total > INT_MAX || k_tmp->len > INT_MAX) {
status = -EMSGSIZE;
goto done;
}
if (u_tmp->rx_buf) {
+ /* this transfer needs space in RX bounce buffer */
+ rx_total += k_tmp->len;
+ if (rx_total > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
k_tmp->rx_buf = rx_buf;
if (!access_ok(VERIFY_WRITE, (u8 __user *)
(uintptr_t) u_tmp->rx_buf,
u_tmp->len))
goto done;
+ rx_buf += k_tmp->len;
}
if (u_tmp->tx_buf) {
+ /* this transfer needs space in TX bounce buffer */
+ tx_total += k_tmp->len;
+ if (tx_total > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
k_tmp->tx_buf = tx_buf;
if (copy_from_user(tx_buf, (const u8 __user *)
(uintptr_t) u_tmp->tx_buf,
u_tmp->len))
goto done;
+ tx_buf += k_tmp->len;
}
- tx_buf += k_tmp->len;
- rx_buf += k_tmp->len;
k_tmp->cs_change = !!u_tmp->cs_change;
k_tmp->tx_nbits = u_tmp->tx_nbits;
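For reference, a hypothetical user-space sketch of the SPI_IOC_MESSAGE ioctl this path serves: with the per-direction accounting above, the single TX byte counts only against the TX bounce buffer and the three RX bytes only against the RX bounce buffer, instead of every transfer consuming space in both buffers.

/* Hypothetical example, not part of the patch. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

static int half_duplex_pair(int fd)
{
	uint8_t cmd[1] = { 0x9f };		/* example opcode   */
	uint8_t id[3];				/* example response */
	struct spi_ioc_transfer xfers[2];

	memset(xfers, 0, sizeof(xfers));
	xfers[0].tx_buf = (uintptr_t)cmd;	/* TX-only transfer */
	xfers[0].len    = sizeof(cmd);
	xfers[1].rx_buf = (uintptr_t)id;	/* RX-only transfer */
	xfers[1].len    = sizeof(id);

	/* on success spidev_message() returns the total length, here 4 */
	return ioctl(fd, SPI_IOC_MESSAGE(2), xfers);
}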
@@ -303,8 +322,8 @@ static int spidev_message(struct spidev_data *spidev,
status = -EFAULT;
goto done;
}
+ rx_buf += u_tmp->len;
}
- rx_buf += u_tmp->len;
}
status = total;
@@ -684,6 +703,14 @@ static const struct file_operations spidev_fops = {
static struct class *spidev_class;
+#ifdef CONFIG_OF
+static const struct of_device_id spidev_dt_ids[] = {
+ { .compatible = "rohm,dh2228fv" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+#endif
+
/*-------------------------------------------------------------------------*/
static int spidev_probe(struct spi_device *spi)
@@ -692,6 +719,17 @@ static int spidev_probe(struct spi_device *spi)
int status;
unsigned long minor;
+ /*
+	 * spidev should never be referenced in DT without a specific
+	 * compatible string; it is a Linux implementation detail
+	 * rather than a description of the hardware.
+ */
+ if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
+ dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
+ WARN_ON(spi->dev.of_node &&
+ !of_match_device(spidev_dt_ids, &spi->dev));
+ }
+
/* Allocate driver data */
spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
if (!spidev)
@@ -758,13 +796,6 @@ static int spidev_remove(struct spi_device *spi)
return 0;
}
-static const struct of_device_id spidev_dt_ids[] = {
- { .compatible = "rohm,dh2228fv" },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, spidev_dt_ids);
-
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",