From 99adef310f682d6343cb40c1f6c9c25a4b3a450d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 16 Jan 2014 12:22:43 +0000 Subject: spi: Provide core support for DMA mapping transfers The process of DMA mapping buffers for SPI transfers does not vary between devices so in order to save duplication of code in drivers this can be factored out into the core, allowing it to be integrated with the work that is being done on factoring out the common elements from the data path including more sharing of dmaengine code. In order to use this masters need to provide a can_dma() operation and while the hardware is prepared they should ensure that DMA channels are provided in tx_dma and rx_dma. The core will then ensure that the buffers are mapped for DMA prior to calling transfer_one_message(). Currently the cleanup on error is not complete, this needs to be improved. Signed-off-by: Mark Brown --- drivers/spi/spi.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/spi/spi.h | 18 +++++++++++ 2 files changed, 100 insertions(+) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 23756b0f9036..bcdaa74f1c8e 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include #include @@ -580,6 +582,77 @@ static void spi_set_cs(struct spi_device *spi, bool enable) spi->master->set_cs(spi, !enable); } +static int spi_map_msg(struct spi_master *master, struct spi_message *msg) +{ + struct device *dev = master->dev.parent; + struct device *tx_dev, *rx_dev; + struct spi_transfer *xfer; + + if (msg->is_dma_mapped || !master->can_dma) + return 0; + + tx_dev = &master->dma_tx->dev->device; + rx_dev = &master->dma_rx->dev->device; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (!master->can_dma(master, msg->spi, xfer)) + continue; + + if (xfer->tx_buf != NULL) { + xfer->tx_dma = dma_map_single(tx_dev, + (void *)xfer->tx_buf, + xfer->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, xfer->tx_dma)) { + dev_err(dev, "dma_map_single Tx failed\n"); + return -ENOMEM; + } + } + + if (xfer->rx_buf != NULL) { + xfer->rx_dma = dma_map_single(rx_dev, + xfer->rx_buf, xfer->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, xfer->rx_dma)) { + dev_err(dev, "dma_map_single Rx failed\n"); + dma_unmap_single(tx_dev, xfer->tx_dma, + xfer->len, DMA_TO_DEVICE); + return -ENOMEM; + } + } + } + + master->cur_msg_mapped = true; + + return 0; +} + +static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) +{ + struct spi_transfer *xfer; + struct device *tx_dev, *rx_dev; + + if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma) + return 0; + + tx_dev = &master->dma_tx->dev->device; + rx_dev = &master->dma_rx->dev->device; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (!master->can_dma(master, msg->spi, xfer)) + continue; + + if (xfer->rx_buf) + dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len, + DMA_FROM_DEVICE); + if (xfer->tx_buf) + dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len, + DMA_TO_DEVICE); + } + + return 0; +} + /* * spi_transfer_one_message - Default implementation of transfer_one_message() * @@ -752,6 +825,13 @@ static void spi_pump_messages(struct kthread_work *work) master->cur_msg_prepared = true; } + ret = spi_map_msg(master, master->cur_msg); + if (ret) { + master->cur_msg->status = ret; + spi_finalize_current_message(master); + return; + } + ret = master->transfer_one_message(master, master->cur_msg); if (ret) { dev_err(&master->dev, @@ 
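/*
 * A minimal sketch, not part of the patch, of how a controller driver
 * might hook into the core DMA support described above: provide a
 * can_dma() callback and make sure dma_tx/dma_rx are valid while the
 * hardware is prepared.  The foo_* names and the 16 byte threshold are
 * purely illustrative.
 */
static bool foo_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Fall back to PIO for transfers too short to be worth DMA */
	return xfer->len > 16;
}

static int foo_spi_prepare_hardware(struct spi_master *master)
{
	/* dma_tx/dma_rx must stay valid while the hardware is prepared */
	master->dma_tx = dma_request_slave_channel(master->dev.parent, "tx");
	master->dma_rx = dma_request_slave_channel(master->dev.parent, "rx");
	if (!master->dma_tx || !master->dma_rx)
		return -ENXIO;

	return 0;
}

	/* in a hypothetical foo_spi_probe(): */
	master->can_dma = foo_spi_can_dma;
	master->prepare_transfer_hardware = foo_spi_prepare_hardware;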
-841,6 +921,8 @@ void spi_finalize_current_message(struct spi_master *master) queue_kthread_work(&master->kworker, &master->pump_messages); spin_unlock_irqrestore(&master->queue_lock, flags); + spi_unmap_msg(master, mesg); + if (master->cur_msg_prepared && master->unprepare_message) { ret = master->unprepare_message(master, mesg); if (ret) { diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a1d4ca290862..b354dcbed55b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -25,6 +25,8 @@ #include #include +struct dma_chan; + /* * INTERFACES between SPI master-side drivers and SPI infrastructure. * (There's no SPI slave support for Linux yet...) @@ -386,6 +388,17 @@ struct spi_master { /* called on release() to free memory provided by spi_master */ void (*cleanup)(struct spi_device *spi); + /* + * Used to enable core support for DMA handling, if can_dma() + * exists and returns true then the transfer will be mapped + * prior to transfer_one() being called. The driver should + * not modify or store xfer and dma_tx and dma_rx must be set + * while the device is prepared. + */ + bool (*can_dma)(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *xfer); + /* * These hooks are for drivers that want to use the generic * master transfer queueing mechanism. If these are used, the @@ -404,6 +417,7 @@ struct spi_master { bool rt; bool auto_runtime_pm; bool cur_msg_prepared; + bool cur_msg_mapped; struct completion xfer_completion; int (*prepare_transfer_hardware)(struct spi_master *master); @@ -425,6 +439,10 @@ struct spi_master { /* gpio chip select */ int *cs_gpios; + + /* DMA channels for use with core dmaengine helpers */ + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; }; static inline void *spi_master_get_devdata(struct spi_master *master) -- cgit v1.2.3 From 4ddc86005b1266b81c7b1b574a2402d3d8deda44 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 24 Jan 2014 19:00:55 +0000 Subject: spi/s3c64xx: Remove unused /CS GPIO management The GPIO enable and disable is done in the core so does not need to be replicated in the driver, delete the unneeded code. enable_cs() was not referenced at all. Signed-off-by: Mark Brown --- drivers/spi/spi-s3c64xx.c | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index ae907dde1371..da32cd9c9b59 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -555,23 +555,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, writel(chcfg, regs + S3C64XX_SPI_CH_CFG); } -static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd, - struct spi_device *spi) -{ - if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */ - if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ - /* Deselect the last toggled device */ - if (spi->cs_gpio >= 0) - gpio_set_value(spi->cs_gpio, - spi->mode & SPI_CS_HIGH ? 0 : 1); - } - sdd->tgl_spi = NULL; - } - - if (spi->cs_gpio >= 0) - gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0); -} - static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, int timeout_ms) { @@ -691,16 +674,6 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, return 0; } -static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, - struct spi_device *spi) -{ - if (sdd->tgl_spi == spi) - sdd->tgl_spi = NULL; - - if (spi->cs_gpio >= 0) - gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 
0 : 1); -} - static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) { void __iomem *regs = sdd->regs; @@ -1092,14 +1065,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi) pm_runtime_put(&sdd->pdev->dev); writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); - disable_cs(sdd, spi); return 0; setup_exit: pm_runtime_put(&sdd->pdev->dev); /* setup() returns with device de-selected */ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); - disable_cs(sdd, spi); gpio_free(cs->line); spi_set_ctldata(spi, NULL); -- cgit v1.2.3 From 3700c6eb1e41a898c35c8dd4d5b10dc65fdaf486 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 24 Jan 2014 20:05:43 +0000 Subject: spi/s3c64xx: Split wait_for_xfer() into PIO and DMA versions There is no meaningful code sharing between the PIO and DMA variants (just the timeout calculation) so in order to make the code easier to work with split the two cases. Looking at the code it is not clear how the PIO version works for large transmits, greater than FIFO size is only handled for RX. Signed-off-by: Mark Brown --- drivers/spi/spi-s3c64xx.c | 166 +++++++++++++++++++++++++--------------------- 1 file changed, 89 insertions(+), 77 deletions(-) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index da32cd9c9b59..e515b8a6f590 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -576,101 +576,110 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, return RX_FIFO_LVL(status, sdd); } -static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, - struct spi_transfer *xfer, int dma_mode) +static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd, + struct spi_transfer *xfer) { void __iomem *regs = sdd->regs; unsigned long val; + u32 status; int ms; /* millisecs to xfer 'len' bytes @ 'cur_speed' */ ms = xfer->len * 8 * 1000 / sdd->cur_speed; ms += 10; /* some tolerance */ - if (dma_mode) { - val = msecs_to_jiffies(ms) + 10; - val = wait_for_completion_timeout(&sdd->xfer_completion, val); - } else { - u32 status; - val = msecs_to_loops(ms); - do { + val = msecs_to_jiffies(ms) + 10; + val = wait_for_completion_timeout(&sdd->xfer_completion, val); + + /* + * If the previous xfer was completed within timeout, then + * proceed further else return -EIO. + * DmaTx returns after simply writing data in the FIFO, + * w/o waiting for real transmission on the bus to finish. + * DmaRx returns only after Dma read data from FIFO which + * needs bus transmission to finish, so we don't worry if + * Xfer involved Rx(with or without Tx). + */ + if (val && !xfer->rx_buf) { + val = msecs_to_loops(10); + status = readl(regs + S3C64XX_SPI_STATUS); + while ((TX_FIFO_LVL(status, sdd) + || !S3C64XX_SPI_ST_TX_DONE(status, sdd)) + && --val) { + cpu_relax(); status = readl(regs + S3C64XX_SPI_STATUS); - } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val); + } + } - if (dma_mode) { - u32 status; - - /* - * If the previous xfer was completed within timeout, then - * proceed further else return -EIO. - * DmaTx returns after simply writing data in the FIFO, - * w/o waiting for real transmission on the bus to finish. - * DmaRx returns only after Dma read data from FIFO which - * needs bus transmission to finish, so we don't worry if - * Xfer involved Rx(with or without Tx). 
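/*
 * The only logic the PIO and DMA paths share is the timeout estimate
 * quoted above; a small helper along these lines (hypothetical, not
 * added by the patch) could factor it out of both wait_for_dma() and
 * wait_for_pio().  For scale: 256 bytes at 10MHz is 256 * 8 * 1000 /
 * 10000000, roughly 0.2ms, so the fixed 10ms of tolerance dominates
 * for short transfers.
 */
static int s3c64xx_spi_xfer_timeout_ms(struct s3c64xx_spi_driver_data *sdd,
				       struct spi_transfer *xfer)
{
	/* millisecs to xfer 'len' bytes @ 'cur_speed', plus some slack */
	return xfer->len * 8 * 1000 / sdd->cur_speed + 10;
}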
- */ - if (val && !xfer->rx_buf) { - val = msecs_to_loops(10); - status = readl(regs + S3C64XX_SPI_STATUS); - while ((TX_FIFO_LVL(status, sdd) - || !S3C64XX_SPI_ST_TX_DONE(status, sdd)) - && --val) { - cpu_relax(); - status = readl(regs + S3C64XX_SPI_STATUS); - } + /* If timed out while checking rx/tx status return error */ + if (!val) + return -EIO; - } + return 0; +} - /* If timed out while checking rx/tx status return error */ - if (!val) - return -EIO; - } else { - int loops; - u32 cpy_len; - u8 *buf; - - /* If it was only Tx */ - if (!xfer->rx_buf) { - sdd->state &= ~TXBUSY; - return 0; - } +static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd, + struct spi_transfer *xfer) +{ + void __iomem *regs = sdd->regs; + unsigned long val; + u32 status; + int loops; + u32 cpy_len; + u8 *buf; + int ms; - /* - * If the receive length is bigger than the controller fifo - * size, calculate the loops and read the fifo as many times. - * loops = length / max fifo size (calculated by using the - * fifo mask). - * For any size less than the fifo size the below code is - * executed atleast once. - */ - loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1); - buf = xfer->rx_buf; - do { - /* wait for data to be received in the fifo */ - cpy_len = s3c64xx_spi_wait_for_timeout(sdd, - (loops ? ms : 0)); + /* millisecs to xfer 'len' bytes @ 'cur_speed' */ + ms = xfer->len * 8 * 1000 / sdd->cur_speed; + ms += 10; /* some tolerance */ - switch (sdd->cur_bpw) { - case 32: - ioread32_rep(regs + S3C64XX_SPI_RX_DATA, - buf, cpy_len / 4); - break; - case 16: - ioread16_rep(regs + S3C64XX_SPI_RX_DATA, - buf, cpy_len / 2); - break; - default: - ioread8_rep(regs + S3C64XX_SPI_RX_DATA, - buf, cpy_len); - break; - } + val = msecs_to_loops(ms); + do { + status = readl(regs + S3C64XX_SPI_STATUS); + } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val); - buf = buf + cpy_len; - } while (loops--); - sdd->state &= ~RXBUSY; + + /* If it was only Tx */ + if (!xfer->rx_buf) { + sdd->state &= ~TXBUSY; + return 0; } + /* + * If the receive length is bigger than the controller fifo + * size, calculate the loops and read the fifo as many times. + * loops = length / max fifo size (calculated by using the + * fifo mask). + * For any size less than the fifo size the below code is + * executed atleast once. + */ + loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1); + buf = xfer->rx_buf; + do { + /* wait for data to be received in the fifo */ + cpy_len = s3c64xx_spi_wait_for_timeout(sdd, + (loops ? ms : 0)); + + switch (sdd->cur_bpw) { + case 32: + ioread32_rep(regs + S3C64XX_SPI_RX_DATA, + buf, cpy_len / 4); + break; + case 16: + ioread16_rep(regs + S3C64XX_SPI_RX_DATA, + buf, cpy_len / 2); + break; + default: + ioread8_rep(regs + S3C64XX_SPI_RX_DATA, + buf, cpy_len); + break; + } + + buf = buf + cpy_len; + } while (loops--); + sdd->state &= ~RXBUSY; + return 0; } @@ -902,7 +911,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, spin_unlock_irqrestore(&sdd->lock, flags); - status = wait_for_xfer(sdd, xfer, use_dma); + if (use_dma) + status = wait_for_dma(sdd, xfer); + else + status = wait_for_pio(sdd, xfer); if (status) { dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n", -- cgit v1.2.3 From 3a2eba9bd0a6447dfbc01635e4cd0689f5f2bdad Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 28 Jan 2014 20:17:03 +0000 Subject: spi: Provide core support for full duplex devices It is fairly common for SPI devices to require that one or both transfer directions is always active. 
Currently drivers open code this in various ways with varying degrees of efficiency. Start factoring this out by providing flags SPI_MASTER_MUST_TX and SPI_MASTER_MUST_RX. These will cause the core to provide buffers for the requested direction if none are specified in the underlying transfer. Currently this is fairly inefficient since we actually allocate a data buffer which may get large, support for mapping transfers using a scatterlist will allow us to avoid this for DMA based transfers. Signed-off-by: Mark Brown --- drivers/spi/spi.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/spi/spi.h | 6 ++++++ 2 files changed, 53 insertions(+) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index bcdaa74f1c8e..bb7cf561c311 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -587,6 +587,49 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) struct device *dev = master->dev.parent; struct device *tx_dev, *rx_dev; struct spi_transfer *xfer; + void *tmp; + size_t max_tx, max_rx; + + if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { + max_tx = 0; + max_rx = 0; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if ((master->flags & SPI_MASTER_MUST_TX) && + !xfer->tx_buf) + max_tx = max(xfer->len, max_tx); + if ((master->flags & SPI_MASTER_MUST_RX) && + !xfer->rx_buf) + max_rx = max(xfer->len, max_rx); + } + + if (max_tx) { + tmp = krealloc(master->dummy_tx, max_tx, + GFP_KERNEL | GFP_DMA); + if (!tmp) + return -ENOMEM; + master->dummy_tx = tmp; + memset(tmp, 0, max_tx); + } + + if (max_rx) { + tmp = krealloc(master->dummy_rx, max_rx, + GFP_KERNEL | GFP_DMA); + if (!tmp) + return -ENOMEM; + master->dummy_rx = tmp; + } + + if (max_tx || max_rx) { + list_for_each_entry(xfer, &msg->transfers, + transfer_list) { + if (!xfer->tx_buf) + xfer->tx_buf = master->dummy_tx; + if (!xfer->rx_buf) + xfer->rx_buf = master->dummy_rx; + } + } + } if (msg->is_dma_mapped || !master->can_dma) return 0; @@ -759,6 +802,10 @@ static void spi_pump_messages(struct kthread_work *work) } master->busy = false; spin_unlock_irqrestore(&master->queue_lock, flags); + kfree(master->dummy_rx); + master->dummy_rx = NULL; + kfree(master->dummy_tx); + master->dummy_tx = NULL; if (master->unprepare_transfer_hardware && master->unprepare_transfer_hardware(master)) dev_err(&master->dev, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b354dcbed55b..31a5b0ee93ec 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -347,6 +347,8 @@ struct spi_master { #define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ #define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ #define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ +#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ +#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ /* lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; @@ -443,6 +445,10 @@ struct spi_master { /* DMA channels for use with core dmaengine helpers */ struct dma_chan *dma_tx; struct dma_chan *dma_rx; + + /* dummy data for full duplex devices */ + void *dummy_rx; + void *dummy_tx; }; static inline void *spi_master_get_devdata(struct spi_master *master) -- cgit v1.2.3 From 6ad45a27cbe343ec8d7888e5edf6335499a4b555 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sun, 2 Feb 2014 13:47:47 +0000 Subject: spi: Make core DMA mapping functions generate scatterlists We cannot unconditionally use dma_map_single() to map data for use with SPI since transfers may exceed a page and virtual 
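From the driver side the SPI_MASTER_MUST_TX/SPI_MASTER_MUST_RX support added above needs nothing more than setting the flags, roughly as in the hypothetical probe() fragment below; the core then points any missing tx_buf/rx_buf at its dummy_tx/dummy_rx buffers in spi_map_msg() before the message is transferred, so drivers can drop their open-coded dummy buffers.

	/* controller whose TX and RX FIFOs must both be serviced
	 * on every transfer */
	master->flags |= SPI_MASTER_MUST_TX | SPI_MASTER_MUST_RX;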
addresses may not be provided with physically contiguous pages. Further, addresses allocated using vmalloc() need to be mapped differently to other addresses. Currently only the MXS driver handles all this, a few drivers do handle the possibility that buffers may not be physically contiguous which is the main potential problem but many don't even do that. Factoring this out into the core will make it easier for drivers to do a good job so if the driver is using the core DMA code then generate a scatterlist instead of mapping to a single address so do that. This code is mainly based on a combination of the existing code in the MXS and PXA2xx drivers. In future we should be able to extend it to allow the core to concatenate adjacent transfers if they are compatible, improving performance. Currently for simplicity clients are not allowed to use the scatterlist when they do DMA mapping, in the future the existing single address mappings will be replaced with use of the scatterlist most likely as part of pre-verifying transfers. This change makes it mandatory to use scatterlists when using the core DMA mapping so update the s3c64xx driver to do this when used with dmaengine. Doing so makes the code more ugly but it is expected that the old s3c-dma code can be removed very soon. Signed-off-by: Mark Brown --- drivers/spi/spi-s3c64xx.c | 14 +++++-- drivers/spi/spi.c | 101 ++++++++++++++++++++++++++++++++++------------ include/linux/spi/spi.h | 7 ++++ 3 files changed, 94 insertions(+), 28 deletions(-) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index e515b8a6f590..25c9bd409a87 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -381,7 +381,7 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd, #else static void prepare_dma(struct s3c64xx_spi_dma_data *dma, - unsigned len, dma_addr_t buf) + struct sg_table *sgt) { struct s3c64xx_spi_driver_data *sdd; struct dma_slave_config config; @@ -407,8 +407,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, dmaengine_slave_config(dma->ch, &config); } - desc = dmaengine_prep_slave_single(dma->ch, buf, len, - dma->direction, DMA_PREP_INTERRUPT); + desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents, + dma->direction, DMA_PREP_INTERRUPT); desc->callback = s3c64xx_spi_dmacb; desc->callback_param = dma; @@ -515,7 +515,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, chcfg |= S3C64XX_SPI_CH_TXCH_ON; if (dma_mode) { modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; +#ifndef CONFIG_S3C_DMA + prepare_dma(&sdd->tx_dma, &xfer->tx_sg); +#else prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma); +#endif } else { switch (sdd->cur_bpw) { case 32: @@ -547,7 +551,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) | S3C64XX_SPI_PACKET_CNT_EN, regs + S3C64XX_SPI_PACKET_CNT); +#ifndef CONFIG_S3C_DMA + prepare_dma(&sdd->rx_dma, &xfer->rx_sg); +#else prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma); +#endif } } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index bb7cf561c311..49313dd0a144 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -582,13 +582,70 @@ static void spi_set_cs(struct spi_device *spi, bool enable) spi->master->set_cs(spi, !enable); } +static int spi_map_buf(struct spi_master *master, struct device *dev, + struct sg_table *sgt, void *buf, size_t len, + enum dma_data_direction dir) +{ + const bool vmalloced_buf = is_vmalloc_addr(buf); + const int desc_len = vmalloced_buf ? 
PAGE_SIZE : master->max_dma_len; + const int sgs = DIV_ROUND_UP(len, desc_len); + struct page *vm_page; + void *sg_buf; + size_t min; + int i, ret; + + ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); + if (ret != 0) + return ret; + + for (i = 0; i < sgs; i++) { + min = min_t(size_t, len, desc_len); + + if (vmalloced_buf) { + vm_page = vmalloc_to_page(buf); + if (!vm_page) { + sg_free_table(sgt); + return -ENOMEM; + } + sg_buf = page_address(vm_page) + + ((size_t)buf & ~PAGE_MASK); + } else { + sg_buf = buf; + } + + sg_set_buf(&sgt->sgl[i], sg_buf, min); + + buf += min; + len -= min; + } + + ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); + if (ret < 0) { + sg_free_table(sgt); + return ret; + } + + sgt->nents = ret; + + return 0; +} + +static void spi_unmap_buf(struct spi_master *master, struct device *dev, + struct sg_table *sgt, enum dma_data_direction dir) +{ + if (sgt->orig_nents) { + dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); + sg_free_table(sgt); + } +} + static int spi_map_msg(struct spi_master *master, struct spi_message *msg) { - struct device *dev = master->dev.parent; struct device *tx_dev, *rx_dev; struct spi_transfer *xfer; void *tmp; size_t max_tx, max_rx; + int ret; if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { max_tx = 0; @@ -631,7 +688,7 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) } } - if (msg->is_dma_mapped || !master->can_dma) + if (!master->can_dma) return 0; tx_dev = &master->dma_tx->dev->device; @@ -642,25 +699,21 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) continue; if (xfer->tx_buf != NULL) { - xfer->tx_dma = dma_map_single(tx_dev, - (void *)xfer->tx_buf, - xfer->len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, xfer->tx_dma)) { - dev_err(dev, "dma_map_single Tx failed\n"); - return -ENOMEM; - } + ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, + (void *)xfer->tx_buf, xfer->len, + DMA_TO_DEVICE); + if (ret != 0) + return ret; } if (xfer->rx_buf != NULL) { - xfer->rx_dma = dma_map_single(rx_dev, - xfer->rx_buf, xfer->len, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, xfer->rx_dma)) { - dev_err(dev, "dma_map_single Rx failed\n"); - dma_unmap_single(tx_dev, xfer->tx_dma, - xfer->len, DMA_TO_DEVICE); - return -ENOMEM; + ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, + xfer->rx_buf, xfer->len, + DMA_FROM_DEVICE); + if (ret != 0) { + spi_unmap_buf(master, tx_dev, &xfer->tx_sg, + DMA_TO_DEVICE); + return ret; } } } @@ -675,7 +728,7 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) struct spi_transfer *xfer; struct device *tx_dev, *rx_dev; - if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma) + if (!master->cur_msg_mapped || !master->can_dma) return 0; tx_dev = &master->dma_tx->dev->device; @@ -685,12 +738,8 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) if (!master->can_dma(master, msg->spi, xfer)) continue; - if (xfer->rx_buf) - dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len, - DMA_FROM_DEVICE); - if (xfer->tx_buf) - dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len, - DMA_TO_DEVICE); + spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); + spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); } return 0; @@ -1503,6 +1552,8 @@ int spi_register_master(struct spi_master *master) mutex_init(&master->bus_lock_mutex); master->bus_lock_flag = 0; init_completion(&master->xfer_completion); + if (!master->max_dma_len) + master->max_dma_len = INT_MAX; /* register the device, then 
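/*
 * Once the core has built xfer->tx_sg/xfer->rx_sg, a dmaengine based
 * driver hands them to the scatterlist prep call instead of the single
 * address variant, much as the s3c64xx conversion above does.  A rough
 * TX side sketch; chan, foo_dma_callback and foo are placeholders:
 */
struct dma_async_tx_descriptor *desc;

desc = dmaengine_prep_slave_sg(chan, xfer->tx_sg.sgl, xfer->tx_sg.nents,
			       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!desc)
	return -EIO;

desc->callback = foo_dma_callback;
desc->callback_param = foo;
dmaengine_submit(desc);
dma_async_issue_pending(chan);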
userspace will see it. * registration fails if the bus ID is in use. diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 31a5b0ee93ec..0c23c835d48b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -24,6 +24,7 @@ #include #include #include +#include struct dma_chan; @@ -268,6 +269,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @auto_runtime_pm: the core should ensure a runtime PM reference is held * while the hardware is prepared, using the parent * device for the spidev + * @max_dma_len: Maximum length of a DMA transfer for the device. * @prepare_transfer_hardware: a message will soon arrive from the queue * so the subsystem requests the driver to prepare the transfer hardware * by issuing this call @@ -421,6 +423,7 @@ struct spi_master { bool cur_msg_prepared; bool cur_msg_mapped; struct completion xfer_completion; + size_t max_dma_len; int (*prepare_transfer_hardware)(struct spi_master *master); int (*transfer_one_message)(struct spi_master *master, @@ -533,6 +536,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. * @transfer_list: transfers are sequenced through @spi_message.transfers + * @tx_sg: Scatterlist for transmit, currently not for client use + * @rx_sg: Scatterlist for receive, currently not for client use * * SPI transfers always write the same number of bytes as they read. * Protocol drivers should always provide @rx_buf and/or @tx_buf. @@ -600,6 +605,8 @@ struct spi_transfer { dma_addr_t tx_dma; dma_addr_t rx_dma; + struct sg_table tx_sg; + struct sg_table rx_sg; unsigned cs_change:1; unsigned tx_nbits:3; -- cgit v1.2.3 From 513273538a6c10dba1170ecdee5c2da15acecdb5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 12 Feb 2014 20:31:26 +0000 Subject: spi: Make max_tx and max_rx the same type Prevents spurious compiler warnings. Signed-off-by: Mark Brown --- drivers/spi/spi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 49313dd0a144..f3fb1acf9ac1 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -644,7 +644,7 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) struct device *tx_dev, *rx_dev; struct spi_transfer *xfer; void *tmp; - size_t max_tx, max_rx; + unsigned int max_tx, max_rx; int ret; if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { -- cgit v1.2.3
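The "spurious compiler warnings" mentioned here are most likely from the kernel's type-checking max() macro: spi_map_msg() computes max(xfer->len, max_tx), spi_transfer.len is a plain unsigned, and comparing it with a size_t on 64-bit builds makes max() emit its "comparison of distinct pointer types lacks a cast" warning. A sketch of the two usual fixes, assuming that diagnosis; the patch takes the first:

	unsigned int max_tx = 0;
	max_tx = max(xfer->len, max_tx);	/* types now match, no warning */

	/* or keep size_t and force a common type explicitly: */
	size_t max_rx = 0;
	max_rx = max_t(size_t, xfer->len, max_rx);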