 drivers/spi/internals.h |  2
 drivers/spi/spi.c       | 73
 include/linux/spi/spi.h | 11
 3 files changed, 35 insertions(+), 51 deletions(-)
diff --git a/drivers/spi/internals.h b/drivers/spi/internals.h
index 47a87c2a6979..1f459b895891 100644
--- a/drivers/spi/internals.h
+++ b/drivers/spi/internals.h
@@ -45,7 +45,7 @@ static inline bool spi_xfer_is_dma_mapped(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
return ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer) &&
- ctlr->cur_msg_mapped;
+ (xfer->tx_sg_mapped || xfer->rx_sg_mapped);
}
#endif /* __LINUX_SPI_INTERNALS_H */
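
With this change, spi_xfer_is_dma_mapped() reports whether a given transfer was actually DMA-mapped by the core, based on the new per-transfer flags rather than the per-controller cur_msg_mapped state. Below is a minimal, hypothetical sketch (the foo_* names are illustrative and not part of this patch) of how a controller driver under drivers/spi/ might use the helper in its ->transfer_one() callback to pick between a DMA and a PIO path:

static int foo_spi_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	/* The core has already built and mapped xfer->tx_sg/rx_sg if DMA is usable. */
	if (spi_xfer_is_dma_mapped(ctlr, spi, xfer))
		return foo_spi_dma_transfer(ctlr, xfer);

	/* Otherwise fall back to programmed I/O on tx_buf/rx_buf. */
	return foo_spi_pio_transfer(ctlr, xfer);
}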
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c1e8cde426e5..9721adf048b5 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1220,11 +1220,6 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}
-/* Dummy SG for unidirect transfers */
-static struct scatterlist dummy_sg = {
- .page_link = SG_END,
-};
-
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct device *tx_dev, *rx_dev;
@@ -1263,8 +1258,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
attrs);
if (ret != 0)
return ret;
- } else {
- xfer->tx_sg.sgl = &dummy_sg;
+
+ xfer->tx_sg_mapped = true;
}
if (xfer->rx_buf != NULL) {
@@ -1278,8 +1273,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
return ret;
}
- } else {
- xfer->rx_sg.sgl = &dummy_sg;
+
+ xfer->rx_sg_mapped = true;
}
}
/* No transfer has been mapped, bail out with success */
@@ -1288,7 +1283,6 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
ctlr->cur_rx_dma_dev = rx_dev;
ctlr->cur_tx_dma_dev = tx_dev;
- ctlr->cur_msg_mapped = true;
return 0;
}
@@ -1299,57 +1293,46 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
struct device *tx_dev = ctlr->cur_tx_dma_dev;
struct spi_transfer *xfer;
- if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
- return 0;
-
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* The sync has already been done after each transfer. */
unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
- if (!ctlr->can_dma(ctlr, msg->spi, xfer))
- continue;
+ if (xfer->rx_sg_mapped)
+ spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ DMA_FROM_DEVICE, attrs);
+ xfer->rx_sg_mapped = false;
- spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
- DMA_FROM_DEVICE, attrs);
- spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
- DMA_TO_DEVICE, attrs);
+ if (xfer->tx_sg_mapped)
+ spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE, attrs);
+ xfer->tx_sg_mapped = false;
}
- ctlr->cur_msg_mapped = false;
-
return 0;
}
-static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
struct device *rx_dev = ctlr->cur_rx_dma_dev;
struct device *tx_dev = ctlr->cur_tx_dma_dev;
- if (!ctlr->cur_msg_mapped)
- return;
-
- if (!ctlr->can_dma(ctlr, msg->spi, xfer))
- return;
-
- dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
- dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ if (xfer->tx_sg_mapped)
+ dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ if (xfer->rx_sg_mapped)
+ dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}
-static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
struct device *rx_dev = ctlr->cur_rx_dma_dev;
struct device *tx_dev = ctlr->cur_tx_dma_dev;
- if (!ctlr->cur_msg_mapped)
- return;
-
- if (!ctlr->can_dma(ctlr, msg->spi, xfer))
- return;
-
- dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
- dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ if (xfer->rx_sg_mapped)
+ dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ if (xfer->tx_sg_mapped)
+ dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
@@ -1365,13 +1348,11 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
}
static void spi_dma_sync_for_device(struct spi_controller *ctrl,
- struct spi_message *msg,
struct spi_transfer *xfer)
{
}
static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
- struct spi_message *msg,
struct spi_transfer *xfer)
{
}
@@ -1643,13 +1624,13 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
reinit_completion(&ctlr->xfer_completion);
fallback_pio:
- spi_dma_sync_for_device(ctlr, msg, xfer);
+ spi_dma_sync_for_device(ctlr, xfer);
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
- spi_dma_sync_for_cpu(ctlr, msg, xfer);
+ spi_dma_sync_for_cpu(ctlr, xfer);
- if (ctlr->cur_msg_mapped &&
- (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+ if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
+ (xfer->error & SPI_TRANS_FAIL_NO_START)) {
__spi_unmap_msg(ctlr, msg);
ctlr->fallback = true;
xfer->error &= ~SPI_TRANS_FAIL_NO_START;
@@ -1671,7 +1652,7 @@ fallback_pio:
msg->status = ret;
}
- spi_dma_sync_for_cpu(ctlr, msg, xfer);
+ spi_dma_sync_for_cpu(ctlr, xfer);
} else {
if (xfer->len)
dev_err(&msg->spi->dev,
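
The net effect in spi.c is that unmap and CPU/device sync now act only on the directions that were really mapped, so unidirectional transfers no longer need the dummy scatterlist. As a hedged client-side illustration (the device, function, and buffer names are hypothetical), a TX-only transfer such as the one below ends up with only tx_sg_mapped set, and the RX direction is neither synced nor unmapped:

static int foo_send_command(struct spi_device *spi, const void *cmd, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = cmd,		/* no rx_buf: the RX side is never DMA-mapped */
		.len = len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/* The core maps tx_sg and sets xfer.tx_sg_mapped if it chooses DMA. */
	return spi_sync(spi, &msg);
}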
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e8e1e798924f..b4a89db4c855 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -447,7 +447,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @cur_msg_need_completion: Flag used internally to opportunistically skip
* the @cur_msg_completion. This flag is used to signal the context that
* is running spi_finalize_current_message() that it needs to complete()
- * @cur_msg_mapped: message has been mapped for DMA
* @fallback: fallback to PIO if DMA transfer return failure with
* SPI_TRANS_FAIL_NO_START.
* @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
@@ -708,7 +707,6 @@ struct spi_controller {
bool running;
bool rt;
bool auto_runtime_pm;
- bool cur_msg_mapped;
bool fallback;
bool last_cs_mode_high;
s8 last_cs[SPI_CS_CNT_MAX];
@@ -981,6 +979,8 @@ struct spi_res {
* transfer this transfer. Set to 0 if the SPI bus driver does
* not support it.
* @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA
+ * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
* @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
@@ -1077,10 +1077,13 @@ struct spi_transfer {
#define SPI_TRANS_FAIL_IO BIT(1)
u16 error;
- dma_addr_t tx_dma;
- dma_addr_t rx_dma;
+ bool tx_sg_mapped;
+ bool rx_sg_mapped;
+
struct sg_table tx_sg;
struct sg_table rx_sg;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
unsigned dummy_data:1;
unsigned cs_off:1;
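
For controller drivers that program their DMA hardware directly, the new fields make it explicit which sg_table is valid for a given transfer. A hypothetical sketch (the foo_* names are illustrative, not from this patch) of honoring the flags:

static void foo_spi_prepare_dma(struct foo_spi_priv *priv, struct spi_transfer *xfer)
{
	if (xfer->tx_sg_mapped)
		foo_spi_program_tx(priv, &xfer->tx_sg);	/* core-mapped TX sg_table */

	if (xfer->rx_sg_mapped)
		foo_spi_program_rx(priv, &xfer->rx_sg);	/* core-mapped RX sg_table */
}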