author    Vinod Koul <vinod.koul@intel.com>  2015-06-25 06:51:37 +0300
committer Vinod Koul <vinod.koul@intel.com>  2015-06-25 06:51:37 +0300
commit    9324fdf5267b12f6db660fe52e882bbfffcc109a (patch)
tree      877a81ac336724efdb02403eceb8ca0b1e7b10eb
parent    4983a501afede12f95d26e1e213f8f2e9eda1871 (diff)
parent    5f88d9706fa48084eaab2dbbec27779809c5106b (diff)
download  linux-9324fdf5267b12f6db660fe52e882bbfffcc109a.tar.xz
Merge branch 'topic/core' into for-linus
-rw-r--r--  Documentation/dmaengine/provider.txt  11
-rw-r--r--  drivers/dma/virt-dma.c                19
-rw-r--r--  drivers/dma/virt-dma.h                13
-rw-r--r--  include/linux/dmaengine.h              8
4 files changed, 39 insertions(+), 12 deletions(-)
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index 05d2280190f1..ca67b0f04c6e 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -345,11 +345,12 @@ where to put them)
that abstracts it away.
* DMA_CTRL_ACK
- - Undocumented feature
- - No one really has an idea of what it's about, besides being
- related to reusing the DMA transaction descriptors or having
- additional transactions added to it in the async-tx API
- - Useless in the case of the slave API
+ - If set, the transfer can be reused after being completed.
+ - There is a guarantee the transfer won't be freed until it is acked
+ by async_tx_ack().
+ - As a consequence, if a device driver wants to skip the dma_map_sg() and
+ dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
+ it can resubmit the transfer right after its completion.
General Design Notes
--------------------
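The reuse semantics documented in the hunk above can be illustrated from the client side. The following is a minimal sketch, not part of this commit: it assumes a slave channel and an already DMA-mapped buffer obtained elsewhere, and the names my_chan, my_buf, my_len, my_start_transfer and my_dma_complete are hypothetical. The descriptor is prepared with DMA_CTRL_ACK set and resubmitted from its completion callback, skipping the dma_map_sg()/dma_unmap_sg() round trip between transfers.

#include <linux/dmaengine.h>

static void my_dma_complete(void *param)
{
	struct dma_async_tx_descriptor *desc = param;

	/* The CPU did not consume the data, so reuse the same transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(desc->chan);
}

static int my_start_transfer(struct dma_chan *my_chan, dma_addr_t my_buf,
			     size_t my_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(my_chan, my_buf, my_len,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = my_dma_complete;
	desc->callback_param = desc;
	dmaengine_submit(desc);
	dma_async_issue_pending(my_chan);
	return 0;
}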
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 6f80432a3f0a..7d2c17d8d30f 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&vc->lock, flags);
cookie = dma_cookie_assign(tx);
- list_add_tail(&vd->node, &vc->desc_submitted);
+ list_move_tail(&vd->node, &vc->desc_submitted);
spin_unlock_irqrestore(&vc->lock, flags);
dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,8 +83,10 @@ static void vchan_complete(unsigned long arg)
cb_data = vd->tx.callback_param;
list_del(&vd->node);
-
- vc->desc_free(vd);
+ if (async_tx_test_ack(&vd->tx))
+ list_add(&vd->node, &vc->desc_allocated);
+ else
+ vc->desc_free(vd);
if (cb)
cb(cb_data);
@@ -96,9 +98,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
while (!list_empty(head)) {
struct virt_dma_desc *vd = list_first_entry(head,
struct virt_dma_desc, node);
- list_del(&vd->node);
- dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
- vc->desc_free(vd);
+ if (async_tx_test_ack(&vd->tx)) {
+ list_move_tail(&vd->node, &vc->desc_allocated);
+ } else {
+ dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+ list_del(&vd->node);
+ vc->desc_free(vd);
+ }
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +114,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
dma_cookie_init(&vc->chan);
spin_lock_init(&vc->lock);
+ INIT_LIST_HEAD(&vc->desc_allocated);
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);
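For context, a provider built on virt-dma wires these helpers up roughly as follows. This is a hedged sketch rather than code from the patch; struct my_desc, my_desc_free and my_channel_setup are hypothetical names, and vchan_init() now also initialises the new desc_allocated list shown above.

#include <linux/slab.h>
#include "virt-dma.h"

struct my_desc {
	struct virt_dma_desc vd;	/* embedded virt-dma descriptor */
	/* driver-specific transfer state would live here */
};

static void my_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct my_desc, vd));
}

static void my_channel_setup(struct virt_dma_chan *vc,
			     struct dma_device *dmadev)
{
	/* Called by vchan_complete() only for descriptors that are not acked. */
	vc->desc_free = my_desc_free;
	vchan_init(vc, dmadev);
}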
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 181b95267866..189e75dbcb15 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,6 +29,7 @@ struct virt_dma_chan {
spinlock_t lock;
/* protected by vc.lock */
+ struct list_head desc_allocated;
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
@@ -55,11 +56,16 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
struct virt_dma_desc *vd, unsigned long tx_flags)
{
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+ unsigned long flags;
dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
vd->tx.flags = tx_flags;
vd->tx.tx_submit = vchan_tx_submit;
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add_tail(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
return &vd->tx;
}
@@ -122,7 +128,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
}
/**
- * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * vchan_get_all_descriptors - obtain all allocated, submitted and issued
+ * descriptors
* vc: virtual channel to get descriptors from
* head: list of descriptors found
*
@@ -134,6 +141,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
struct list_head *head)
{
+ list_splice_tail_init(&vc->desc_allocated, head);
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
@@ -141,11 +149,14 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
+ struct virt_dma_desc *vd;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&vc->lock, flags);
vchan_get_all_descriptors(vc, &head);
+ list_for_each_entry(vd, &head, node)
+ async_tx_clear_ack(&vd->tx);
spin_unlock_irqrestore(&vc->lock, flags);
vchan_dma_desc_free_list(vc, &head);
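With the ACK bit cleared on every outstanding descriptor before freeing, a provider's device_free_chan_resources callback can stay a thin wrapper. A minimal sketch, assuming the virt-dma helpers above; my_free_chan_resources is a hypothetical name.

static void my_free_chan_resources(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);

	/*
	 * Releases allocated, submitted, issued and completed descriptors;
	 * the ACK bit is cleared first so even reusable ones are freed.
	 */
	vchan_free_chan_resources(vc);
}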
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 19face3168b4..5d99229c2f95 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -123,10 +123,18 @@ enum dma_transfer_direction {
* chunk and before first src/dst address for next chunk.
* Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
* Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ * @dst_icg: Number of bytes to jump after last dst address of this
+ * chunk and before the first dst address for next chunk.
+ * Ignored if dst_inc is true and dst_sgl is false.
+ * @src_icg: Number of bytes to jump after last src address of this
+ * chunk and before the first src address for next chunk.
+ * Ignored if src_inc is true and src_sgl is false.
*/
struct data_chunk {
size_t size;
size_t icg;
+ size_t dst_icg;
+ size_t src_icg;
};
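The new dst_icg/src_icg fields slot into the existing interleaved-transfer API. Below is a hedged sketch, not taken from this commit, of a template describing a strided memory-to-memory copy: the source is read contiguously while 192 bytes are skipped after each 64-byte chunk on the destination. my_chan, src, dst and my_prep_strided_copy are placeholders, and error/lifetime handling is trimmed.

#include <linux/dmaengine.h>
#include <linux/slab.h>

static struct dma_async_tx_descriptor *
my_prep_strided_copy(struct dma_chan *my_chan, dma_addr_t src, dma_addr_t dst)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->src_sgl = false;		/* src_icg ignored: source is contiguous */
	xt->dst_inc = true;
	xt->dst_sgl = true;		/* honour dst_icg on the destination side */
	xt->numf = 16;			/* 16 frames ... */
	xt->frame_size = 1;		/* ... of one chunk each */
	xt->sgl[0].size = 64;		/* copy 64 bytes per chunk */
	xt->sgl[0].icg = 0;
	xt->sgl[0].src_icg = 0;
	xt->sgl[0].dst_icg = 192;	/* then skip 192 bytes at the destination */

	desc = dmaengine_prep_interleaved_dma(my_chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);	/* assumes the provider consumed the template during prep */
	return desc;
}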
/**