author | Vinod Koul <vinod.koul@intel.com> | 2017-11-14 08:02:20 +0300 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2017-11-14 08:02:20 +0300 |
commit | 049d0d38499822f9117d04d8fbaecf12814cbafa (patch) | |
tree | 63cbe6c24e3a36337024d60755e8908f36dae47f /drivers/dma | |
parent | 5ddab696e75b58a772b0bdad55fcd0c9c214b689 (diff) | |
parent | 008913dbeb1775ba365daa39462ca68884bd926f (diff) | |
download | linux-049d0d38499822f9117d04d8fbaecf12814cbafa.tar.xz |
Merge branch 'topic/axi' into for-linus
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/dma-axi-dmac.c | 75 |
1 file changed, 55 insertions(+), 20 deletions(-)
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 7f0b9aa15867..2419fe524daa 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -72,6 +72,9 @@
 
 #define AXI_DMAC_FLAG_CYCLIC		BIT(0)
 
+/* The maximum ID allocated by the hardware is 31 */
+#define AXI_DMAC_SG_UNUSED 32U
+
 struct axi_dmac_sg {
 	dma_addr_t src_addr;
 	dma_addr_t dest_addr;
@@ -80,6 +83,7 @@ struct axi_dmac_sg {
 	unsigned int dest_stride;
 	unsigned int src_stride;
 	unsigned int id;
+	bool schedule_when_free;
 };
 
 struct axi_dmac_desc {
@@ -200,11 +204,21 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 	}
 	sg = &desc->sg[desc->num_submitted];
 
+	/* Already queued in cyclic mode. Wait for it to finish */
+	if (sg->id != AXI_DMAC_SG_UNUSED) {
+		sg->schedule_when_free = true;
+		return;
+	}
+
 	desc->num_submitted++;
-	if (desc->num_submitted == desc->num_sgs)
-		chan->next_desc = NULL;
-	else
+	if (desc->num_submitted == desc->num_sgs) {
+		if (desc->cyclic)
+			desc->num_submitted = 0; /* Start again */
+		else
+			chan->next_desc = NULL;
+	} else {
 		chan->next_desc = desc;
+	}
 
 	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
 
@@ -220,9 +234,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 
 	/*
 	 * If the hardware supports cyclic transfers and there is no callback to
-	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
+	 * call and only a single segment, enable hw cyclic mode to avoid
+	 * unnecessary interrupts.
 	 */
-	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
+	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
+		desc->num_sgs == 1)
 		flags |= AXI_DMAC_FLAG_CYCLIC;
 
 	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
@@ -237,37 +253,52 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
 		struct axi_dmac_desc, vdesc.node);
 }
 
-static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 	unsigned int completed_transfers)
 {
 	struct axi_dmac_desc *active;
 	struct axi_dmac_sg *sg;
+	bool start_next = false;
 
 	active = axi_dmac_active_desc(chan);
 	if (!active)
-		return;
+		return false;
 
-	if (active->cyclic) {
-		vchan_cyclic_callback(&active->vdesc);
-	} else {
-		do {
-			sg = &active->sg[active->num_completed];
-			if (!(BIT(sg->id) & completed_transfers))
-				break;
-			active->num_completed++;
-			if (active->num_completed == active->num_sgs) {
+	do {
+		sg = &active->sg[active->num_completed];
+		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+			break;
+		if (!(BIT(sg->id) & completed_transfers))
+			break;
+		active->num_completed++;
+		sg->id = AXI_DMAC_SG_UNUSED;
+		if (sg->schedule_when_free) {
+			sg->schedule_when_free = false;
+			start_next = true;
+		}
+
+		if (active->cyclic)
+			vchan_cyclic_callback(&active->vdesc);
+
+		if (active->num_completed == active->num_sgs) {
+			if (active->cyclic) {
+				active->num_completed = 0; /* wrap around */
+			} else {
 				list_del(&active->vdesc.node);
 				vchan_cookie_complete(&active->vdesc);
 				active = axi_dmac_active_desc(chan);
 			}
-		} while (active);
-	}
+		}
+	} while (active);
+
+	return start_next;
 }
 
 static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
 {
 	struct axi_dmac *dmac = devid;
 	unsigned int pending;
+	bool start_next = false;
 
 	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
 	if (!pending)
@@ -281,10 +312,10 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
 		unsigned int completed;
 
 		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
-		axi_dmac_transfer_done(&dmac->chan, completed);
+		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
 	}
 	/* Space has become available in the descriptor queue */
-	if (pending & AXI_DMAC_IRQ_SOT)
+	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
 		axi_dmac_start_transfer(&dmac->chan);
 	spin_unlock(&dmac->chan.vchan.lock);
 
@@ -334,12 +365,16 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
 static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
 {
 	struct axi_dmac_desc *desc;
+	unsigned int i;
 
 	desc = kzalloc(sizeof(struct axi_dmac_desc) +
 		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
 	if (!desc)
 		return NULL;
 
+	for (i = 0; i < num_sgs; i++)
+		desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+
 	desc->num_sgs = num_sgs;
 
 	return desc;
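The merge itself carries no descriptive commit message, so for context: the diff above lets axi-dmac run cyclic transfers made of more than one segment by tracking segment state in software (the AXI_DMAC_SG_UNUSED sentinel and the schedule_when_free flag) and invoking the period callback as each segment completes, while hardware cyclic mode is now used only for single-segment transfers with no callback. Below is a minimal, hypothetical dmaengine client sketch of such a multi-period cyclic transfer; the channel, buffer address, period length, callback, and error codes are illustrative placeholders and are not taken from this patch.

```c
/*
 * Hypothetical client-side sketch (not part of this patch): request a
 * cyclic RX transfer split into four periods.  With the change merged
 * here, axi-dmac submits each period as its own segment and invokes
 * the callback once per completed period.
 */
#include <linux/dmaengine.h>

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf_phys,
			   size_t period_len,
			   dma_async_tx_callback period_done, void *cb_param)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* Buffer holds four periods; the DMA wraps around it indefinitely. */
	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * period_len,
					period_len, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = period_done;	/* called after each period completes */
	txd->callback_param = cb_param;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
```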