Diffstat (limited to 'drivers/dma/ioat/dma.c')
-rw-r--r-- | drivers/dma/ioat/dma.c | 85
1 files changed, 48 insertions, 37 deletions
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 18c011e57592..8ad0ad861c86 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -332,8 +332,8 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
 	u8 *pos;
 	off_t offs;
 
-	chunk = idx / IOAT_DESCS_PER_2M;
-	idx &= (IOAT_DESCS_PER_2M - 1);
+	chunk = idx / IOAT_DESCS_PER_CHUNK;
+	idx &= (IOAT_DESCS_PER_CHUNK - 1);
 	offs = idx * IOAT_DESC_SZ;
 	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
 	phys = ioat_chan->descs[chunk].hw + offs;
@@ -370,7 +370,8 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 	if (!ring)
 		return NULL;
 
-	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+	chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+	ioat_chan->desc_chunks = chunks;
 
 	for (i = 0; i < chunks; i++) {
 		struct ioat_descs *descs = &ioat_chan->descs[i];
@@ -382,8 +383,9 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 
 			for (idx = 0; idx < i; idx++) {
 				descs = &ioat_chan->descs[idx];
-				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
-						  descs->virt, descs->hw);
+				dma_free_coherent(to_dev(ioat_chan),
+						  IOAT_CHUNK_SIZE,
+						  descs->virt, descs->hw);
 				descs->virt = NULL;
 				descs->hw = 0;
 			}
@@ -404,7 +406,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 
 			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
 				dma_free_coherent(to_dev(ioat_chan),
-						  SZ_2M,
+						  IOAT_CHUNK_SIZE,
 						  ioat_chan->descs[idx].virt,
 						  ioat_chan->descs[idx].hw);
 				ioat_chan->descs[idx].virt = NULL;
@@ -867,6 +869,23 @@ static void check_active(struct ioatdma_chan *ioat_chan)
 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
 }
 
+static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
+{
+	spin_lock_bh(&ioat_chan->prep_lock);
+	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+
+	ioat_abort_descs(ioat_chan);
+	dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+	ioat_reset_hw(ioat_chan);
+	dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+	ioat_restart_channel(ioat_chan);
+
+	spin_lock_bh(&ioat_chan->prep_lock);
+	clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
 void ioat_timer_event(struct timer_list *t)
 {
 	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
@@ -889,19 +908,7 @@ void ioat_timer_event(struct timer_list *t)
 
 		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
 			spin_lock_bh(&ioat_chan->cleanup_lock);
-			spin_lock_bh(&ioat_chan->prep_lock);
-			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-			spin_unlock_bh(&ioat_chan->prep_lock);
-
-			ioat_abort_descs(ioat_chan);
-			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
-			ioat_reset_hw(ioat_chan);
-			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
-			ioat_restart_channel(ioat_chan);
-
-			spin_lock_bh(&ioat_chan->prep_lock);
-			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-			spin_unlock_bh(&ioat_chan->prep_lock);
+			ioat_reboot_chan(ioat_chan);
 			spin_unlock_bh(&ioat_chan->cleanup_lock);
 		}
 
@@ -915,17 +922,23 @@ void ioat_timer_event(struct timer_list *t)
 		spin_lock_bh(&ioat_chan->prep_lock);
 		check_active(ioat_chan);
 		spin_unlock_bh(&ioat_chan->prep_lock);
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
-		return;
+		goto unlock_out;
+	}
+
+	/* handle the missed cleanup case */
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
+		/* timer restarted in ioat_cleanup_preamble
+		 * and IOAT_COMPLETION_ACK cleared
+		 */
+		__cleanup(ioat_chan, phys_complete);
+		goto unlock_out;
 	}
 
 	/* if we haven't made progress and we have already
 	 * acknowledged a pending completion once, then be more
 	 * forceful with a restart
 	 */
-	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
-		__cleanup(ioat_chan, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+	if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
 		u32 chanerr;
 
 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -937,25 +950,23 @@ void ioat_timer_event(struct timer_list *t)
 		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
 			ioat_ring_active(ioat_chan));
 
-		spin_lock_bh(&ioat_chan->prep_lock);
-		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-		spin_unlock_bh(&ioat_chan->prep_lock);
+		ioat_reboot_chan(ioat_chan);
 
-		ioat_abort_descs(ioat_chan);
-		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
-		ioat_reset_hw(ioat_chan);
-		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
-		ioat_restart_channel(ioat_chan);
+		goto unlock_out;
+	}
 
+	/* handle missed issue pending case */
+	if (ioat_ring_pending(ioat_chan)) {
+		dev_warn(to_dev(ioat_chan),
+			 "Completion timeout with pending descriptors\n");
 		spin_lock_bh(&ioat_chan->prep_lock);
-		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+		__ioat_issue_pending(ioat_chan);
 		spin_unlock_bh(&ioat_chan->prep_lock);
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
-		return;
-	} else
-		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+	}
 
+	set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+unlock_out:
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
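
The descriptor-indexing change at the top of the diff is easiest to see in isolation. The standalone sketch below is not part of the patch; it only mirrors the chunk/offset arithmetic of the patched ioat_alloc_ring_ent(). The IOAT_DESC_SZ and IOAT_CHUNK_SIZE values here are assumptions for the example (a 64-byte descriptor and the 2 MB chunk that SZ_2M implied); the point is that IOAT_DESCS_PER_CHUNK must stay a power of two, since the second step masks rather than divides.

/* Illustrative only -- not from the patch.  Mirrors the chunk/offset
 * math in the patched ioat_alloc_ring_ent(); macro values are assumed.
 */
#include <stdio.h>

#define IOAT_DESC_SZ         64                  /* assumed descriptor size */
#define IOAT_CHUNK_SIZE      (2 * 1024 * 1024)   /* assumed 2 MB, as SZ_2M was */
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)  /* 32768, a power of two */

int main(void)
{
	int idx = 40000;        /* arbitrary descriptor index in the ring */

	/* same two steps as the patched driver code */
	int chunk = idx / IOAT_DESCS_PER_CHUNK;                  /* which preallocated chunk */
	long offs = (long)(idx & (IOAT_DESCS_PER_CHUNK - 1)) * IOAT_DESC_SZ;  /* byte offset inside it */

	printf("descriptor %d -> chunk %d, offset %ld\n", idx, chunk, offs);
	return 0;
}

Deriving both the per-chunk descriptor count and the allocation/free sizes from a single IOAT_CHUNK_SIZE, instead of hard-coding SZ_2M and IOAT_DESCS_PER_2M, lets the chunk size be changed in one place without touching the ring allocation, free, or indexing paths shown above.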