From 61b5865d56bbb09dd5887b197dd324d1d4333d47 Mon Sep 17 00:00:00 2001
From: Dave Jiang
Date: Tue, 11 Feb 2020 10:42:47 -0700
Subject: dmaengine: idxd: fix runaway module ref count on device driver bind

idxd_config_bus_probe() calls try_module_get() but never calls
module_put() when it fails. Thus with every failed bind attempt, the
module ref count goes up. Add module_put() to the failure paths.

Fixes: c52ca478233c ("dmaengine: idxd: add configuration component of driver")
Reported-by: Jerry Chen
Signed-off-by: Dave Jiang
Link: https://lore.kernel.org/r/158144296730.41381.12134210685456322434.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul
---
 drivers/dma/idxd/sysfs.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 6d907fe150aa..298855ca934f 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -124,6 +124,7 @@ static int idxd_config_bus_probe(struct device *dev)
 		rc = idxd_device_config(idxd);
 		if (rc < 0) {
 			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			module_put(THIS_MODULE);
 			dev_warn(dev, "Device config failed: %d\n", rc);
 			return rc;
 		}
@@ -132,6 +133,7 @@ static int idxd_config_bus_probe(struct device *dev)
 		rc = idxd_device_enable(idxd);
 		if (rc < 0) {
 			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			module_put(THIS_MODULE);
 			dev_warn(dev, "Device enable failed: %d\n", rc);
 			return rc;
 		}
@@ -142,6 +144,7 @@ static int idxd_config_bus_probe(struct device *dev)
 		rc = idxd_register_dma_device(idxd);
 		if (rc < 0) {
 			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			module_put(THIS_MODULE);
 			dev_dbg(dev, "Failed to register dmaengine device\n");
 			return rc;
 		}
-- 
cgit v1.2.3
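
[Editor's note] The rule this fix restores is that every successful
try_module_get() must be balanced by a module_put() on every early
return. A minimal sketch of the balanced shape, using a hypothetical
driver (example_device_config() is an invented stand-in, not an idxd
API):

    #include <linux/module.h>
    #include <linux/device.h>

    static int example_device_config(struct device *dev)
    {
    	return 0;	/* stand-in for real configuration work */
    }

    static int example_bus_probe(struct device *dev)
    {
    	int rc;

    	if (!try_module_get(THIS_MODULE))	/* pin module while bound */
    		return -ENODEV;

    	rc = example_device_config(dev);
    	if (rc < 0) {
    		module_put(THIS_MODULE);	/* balance the get on failure */
    		return rc;
    	}

    	return 0;	/* the matching module_put() belongs in ->remove() */
    }
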
From 83c49f734463980802d6dba376f69e787fb07a8e Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 4 Feb 2020 20:51:15 +0800
Subject: dmaengine: doc: fix warnings/issues of client.rst

This fixes some warnings/issues in client.rst:

o Add the blank line required before enumerated lists.
o Do not create sections inside an enumerated list.
o Remove the ':' suffix from section titles.

Signed-off-by: Changbin Du
Link: https://lore.kernel.org/r/20200204125115.12128-1-changbin.du@gmail.com
Signed-off-by: Vinod Koul
---
 Documentation/driver-api/dmaengine/client.rst | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
index e5953e7e4bf4..2104830a99ae 100644
--- a/Documentation/driver-api/dmaengine/client.rst
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -151,8 +151,8 @@ The details of these operations are:
   Note that callbacks will always be invoked from the DMA
   engines tasklet, never from interrupt context.

-Optional: per descriptor metadata
----------------------------------
+  **Optional: per descriptor metadata**
+
   DMAengine provides two ways for metadata support.

   DESC_METADATA_CLIENT
@@ -199,12 +199,15 @@ Optional: per descriptor metadata
   DESC_METADATA_CLIENT

   - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+
     1. prepare the descriptor (dmaengine_prep_*)
        construct the metadata in the client's buffer
     2. use dmaengine_desc_attach_metadata() to attach the buffer to the
        descriptor
     3. submit the transfer
+
   - DMA_DEV_TO_MEM:
+
     1. prepare the descriptor (dmaengine_prep_*)
     2. use dmaengine_desc_attach_metadata() to attach the buffer to the
        descriptor
@@ -215,6 +218,7 @@ Optional: per descriptor metadata
   DESC_METADATA_ENGINE

   - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+
     1. prepare the descriptor (dmaengine_prep_*)
     2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the
        engine's metadata area
@@ -222,7 +226,9 @@ Optional: per descriptor metadata
     4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the
        amount of data the client has placed into the metadata buffer
     5. submit the transfer
+
   - DMA_DEV_TO_MEM:
+
     1. prepare the descriptor (dmaengine_prep_*)
     2. submit the transfer
     3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get

@@ -278,8 +284,8 @@ Optional: per descriptor metadata

       void dma_async_issue_pending(struct dma_chan *chan);

-Further APIs:
--------------
+Further APIs
+------------

 1. Terminate APIs
-- 
cgit v1.2.3

From 2227ab4216cd4d56619733bcfaee7e6943cdc9aa Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Wed, 5 Feb 2020 15:32:48 +0300
Subject: dmaengine: idxd: Fix error handling in idxd_wq_cdev_dev_setup()

We can't call kfree(dev) after calling device_register(dev). Once the
device has been registered, the "dev" pointer has to be released using
put_device() instead.

Fixes: 42d279f9137a ("dmaengine: idxd: add char driver to expose submission portal to userland")
Signed-off-by: Dan Carpenter
Acked-by: Dave Jiang
Link: https://lore.kernel.org/r/20200205123248.hmtog7qa2eiqaagh@kili.mountain
Signed-off-by: Vinod Koul
---
 drivers/dma/idxd/cdev.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 1d7347825b95..df47be612ebb 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -204,6 +204,7 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
 	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
 	if (minor < 0) {
 		rc = minor;
+		kfree(dev);
 		goto ida_err;
 	}

@@ -212,7 +213,6 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
 	rc = device_register(dev);
 	if (rc < 0) {
 		dev_err(&idxd->pdev->dev, "device register failed\n");
-		put_device(dev);
 		goto dev_reg_err;
 	}
 	idxd_cdev->minor = minor;
@@ -221,8 +221,8 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)

 dev_reg_err:
 	ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+	put_device(dev);
 ida_err:
-	kfree(dev);
 	idxd_cdev->dev = NULL;
 	return rc;
 }
-- 
cgit v1.2.3
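
[Editor's note] The driver-core rule being applied above: before a
struct device is initialized it may be kfree()d, but once it has been
handed to the device core, the final free must happen through
put_device() driving the release() callback. A minimal sketch under
that assumption (all "example_*" names are hypothetical):

    #include <linux/device.h>
    #include <linux/slab.h>

    struct example_dev {
    	struct device dev;
    };

    static void example_release(struct device *dev)
    {
    	/* final free runs here once the last reference is dropped */
    	kfree(container_of(dev, struct example_dev, dev));
    }

    static int example_add(struct example_dev *ed)
    {
    	int rc;

    	device_initialize(&ed->dev);	/* refcount is live from here on */
    	ed->dev.release = example_release;

    	rc = device_add(&ed->dev);	/* device_register() == initialize + add */
    	if (rc) {
    		put_device(&ed->dev);	/* not kfree(): release() does the free */
    		return rc;
    	}
    	return 0;
    }
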
From 1c83767c9d417c4cc45d95f09b3a6e6c6b5417b5 Mon Sep 17 00:00:00 2001
From: Vignesh Raghavendra
Date: Fri, 14 Feb 2020 11:14:36 +0200
Subject: dmaengine: ti: k3-udma: Use ktime/usleep_range based TX completion check

In some cases (McSPI for example) the jiffies and delayed_work based
workaround can cause a big throughput drop. Switch to a ktime and
usleep_range based implementation to be able to sustain the transfer
speed for PDMA based peripherals.

Signed-off-by: Vignesh Raghavendra
Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200214091441.27535-2-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/k3-udma.c | 80 ++++++++++++++++++++++++++++++++----------------
 1 file changed, 53 insertions(+), 27 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index ea79c2df28e0..fb59c869a6a7 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -5,6 +5,7 @@
  */

 #include <linux/kernel.h>
+#include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
@@ -169,7 +170,7 @@ enum udma_chan_state {

 struct udma_tx_drain {
 	struct delayed_work work;
-	unsigned long jiffie;
+	ktime_t tstamp;
 	u32 residue;
 };

@@ -946,9 +947,10 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
 	peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
 	bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);

+	/* Transfer is incomplete, store current residue and time stamp */
 	if (peer_bcnt < bcnt) {
 		uc->tx_drain.residue = bcnt - peer_bcnt;
-		uc->tx_drain.jiffie = jiffies;
+		uc->tx_drain.tstamp = ktime_get();
 		return false;
 	}

@@ -961,35 +963,59 @@ static void udma_check_tx_completion(struct work_struct *work)
 					    tx_drain.work.work);
 	bool desc_done = true;
 	u32 residue_diff;
-	unsigned long jiffie_diff, delay;
+	ktime_t time_diff;
+	unsigned long delay;
+
+	while (1) {
+		if (uc->desc) {
+			/* Get previous residue and time stamp */
+			residue_diff = uc->tx_drain.residue;
+			time_diff = uc->tx_drain.tstamp;
+			/*
+			 * Get current residue and time stamp or see if
+			 * transfer is complete
+			 */
+			desc_done = udma_is_desc_really_done(uc, uc->desc);
+		}

-	if (uc->desc) {
-		residue_diff = uc->tx_drain.residue;
-		jiffie_diff = uc->tx_drain.jiffie;
-		desc_done = udma_is_desc_really_done(uc, uc->desc);
-	}
-
-	if (!desc_done) {
-		jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
-		residue_diff -= uc->tx_drain.residue;
-		if (residue_diff) {
-			/* Try to guess when we should check next time */
-			residue_diff /= jiffie_diff;
-			delay = uc->tx_drain.residue / residue_diff / 3;
-			if (jiffies_to_msecs(delay) < 5)
-				delay = 0;
-		} else {
-			/* No progress, check again in 1 second */
-			delay = HZ;
+		if (!desc_done) {
+			/*
+			 * Find the time delta and residue delta w.r.t
+			 * previous poll
+			 */
+			time_diff = ktime_sub(uc->tx_drain.tstamp,
+					      time_diff) + 1;
+			residue_diff -= uc->tx_drain.residue;
+			if (residue_diff) {
+				/*
+				 * Try to guess when we should check
+				 * next time by calculating rate at
+				 * which data is being drained at the
+				 * peer device
+				 */
+				delay = (time_diff / residue_diff) *
+					uc->tx_drain.residue;
+			} else {
+				/* No progress, check again in 1 second */
+				schedule_delayed_work(&uc->tx_drain.work, HZ);
+				break;
+			}
+
+			usleep_range(ktime_to_us(delay),
+				     ktime_to_us(delay) + 10);
+			continue;
 		}

-		schedule_delayed_work(&uc->tx_drain.work, delay);
-	} else if (uc->desc) {
-		struct udma_desc *d = uc->desc;
+		if (uc->desc) {
+			struct udma_desc *d = uc->desc;
+
+			uc->bcnt += d->residue;
+			udma_start(uc);
+			vchan_cookie_complete(&d->vd);
+			break;
+		}

-		uc->bcnt += d->residue;
-		udma_start(uc);
-		vchan_cookie_complete(&d->vd);
+		break;
 	}
 }
-- 
cgit v1.2.3
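
[Editor's note] The idea behind the rewrite is to estimate the peer's
drain rate from two (time, residue) samples and sleep roughly as long
as the remaining bytes should take, instead of rounding up to jiffies.
A schematic sketch of that rate-based polling, using the kernel APIs
the patch switches to (read_residue() is a hypothetical stand-in for
the hardware byte counters the driver reads):

    #include <linux/ktime.h>
    #include <linux/delay.h>
    #include <linux/math64.h>

    static u32 read_residue(void);	/* hypothetical helper */

    static void wait_for_drain(void)
    {
    	ktime_t t_prev = ktime_get();
    	u32 res_prev = read_residue();

    	while (res_prev) {
    		ktime_t t_now = ktime_get();
    		u32 res_now = read_residue();
    		s64 dt_ns = ktime_to_ns(ktime_sub(t_now, t_prev)) + 1;
    		u32 drained = res_prev - res_now;

    		if (drained) {
    			/* expected time left = ns per byte * bytes left */
    			s64 wait_ns = div_s64(dt_ns, drained) * res_now;

    			usleep_range(wait_ns / 1000, wait_ns / 1000 + 10);
    		} else {
    			msleep(1000);	/* no progress, back off */
    		}
    		t_prev = t_now;
    		res_prev = res_now;
    	}
    }
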
From 16cd3c670183d788f5a0dfe41415d3386ba92ed9 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Fri, 14 Feb 2020 11:14:37 +0200
Subject: dmaengine: ti: k3-udma: Workaround for RX teardown with stale data in peer

When a channel is asked to be stopped (teardown) and we do not have an
active descriptor to receive stale data buffered on the remote side,
the teardown will not complete, as UDMA needs a descriptor to be able
to flush out the DMA pipe. The peer keeps trying to push the data to
UDMA during teardown, but UDMA pushes back because it has no descriptor
that would allow it to drain the data.

The workaround is to create a 1K 'trashcan' buffer to receive the
discarded data, and to set up descriptors for both packet and TR mode
channels. When a channel is stopped while no descriptor is active, the
flush descriptor is pushed to the ring for UDMA before the teardown is
initiated.

Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200214091441.27535-3-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/k3-udma.c | 168 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 151 insertions(+), 17 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index fb59c869a6a7..cb9259e104b4 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -97,6 +97,24 @@ struct udma_match_data {
 	u32 level_start_idx[];
 };

+struct udma_hwdesc {
+	size_t cppi5_desc_size;
+	void *cppi5_desc_vaddr;
+	dma_addr_t cppi5_desc_paddr;
+
+	/* TR descriptor internal pointers */
+	void *tr_req_base;
+	struct cppi5_tr_resp_t *tr_resp_base;
+};
+
+struct udma_rx_flush {
+	struct udma_hwdesc hwdescs[2];
+
+	size_t buffer_size;
+	void *buffer_vaddr;
+	dma_addr_t buffer_paddr;
+};
+
 struct udma_dev {
 	struct dma_device ddev;
 	struct device *dev;
@@ -113,6 +131,8 @@ struct udma_dev {
 	struct list_head desc_to_purge;
 	spinlock_t lock;

+	struct udma_rx_flush rx_flush;
+
 	int tchan_cnt;
 	int echan_cnt;
 	int rchan_cnt;
@@ -131,16 +151,6 @@ struct udma_dev {
 	u32 psil_base;
 };

-struct udma_hwdesc {
-	size_t cppi5_desc_size;
-	void *cppi5_desc_vaddr;
-	dma_addr_t cppi5_desc_paddr;
-
-	/* TR descriptor internal pointers */
-	void *tr_req_base;
-	struct cppi5_tr_resp_t *tr_resp_base;
-};
-
 struct udma_desc {
 	struct virt_dma_desc vd;

@@ -552,12 +562,17 @@ static void udma_sync_for_device(struct udma_chan *uc, int idx)
 	}
 }

+static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
+{
+	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
+}
+
 static int udma_push_to_ring(struct udma_chan *uc, int idx)
 {
 	struct udma_desc *d = uc->desc;
 	struct k3_ring *ring = NULL;
-	int ret = -EINVAL;
+	dma_addr_t paddr;
+	int ret;

 	switch (uc->config.dir) {
 	case DMA_DEV_TO_MEM:
@@ -568,21 +583,37 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
 		ring = uc->tchan->t_ring;
 		break;
 	default:
-		break;
+		return -EINVAL;
 	}

-	if (ring) {
-		dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
+	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
+	if (idx == -1) {
+		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
+	} else {
+		paddr = udma_curr_cppi5_desc_paddr(d, idx);

 		wmb(); /* Ensure that writes are not moved over this point */
 		udma_sync_for_device(uc, idx);
-		ret = k3_ringacc_ring_push(ring, &desc_addr);
-		uc->in_ring_cnt++;
 	}

+	ret = k3_ringacc_ring_push(ring, &paddr);
+	if (!ret)
+		uc->in_ring_cnt++;
+
 	return ret;
 }

+static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
+{
+	if (uc->config.dir != DMA_DEV_TO_MEM)
+		return false;
+
+	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
+		return true;
+
+	return false;
+}
+
 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
 {
 	struct k3_ring *ring = NULL;
@@ -611,6 +642,10 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
 	if (cppi5_desc_is_tdcm(*addr))
 		return ret;

+	/* Check for flush descriptor */
+	if (udma_desc_is_rx_flush(uc, *addr))
+		return -ENOENT;
+
 	d = udma_udma_desc_from_paddr(uc, *addr);

 	if (d)
@@ -891,6 +926,9 @@ static int udma_stop(struct udma_chan *uc)

 	switch (uc->config.dir) {
 	case DMA_DEV_TO_MEM:
+		if (!uc->cyclic && !uc->desc)
+			udma_push_to_ring(uc, -1);
+
 		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
 				   UDMA_PEER_RT_EN_ENABLE |
 				   UDMA_PEER_RT_EN_TEARDOWN);
@@ -3274,6 +3312,98 @@ static int udma_setup_resources(struct udma_dev *ud)
 	return ch_count;
 }

+static int udma_setup_rx_flush(struct udma_dev *ud)
+{
+	struct udma_rx_flush *rx_flush = &ud->rx_flush;
+	struct cppi5_desc_hdr_t *tr_desc;
+	struct cppi5_tr_type1_t *tr_req;
+	struct cppi5_host_desc_t *desc;
+	struct device *dev = ud->dev;
+	struct udma_hwdesc *hwdesc;
+	size_t tr_size;
+
+	/* Allocate 1K buffer for discarded data on RX channel teardown */
+	rx_flush->buffer_size = SZ_1K;
+	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
+					      GFP_KERNEL);
+	if (!rx_flush->buffer_vaddr)
+		return -ENOMEM;
+
+	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
+						rx_flush->buffer_size,
+						DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
+		return -ENOMEM;
+
+	/* Set up descriptor to be used for TR mode */
+	hwdesc = &rx_flush->hwdescs[0];
+	tr_size = sizeof(struct cppi5_tr_type1_t);
+	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
+	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+					ud->desc_align);
+
+	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
+						GFP_KERNEL);
+	if (!hwdesc->cppi5_desc_vaddr)
+		return -ENOMEM;
+
+	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
+						  hwdesc->cppi5_desc_size,
+						  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
+		return -ENOMEM;
+
+	/* Start of the TR req records */
+	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+	/* Start address of the TR response array */
+	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
+
+	tr_desc = hwdesc->cppi5_desc_vaddr;
+	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
+	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+	cppi5_desc_set_retpolicy(tr_desc, 0, 0);
+
+	tr_req = hwdesc->tr_req_base;
+	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
+		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
+
+	tr_req->addr = rx_flush->buffer_paddr;
+	tr_req->icnt0 = rx_flush->buffer_size;
+	tr_req->icnt1 = 1;
+
+	/* Set up descriptor to be used for packet mode */
+	hwdesc = &rx_flush->hwdescs[1];
+	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
+					CPPI5_INFO0_HDESC_EPIB_SIZE +
+					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
+					ud->desc_align);
+
+	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
+						GFP_KERNEL);
+	if (!hwdesc->cppi5_desc_vaddr)
+		return -ENOMEM;
+
+	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
+						  hwdesc->cppi5_desc_size,
+						  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
+		return -ENOMEM;
+
+	desc = hwdesc->cppi5_desc_vaddr;
+	cppi5_hdesc_init(desc, 0, 0);
+	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
+
+	cppi5_hdesc_attach_buf(desc,
+			       rx_flush->buffer_paddr, rx_flush->buffer_size,
+			       rx_flush->buffer_paddr, rx_flush->buffer_size);
+
+	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
+				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
+	return 0;
+}
+
 #define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
@@ -3387,6 +3517,10 @@ static int udma_probe(struct platform_device *pdev)
 	if (ud->desc_align < dma_get_cache_alignment())
 		ud->desc_align = dma_get_cache_alignment();

+	ret = udma_setup_rx_flush(ud);
+	if (ret)
+		return ret;
+
 	for (i = 0; i < ud->tchan_cnt; i++) {
 		struct udma_tchan *tchan = &ud->tchans[i];
-- 
cgit v1.2.3
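
[Editor's note] The heart of the workaround is the decision added to
udma_stop() above. A condensed restatement of that logic (this
paraphrases the patch itself, it is not additional driver code):

    /* Paraphrase of the udma_stop() change for the DEV_TO_MEM case */
    if (uc->config.dir == DMA_DEV_TO_MEM && !uc->cyclic && !uc->desc) {
    	/*
    	 * No descriptor is queued, so hand UDMA the preallocated
    	 * "trashcan" flush descriptor (idx == -1) before asserting
    	 * teardown; otherwise the peer's stale data has nowhere to
    	 * go and the teardown never completes.
    	 */
    	udma_push_to_ring(uc, -1);
    }
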
From a97934071fc3b0a5e52c89e06da68bcac0481de3 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Fri, 14 Feb 2020 11:14:38 +0200
Subject: dmaengine: ti: k3-udma: Move the TR counter calculation to helper function

Move the TR counter parameter configuration code out of the prep_memcpy
callback into a helper function, so that the code can be reused for
other TR based transfers.

Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200214091441.27535-4-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/k3-udma.c | 74 +++++++++++++++++++++++++++++++++---------------
 1 file changed, 51 insertions(+), 23 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index cb9259e104b4..9b00013d6f63 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2029,6 +2029,51 @@ static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
 	return d;
 }

+/**
+ * udma_get_tr_counters - calculate TR counters for a given length
+ * @len: Length of the transfer
+ * @align_to: Preferred alignment
+ * @tr0_cnt0: First TR icnt0
+ * @tr0_cnt1: First TR icnt1
+ * @tr1_cnt0: Second (if used) TR icnt0
+ *
+ * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated.
+ * For len >= SZ_64K two TRs are used in a simple way:
+ * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
+ * Second TR: the remaining length (tr1_cnt0)
+ *
+ * Returns the number of TRs the length needs (1 or 2), or
+ * -EINVAL if the length cannot be supported.
+ */
+static int udma_get_tr_counters(size_t len, unsigned long align_to,
+				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
+{
+	if (len < SZ_64K) {
+		*tr0_cnt0 = len;
+		*tr0_cnt1 = 1;
+
+		return 1;
+	}
+
+	if (align_to > 3)
+		align_to = 3;
+
+realign:
+	*tr0_cnt0 = SZ_64K - BIT(align_to);
+	if (len / *tr0_cnt0 >= SZ_64K) {
+		if (align_to) {
+			align_to--;
+			goto realign;
+		}
+		return -EINVAL;
+	}
+
+	*tr0_cnt1 = len / *tr0_cnt0;
+	*tr1_cnt0 = len % *tr0_cnt0;
+
+	return 2;
+}
+
 static struct udma_desc *
 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
 		      unsigned int sglen, enum dma_transfer_direction dir,
@@ -2581,29 +2626,12 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		return NULL;
 	}

-	if (len < SZ_64K) {
-		num_tr = 1;
-		tr0_cnt0 = len;
-		tr0_cnt1 = 1;
-	} else {
-		unsigned long align_to = __ffs(src | dest);
-
-		if (align_to > 3)
-			align_to = 3;
-		/*
-		 * Keep simple: tr0: SZ_64K-alignment blocks,
-		 * tr1: the remaining
-		 */
-		num_tr = 2;
-		tr0_cnt0 = (SZ_64K - BIT(align_to));
-		if (len / tr0_cnt0 >= SZ_64K) {
-			dev_err(uc->ud->dev, "size %zu is not supported\n",
-				len);
-			return NULL;
-		}
-
-		tr0_cnt1 = len / tr0_cnt0;
-		tr1_cnt0 = len % tr0_cnt0;
+	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
+				      &tr0_cnt1, &tr1_cnt0);
+	if (num_tr < 0) {
+		dev_err(uc->ud->dev, "size %zu is not supported\n",
+			len);
+		return NULL;
 	}

 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
-- 
cgit v1.2.3
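
[Editor's note] A quick worked example of the helper above (the
numbers follow directly from its arithmetic; they are illustrative,
not from the patch):

    u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
    int num_tr;

    /* A 200000-byte transfer with 8-byte (BIT(3)) alignment */
    num_tr = udma_get_tr_counters(200000, 3, &tr0_cnt0, &tr0_cnt1,
    			      &tr1_cnt0);
    /*
     * num_tr   == 2
     * tr0_cnt0 == SZ_64K - BIT(3) = 65528   (block size of the first TR)
     * tr0_cnt1 == 200000 / 65528  = 3       (number of such blocks)
     * tr1_cnt0 == 200000 % 65528  = 3416    (remainder, second TR)
     * Total: 3 * 65528 + 3416 = 200000 bytes
     */
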
From 6cf668a4ef829b9a11d74a4954f02a1767403246 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Fri, 14 Feb 2020 11:14:39 +0200
Subject: dmaengine: ti: k3-udma: Use the TR counter helper for slave_sg and cyclic

Use the generic TR setup function to get the TR counters for both
cyclic and slave_sg transfers. This way the period_size for cyclic and
sg_dma_len() for slave_sg can be as large as (SZ_64K - 1) * (SZ_64K - 1),
and we can handle cases where the length is larger than SZ_64K and is a
prime number.

Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200214091441.27535-5-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/k3-udma.c | 130 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 88 insertions(+), 42 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 9b00013d6f63..1dba47c662c4 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2079,31 +2079,31 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
 		      unsigned int sglen, enum dma_transfer_direction dir,
 		      unsigned long tx_flags, void *context)
 {
-	enum dma_slave_buswidth dev_width;
 	struct scatterlist *sgent;
 	struct udma_desc *d;
-	size_t tr_size;
 	struct cppi5_tr_type1_t *tr_req = NULL;
+	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
 	unsigned int i;
-	u32 burst;
+	size_t tr_size;
+	int num_tr = 0;
+	int tr_idx = 0;

-	if (dir == DMA_DEV_TO_MEM) {
-		dev_width = uc->cfg.src_addr_width;
-		burst = uc->cfg.src_maxburst;
-	} else if (dir == DMA_MEM_TO_DEV) {
-		dev_width = uc->cfg.dst_addr_width;
-		burst = uc->cfg.dst_maxburst;
-	} else {
-		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+	if (!is_slave_direction(dir)) {
+		dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
 		return NULL;
 	}

-	if (!burst)
-		burst = 1;
+	/* estimate the number of TRs we will need */
+	for_each_sg(sgl, sgent, sglen, i) {
+		if (sg_dma_len(sgent) < SZ_64K)
+			num_tr++;
+		else
+			num_tr += 2;
+	}

 	/* Now allocate and setup the descriptor. */
 	tr_size = sizeof(struct cppi5_tr_type1_t);
-	d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
 	if (!d)
 		return NULL;

@@ -2111,19 +2111,46 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
 	tr_req = d->hwdesc[0].tr_req_base;
 	for_each_sg(sgl, sgent, sglen, i) {
-		d->residue += sg_dma_len(sgent);
+		dma_addr_t sg_addr = sg_dma_address(sgent);
+
+		num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
+					      &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
+		if (num_tr < 0) {
+			dev_err(uc->ud->dev, "size %u is not supported\n",
+				sg_dma_len(sgent));
+			udma_free_hwdesc(uc, d);
+			kfree(d);
+			return NULL;
+		}

 		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
 			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
 		cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);

-		tr_req[i].addr = sg_dma_address(sgent);
-		tr_req[i].icnt0 = burst * dev_width;
-		tr_req[i].dim1 = burst * dev_width;
-		tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+		tr_req[tr_idx].addr = sg_addr;
+		tr_req[tr_idx].icnt0 = tr0_cnt0;
+		tr_req[tr_idx].icnt1 = tr0_cnt1;
+		tr_req[tr_idx].dim1 = tr0_cnt0;
+		tr_idx++;
+
+		if (num_tr == 2) {
+			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
+				      false, false,
+				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+					 CPPI5_TR_CSF_SUPR_EVT);
+
+			tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
+			tr_req[tr_idx].icnt0 = tr1_cnt0;
+			tr_req[tr_idx].icnt1 = 1;
+			tr_req[tr_idx].dim1 = tr1_cnt0;
+			tr_idx++;
+		}
+
+		d->residue += sg_dma_len(sgent);
 	}

-	cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);

 	return d;
 }
@@ -2428,47 +2455,66 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
 			size_t buf_len, size_t period_len,
 			enum dma_transfer_direction dir, unsigned long flags)
 {
-	enum dma_slave_buswidth dev_width;
 	struct udma_desc *d;
-	size_t tr_size;
+	size_t tr_size, period_addr;
 	struct cppi5_tr_type1_t *tr_req;
-	unsigned int i;
 	unsigned int periods = buf_len / period_len;
-	u32 burst;
+	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+	unsigned int i;
+	int num_tr;

-	if (dir == DMA_DEV_TO_MEM) {
-		dev_width = uc->cfg.src_addr_width;
-		burst = uc->cfg.src_maxburst;
-	} else if (dir == DMA_MEM_TO_DEV) {
-		dev_width = uc->cfg.dst_addr_width;
-		burst = uc->cfg.dst_maxburst;
-	} else {
-		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+	if (!is_slave_direction(dir)) {
+		dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
 		return NULL;
 	}

-	if (!burst)
-		burst = 1;
+	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
+				      &tr0_cnt1, &tr1_cnt0);
+	if (num_tr < 0) {
+		dev_err(uc->ud->dev, "size %zu is not supported\n",
+			period_len);
+		return NULL;
+	}

 	/* Now allocate and setup the descriptor. */
 	tr_size = sizeof(struct cppi5_tr_type1_t);
-	d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+	d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
 	if (!d)
 		return NULL;

 	tr_req = d->hwdesc[0].tr_req_base;
+	period_addr = buf_addr;
 	for (i = 0; i < periods; i++) {
-		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
-			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+		int tr_idx = i * num_tr;

-		tr_req[i].addr = buf_addr + period_len * i;
-		tr_req[i].icnt0 = dev_width;
-		tr_req[i].icnt1 = period_len / dev_width;
-		tr_req[i].dim1 = dev_width;
+		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
+			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+		tr_req[tr_idx].addr = period_addr;
+		tr_req[tr_idx].icnt0 = tr0_cnt0;
+		tr_req[tr_idx].icnt1 = tr0_cnt1;
+		tr_req[tr_idx].dim1 = tr0_cnt0;
+
+		if (num_tr == 2) {
+			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+					 CPPI5_TR_CSF_SUPR_EVT);
+			tr_idx++;
+
+			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
+				      false, false,
+				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+			tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
+			tr_req[tr_idx].icnt0 = tr1_cnt0;
+			tr_req[tr_idx].icnt1 = 1;
+			tr_req[tr_idx].dim1 = tr1_cnt0;
+		}

 		if (!(flags & DMA_PREP_INTERRUPT))
-			cppi5_tr_csf_set(&tr_req[i].flags,
+			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
 					 CPPI5_TR_CSF_SUPR_EVT);
+
+		period_addr += period_len;
 	}

 	return d;
-- 
cgit v1.2.3
From c7450bb211f3ded7cb6e4d305855683ed7d04e60 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Fri, 14 Feb 2020 11:14:40 +0200
Subject: dmaengine: ti: k3-udma: Use the channel direction in pause/resume functions

It should be possible to pause, resume and check the pause state of a
channel even if we do not have an active transfer. In its current form
udma_is_chan_paused() can trigger a NULL pointer dereference when the
status is checked while uc->desc is NULL.

Fixes: 25dcb5dd7b7ce ("dmaengine: ti: New driver for K3 UDMA")
Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200214091441.27535-6-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/k3-udma.c | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 1dba47c662c4..9b4e1e5fa849 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -513,7 +513,7 @@ static bool udma_is_chan_paused(struct udma_chan *uc)
 {
 	u32 val, pause_mask;

-	switch (uc->desc->dir) {
+	switch (uc->config.dir) {
 	case DMA_DEV_TO_MEM:
 		val = udma_rchanrt_read(uc->rchan,
 					UDMA_RCHAN_RT_PEER_RT_EN_REG);
@@ -2835,11 +2835,8 @@ static int udma_pause(struct dma_chan *chan)
 {
 	struct udma_chan *uc = to_udma_chan(chan);

-	if (!uc->desc)
-		return -EINVAL;
-
 	/* pause the channel */
-	switch (uc->desc->dir) {
+	switch (uc->config.dir) {
 	case DMA_DEV_TO_MEM:
 		udma_rchanrt_update_bits(uc->rchan,
 					 UDMA_RCHAN_RT_PEER_RT_EN_REG,
@@ -2868,11 +2865,8 @@ static int udma_resume(struct dma_chan *chan)
 {
 	struct udma_chan *uc = to_udma_chan(chan);

-	if (!uc->desc)
-		return -EINVAL;
-
 	/* resume the channel */
-	switch (uc->desc->dir) {
+	switch (uc->config.dir) {
 	case DMA_DEV_TO_MEM:
 		udma_rchanrt_update_bits(uc->rchan,
 					 UDMA_RCHAN_RT_PEER_RT_EN_REG,
-- 
cgit v1.2.3
From 8390318c04bb24cc4d41ac03e009b0748882f99f Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Fri, 14 Feb 2020 11:14:41 +0200
Subject: dmaengine: ti: k3-udma: Fix terminated transfer handling

When we receive back the descriptor of a terminated transfer, the
cookie must be marked as completed to make sure that the accounting is
correct.

In udma_tx_status() the status should be reported as completed if the
channel is no longer running (this can only happen if the channel has
not yet been started for the first time, or after a channel
termination).

Fixes: 25dcb5dd7b7ce ("dmaengine: ti: New driver for K3 UDMA")
Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200214091441.27535-7-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/k3-udma.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 9b4e1e5fa849..0536866a58ce 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1097,29 +1097,27 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
 			goto out;
 		}

-		if (uc->cyclic) {
-			/* push the descriptor back to the ring */
-			if (d == uc->desc) {
+		if (d == uc->desc) {
+			/* active descriptor */
+			if (uc->cyclic) {
 				udma_cyclic_packet_elapsed(uc);
 				vchan_cyclic_callback(&d->vd);
-			}
-		} else {
-			bool desc_done = false;
-
-			if (d == uc->desc) {
-				desc_done = udma_is_desc_really_done(uc, d);
-
-				if (desc_done) {
+			} else {
+				if (udma_is_desc_really_done(uc, d)) {
 					uc->bcnt += d->residue;
 					udma_start(uc);
+					vchan_cookie_complete(&d->vd);
 				} else {
 					schedule_delayed_work(&uc->tx_drain.work,
 							      0);
 				}
 			}
-
-			if (desc_done)
-				vchan_cookie_complete(&d->vd);
+		} else {
+			/*
+			 * terminated descriptor, mark the descriptor as
+			 * completed to update the channel's cookie marker
+			 */
+			dma_cookie_complete(&d->vd.tx);
 		}
 	}
 out:
@@ -2769,6 +2767,9 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,

 	ret = dma_cookie_status(chan, cookie, txstate);

+	if (!udma_is_chan_running(uc))
+		ret = DMA_COMPLETE;
+
 	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
 		ret = DMA_PAUSED;
-- 
cgit v1.2.3
From 2d0b1919457ad78036f24169968cadc6f55d37ec Mon Sep 17 00:00:00 2001
From: Dave Jiang
Date: Tue, 18 Feb 2020 09:51:58 -0700
Subject: dmaengine: idxd: correct reserved token calculation

The calculation of the reserved-token limit did not take into account
the value the user wants to set vs the group's currently reserved
tokens. This made it possible to change the reserved token count only
after first setting it back to 0. Fix the calculation so that a
non-zero reserved token value can be set directly.

Fixes: c52ca478233c ("dmaengine: idxd: add configuration component of driver")
Reported-by: Jerry Chen
Signed-off-by: Dave Jiang
Link: https://lore.kernel.org/r/158204471889.37789.7749177228265869168.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul
---
 drivers/dma/idxd/sysfs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 298855ca934f..edbfe83325eb 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -519,7 +519,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
 	if (val > idxd->max_tokens)
 		return -EINVAL;

-	if (val > idxd->nr_tokens)
+	if (val > idxd->nr_tokens + group->tokens_reserved)
 		return -EINVAL;

 	group->tokens_reserved = val;
-- 
cgit v1.2.3
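
[Editor's note] A worked example with hypothetical numbers, assuming
nr_tokens counts the currently unreserved pool (which is what the fix
implies): max_tokens = 24, this group already holds tokens_reserved =
8, so nr_tokens = 16. The user writes val = 20:

    /* Old check: 20 > 16 -> -EINVAL, even though the group is only
     * asking for 12 tokens beyond the 8 it already holds.
     */
    if (val > idxd->nr_tokens)
    	return -EINVAL;

    /* New check: the group's own reservation is credited back first,
     * so 20 > 16 + 8 == 24 is false and the store succeeds.
     */
    if (val > idxd->nr_tokens + group->tokens_reserved)
    	return -EINVAL;
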
From 36d5d22090d13fd3a7a8c9663a711cbe6970aac8 Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Mon, 17 Feb 2020 17:40:50 +0300
Subject: dmaengine: coh901318: Fix a double lock bug in dma_tc_handle()

The caller is already holding the lock, so taking it again in
dma_tc_handle() will deadlock.

Fixes: 0b58828c923e ("DMAENGINE: COH 901 318 remove irq counting")
Signed-off-by: Dan Carpenter
Link: https://lore.kernel.org/r/20200217144050.3i4ymbytogod4ijn@kili.mountain
Signed-off-by: Vinod Koul
---
 drivers/dma/coh901318.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e51d836afcc7..1092d4ce723e 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
 		return;
 	}

-	spin_lock(&cohc->lock);
-
 	/*
 	 * When we reach this point, at least one queue item
 	 * should have been moved over from cohc->queue to
@@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
 	if (coh901318_queue_start(cohc) == NULL)
 		cohc->busy = 0;

-	spin_unlock(&cohc->lock);
-
 	/*
 	 * This tasklet will remove items from cohc->active
 	 * and thus terminates them.
-- 
cgit v1.2.3
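
[Editor's note] Linux spinlocks are not recursive: re-taking a lock
the current context already holds spins forever. A minimal sketch of
the bug shape (hypothetical types and functions, not the coh901318
code):

    #include <linux/spinlock.h>

    struct example_chan {
    	spinlock_t lock;
    };

    static void example_handler(struct example_chan *c)
    {
    	spin_lock(&c->lock);	/* BUG: caller already holds c->lock */
    	/* ... */
    	spin_unlock(&c->lock);
    }

    static void example_caller(struct example_chan *c)
    {
    	spin_lock(&c->lock);
    	example_handler(c);	/* self-deadlock on both UP and SMP */
    	spin_unlock(&c->lock);
    }
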
From 88402c5b1ba7498217027c8a54e8df61d030500c Mon Sep 17 00:00:00 2001
From: Dave Jiang
Date: Wed, 19 Feb 2020 10:24:08 -0700
Subject: dmaengine: idxd: sysfs input of wq incorrect wq type should return error

Currently, when an unrecognized wq type is written, we set the wq type
to "none". The store should instead return an error and leave the
existing wq type in the kernel unchanged.

Fixes: c52ca478233c ("dmaengine: idxd: add configuration component of driver")
Reported-by: Yixin Zhang
Signed-off-by: Dave Jiang
Link: https://lore.kernel.org/r/158213304803.2290.13336343633425868211.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul
---
 drivers/dma/idxd/sysfs.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index edbfe83325eb..b4a7885e79ed 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1002,12 +1002,14 @@ static ssize_t wq_type_store(struct device *dev,
 		return -EPERM;

 	old_type = wq->type;
-	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
+		wq->type = IDXD_WQT_NONE;
+	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
 		wq->type = IDXD_WQT_KERNEL;
 	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
 		wq->type = IDXD_WQT_USER;
 	else
-		wq->type = IDXD_WQT_NONE;
+		return -EINVAL;

 	/* If we are changing queue type, clear the name */
 	if (wq->type != old_type)
-- 
cgit v1.2.3

From 50e7e7f6f2d040dd16a636f408eab9184abc63f8 Mon Sep 17 00:00:00 2001
From: Dave Jiang
Date: Wed, 19 Feb 2020 10:24:56 -0700
Subject: dmaengine: idxd: wq size configuration needs to check global max size

The current size_store() function for the idxd sysfs does not check the
device's total wq size. This allows every wq to be configured with the
full wq size. Add a check so that the wq size attribute rejects values
that would push the sum of all wq sizes over the device total.

Fixes: c52ca478233c ("dmaengine: idxd: add configuration component of driver")
Reported-by: Jerry Chen
Signed-off-by: Dave Jiang
Link: https://lore.kernel.org/r/158213309629.2509.3583411832507185041.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul
---
 drivers/dma/idxd/sysfs.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index b4a7885e79ed..6ca6e520a2fa 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -904,6 +904,20 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "%u\n", wq->size);
 }

+static int total_claimed_wq_size(struct idxd_device *idxd)
+{
+	int i;
+	int wq_size = 0;
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *wq = &idxd->wqs[i];
+
+		wq_size += wq->size;
+	}
+
+	return wq_size;
+}
+
 static ssize_t wq_size_store(struct device *dev,
 			     struct device_attribute *attr, const char *buf,
 			     size_t count)
@@ -923,7 +937,7 @@ static ssize_t wq_size_store(struct device *dev,
 	if (wq->state != IDXD_WQ_DISABLED)
 		return -EPERM;

-	if (size > idxd->max_wq_size)
+	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
 		return -EINVAL;

 	wq->size = size;
-- 
cgit v1.2.3
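
[Editor's note] A quick worked example of the new size check, with
hypothetical numbers: max_wq_size = 128, and three wqs currently sized
64, 32 and 32, so total_claimed_wq_size() returns 128. Resizing the
last wq from 32 to 64:

    /* size = 64, total claimed = 128, this wq's current size = 32 */
    if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
    	return -EINVAL;	/* 64 + 128 - 32 = 160 > 128 -> rejected */

    /* Resizing the same wq to 32 or less still succeeds:
     * 32 + 128 - 32 = 128 <= 128 -> accepted
     */

Subtracting wq->size first is what lets a wq be resized in place
without releasing its current allocation.
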
From d288bddd8374e0a043ac9dde64a1ae6a09411d74 Mon Sep 17 00:00:00 2001
From: Martin Fuzzey
Date: Wed, 29 Jan 2020 14:40:06 +0100
Subject: dmaengine: imx-sdma: fix context cache

There is a DMA problem with the serial ports on i.MX6.

When the following sequence is performed:

1) Open a port
2) Write some data
3) Close the port
4) Open a *different* port
5) Write some data
6) Close the port

The second write sends nothing and the second close hangs.
If the first close() is omitted it works.

Adding logs to the UART driver shows that the DMA is being set up but
the callback is never invoked for the second write.

This used to work in 4.19. Git bisect leads to:
ad0d92d: "dmaengine: imx-sdma: refine to load context only once"

This commit adds a "context_loaded" flag used to avoid unnecessary
context setups. However the flag is only reset in
sdma_channel_terminate_work(), which is only invoked in a worker
triggered by sdma_terminate_all() IF there is an active descriptor.

So, if no active descriptor remains when the channel is terminated,
the flag is not reset and, when the channel is later reused, the old
context is used.

Fix the problem by always resetting the flag in
sdma_free_chan_resources().

Cc: stable@vger.kernel.org
Signed-off-by: Martin Fuzzey
Fixes: ad0d92d7ba6a ("dmaengine: imx-sdma: refine to load context only once")
Reviewed-by: Fabio Estevam
Link: https://lore.kernel.org/r/1580305274-27274-1-git-send-email-martin.fuzzey@flowbird.group
Signed-off-by: Vinod Koul
---
 drivers/dma/imx-sdma.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 066b21a32232..332ca5034504 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1338,6 +1338,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)

 	sdmac->event_id0 = 0;
 	sdmac->event_id1 = 0;
+	sdmac->context_loaded = false;

 	sdma_set_channel_priority(sdmac, 0);
-- 
cgit v1.2.3
From 94788af4ed039476ff3527b0e6a12c1dc42cb022 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko
Date: Sun, 9 Feb 2020 19:33:38 +0300
Subject: dmaengine: tegra-apb: Fix use-after-free

I was doing some experiments with I2C and noticed that the Tegra APB
DMA driver crashes some time after I2C DMA transfer termination. The
crash happens because tegra_dma_terminate_all() bails out immediately
if the pending list is empty, and thus it doesn't release the
half-completed descriptors, which then get re-used before the ISR
tasklet kicks in.

 tegra-i2c 7000c400.i2c: DMA transfer timeout
 elants_i2c 0-0010: elants_i2c_irq: failed to read data: -110
 ------------[ cut here ]------------
 WARNING: CPU: 0 PID: 142 at lib/list_debug.c:45 __list_del_entry_valid+0x45/0xac
 list_del corruption, ddbaac44->next is LIST_POISON1 (00000100)
 Modules linked in:
 CPU: 0 PID: 142 Comm: kworker/0:2 Not tainted 5.5.0-rc2-next-20191220-00175-gc3605715758d-dirty #538
 Hardware name: NVIDIA Tegra SoC (Flattened Device Tree)
 Workqueue: events_freezable_power_ thermal_zone_device_check
 [] (unwind_backtrace) from [] (show_stack+0x11/0x14)
 [] (show_stack) from [] (dump_stack+0x85/0x94)
 [] (dump_stack) from [] (__warn+0xc1/0xc4)
 [] (__warn) from [] (warn_slowpath_fmt+0x61/0x78)
 [] (warn_slowpath_fmt) from [] (__list_del_entry_valid+0x45/0xac)
 [] (__list_del_entry_valid) from [] (tegra_dma_tasklet+0x5b/0x154)
 [] (tegra_dma_tasklet) from [] (tasklet_action_common.constprop.0+0x41/0x7c)
 [] (tasklet_action_common.constprop.0) from [] (__do_softirq+0xd3/0x2a8)
 [] (__do_softirq) from [] (irq_exit+0x7b/0x98)
 [] (irq_exit) from [] (__handle_domain_irq+0x45/0x80)
 [] (__handle_domain_irq) from [] (gic_handle_irq+0x45/0x7c)
 [] (gic_handle_irq) from [] (__irq_svc+0x65/0x94)
 Exception stack(0xde2ebb90 to 0xde2ebbd8)

Signed-off-by: Dmitry Osipenko
Acked-by: Jon Hunter
Cc: <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/20200209163356.6439-2-digetx@gmail.com
Signed-off-by: Vinod Koul
---
 drivers/dma/tegra20-apb-dma.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3a45079d11ec..319f31d27014 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
 	bool was_busy;

 	spin_lock_irqsave(&tdc->lock, flags);
-	if (list_empty(&tdc->pending_sg_req)) {
-		spin_unlock_irqrestore(&tdc->lock, flags);
-		return 0;
-	}

 	if (!tdc->busy)
 		goto skip_dma_stop;
-- 
cgit v1.2.3
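
[Editor's note] The bug shape, distilled into a schematic sketch
(hypothetical types; the real driver keeps several descriptor lists):
a terminate callback that early-returns when the *pending* list is
empty forgets that half-completed descriptors may still sit elsewhere.

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_chan {
    	spinlock_t lock;
    	struct list_head pending;
    	struct list_head active;
    };

    static int example_terminate_all(struct example_chan *c)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&c->lock, flags);
    	if (list_empty(&c->pending)) {
    		/*
    		 * BUG shape: half-completed descriptors may still be
    		 * on c->active; returning here lets them be freed and
    		 * re-used while the completion tasklet still holds
    		 * pointers to them -> use-after-free.
    		 */
    		spin_unlock_irqrestore(&c->lock, flags);
    		return 0;
    	}
    	/* ... stop hardware, reclaim active and pending lists ... */
    	spin_unlock_irqrestore(&c->lock, flags);
    	return 0;
    }
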
From c33ee1301c393a241d6424e36eff1071811b1064 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko
Date: Sun, 9 Feb 2020 19:33:39 +0300
Subject: dmaengine: tegra-apb: Prevent race conditions of tasklet vs free list

The interrupt handler puts a half-completed DMA descriptor on a free
list and then schedules a tasklet to process the bottom half of the
descriptor, which executes the client's callback. This creates the
possibility of picking up the busy descriptor from the free list.
Thus, disallow a descriptor's re-use until it is fully processed.

Signed-off-by: Dmitry Osipenko
Acked-by: Jon Hunter
Cc: <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/20200209163356.6439-3-digetx@gmail.com
Signed-off-by: Vinod Koul
---
 drivers/dma/tegra20-apb-dma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 319f31d27014..4a750e29bfb5 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(

 	/* Do not allocate if desc are waiting for ack */
 	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
-		if (async_tx_test_ack(&dma_desc->txd)) {
+		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
 			list_del(&dma_desc->node);
 			spin_unlock_irqrestore(&tdc->lock, flags);
 			dma_desc->txd.flags = 0;
-- 
cgit v1.2.3

From 25962e1a7f1d522f1b57ead2f266fab570042a70 Mon Sep 17 00:00:00 2001
From: Frieder Schrempf
Date: Tue, 25 Feb 2020 08:23:20 +0000
Subject: dmaengine: imx-sdma: Fix the event id check to include RX event for UART6

On i.MX6UL/ULL and i.MX6SX the DMA event id for the RX channel of
UART6 is '0'. To fix the broken DMA support for UART6, change the
checks for event_id0 to accept '0' as a valid id.

Fixes: 1ec1e82f2510 ("dmaengine: Add Freescale i.MX SDMA support")
Signed-off-by: Frieder Schrempf
Reviewed-by: Fabio Estevam
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20200225082139.7646-1-frieder.schrempf@kontron.de
Signed-off-by: Vinod Koul
---
 drivers/dma/imx-sdma.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 332ca5034504..4d4477df4ede 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1331,7 +1331,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)

 	sdma_channel_synchronize(chan);

-	if (sdmac->event_id0)
+	if (sdmac->event_id0 >= 0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
 	if (sdmac->event_id1)
 		sdma_event_disable(sdmac, sdmac->event_id1);
@@ -1632,7 +1632,7 @@ static int sdma_config(struct dma_chan *chan,
 	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));

 	/* Set ENBLn earlier to make sure dma request triggered after that */
-	if (sdmac->event_id0 >= 0) {
+	if (sdmac->event_id0 >= 0) {
 		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
 			return -EINVAL;
 		sdma_event_enable(sdmac, sdmac->event_id0);
-- 
cgit v1.2.3
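
[Editor's note] The subtlety in the last patch is that event id 0 is a
real hardware request line, so it cannot double as a "no event"
sentinel. The `>= 0` test only works if the field is a signed type
with a negative "unused" value; a sketch of the distinction using a
hypothetical field, not the driver's actual struct layout:

    #include <linux/types.h>

    /* Hypothetical channel state */
    struct example_chan_cfg {
    	int event_id0;	/* -1 would mean "no event configured" */
    };

    static bool example_has_event(const struct example_chan_cfg *c)
    {
    	/*
    	 * Event id 0 is a real request line (UART6 RX on i.MX6UL/ULL
    	 * and i.MX6SX), so test against a sentinel rather than for
    	 * truthiness: `if (event_id0)` would wrongly treat line 0 as
    	 * "no event".
    	 */
    	return c->event_id0 >= 0;
    }
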