author		Guodong Xu <guodong@riscstar.com>	2025-12-16 17:10:06 +0300
committer	Vinod Koul <vkoul@kernel.org>	2025-12-16 17:58:39 +0300
commit		a143545855bc2c6e1330f6f57ae375ac44af00a7 (patch)
tree		94f36fc0a06bc6f99e7835acca7a9675a003d79a
parent		430f7803b69cd5e5694e5dfc884c6628870af36e (diff)
dmaengine: mmp_pdma: Fix race condition in mmp_pdma_residue()
Add proper locking in mmp_pdma_residue() to prevent use-after-free when
accessing descriptor list and descriptor contents. The race occurs when
multiple threads call tx_status() while the tasklet on another CPU is
freeing completed descriptors:

        CPU 0                                CPU 1
        -----                                -----
  mmp_pdma_tx_status()
    mmp_pdma_residue() -> NO LOCK held
      list_for_each_entry(sw, ..)
                                             DMA interrupt
                                             dma_do_tasklet()
                                               -> spin_lock(&desc_lock)
                                                  list_move(sw->node, ...)
                                                  spin_unlock(&desc_lock)
                                                  dma_pool_free(sw) <- FREED!
      -> access sw->desc <- UAF!

This issue can be reproduced when running dmatest on the same channel
with multiple threads (threads_per_chan > 1).

Fix by protecting the chain_running list iteration and descriptor access
with the chan->desc_lock spinlock.

Signed-off-by: Juan Li <lijuan@linux.spacemit.com>
Signed-off-by: Guodong Xu <guodong@riscstar.com>
Link: https://patch.msgid.link/20251216-mmp-pdma-race-v1-1-976a224bb622@riscstar.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
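For illustration only, a minimal sketch of the pattern this fix applies: any
walker of the running-descriptor list must hold the channel's desc_lock,
because the completion tasklet moves descriptors off that list under the same
lock and then frees them. The types and function below (demo_chan, demo_desc,
demo_residue) are hypothetical stand-ins, not the driver's real code.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-ins for mmp_pdma_desc_sw / mmp_pdma_chan. */
struct demo_desc {
	struct list_head node;
	u32 len;
};

struct demo_chan {
	spinlock_t desc_lock;		/* protects chain_running */
	struct list_head chain_running;
};

/*
 * Reader side: walk the running chain only while holding desc_lock,
 * so the tasklet cannot move and free a descriptor mid-iteration.
 */
static u32 demo_residue(struct demo_chan *chan)
{
	struct demo_desc *sw;
	unsigned long flags;
	u32 residue = 0;

	spin_lock_irqsave(&chan->desc_lock, flags);
	list_for_each_entry(sw, &chan->chain_running, node)
		residue += sw->len;
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return residue;
}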
-rw-r--r--	drivers/dma/mmp_pdma.c	6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 86661eb3cde1..d12e729ee12c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -928,6 +928,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
 {
 	struct mmp_pdma_desc_sw *sw;
 	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+	unsigned long flags;
 	u64 curr;
 	u32 residue = 0;
 	bool passed = false;
@@ -945,6 +946,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
 	else
 		curr = pdev->ops->read_src_addr(chan->phy);
 
+	spin_lock_irqsave(&chan->desc_lock, flags);
+
 	list_for_each_entry(sw, &chan->chain_running, node) {
 		u64 start, end;
 		u32 len;
@@ -989,6 +992,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
 			continue;
 
 		if (sw->async_tx.cookie == cookie) {
+			spin_unlock_irqrestore(&chan->desc_lock, flags);
 			return residue;
 		} else {
 			residue = 0;
@@ -996,6 +1000,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
 		}
 	}
 
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
 	/* We should only get here in case of cyclic transactions */
 	return residue;
 }
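For completeness, a sketch of the writer side implied by the race diagram in
the commit message: the completion tasklet detaches finished descriptors under
desc_lock and frees them only after dropping it, which is exactly why a reader
that still dereferences them must take the same lock. This reuses the
hypothetical demo_chan/demo_desc types from the sketch above; demo_do_tasklet
is an invented name and kfree() stands in for dma_pool_free().

#include <linux/slab.h>		/* kfree() */

/*
 * Writer side (simplified): detach completed descriptors under the lock,
 * then release their memory outside of it. Once detached, no correctly
 * locked reader can reach them anymore.
 */
static void demo_do_tasklet(struct demo_chan *chan)
{
	struct demo_desc *sw, *tmp;
	unsigned long flags;
	LIST_HEAD(chain_cleanup);

	/* Move completed descriptors to a private list under the lock. */
	spin_lock_irqsave(&chan->desc_lock, flags);
	list_splice_tail_init(&chan->chain_running, &chain_cleanup);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Free them outside the lock. */
	list_for_each_entry_safe(sw, tmp, &chain_cleanup, node) {
		list_del(&sw->node);
		kfree(sw);
	}
}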