| author | Dave Jiang <dave.jiang@intel.com> | 2021-04-16 02:37:10 +0300 |
|---|---|---|
| committer | Vinod Koul <vkoul@kernel.org> | 2021-04-20 14:13:52 +0300 |
| commit | 397862855619271296e46d10f7dfa7bafe71eb81 (patch) | |
| tree | 14535be560b943d0ba47d788fbfcc303395e6cd5 /drivers/dma/idxd/dma.c | |
| parent | 63606522b9c28c2950cb9c418f0ba2270009d4ad (diff) | |
| download | linux-397862855619271296e46d10f7dfa7bafe71eb81.tar.xz | |
dmaengine: idxd: fix dma device lifetime
The devm-managed lifetime is incompatible with 'struct device' objects that reside in the idxd context. This is part of a series that cleans up the idxd driver's 'struct device' lifetime. Remove the embedding of dma_device and dma_chan in idxd, since dmaengine is not the only interface that idxd will use; the new wrapper types are sketched below, after the tags. The freeing of the dma_device is now handled by the ->release() callback.
Reported-by: Jason Gunthorpe <jgg@nvidia.com>
Fixes: bfe1d56091c1 ("dmaengine: idxd: Init and probe for Intel data accelerators")
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Link: https://lore.kernel.org/r/161852983001.2203940.14817017492384561719.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
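For context, the diff below relies on two wrapper types introduced in drivers/dma/idxd/idxd.h by the same series. The header itself is not part of this file's diff; the following is a sketch reconstructed from the container_of() usage in dma.c, not the verbatim header:

struct idxd_dma_chan {
	struct dma_chan chan;		/* embedded; container_of() recovers the wrapper */
	struct idxd_wq *wq;		/* back-pointer to the owning work queue */
};

struct idxd_dma_dev {
	struct idxd_device *idxd;	/* back-pointer to the owning device */
	struct dma_device dma;		/* embedded; freed in idxd_dma_release() */
};

Because both dmaengine objects are now embedded in separately kzalloc'd wrappers rather than in the devm-managed idxd context, their storage can outlive (or die before) the idxd device without tripping over devm's teardown ordering.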
Diffstat (limited to 'drivers/dma/idxd/dma.c')
-rw-r--r-- | drivers/dma/idxd/dma.c | 77 |
1 file changed, 64 insertions(+), 13 deletions(-)
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index a15e50126434..77439b645044 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -14,7 +14,10 @@
 
 static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
 {
-	return container_of(c, struct idxd_wq, dma_chan);
+	struct idxd_dma_chan *idxd_chan;
+
+	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
+	return idxd_chan->wq;
 }
 
 void idxd_dma_complete_txd(struct idxd_desc *desc,
@@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
 {
 }
 
-dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct dma_chan *c = tx->chan;
 	struct idxd_wq *wq = to_idxd_wq(c);
@@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static void idxd_dma_release(struct dma_device *device)
 {
+	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
+
+	kfree(idxd_dma);
 }
 
 int idxd_register_dma_device(struct idxd_device *idxd)
 {
-	struct dma_device *dma = &idxd->dma_dev;
+	struct idxd_dma_dev *idxd_dma;
+	struct dma_device *dma;
+	struct device *dev = &idxd->pdev->dev;
+	int rc;
 
+	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
+	if (!idxd_dma)
+		return -ENOMEM;
+
+	dma = &idxd_dma->dma;
 	INIT_LIST_HEAD(&dma->channels);
-	dma->dev = &idxd->pdev->dev;
+	dma->dev = dev;
 
 	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
 	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
@@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
 	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = idxd_dma_free_chan_resources;
 
-	return dma_async_device_register(&idxd->dma_dev);
+	rc = dma_async_device_register(dma);
+	if (rc < 0) {
+		kfree(idxd_dma);
+		return rc;
+	}
+
+	idxd_dma->idxd = idxd;
+	/*
+	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
+	 * as long as there are outstanding channels.
+	 */
+	idxd->idxd_dma = idxd_dma;
+	return 0;
 }
 
 void idxd_unregister_dma_device(struct idxd_device *idxd)
 {
-	dma_async_device_unregister(&idxd->dma_dev);
+	dma_async_device_unregister(&idxd->idxd_dma->dma);
 }
 
 int idxd_register_dma_channel(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
-	struct dma_device *dma = &idxd->dma_dev;
-	struct dma_chan *chan = &wq->dma_chan;
-	int rc;
+	struct dma_device *dma = &idxd->idxd_dma->dma;
+	struct device *dev = &idxd->pdev->dev;
+	struct idxd_dma_chan *idxd_chan;
+	struct dma_chan *chan;
+	int rc, i;
+
+	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
+	if (!idxd_chan)
+		return -ENOMEM;
 
-	memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+	chan = &idxd_chan->chan;
 	chan->device = dma;
 	list_add_tail(&chan->device_node, &dma->channels);
+
+	for (i = 0; i < wq->num_descs; i++) {
+		struct idxd_desc *desc = wq->descs[i];
+
+		dma_async_tx_descriptor_init(&desc->txd, chan);
+		desc->txd.tx_submit = idxd_dma_tx_submit;
+	}
+
 	rc = dma_async_device_channel_register(dma, chan);
-	if (rc < 0)
+	if (rc < 0) {
+		kfree(idxd_chan);
 		return rc;
+	}
+
+	wq->idxd_chan = idxd_chan;
+	idxd_chan->wq = wq;
+	get_device(&wq->conf_dev);
 	return 0;
 }
 
 void idxd_unregister_dma_channel(struct idxd_wq *wq)
 {
-	struct dma_chan *chan = &wq->dma_chan;
+	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
+	struct dma_chan *chan = &idxd_chan->chan;
+	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
 
-	dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
 	list_del(&chan->device_node);
+	kfree(wq->idxd_chan);
+	wq->idxd_chan = NULL;
+	put_device(&wq->conf_dev);
 }
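The get_device()/put_device() pairing on wq->conf_dev and the refs held by registered channels are what make the kfree() in idxd_dma_release() safe: per the comment added in the patch, the wrapper stays valid as long as channels are outstanding, and the core only invokes ->device_release() once the last reference is gone. A minimal consumer-side sketch of that contract follows; it is hypothetical, none of the names are from this patch, and it assumes the provider advertises DMA_MEMCPY:

#include <linux/dmaengine.h>
#include <linux/module.h>

/* Hypothetical filter: accept whichever channel the core offers. */
static bool example_filter(struct dma_chan *chan, void *param)
{
	return true;
}

static int example_use_channel(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);	/* assumption: provider sets DMA_MEMCPY */

	/* Holding a channel pins the provider's dma_device. */
	chan = dma_request_channel(mask, example_filter, NULL);
	if (!chan)
		return -ENODEV;

	/* ... prepare, submit, and await descriptors here ... */

	/*
	 * Dropping the last channel reference is what eventually lets the
	 * dmaengine core call ->device_release() (idxd_dma_release() above),
	 * which frees the idxd_dma_dev wrapper.
	 */
	dma_release_channel(chan);
	return 0;
}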