author    | Linus Torvalds <torvalds@linux-foundation.org> | 2020-01-27 21:55:50 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-01-27 21:55:50 +0300
commit    | a5b871c91d470326eed3ae0ebd2fc07f3aee9050 (patch)
tree      | 95b46574591a6eb56e2d7e62219c2629cf064f03 /drivers/dma/idxd/dma.c
parent    | 715d1285695382b5074e49a0fe475b9ba56a1101 (diff)
parent    | 71723a96b8b1367fefc18f60025dae792477d602 (diff)
Merge tag 'dmaengine-5.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This time we have a bunch of core changes to support dynamic channels,
hotplug of controllers, new apis for metadata ops etc along with new
drivers for Intel data accelerators, TI K3 UDMA, PLX DMA engine and
hisilicon Kunpeng DMA engine. Also usual assorted updates to drivers.
Core:
- Support for dynamic channels
- Removal of various slave wrappers
- Make a few slave request APIs private to dmaengine
- Symlinks between channels and slaves
- Support for hotplug of controllers
- Support for metadata_ops for dma_async_tx_descriptor (see the sketch after this message)
- Reporting DMA cached data amount
- Virtual dma channel locking updates
New drivers/devices/feature support:
- Driver for Intel data accelerators
- Driver for TI K3 UDMA
- Driver for PLX DMA engine
- Driver for HiSilicon Kunpeng DMA engine
- eDMA support for QorIQ LS1028A in the fsl-edma driver
- Support for cyclic DMA in the sun4i driver
- Support for X1830 in JZ4780 driver"
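To make the metadata_ops item in the core list above concrete, here is a minimal client-side sketch. It is not taken from this pull request: it assumes a channel whose driver advertises DESC_METADATA_CLIENT mode, and the function name demo_tx_with_metadata and its arguments are illustrative. Only the dmaengine calls themselves are real kernel API.

#include <linux/dmaengine.h>

/*
 * Hedged sketch: attach client-provided metadata to a slave descriptor
 * using the per-descriptor metadata wrappers added this cycle. Assumes
 * the channel supports DESC_METADATA_CLIENT; error handling is trimmed.
 */
static int demo_tx_with_metadata(struct dma_chan *chan, dma_addr_t buf_dma,
				 size_t len, void *meta, size_t meta_len)
{
	struct dma_async_tx_descriptor *tx;
	int ret;

	tx = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* Metadata must be attached after prep and before submit */
	ret = dmaengine_desc_attach_metadata(tx, meta, meta_len);
	if (ret)
		return ret;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}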
* tag 'dmaengine-5.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (62 commits)
dmaengine: Create symlinks between DMA channels and slaves
dmaengine: hisilicon: Add Kunpeng DMA engine support
dmaengine: idxd: add char driver to expose submission portal to userland
dmaengine: idxd: connect idxd to dmaengine subsystem
dmaengine: idxd: add descriptor manipulation routines
dmaengine: idxd: add sysfs ABI for idxd driver
dmaengine: idxd: add configuration component of driver
dmaengine: idxd: Init and probe for Intel data accelerators
dmaengine: add support to dynamic register/unregister of channels
dmaengine: break out channel registration
x86/asm: add iosubmit_cmds512() based on MOVDIR64B CPU instruction (see the sketch after this list)
dmaengine: ti: k3-udma: fix spelling mistake "limted" -> "limited"
dmaengine: s3c24xx-dma: fix spelling mistake "to" -> "too"
dmaengine: Move dma_get_{,any_}slave_channel() to private dmaengine.h
dmaengine: Remove dma_request_slave_channel_compat() wrapper
dmaengine: Remove dma_device_satisfies_mask() wrapper
dt-bindings: fsl-imx-sdma: Add i.MX8MM/i.MX8MN/i.MX8MP compatible string
dmaengine: zynqmp_dma: fix burst length configuration
dmaengine: sun4i: Add support for cyclic requests with dedicated DMA
dmaengine: fsl-qdma: fix duplicated argument to &&
...
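One commit in the list worth illustrating is the x86-only iosubmit_cmds512() helper from the MOVDIR64B commit above. A minimal, hedged sketch of how a driver pushes one 64-byte descriptor through it follows; demo_submit is an illustrative name, and only the helper call itself comes from the kernel.

#include <asm/io.h>
#include <uapi/linux/idxd.h>

/*
 * Hedged sketch: write one 512-bit command to a device submission portal.
 * iosubmit_cmds512() issues MOVDIR64B stores; the count argument is in
 * 64-byte command units, so a single descriptor is count == 1.
 */
static void demo_submit(void __iomem *portal, struct dsa_hw_desc *hw)
{
	iosubmit_cmds512(portal, hw, 1);
}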
Diffstat (limited to 'drivers/dma/idxd/dma.c')
-rw-r--r-- | drivers/dma/idxd/dma.c | 217 |
1 file changed, 217 insertions, 0 deletions
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
new file mode 100644
index 000000000000..c64c1429d160
--- /dev/null
+++ b/drivers/dma/idxd/dma.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
+{
+	return container_of(c, struct idxd_wq, dma_chan);
+}
+
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+			   enum idxd_complete_type comp_type)
+{
+	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_result res;
+	int complete = 1;
+
+	if (desc->completion->status == DSA_COMP_SUCCESS)
+		res.result = DMA_TRANS_NOERROR;
+	else if (desc->completion->status)
+		res.result = DMA_TRANS_WRITE_FAILED;
+	else if (comp_type == IDXD_COMPLETE_ABORT)
+		res.result = DMA_TRANS_ABORTED;
+	else
+		complete = 0;
+
+	tx = &desc->txd;
+	if (complete && tx->cookie) {
+		dma_cookie_complete(tx);
+		dma_descriptor_unmap(tx);
+		dmaengine_desc_get_callback_invoke(tx, &res);
+		tx->callback = NULL;
+		tx->callback_result = NULL;
+	}
+}
+
+static void op_flag_setup(unsigned long flags, u32 *desc_flags)
+{
+	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
+	if (flags & DMA_PREP_INTERRUPT)
+		*desc_flags |= IDXD_OP_FLAG_RCI;
+}
+
+static inline void set_completion_address(struct idxd_desc *desc,
+					  u64 *compl_addr)
+{
+	*compl_addr = desc->compl_dma;
+}
+
+static inline void idxd_prep_desc_common(struct idxd_wq *wq,
+					 struct dsa_hw_desc *hw, char opcode,
+					 u64 addr_f1, u64 addr_f2, u64 len,
+					 u64 compl, u32 flags)
+{
+	struct idxd_device *idxd = wq->idxd;
+
+	hw->flags = flags;
+	hw->opcode = opcode;
+	hw->src_addr = addr_f1;
+	hw->dst_addr = addr_f2;
+	hw->xfer_size = len;
+	hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+	hw->completion_addr = compl;
+
+	/*
+	 * Descriptor completion vectors are 1-8 for MSIX. We will round
+	 * robin through the 8 vectors.
+	 */
+	wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+	hw->int_handle = wq->vec_ptr;
+}
+
+static struct dma_async_tx_descriptor *
+idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+		       dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct idxd_wq *wq = to_idxd_wq(c);
+	u32 desc_flags;
+	struct idxd_device *idxd = wq->idxd;
+	struct idxd_desc *desc;
+
+	if (wq->state != IDXD_WQ_ENABLED)
+		return NULL;
+
+	if (len > idxd->max_xfer_bytes)
+		return NULL;
+
+	op_flag_setup(flags, &desc_flags);
+	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+	if (IS_ERR(desc))
+		return NULL;
+
+	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
+			      dma_src, dma_dest, len, desc->compl_dma,
+			      desc_flags);
+
+	desc->txd.flags = flags;
+
+	return &desc->txd;
+}
+
+static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct idxd_wq *wq = to_idxd_wq(chan);
+	struct device *dev = &wq->idxd->pdev->dev;
+
+	idxd_wq_get(wq);
+	dev_dbg(dev, "%s: client_count: %d\n", __func__,
+		idxd_wq_refcount(wq));
+	return 0;
+}
+
+static void idxd_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct idxd_wq *wq = to_idxd_wq(chan);
+	struct device *dev = &wq->idxd->pdev->dev;
+
+	idxd_wq_put(wq);
+	dev_dbg(dev, "%s: client_count: %d\n", __func__,
+		idxd_wq_refcount(wq));
+}
+
+static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
+					  dma_cookie_t cookie,
+					  struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(dma_chan, cookie, txstate);
+}
+
+/*
+ * issue_pending() does not need to do anything since tx_submit() does the job
+ * already.
+ */
+static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
+{
+}
+
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *c = tx->chan;
+	struct idxd_wq *wq = to_idxd_wq(c);
+	dma_cookie_t cookie;
+	int rc;
+	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);
+
+	cookie = dma_cookie_assign(tx);
+
+	rc = idxd_submit_desc(wq, desc);
+	if (rc < 0) {
+		idxd_free_desc(wq, desc);
+		return rc;
+	}
+
+	return cookie;
+}
+
+static void idxd_dma_release(struct dma_device *device)
+{
+}
+
+int idxd_register_dma_device(struct idxd_device *idxd)
+{
+	struct dma_device *dma = &idxd->dma_dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+	dma->dev = &idxd->pdev->dev;
+
+	dma->device_release = idxd_dma_release;
+
+	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
+		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
+	}
+
+	dma->device_tx_status = idxd_dma_tx_status;
+	dma->device_issue_pending = idxd_dma_issue_pending;
+	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
+	dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+
+	return dma_async_device_register(&idxd->dma_dev);
+}
+
+void idxd_unregister_dma_device(struct idxd_device *idxd)
+{
+	dma_async_device_unregister(&idxd->dma_dev);
+}
+
+int idxd_register_dma_channel(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct dma_device *dma = &idxd->dma_dev;
+	struct dma_chan *chan = &wq->dma_chan;
+	int rc;
+
+	memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+	chan->device = dma;
+	list_add_tail(&chan->device_node, &dma->channels);
+	rc = dma_async_device_channel_register(dma, chan);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+void idxd_unregister_dma_channel(struct idxd_wq *wq)
+{
+	dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+}
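For context on how the DMA_MEMCPY channel registered by this file gets consumed, here is a hedged client-side sketch built only from the generic dmaengine client API (dma_request_chan_by_mask(), dmaengine_prep_dma_memcpy(), dmaengine_submit(), dma_async_issue_pending(), dma_sync_wait()). It is not part of the patch: demo_memcpy and its arguments are illustrative, DMA-mapping error checks are trimmed, and a real client would complete via callback rather than polling.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int demo_memcpy(struct device *dev, void *dst, void *src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_addr_t dst_dma, src_dma;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;
	int ret = 0;

	/* Ask the core for any channel advertising DMA_MEMCPY capability */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	src_dma = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	dst_dma = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				       DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto unmap;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; illustrative only */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;

unmap:
	dma_unmap_single(dev, dst_dma, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, src_dma, len, DMA_TO_DEVICE);
	dma_release_channel(chan);
	return ret;
}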