author      Bjorn Helgaas <bhelgaas@google.com>      2018-10-20 19:45:33 +0300
committer   Bjorn Helgaas <bhelgaas@google.com>      2018-10-20 19:45:33 +0300
commit      1734715493e010d034ae8d65c9a228ee7ad24a08 (patch)
tree        8e9bdb47355c40fe70f4d732656aebbe5d27b62e /drivers/nvme
parent      0af6166395130b87713550c84c9518332f27cc28 (diff)
parent      c6925093d0b28329ad3a486f5b0345c2c192ae9a (diff)
download    linux-1734715493e010d034ae8d65c9a228ee7ad24a08.tar.xz
Merge branch 'pci/peer-to-peer'
- Add PCI support for peer-to-peer DMA (Logan Gunthorpe)
- Add sysfs group for PCI peer-to-peer memory statistics (Logan
Gunthorpe)
- Add PCI peer-to-peer DMA scatterlist mapping interface (Logan
Gunthorpe)
- Add PCI configfs/sysfs helpers for use by peer-to-peer users (Logan
Gunthorpe)
- Add PCI peer-to-peer DMA driver writer's documentation (Logan
Gunthorpe)
- Add block layer flag to indicate driver support for PCI peer-to-peer
DMA (Logan Gunthorpe)
- Map Infiniband scatterlists for peer-to-peer DMA if they contain P2P
memory (Logan Gunthorpe)
- Register nvme-pci CMB buffer as PCI peer-to-peer memory (Logan
Gunthorpe)
- Add nvme-pci support for PCI peer-to-peer memory in requests (Logan
Gunthorpe)
- Use PCI peer-to-peer memory in nvme (Stephen Bates, Steve Wise,
Christoph Hellwig, Logan Gunthorpe)
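The provider side of the interface summarized above, which nvme_map_cmb() exercises in the diff further down, reduces to registering part of a BAR with the P2PDMA subsystem and optionally publishing it for other devices to discover. The following sketch is illustrative only: the BAR number, window size, and example_* names are assumptions, not taken from the series.

/*
 * Minimal provider-side sketch (hypothetical device): expose part of a BAR
 * as peer-to-peer memory and publish it so clients such as nvmet can find it.
 */
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/sizes.h>

static int example_publish_p2pmem(struct pci_dev *pdev)
{
	int bar = 2;		/* hypothetical BAR backing the memory */
	size_t size = SZ_1M;	/* hypothetical window size */
	int rc;

	/* Create ZONE_DEVICE pages covering the chosen BAR window */
	rc = pci_p2pdma_add_resource(pdev, bar, size, 0);
	if (rc)
		return rc;

	/* Advertise the memory to pci_p2pmem_find() users */
	pci_p2pmem_publish(pdev, true);
	return 0;
}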
* pci/peer-to-peer:
nvmet: Optionally use PCI P2P memory
nvmet: Introduce helper functions to allocate and free request SGLs
nvme-pci: Add support for P2P memory in requests
nvme-pci: Use PCI p2pmem subsystem to manage the CMB
IB/core: Ensure we map P2P memory correctly in rdma_rw_ctx_[init|destroy]()
block: Add PCI P2P flag for request queue
PCI/P2PDMA: Add P2P DMA driver writer's documentation
docs-rst: Add a new directory for PCI documentation
PCI/P2PDMA: Introduce configfs/sysfs enable attribute helpers
PCI/P2PDMA: Add PCI p2pmem DMA mappings to adjust the bus offset
PCI/P2PDMA: Add sysfs group to display p2pmem stats
PCI/P2PDMA: Support peer-to-peer memory
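On the consumer side, the calls nvme-pci and nvmet make in the diff below follow a common pattern: find a published provider reachable from the device that will do the DMA, carve an allocation (or scatterlist) out of its BAR-backed memory, and map it with the P2P-aware scatterlist mapper. A hedged sketch, assuming a placeholder client device and transfer length:

/*
 * Minimal client-side sketch: "client_dev" and "len" are placeholders and the
 * error handling is trimmed; the pci_p2p* calls are the ones the diff uses.
 */
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static int example_p2p_transfer(struct device *client_dev, size_t len)
{
	struct pci_dev *p2p_dev;
	struct scatterlist *sgl;
	unsigned int nents;
	int nr_mapped = 0;

	/* Pick a published p2pmem provider usable from client_dev */
	p2p_dev = pci_p2pmem_find(client_dev);
	if (!p2p_dev)
		return -ENODEV;

	/* Allocate a scatterlist backed by the provider's BAR memory */
	sgl = pci_p2pmem_alloc_sgl(p2p_dev, &nents, len);
	if (!sgl) {
		pci_dev_put(p2p_dev);
		return -ENOMEM;
	}

	/* P2P pages map to bus addresses; no unmap step is required */
	nr_mapped = pci_p2pdma_map_sg(client_dev, sgl, nents, DMA_BIDIRECTIONAL);
	if (nr_mapped) {
		/* ... program client_dev to DMA to/from the mapped list ... */
	}

	pci_p2pmem_free_sgl(p2p_dev, sgl);
	pci_dev_put(p2p_dev);
	return nr_mapped ? 0 : -EIO;
}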
Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/core.c           |   4
-rw-r--r--   drivers/nvme/host/nvme.h           |   1
-rw-r--r--   drivers/nvme/host/pci.c            |  97
-rw-r--r--   drivers/nvme/target/configfs.c     |  47
-rw-r--r--   drivers/nvme/target/core.c         | 180
-rw-r--r--   drivers/nvme/target/io-cmd-bdev.c  |   3
-rw-r--r--   drivers/nvme/target/nvmet.h        |  17
-rw-r--r--   drivers/nvme/target/rdma.c         |  22
8 files changed, 324 insertions, 47 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index dd8ec1dd9219..6033ce2fd3e9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3051,7 +3051,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	ns->queue = blk_mq_init_queue(ctrl->tagset);
 	if (IS_ERR(ns->queue))
 		goto out_free_ns;
+
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
+	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
+
 	ns->queue->queuedata = ns;
 	ns->ctrl = ctrl;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bb4a2003c097..4030743c90aa 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -343,6 +343,7 @@ struct nvme_ctrl_ops {
 	unsigned int flags;
 #define NVME_F_FABRICS (1 << 0)
 #define NVME_F_METADATA_SUPPORTED (1 << 1)
+#define NVME_F_PCI_P2PDMA (1 << 2)
 	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
 	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8991e79b2b87..7e09e45b0b28 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/sed-opal.h>
+#include <linux/pci-p2pdma.h>
 
 #include "nvme.h"
 
@@ -99,9 +100,8 @@ struct nvme_dev {
 	struct work_struct remove_work;
 	struct mutex shutdown_lock;
 	bool subsystem;
-	void __iomem *cmb;
-	pci_bus_addr_t cmb_bus_addr;
 	u64 cmb_size;
+	bool cmb_use_sqes;
 	u32 cmbsz;
 	u32 cmbloc;
 	struct nvme_ctrl ctrl;
@@ -158,7 +158,7 @@ struct nvme_queue {
 	struct nvme_dev *dev;
 	spinlock_t sq_lock;
 	struct nvme_command *sq_cmds;
-	struct nvme_command __iomem *sq_cmds_io;
+	bool sq_cmds_is_io;
 	spinlock_t cq_lock ____cacheline_aligned_in_smp;
 	volatile struct nvme_completion *cqes;
 	struct blk_mq_tags **tags;
@@ -447,11 +447,8 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 {
 	spin_lock(&nvmeq->sq_lock);
-	if (nvmeq->sq_cmds_io)
-		memcpy_toio(&nvmeq->sq_cmds_io[nvmeq->sq_tail], cmd,
-				sizeof(*cmd));
-	else
-		memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
+
+	memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
 
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
 		nvmeq->sq_tail = 0;
@@ -748,8 +745,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		goto out;
 
 	ret = BLK_STS_RESOURCE;
-	nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
-			DMA_ATTR_NO_WARN);
+
+	if (is_pci_p2pdma_page(sg_page(iod->sg)))
+		nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
+				dma_dir);
+	else
+		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
+				dma_dir, DMA_ATTR_NO_WARN);
 	if (!nr_mapped)
 		goto out;
 
@@ -791,7 +793,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 			DMA_TO_DEVICE : DMA_FROM_DEVICE;
 
 	if (iod->nents) {
-		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+		/* P2PDMA requests do not need to be unmapped */
+		if (!is_pci_p2pdma_page(sg_page(iod->sg)))
+			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+
 		if (blk_integrity_rq(req))
 			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
 	}
@@ -1232,9 +1237,18 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
-	if (nvmeq->sq_cmds)
-		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
-				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+
+	if (nvmeq->sq_cmds) {
+		if (nvmeq->sq_cmds_is_io)
+			pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
+					nvmeq->sq_cmds,
+					SQ_SIZE(nvmeq->q_depth));
+		else
+			dma_free_coherent(nvmeq->q_dmadev,
+					SQ_SIZE(nvmeq->q_depth),
+					nvmeq->sq_cmds,
+					nvmeq->sq_dma_addr);
+	}
 }
 
 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1323,12 +1337,21 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				int qid, int depth)
 {
-	/* CMB SQEs will be mapped before creation */
-	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
-		return 0;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+		nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
+				nvmeq->sq_cmds);
+		nvmeq->sq_cmds_is_io = true;
+	}
+
+	if (!nvmeq->sq_cmds) {
+		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+				&nvmeq->sq_dma_addr, GFP_KERNEL);
+		nvmeq->sq_cmds_is_io = false;
+	}
 
-	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-			&nvmeq->sq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->sq_cmds)
 		return -ENOMEM;
 	return 0;
@@ -1405,13 +1428,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	int result;
 	s16 vector;
 
-	if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
-				dev->ctrl.page_size);
-		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-		nvmeq->sq_cmds_io = dev->cmb + offset;
-	}
-
 	/*
 	 * A queue's vector matches the queue identifier unless the controller
 	 * has only one vector available.
@@ -1652,9 +1668,6 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 		return;
 	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
 
-	if (!use_cmb_sqes)
-		return;
-
 	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
 	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
 	bar = NVME_CMB_BIR(dev->cmbloc);
@@ -1671,11 +1684,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 	if (size > bar_size - offset)
 		size = bar_size - offset;
 
-	dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
-	if (!dev->cmb)
+	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
+		dev_warn(dev->ctrl.device,
+			 "failed to register the CMB\n");
 		return;
-	dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
+	}
+
 	dev->cmb_size = size;
+	dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
+
+	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
+			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
+		pci_p2pmem_publish(pdev, true);
 
 	if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
 				    &dev_attr_cmb.attr, NULL))
@@ -1685,12 +1705,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 {
-	if (dev->cmb) {
-		iounmap(dev->cmb);
-		dev->cmb = NULL;
+	if (dev->cmb_size) {
 		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
 					     &dev_attr_cmb.attr, NULL);
-		dev->cmbsz = 0;
+		dev->cmb_size = 0;
 	}
 }
@@ -1889,13 +1907,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (nr_io_queues == 0)
 		return 0;
 
-	if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+	if (dev->cmb_use_sqes) {
 		result = nvme_cmb_qdepth(dev, nr_io_queues,
 				sizeof(struct nvme_command));
 		if (result > 0)
 			dev->q_depth = result;
 		else
-			nvme_release_cmb(dev);
+			dev->cmb_use_sqes = false;
 	}
 
 	do {
@@ -2390,7 +2408,8 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 	.name = "pcie",
 	.module = THIS_MODULE,
-	.flags = NVME_F_METADATA_SUPPORTED,
+	.flags = NVME_F_METADATA_SUPPORTED |
+		 NVME_F_PCI_P2PDMA,
 	.reg_read32 = nvme_pci_reg_read32,
 	.reg_write32 = nvme_pci_reg_write32,
 	.reg_read64 = nvme_pci_reg_read64,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index b37a8e3e3f80..d895579b6c5d 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -17,6 +17,8 @@
 #include <linux/slab.h>
 #include <linux/stat.h>
 #include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
 
 #include "nvmet.h"
 
@@ -340,6 +342,48 @@ out_unlock:
 
 CONFIGFS_ATTR(nvmet_ns_, device_path);
 
+#ifdef CONFIG_PCI_P2PDMA
+static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+
+	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
+}
+
+static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	struct pci_dev *p2p_dev = NULL;
+	bool use_p2pmem;
+	int ret = count;
+	int error;
+
+	mutex_lock(&ns->subsys->lock);
+	if (ns->enabled) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
+	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
+	if (error) {
+		ret = error;
+		goto out_unlock;
+	}
+
+	ns->use_p2pmem = use_p2pmem;
+	pci_dev_put(ns->p2p_dev);
+	ns->p2p_dev = p2p_dev;
+
+out_unlock:
+	mutex_unlock(&ns->subsys->lock);
+
+	return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, p2pmem);
+#endif /* CONFIG_PCI_P2PDMA */
+
 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
 {
 	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
@@ -509,6 +553,9 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
 	&nvmet_ns_attr_ana_grpid,
 	&nvmet_ns_attr_enable,
 	&nvmet_ns_attr_buffered_io,
+#ifdef CONFIG_PCI_P2PDMA
+	&nvmet_ns_attr_p2pmem,
+#endif
 	NULL,
 };
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b5ec96abd048..9b4d84cfc224 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/rculist.h>
+#include <linux/pci-p2pdma.h>
 
 #include "nvmet.h"
 
@@ -365,9 +366,93 @@ static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
 		nvmet_file_ns_disable(ns);
 }
 
+static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
+{
+	int ret;
+	struct pci_dev *p2p_dev;
+
+	if (!ns->use_p2pmem)
+		return 0;
+
+	if (!ns->bdev) {
+		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
+		return -EINVAL;
+	}
+
+	if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
+		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
+		       ns->device_path);
+		return -EINVAL;
+	}
+
+	if (ns->p2p_dev) {
+		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
+		if (ret < 0)
+			return -EINVAL;
+	} else {
+		/*
+		 * Right now we just check that there is p2pmem available so
+		 * we can report an error to the user right away if there
+		 * is not. We'll find the actual device to use once we
+		 * setup the controller when the port's device is available.
+		 */
+
+		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
+		if (!p2p_dev) {
+			pr_err("no peer-to-peer memory is available for %s\n",
+			       ns->device_path);
+			return -EINVAL;
+		}
+
+		pci_dev_put(p2p_dev);
+	}
+
+	return 0;
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
+		struct nvmet_ns *ns)
+{
+	struct device *clients[2];
+	struct pci_dev *p2p_dev;
+	int ret;
+
+	if (!ctrl->p2p_client)
+		return;
+
+	if (ns->p2p_dev) {
+		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
+		if (ret < 0)
+			return;
+
+		p2p_dev = pci_dev_get(ns->p2p_dev);
+	} else {
+		clients[0] = ctrl->p2p_client;
+		clients[1] = nvmet_ns_dev(ns);
+
+		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
+		if (!p2p_dev) {
+			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
+			       dev_name(ctrl->p2p_client), ns->device_path);
+			return;
+		}
+	}
+
+	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+	if (ret < 0)
+		pci_dev_put(p2p_dev);
+
+	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
+		ns->nsid);
+}
+
 int nvmet_ns_enable(struct nvmet_ns *ns)
 {
 	struct nvmet_subsys *subsys = ns->subsys;
+	struct nvmet_ctrl *ctrl;
 	int ret;
 
 	mutex_lock(&subsys->lock);
@@ -384,6 +469,13 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	if (ret)
 		goto out_unlock;
 
+	ret = nvmet_p2pmem_ns_enable(ns);
+	if (ret)
+		goto out_unlock;
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+
 	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
 				0, GFP_KERNEL);
 	if (ret)
@@ -418,6 +510,9 @@ out_unlock:
 	mutex_unlock(&subsys->lock);
 	return ret;
 out_dev_put:
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
 	nvmet_ns_dev_disable(ns);
 	goto out_unlock;
 }
@@ -425,6 +520,7 @@ out_dev_put:
 void nvmet_ns_disable(struct nvmet_ns *ns)
 {
 	struct nvmet_subsys *subsys = ns->subsys;
+	struct nvmet_ctrl *ctrl;
 
 	mutex_lock(&subsys->lock);
 	if (!ns->enabled)
@@ -434,6 +530,10 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
 	list_del_rcu(&ns->dev_link);
 	if (ns->nsid == subsys->max_nsid)
 		subsys->max_nsid = nvmet_max_nsid(subsys);
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
 	mutex_unlock(&subsys->lock);
 
 	/*
@@ -450,6 +550,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
 	percpu_ref_exit(&ns->ref);
 
 	mutex_lock(&subsys->lock);
+
 	subsys->nr_namespaces--;
 	nvmet_ns_changed(subsys, ns->nsid);
 	nvmet_ns_dev_disable(ns);
@@ -725,6 +826,51 @@ void nvmet_req_execute(struct nvmet_req *req)
 }
 EXPORT_SYMBOL_GPL(nvmet_req_execute);
 
+int nvmet_req_alloc_sgl(struct nvmet_req *req)
+{
+	struct pci_dev *p2p_dev = NULL;
+
+	if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
+		if (req->sq->ctrl && req->ns)
+			p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+						    req->ns->nsid);
+
+		req->p2p_dev = NULL;
+		if (req->sq->qid && p2p_dev) {
+			req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+						       req->transfer_len);
+			if (req->sg) {
+				req->p2p_dev = p2p_dev;
+				return 0;
+			}
+		}
+
+		/*
+		 * If no P2P memory was available we fallback to using
+		 * regular memory
+		 */
+	}
+
+	req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+	if (!req->sg)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
+
+void nvmet_req_free_sgl(struct nvmet_req *req)
+{
+	if (req->p2p_dev)
+		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+	else
+		sgl_free(req->sg);
+
+	req->sg = NULL;
+	req->sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
+
 static inline bool nvmet_cc_en(u32 cc)
 {
 	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
@@ -921,6 +1067,37 @@ bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
 	return __nvmet_host_allowed(subsys, hostnqn);
 }
 
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+		struct nvmet_req *req)
+{
+	struct nvmet_ns *ns;
+
+	if (!req->p2p_client)
+		return;
+
+	ctrl->p2p_client = get_device(req->p2p_client);
+
+	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
+{
+	struct radix_tree_iter iter;
+	void __rcu **slot;
+
+	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
+		pci_dev_put(radix_tree_deref_slot(slot));
+
+	put_device(ctrl->p2p_client);
+}
+
 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
 {
@@ -962,6 +1139,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
+	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
 
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1026,6 +1204,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 
 	mutex_lock(&subsys->lock);
 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+	nvmet_setup_p2p_ns_map(ctrl, req);
 	mutex_unlock(&subsys->lock);
 
 	*ctrlp = ctrl;
@@ -1053,6 +1232,7 @@ static void nvmet_ctrl_free(struct kref *ref)
 	struct nvmet_subsys *subsys = ctrl->subsys;
 
 	mutex_lock(&subsys->lock);
+	nvmet_release_p2p_ns_map(ctrl);
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 7bc9f6240432..5660dd7ca755 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -78,6 +78,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 		op = REQ_OP_READ;
 	}
 
+	if (is_pci_p2pdma_page(sg_page(req->sg)))
+		op_flags |= REQ_NOMERGE;
+
 	sector = le64_to_cpu(req->cmd->rw.slba);
 	sector <<= (req->ns->blksize_shift - 9);
 
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index ec9af4ee03b6..d6be098f342b 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -26,6 +26,7 @@
 #include <linux/configfs.h>
 #include <linux/rcupdate.h>
 #include <linux/blkdev.h>
+#include <linux/radix-tree.h>
 
 #define NVMET_ASYNC_EVENTS 4
 #define NVMET_ERROR_LOG_SLOTS 128
@@ -77,6 +78,9 @@ struct nvmet_ns {
 	struct completion disable_done;
 	mempool_t *bvec_pool;
 	struct kmem_cache *bvec_cache;
+
+	int use_p2pmem;
+	struct pci_dev *p2p_dev;
 };
 
 static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -84,6 +88,11 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
 	return container_of(to_config_group(item), struct nvmet_ns, group);
 }
 
+static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
+{
+	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
+}
+
 struct nvmet_cq {
 	u16 qid;
 	u16 size;
@@ -184,6 +193,9 @@ struct nvmet_ctrl {
 
 	char subsysnqn[NVMF_NQN_FIELD_LEN];
 	char hostnqn[NVMF_NQN_FIELD_LEN];
+
+	struct device *p2p_client;
+	struct radix_tree_root p2p_ns_map;
 };
 
 struct nvmet_subsys {
@@ -294,6 +306,9 @@ struct nvmet_req {
 	void (*execute)(struct nvmet_req *req);
 	const struct nvmet_fabrics_ops *ops;
+
+	struct pci_dev *p2p_dev;
+	struct device *p2p_client;
 };
 
 extern struct workqueue_struct *buffered_io_wq;
@@ -336,6 +351,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 void nvmet_req_uninit(struct nvmet_req *req);
 void nvmet_req_execute(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
+int nvmet_req_alloc_sgl(struct nvmet_req *req);
+void nvmet_req_free_sgl(struct nvmet_req *req);
 
 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
 		u16 size);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index bfc4da660bb4..3f7971d3706d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -503,7 +503,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
 	}
 
 	if (rsp->req.sg != rsp->cmd->inline_sg)
-		sgl_free(rsp->req.sg);
+		nvmet_req_free_sgl(&rsp->req);
 
 	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
 		nvmet_rdma_process_wr_wait_list(queue);
@@ -652,24 +652,24 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 {
 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
 	u64 addr = le64_to_cpu(sgl->addr);
-	u32 len = get_unaligned_le24(sgl->length);
 	u32 key = get_unaligned_le32(sgl->key);
 	int ret;
 
+	rsp->req.transfer_len = get_unaligned_le24(sgl->length);
+
 	/* no data command? */
-	if (!len)
+	if (!rsp->req.transfer_len)
 		return 0;
 
-	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
-	if (!rsp->req.sg)
-		return NVME_SC_INTERNAL;
+	ret = nvmet_req_alloc_sgl(&rsp->req);
+	if (ret < 0)
+		goto error_out;
 
 	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
 			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
 			nvmet_data_dir(&rsp->req));
 	if (ret < 0)
-		return NVME_SC_INTERNAL;
-	rsp->req.transfer_len += len;
+		goto error_out;
 	rsp->n_rdma += ret;
 
 	if (invalidate) {
@@ -678,6 +678,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 	}
 
 	return 0;
+
+error_out:
+	rsp->req.transfer_len = 0;
+	return NVME_SC_INTERNAL;
 }
 
 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
@@ -745,6 +749,8 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 			cmd->send_sge.addr, cmd->send_sge.length,
 			DMA_TO_DEVICE);
 
+	cmd->req.p2p_client = &queue->dev->device->dev;
+
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
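Taken together, the nvmet changes above give a fabrics transport a simple pattern, which the rdma.c hunks follow: point req->p2p_client at the device that will actually move the data before nvmet_req_init(), then let nvmet_req_alloc_sgl()/nvmet_req_free_sgl() choose between P2P and regular memory. A rough sketch under assumed example_* names (the queue structure and ops are placeholders, not a real transport):

#include "nvmet.h"

struct example_queue {			/* stand-in for a transport queue */
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;
	struct device *dma_device;	/* device behind the port */
};

static const struct nvmet_fabrics_ops example_ops;

static void example_handle_command(struct example_queue *queue,
		struct nvmet_req *req)
{
	/* Tells nvmet which device the P2P provider must be reachable from */
	req->p2p_client = queue->dma_device;

	if (!nvmet_req_init(req, &queue->nvme_cq, &queue->nvme_sq,
				&example_ops))
		return;

	/* Uses the namespace's p2pmem provider when one was set up */
	if (nvmet_req_alloc_sgl(req) < 0) {
		nvmet_req_complete(req, NVME_SC_INTERNAL);
		return;
	}

	/* ... post the data transfer; call nvmet_req_free_sgl() on completion ... */
}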