author    Keith Busch <keith.busch@intel.com>    2013-04-10 03:13:20 +0400
committer Matthew Wilcox <matthew.r.wilcox@intel.com>    2013-05-02 22:41:05 +0400
commit    159b67d7aefe3902df91075be5d80943c1570aa8 (patch)
tree      8c830202c7bc95727ecfc4e6dace6a5d8f4a1ea8 /drivers/block
parent    427e97080196548557b288517537ab7eb48c309f (diff)
download  linux-159b67d7aefe3902df91075be5d80943c1570aa8.tar.xz
NVMe: Device specific stripe size handling
We have an NVMe device that has a concept of a stripe size. IO requests that do not transfer data across a stripe boundary have greater performance than IO that does cross it. This patch sets the stripe size for the device if the vendor and device IDs match one with this feature, and splits IO requests that cross the stripe boundary.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
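The split length computation in the patch relies on the stripe size being a power of two: the bytes remaining before the next stripe boundary are the stripe size minus the IO's byte offset within the current stripe, taken with a mask rather than a modulo. A minimal userspace sketch of that arithmetic, with hypothetical values (the << 9 converts 512-byte sectors to bytes, mirroring the patch's bio->bi_sector << 9; the function name is illustrative, not from the patch):

#include <stdio.h>
#include <stdint.h>

/* Bytes from 'sector' (in 512-byte units) to the next stripe boundary.
 * stripe_size must be a power of two, as the patch assumes. */
static uint64_t bytes_to_boundary(uint64_t sector, uint64_t stripe_size)
{
	uint64_t byte_off = sector << 9;	/* sectors -> bytes */
	return stripe_size - (byte_off & (stripe_size - 1));
}

int main(void)
{
	uint64_t stripe = 128 * 1024;	/* hypothetical 128K stripe */

	/* An IO starting 4K before a boundary may carry at most 4K
	 * before it must be split, so this prints 4096. */
	printf("%llu\n",
	       (unsigned long long)bytes_to_boundary((stripe - 4096) >> 9, stripe));
	return 0;
}

When a bio segment would carry the accumulated length past this value, the patch hands the bio to nvme_split_and_submit() at the boundary instead of mapping it whole.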
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/nvme-core.c  19
1 file changed, 15 insertions, 4 deletions
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index f81280663a4b..4151a3d26e2d 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -519,7 +519,11 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 {
 	struct bio_vec *bvec, *bvprv = NULL;
 	struct scatterlist *sg = NULL;
-	int i, length = 0, nsegs = 0;
+	int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+
+	if (nvmeq->dev->stripe_size)
+		split_len = nvmeq->dev->stripe_size -
+			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
 
 	sg_init_table(iod->sg, psegs);
 	bio_for_each_segment(bvec, bio, i) {
@@ -535,6 +539,10 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 							bvec->bv_offset);
 			nsegs++;
 		}
+
+		if (split_len - length < bvec->bv_len)
+			return nvme_split_and_submit(bio, nvmeq, i, split_len,
+							split_len - length);
 		length += bvec->bv_len;
 		bvprv = bvec;
 	}
@@ -543,6 +551,7 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
 		return -ENOMEM;
 
+	BUG_ON(length != bio->bi_size);
 	return length;
 }
 
@@ -1612,6 +1621,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	struct nvme_id_ns *id_ns;
 	void *mem;
 	dma_addr_t dma_addr;
+	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
 	res = nvme_setup_io_queues(dev);
 	if (res)
@@ -1634,10 +1644,11 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
-	if (ctrl->mdts) {
-		int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+	if (ctrl->mdts)
 		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
-	}
+	if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
+	    (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+		dev->stripe_size = 1 << (ctrl->vs[3] + shift);
 
 	id_ns = mem;
 	for (i = 1; i <= nn; i++) {
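A note on the size arithmetic in nvme_dev_add(): CAP.MPSMIN encodes the controller's minimum page size as 2^(12 + MPSMIN) bytes, hence shift = MPSMIN + 12; MDTS is a power of two in units of that page size, and subtracting 9 converts bytes to 512-byte sectors. A worked example with hypothetical field values (ctrl->vs[3] is a vendor-specific byte, so its meaning here is particular to the matched Intel device):

#include <stdio.h>

int main(void)
{
	int mpsmin = 0;		/* CAP.MPSMIN of 0: 4K minimum page size */
	int shift = mpsmin + 12;
	int mdts = 5;		/* hypothetical Identify Controller MDTS */
	int vs3 = 5;		/* hypothetical ctrl->vs[3] */

	/* 1 << (5 + 12 - 9) = 256 sectors, i.e. 128K per transfer */
	printf("max_hw_sectors = %d\n", 1 << (mdts + shift - 9));
	/* 1 << (5 + 12) = 131072 bytes, a 128K stripe */
	printf("stripe_size    = %d bytes\n", 1 << (vs3 + shift));
	return 0;
}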