author      Stephan Günther <guenther@tum.de>    2015-11-08 04:07:02 +0300
committer   Jens Axboe <axboe@fb.com>            2015-11-11 19:36:56 +0300
commit      a310acd7a7ea53533886c11bb7edd11ffd61a036 (patch)
tree        233c4cfac009f9fc03dd1817d4fa056619deaa01 /drivers/nvme
parent      1fa8cc52f46c14fb1afc20c220855c40a5d28fcd (diff)
download    linux-a310acd7a7ea53533886c11bb7edd11ffd61a036.tar.xz
NVMe: use split lo_hi_{read,write}q
Some controllers may require ordered split transfers even on 64-bit
machines, e.g. Apple's NVMe controller as found in the MacBook8,1 and
MacBookAir7,1 (256/512GB models).

This patch enforces ordered split transfers on 64-bit platforms, which
works around the issue for all controllers. As pointed out by Christoph
[1], there should be no performance impact from this modification.
[1] http://lists.infradead.org/pipermail/linux-nvme/2015-November/002965.html
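
For context, the accessors the patch switches to come from the kernel's
io-64-nonatomic-lo-hi helpers. The snippet below is only a rough sketch of
their behaviour, assuming the usual readl()/writel() MMIO primitives; the
authoritative definitions live in include/linux/io-64-nonatomic-lo-hi.h.

/*
 * Sketch (not part of this patch): a 64-bit register access is split into
 * two 32-bit accesses, low word first, so the controller always observes an
 * ordered lo/hi pair instead of a single 64-bit transfer.
 */
static inline u64 lo_hi_readq(const volatile void __iomem *addr)
{
	u32 low  = readl(addr);		/* lower 32 bits first */
	u32 high = readl(addr + 4);	/* then the upper 32 bits */

	return ((u64)high << 32) | low;
}

static inline void lo_hi_writeq(u64 val, volatile void __iomem *addr)
{
	writel((u32)val, addr);			/* lower 32 bits first */
	writel((u32)(val >> 32), addr + 4);	/* then the upper 32 bits */
}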
Signed-off-by: Stephan Guenther <guenther@tum.de>
Signed-off-by: Maurice Leclaire <leclaire@in.tum.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Updated by me to explicitly use lo_hi_read/writeq instead of playing
define tricks.
Signed-off-by: Jens Axboe <axboe@fb.com>
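
The "define tricks" mentioned above are not spelled out in the commit; one
hypothetical form, shown purely for illustration, would be to remap the
generic 64-bit accessors to the split helpers with macros rather than calling
them explicitly at each site, as the merged patch does.

/*
 * Hypothetical sketch only -- not what the merged patch does. Macro
 * redefinition would hide the split behind readq()/writeq(); calling
 * lo_hi_readq()/lo_hi_writeq() directly keeps the ordered access pattern
 * explicit at every call site.
 */
#define readq(addr)		lo_hi_readq(addr)
#define writeq(val, addr)	lo_hi_writeq(val, addr)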
Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/pci.c   12
1 file changed, 6 insertions, 6 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cb89789df40c..3435d79a99ee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1725,7 +1725,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
 	int result;
 	u32 aqa;
-	u64 cap = readq(&dev->bar->cap);
+	u64 cap = lo_hi_readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
 	unsigned page_shift = PAGE_SHIFT;
 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
@@ -1774,8 +1774,8 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 
 	writel(aqa, &dev->bar->aqa);
-	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
-	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+	lo_hi_writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+	lo_hi_writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
 
 	result = nvme_enable_ctrl(dev, cap);
 	if (result)
@@ -2606,7 +2606,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int res;
 	struct nvme_id_ctrl *ctrl;
-	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+	int shift = NVME_CAP_MPSMIN(lo_hi_readq(&dev->bar->cap)) + 12;
 
 	res = nvme_identify_ctrl(dev, &ctrl);
 	if (res) {
@@ -2697,7 +2697,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 		goto unmap;
 	}
 
-	cap = readq(&dev->bar->cap);
+	cap = lo_hi_readq(&dev->bar->cap);
 	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
 	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
@@ -2760,7 +2760,7 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
 		 * queues than admin tags.
 		 */
 		set_current_state(TASK_RUNNING);
-		nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+		nvme_disable_ctrl(dev, lo_hi_readq(&dev->bar->cap));
 		nvme_clear_queue(dev->queues[0]);
 		flush_kthread_worker(dq->worker);
 		nvme_disable_queue(dev, 0);