author      Keith Busch <keith.busch@intel.com>            2013-07-16 01:02:24 +0400
committer   Matthew Wilcox <matthew.r.wilcox@intel.com>    2013-09-04 00:44:25 +0400
commit      9d713c2bfb5e1d6abb18a8b12293631f9fcdc708 (patch)
tree        a31be5d7d9a6596a1d0f1efc6e01b7d5f560ad93 /drivers
parent      cd63894630ab17a192bf97427d16dbec10710a6a (diff)
download    linux-9d713c2bfb5e1d6abb18a8b12293631f9fcdc708.tar.xz
NVMe: Handle ioremap failure
Decrement the number of queues required for doorbell remapping until the
memory is successfully mapped for that size. Additional checks are done
so that we don't call free_irq if it has already been freed.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
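For illustration only, the shrink-and-retry idea can be sketched as a small
user-space C program. The try_map() helper and its 8192-byte limit are made-up
stand-ins for ioremap() and the default BAR mapping size, and the starting
queue count is arbitrary; db_bar_size() and the loop, however, mirror the code
added to nvme_setup_io_queues(): compute the doorbell region size for the
requested queue count and drop the count until the mapping succeeds, failing
only when no I/O queues remain.

#include <stdio.h>
#include <stdlib.h>

/* Same formula as the patch's db_bar_size(): 4 KiB of registers plus one
 * doorbell pair per queue (admin + nr_io_queues), scaled by the stride. */
static size_t db_bar_size(unsigned db_stride, unsigned nr_io_queues)
{
	return 4096 + ((size_t)(nr_io_queues + 1) << (db_stride + 3));
}

/* Hypothetical stand-in for ioremap(): pretend anything larger than the
 * 8192-byte default mapping fails, so the retry loop has work to do. */
static void *try_map(size_t size)
{
	return size > 8192 ? NULL : malloc(size);
}

int main(void)
{
	unsigned db_stride = 0;
	unsigned nr_io_queues = 2048;	/* e.g. num_online_cpus() */
	size_t size = db_bar_size(db_stride, nr_io_queues);
	void *bar;

	/* Same shape as the loop the patch adds: shrink the queue count
	 * until the doorbell region maps, or give up entirely. */
	do {
		bar = try_map(size);
		if (bar)
			break;
		if (!--nr_io_queues)
			return 1;	/* the driver returns -ENOMEM here */
		size = db_bar_size(db_stride, nr_io_queues);
	} while (1);

	printf("mapped %zu bytes for %u I/O queues\n", size, nr_io_queues);
	free(bar);
	return 0;
}

The second half of the fix is visible in the hunks below: free_irq() on the
admin vector is now called only after the remap has succeeded, and a failed
re-request marks the admin queue suspended, so the cleanup path does not try
to free an interrupt that is no longer registered.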
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/block/nvme-core.c    30
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8efa728f1eac..9f2b424c445e 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1739,10 +1739,15 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 	return min(result & 0xffff, result >> 16) + 1;
 }
 
+static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+{
+	return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
 	struct pci_dev *pdev = dev->pci_dev;
-	int result, cpu, i, vecs, nr_io_queues, db_bar_size, q_depth;
+	int result, cpu, i, vecs, nr_io_queues, size, q_depth;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1751,17 +1756,24 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result < nr_io_queues)
 		nr_io_queues = result;
 
-	/* Deregister the admin queue's interrupt */
-	free_irq(dev->entry[0].vector, dev->queues[0]);
-
-	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
-	if (db_bar_size > 8192) {
+	size = db_bar_size(dev, nr_io_queues);
+	if (size > 8192) {
 		iounmap(dev->bar);
-		dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
+		do {
+			dev->bar = ioremap(pci_resource_start(pdev, 0), size);
+			if (dev->bar)
+				break;
+			if (!--nr_io_queues)
+				return -ENOMEM;
+			size = db_bar_size(dev, nr_io_queues);
+		} while (1);
 		dev->dbs = ((void __iomem *)dev->bar) + 4096;
 		dev->queues[0]->q_db = dev->dbs;
 	}
 
+	/* Deregister the admin queue's interrupt */
+	free_irq(dev->entry[0].vector, dev->queues[0]);
+
 	vecs = nr_io_queues;
 	for (i = 0; i < vecs; i++)
 		dev->entry[i].entry = i;
@@ -1799,8 +1811,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	nr_io_queues = vecs;
 
 	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
-	if (result)
+	if (result) {
+		dev->queues[0]->q_suspended = 1;
 		goto free_queues;
+	}
 
 	/* Free previously allocated queues that are no longer usable */
 	spin_lock(&dev_list_lock);