author     Christoph Hellwig <hch@lst.de>    2022-07-21 09:13:23 +0300
committer  Jens Axboe <axboe@kernel.dk>      2022-08-03 02:22:48 +0300
commit     2455a4b77835c2c9d1c0310d50f69e6fbc1b173f
tree       194cd0bb6ee0924acfe4ba9c86bd465667500ab0  /drivers/nvme/host/pci.c
parent     f91b727ccf1fabe6c02dc184bb8a156f895428c8
nvme-pci: split nvme_dev_add
Split nvme_dev_add into a helper to actually allocate the tag set, and
one that just updates the number of queues. Add a local variable for
the tag_set to clean up the code a bit.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/nvme/host/pci.c')
-rw-r--r--   drivers/nvme/host/pci.c   72
1 file changed, 37 insertions(+), 35 deletions(-)
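Before the full diff, a condensed sketch of the resulting call site in nvme_reset_work(): the single nvme_dev_add() call becomes an explicit choice between the two new helpers, with nvme_dbbuf_set() hoisted into the caller. All identifiers below come from the patch itself; this is only an illustrative excerpt, not a buildable unit.

        if (!dev->ctrl.tagset)
                nvme_pci_alloc_tag_set(dev);    /* first reset: allocate and register dev->tagset */
        else
                nvme_pci_update_nr_queues(dev); /* later resets: resize and free unused queues */
        nvme_dbbuf_set(dev);                    /* now called directly from nvme_reset_work() */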
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 57f9a5ed94ab..71a4f26ba476 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2533,47 +2533,45 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
 	return true;
 }
 
-static void nvme_dev_add(struct nvme_dev *dev)
+static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
 {
+	struct blk_mq_tag_set * set = &dev->tagset;
 	int ret;
 
-	if (!dev->ctrl.tagset) {
-		dev->tagset.ops = &nvme_mq_ops;
-		dev->tagset.nr_hw_queues = dev->online_queues - 1;
-		dev->tagset.nr_maps = 2; /* default + read */
-		if (dev->io_queues[HCTX_TYPE_POLL])
-			dev->tagset.nr_maps++;
-		dev->tagset.timeout = NVME_IO_TIMEOUT;
-		dev->tagset.numa_node = dev->ctrl.numa_node;
-		dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
-						BLK_MQ_MAX_DEPTH) - 1;
-		dev->tagset.cmd_size = sizeof(struct nvme_iod);
-		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
-		dev->tagset.driver_data = dev;
-
-		/*
-		 * Some Apple controllers requires tags to be unique
-		 * across admin and IO queue, so reserve the first 32
-		 * tags of the IO queue.
-		 */
-		if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
-			dev->tagset.reserved_tags = NVME_AQ_DEPTH;
+	set->ops = &nvme_mq_ops;
+	set->nr_hw_queues = dev->online_queues - 1;
+	set->nr_maps = 2; /* default + read */
+	if (dev->io_queues[HCTX_TYPE_POLL])
+		set->nr_maps++;
+	set->timeout = NVME_IO_TIMEOUT;
+	set->numa_node = dev->ctrl.numa_node;
+	set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+	set->cmd_size = sizeof(struct nvme_iod);
+	set->flags = BLK_MQ_F_SHOULD_MERGE;
+	set->driver_data = dev;
 
-		ret = blk_mq_alloc_tag_set(&dev->tagset);
-		if (ret) {
-			dev_warn(dev->ctrl.device,
-				"IO queues tagset allocation failed %d\n", ret);
-			return;
-		}
-		dev->ctrl.tagset = &dev->tagset;
-	} else {
-		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+	/*
+	 * Some Apple controllers requires tags to be unique
+	 * across admin and IO queue, so reserve the first 32
+	 * tags of the IO queue.
+	 */
+	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+		set->reserved_tags = NVME_AQ_DEPTH;
 
-		/* Free previously allocated queues that are no longer usable */
-		nvme_free_queues(dev, dev->online_queues);
+	ret = blk_mq_alloc_tag_set(set);
+	if (ret) {
+		dev_warn(dev->ctrl.device,
+			"IO queues tagset allocation failed %d\n", ret);
+		return;
 	}
+	dev->ctrl.tagset = set;
+}
 
-	nvme_dbbuf_set(dev);
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+	blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+	/* free previously allocated queues that are no longer usable */
+	nvme_free_queues(dev, dev->online_queues);
 }
 
 static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2924,7 +2922,11 @@ static void nvme_reset_work(struct work_struct *work)
 	} else {
 		nvme_start_queues(&dev->ctrl);
 		nvme_wait_freeze(&dev->ctrl);
-		nvme_dev_add(dev);
+		if (!dev->ctrl.tagset)
+			nvme_pci_alloc_tag_set(dev);
+		else
+			nvme_pci_update_nr_queues(dev);
+		nvme_dbbuf_set(dev);
 		nvme_unfreeze(&dev->ctrl);
 	}