nvme-pci: split nvme_dev_add
author		Christoph Hellwig <hch@lst.de>
		Thu, 21 Jul 2022 06:13:23 +0000 (08:13 +0200)
committer	Jens Axboe <axboe@kernel.dk>
		Tue, 2 Aug 2022 23:22:48 +0000 (17:22 -0600)
Split nvme_dev_add into a helper to actually allocate the tag set, and
one that just updates the number of queues.  Add a local variable for
the tag_set to clean up the code a bit.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/host/pci.c

index 57f9a5ed94ab21ec2f8049b1723ac2cb8d6eeda1..71a4f26ba4760597c1bf115c491e85e0711058b5 100644
@@ -2533,47 +2533,45 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
        return true;
 }
 
-static void nvme_dev_add(struct nvme_dev *dev)
+static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
 {
+       struct blk_mq_tag_set *set = &dev->tagset;
        int ret;
 
-       if (!dev->ctrl.tagset) {
-               dev->tagset.ops = &nvme_mq_ops;
-               dev->tagset.nr_hw_queues = dev->online_queues - 1;
-               dev->tagset.nr_maps = 2; /* default + read */
-               if (dev->io_queues[HCTX_TYPE_POLL])
-                       dev->tagset.nr_maps++;
-               dev->tagset.timeout = NVME_IO_TIMEOUT;
-               dev->tagset.numa_node = dev->ctrl.numa_node;
-               dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
-                                               BLK_MQ_MAX_DEPTH) - 1;
-               dev->tagset.cmd_size = sizeof(struct nvme_iod);
-               dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
-               dev->tagset.driver_data = dev;
-
-               /*
-                * Some Apple controllers requires tags to be unique
-                * across admin and IO queue, so reserve the first 32
-                * tags of the IO queue.
-                */
-               if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
-                       dev->tagset.reserved_tags = NVME_AQ_DEPTH;
+       set->ops = &nvme_mq_ops;
+       set->nr_hw_queues = dev->online_queues - 1;
+       set->nr_maps = 2; /* default + read */
+       if (dev->io_queues[HCTX_TYPE_POLL])
+               set->nr_maps++;
+       set->timeout = NVME_IO_TIMEOUT;
+       set->numa_node = dev->ctrl.numa_node;
+       set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+       set->cmd_size = sizeof(struct nvme_iod);
+       set->flags = BLK_MQ_F_SHOULD_MERGE;
+       set->driver_data = dev;
 
-               ret = blk_mq_alloc_tag_set(&dev->tagset);
-               if (ret) {
-                       dev_warn(dev->ctrl.device,
-                               "IO queues tagset allocation failed %d\n", ret);
-                       return;
-               }
-               dev->ctrl.tagset = &dev->tagset;
-       } else {
-               blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+       /*
+        * Some Apple controllers require tags to be unique
+        * across admin and IO queue, so reserve the first 32
+        * tags of the IO queue.
+        */
+       if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+               set->reserved_tags = NVME_AQ_DEPTH;
 
-               /* Free previously allocated queues that are no longer usable */
-               nvme_free_queues(dev, dev->online_queues);
+       ret = blk_mq_alloc_tag_set(set);
+       if (ret) {
+               dev_warn(dev->ctrl.device,
+                       "IO queues tagset allocation failed %d\n", ret);
+               return;
        }
+       dev->ctrl.tagset = set;
+}
 
-       nvme_dbbuf_set(dev);
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+       blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+       /* free previously allocated queues that are no longer usable */
+       nvme_free_queues(dev, dev->online_queues);
 }
 
 static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2924,7 +2922,11 @@ static void nvme_reset_work(struct work_struct *work)
        } else {
                nvme_start_queues(&dev->ctrl);
                nvme_wait_freeze(&dev->ctrl);
-               nvme_dev_add(dev);
+               if (!dev->ctrl.tagset)
+                       nvme_pci_alloc_tag_set(dev);
+               else
+                       nvme_pci_update_nr_queues(dev);
+               nvme_dbbuf_set(dev);
                nvme_unfreeze(&dev->ctrl);
        }