nvme: set max_hw_sectors unconditionally
author     Christoph Hellwig <hch@lst.de>
           Mon, 4 Mar 2024 14:04:45 +0000 (07:04 -0700)
committer  Keith Busch <kbusch@kernel.org>
           Mon, 4 Mar 2024 16:24:55 +0000 (08:24 -0800)
All transports set a max_hw_sectors value in the nvme_ctrl, so make
the code using it unconditional and clean it up using a little helper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/host/core.c

index cb13f7c79eaf9c28af2a02397c20c8a30364c443..6ae9aedf7bc2784744127c632a3a4ee988c9a4d2 100644
@@ -1944,19 +1944,19 @@ static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
        return 0;
 }
 
+static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
+{
+       return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
+}
+
 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                struct request_queue *q)
 {
        bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
 
-       if (ctrl->max_hw_sectors) {
-               u32 max_segments =
-                       (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
-
-               max_segments = min_not_zero(max_segments, ctrl->max_segments);
-               blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
-               blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
-       }
+       blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+       blk_queue_max_segments(q, min_t(u32, USHRT_MAX,
+               min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)));
        blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
        blk_queue_dma_alignment(q, 3);
        blk_queue_write_cache(q, vwc, vwc);
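
For illustration, a minimal user-space sketch of the arithmetic in the new
nvme_max_drv_segments() helper, assuming the usual kernel values of
NVME_CTRL_PAGE_SIZE = 4096 and SECTOR_SHIFT = 9; the standalone wrapper and
the example transfer size are assumptions for the example, not part of the
patch:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT            9
#define NVME_CTRL_PAGE_SIZE     4096u

/*
 * One data segment per controller page worth of sectors, plus one extra
 * segment to cover an unaligned start of the transfer.
 */
static uint32_t nvme_max_drv_segments(uint32_t max_hw_sectors)
{
        return max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
}

int main(void)
{
        /*
         * Example: a 128 KiB transfer limit is 256 sectors of 512 bytes,
         * so 256 / 8 + 1 = 33 segments.
         */
        printf("%u\n", nvme_max_drv_segments(256));
        return 0;
}

With blk_queue_max_segments() now called unconditionally, the result is
still clamped by ctrl->max_segments when the transport sets one (via
min_not_zero()) and capped at USHRT_MAX, as in the old conditional code.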