git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
nvme: fold nvme_config_discard() into nvme_update_disk_info()
author: Caleb Sander Mateos <csander@purestorage.com>
Fri, 27 Feb 2026 20:23:47 +0000 (13:23 -0700)
committer: Keith Busch <kbusch@kernel.org>
Fri, 27 Mar 2026 14:35:04 +0000 (07:35 -0700)
The choice of what queue limits are set in nvme_update_disk_info() vs.
nvme_config_discard() seems a bit arbitrary. A subsequent commit will
compute the discard_granularity limit using struct nvme_id_ns, which is
only passed to nvme_update_disk_info() currently. So move the logic in
nvme_config_discard() to nvme_update_disk_info(). Replace several
instances of ns->ctrl in nvme_update_disk_info() with the ctrl variable
brought from nvme_config_discard().

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/host/core.c

index 3de52f1d27234371de74c29442ea3d7196df160f..da477b502762900792e9a17ab124d9c3f38b9dba 100644 (file)
@@ -1884,26 +1884,6 @@ static bool nvme_init_integrity(struct nvme_ns_head *head,
        return true;
 }
 
-static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
-{
-       struct nvme_ctrl *ctrl = ns->ctrl;
-
-       if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
-               lim->max_hw_discard_sectors =
-                       nvme_lba_to_sect(ns->head, ctrl->dmrsl);
-       else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
-               lim->max_hw_discard_sectors = UINT_MAX;
-       else
-               lim->max_hw_discard_sectors = 0;
-
-       lim->discard_granularity = lim->logical_block_size;
-
-       if (ctrl->dmrl)
-               lim->max_discard_segments = ctrl->dmrl;
-       else
-               lim->max_discard_segments = NVME_DSM_MAX_RANGES;
-}
-
 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
 {
        return uuid_equal(&a->uuid, &b->uuid) &&
@@ -2082,6 +2062,7 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
                struct queue_limits *lim)
 {
        struct nvme_ns_head *head = ns->head;
+       struct nvme_ctrl *ctrl = ns->ctrl;
        u32 bs = 1U << head->lba_shift;
        u32 atomic_bs, phys_bs, io_opt = 0;
        bool valid = true;
@@ -2116,11 +2097,26 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
        lim->physical_block_size = min(phys_bs, atomic_bs);
        lim->io_min = phys_bs;
        lim->io_opt = io_opt;
-       if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
-           (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM))
+       if ((ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
+           (ctrl->oncs & NVME_CTRL_ONCS_DSM))
                lim->max_write_zeroes_sectors = UINT_MAX;
        else
-               lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
+               lim->max_write_zeroes_sectors = ctrl->max_zeroes_sectors;
+
+       if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
+               lim->max_hw_discard_sectors =
+                       nvme_lba_to_sect(ns->head, ctrl->dmrsl);
+       else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
+               lim->max_hw_discard_sectors = UINT_MAX;
+       else
+               lim->max_hw_discard_sectors = 0;
+
+       lim->discard_granularity = lim->logical_block_size;
+
+       if (ctrl->dmrl)
+               lim->max_discard_segments = ctrl->dmrl;
+       else
+               lim->max_discard_segments = NVME_DSM_MAX_RANGES;
        return valid;
 }
 
@@ -2385,7 +2381,6 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
        if (!nvme_update_disk_info(ns, id, &lim))
                capacity = 0;
 
-       nvme_config_discard(ns, &lim);
        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
            ns->head->ids.csi == NVME_CSI_ZNS)
                nvme_update_zone_info(ns, &lim, &zi);