git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
nvmet: report NPDGL and NPDAL
author: Caleb Sander Mateos <csander@purestorage.com>
Fri, 27 Feb 2026 20:23:53 +0000 (13:23 -0700)
committer: Keith Busch <kbusch@kernel.org>
Fri, 27 Mar 2026 14:35:05 +0000 (07:35 -0700)
A block device with a very large discard_granularity queue limit may not
be able to report it in the 16-bit NPDG and NPDA fields in the Identify
Namespace data structure. For this reason, version 2.1 of the NVMe specs
added 32-bit fields NPDGL and NPDAL to the NVM Command Set Specific
Identify Namespace structure. So report the discard_granularity there
too and set OPTPERF to 11b to indicate those fields are supported.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/io-cmd-bdev.c
drivers/nvme/target/nvmet.h

index 9de93f65d7d76588640121d4b5e8efdfe4638083..c0d38480bb62ed7bc7547719641d9f40b1b8552b 100644 (file)
@@ -1057,6 +1057,8 @@ static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
                status = NVME_SC_INTERNAL;
                goto out;
        }
+       if (req->ns->bdev)
+               nvmet_bdev_set_nvm_limits(req->ns->bdev, id);
        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        kfree(id);
 out:
index a8a7d3a88ef20f31c76bf7006838b137785f7af3..f2d9e8901df4e1f4ac456132d538383729484bfd 100644 (file)
@@ -30,11 +30,11 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
        id->nacwu = lpp0b;
 
        /*
-        * OPTPERF = 01b indicates that the fields NPWG, NPWA, NPDG, NPDA, and
-        * NOWS are defined for this namespace and should be used by
-        * the host for I/O optimization.
+        * OPTPERF = 11b indicates that the fields NPWG, NPWA, NPDG, NPDA,
+        * NPDGL, NPDAL, and NOWS are defined for this namespace and should be
+        * used by the host for I/O optimization.
         */
-       id->nsfeat |= 0x1 << NVME_NS_FEAT_OPTPERF_SHIFT;
+       id->nsfeat |= 0x3 << NVME_NS_FEAT_OPTPERF_SHIFT;
        /* NPWG = Namespace Preferred Write Granularity. 0's based */
        id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
        /* NPWA = Namespace Preferred Write Alignment. 0's based */
@@ -52,6 +52,17 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
                id->dlfeat = (1 << 3) | 0x1;
 }
 
+void nvmet_bdev_set_nvm_limits(struct block_device *bdev,
+                              struct nvme_id_ns_nvm *id)
+{
+       /*
+        * NPDGL = Namespace Preferred Deallocate Granularity Large
+        * NPDAL = Namespace Preferred Deallocate Alignment Large
+        */
+       id->npdgl = id->npdal = cpu_to_le32(bdev_discard_granularity(bdev) /
+                                           bdev_logical_block_size(bdev));
+}
+
 void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
 {
        if (ns->bdev_file) {
index f5d22267df6a5d03fe13e04356e4be7688b0c62b..5db8f0d6e3f2c564628d51487831c08a71edb659 100644 (file)
@@ -549,6 +549,8 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
 u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
 u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
 void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
+void nvmet_bdev_set_nvm_limits(struct block_device *bdev,
+                              struct nvme_id_ns_nvm *id);
 u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
 u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);