status = NVME_SC_INTERNAL;
goto out;
}
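+ /* Only bdev-backed namespaces can derive these limits from the block layer */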
+ if (req->ns->bdev)
+ nvmet_bdev_set_nvm_limits(req->ns->bdev, id);
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
kfree(id);
out:
@@ ... @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->nacwu = lpp0b;

/*
- * OPTPERF = 01b indicates that the fields NPWG, NPWA, NPDG, NPDA, and
- * NOWS are defined for this namespace and should be used by
- * the host for I/O optimization.
+ * OPTPERF = 11b indicates that the fields NPWG, NPWA, NPDG, NPDA,
+ * NPDGL, NPDAL, and NOWS are defined for this namespace and should be
+ * used by the host for I/O optimization.
*/
- id->nsfeat |= 0x1 << NVME_NS_FEAT_OPTPERF_SHIFT;
+ id->nsfeat |= 0x3 << NVME_NS_FEAT_OPTPERF_SHIFT;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* DLFEAT: Deallocate bit in Write Zeroes supported, deallocated blocks read as zeroes */
id->dlfeat = (1 << 3) | 0x1;
}
+
+void nvmet_bdev_set_nvm_limits(struct block_device *bdev,
+ struct nvme_id_ns_nvm *id)
+{
+ /*
+ * NPDGL = Namespace Preferred Deallocate Granularity Large
+ * NPDAL = Namespace Preferred Deallocate Alignment Large
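+ *
+ * Both fields are expressed in logical blocks, hence the conversion
+ * from the block layer's byte-granularity discard limit.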
+ */
+ id->npdgl = cpu_to_le32(bdev_discard_granularity(bdev) /
+ bdev_logical_block_size(bdev));
+ id->npdal = id->npdgl;
+}
+
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
if (ns->bdev_file) {
@@ ... @@ drivers/nvme/target/nvmet.h
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
+void nvmet_bdev_set_nvm_limits(struct block_device *bdev,
+ struct nvme_id_ns_nvm *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);