[ Upstream commit 6fad84a4d624c300d03ebba457cc641765050c43 ]
If the device supports SGLs, use these for all user requests. This
format encodes the expected transfer length, so it can catch short
buffer errors in a user command, whether they occur accidentally or
maliciously.

For controllers that support SGL data mode, this is a viable mitigation
for CVE-2023-6238. For controllers that don't support SGLs, log a
warning in the passthrough path, since not having the capability can
corrupt data if the interface is not used correctly.
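As an illustration only, here is a minimal user-space sketch of the
passthrough interface this change hardens, using the uapi from
<linux/nvme_ioctl.h>. The device path /dev/nvme0n1, nsid 1, and the
512-byte LBA format are assumptions for the example; with an
SGL-capable controller and this patch, a data_len shorter than the
transfer implied by cdw12 becomes a detectable length mismatch instead
of a silently short buffer.

/*
 * Sketch only: issue a passthrough NVM read through NVME_IOCTL_IO_CMD.
 * Device path, nsid, and 512-byte blocks are illustrative assumptions.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct nvme_passthru_cmd cmd;
	void *buf = NULL;
	int fd, ret;

	fd = open("/dev/nvme0n1", O_RDONLY);
	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode   = 0x02;			/* NVM read */
	cmd.nsid     = 1;
	cmd.addr     = (__u64)(uintptr_t)buf;
	cmd.data_len = 4096;			/* covers 8 x 512-byte blocks */
	cmd.cdw10    = 0;			/* starting LBA, low 32 bits */
	cmd.cdw12    = 8 - 1;			/* NLB is zero-based: 8 blocks */

	/*
	 * Passing a smaller buffer here (e.g. data_len = 2048) is the
	 * short-buffer case: PRPs cannot express the mapped length, but
	 * an SGL data block descriptor can, so the mismatch is caught.
	 */
	ret = ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
	printf("ioctl: %d, result: %u\n", ret, cmd.result);

	free(buf);
	close(fd);
	return ret != 0;
}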
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Stable-dep-of: 00817f0f1c45 ("nvme-ioctl: fix leaked requests on mapping error")
Signed-off-by: Sasha Levin <sashal@kernel.org>
struct nvme_ns *ns = q->queuedata;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
bool has_metadata = meta_buffer && meta_len;
struct bio *bio = NULL;
int ret;
- if (has_metadata && !supports_metadata)
- return -EINVAL;
+ if (!nvme_ctrl_sgl_supported(ctrl))
+ dev_warn_once(ctrl->device, "using unchecked data buffer\n");
+ if (has_metadata) {
+ if (!supports_metadata)
+ return -EINVAL;
+ if (!nvme_ctrl_meta_sgl_supported(ctrl))
+ dev_warn_once(ctrl->device,
+ "using unchecked metadata buffer\n");
+ }
if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
struct iov_iter iter;
{
if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
return false;
- return req->nr_integrity_segments > 1;
+ return req->nr_integrity_segments > 1 ||
+ nvme_req(req)->flags & NVME_REQ_USERCMD;
}
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
if (nvme_pci_metadata_use_sgls(dev, req))
return true;
if (!sgl_threshold || avg_seg_size < sgl_threshold)
- return false;
+ return nvme_req(req)->flags & NVME_REQ_USERCMD;
return true;
}
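Since the hunk above omits the top of the function, here is a sketch of
how the resulting nvme_pci_use_sgls() decision reads after this change;
the "int nseg" parameter, the avg_seg_size computation, and the leading
SGL-capability check are assumptions taken from surrounding mainline
context, not part of the hunk itself.

/*
 * Sketch only: approximate shape of nvme_pci_use_sgls() after this
 * patch.  Everything outside the two changed branches is assumed from
 * context the hunk above does not show.
 */
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
				     int nseg)
{
	unsigned int avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
						 nseg);

	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
		return false;		/* controller cannot do SGLs at all */
	if (nvme_pci_metadata_use_sgls(dev, req))
		return true;		/* metadata path already requires SGLs */
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		/* below the threshold, only user passthrough commands force SGLs */
		return nvme_req(req)->flags & NVME_REQ_USERCMD;
	return true;
}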