nvmet: pci-epf: Do not complete commands twice if nvmet_req_init() fails
author		Rick Wertenbroek <rick.wertenbroek@gmail.com>
		Wed, 16 Jul 2025 11:15:03 +0000 (13:15 +0200)
committer	Christoph Hellwig <hch@lst.de>
		Thu, 17 Jul 2025 11:39:57 +0000 (13:39 +0200)
Have nvmet_req_init() and req->execute() complete failed commands.

Description of the problem:
nvmet_req_init() calls __nvmet_req_complete() internally upon failure,
e.g., for an unsupported opcode, which invokes the "queue_response"
callback. This results in nvmet_pci_epf_queue_response() being called,
which in turn calls nvmet_pci_epf_complete_iod() if data_len is 0 or if
dma_dir is different from DMA_TO_DEVICE. The result is a double
completion, because nvmet_pci_epf_exec_iod_work() also calls
nvmet_pci_epf_complete_iod() when nvmet_req_init() fails.

Steps to reproduce:
On the host, send a command with an unsupported opcode using nvme-cli,
for example the admin command "security receive":
$ sudo nvme security-recv /dev/nvme0n1 -n1 -x4096

This triggers a double completion: nvmet_req_init() fails and
nvmet_pci_epf_queue_response() is called; at this point iod->dma_dir is
still DMA_NONE, the default set in nvmet_pci_epf_alloc_iod(), so
nvmet_pci_epf_complete_iod() is called. Because nvmet_req_init() failed,
nvmet_pci_epf_complete_iod() is also called in
nvmet_pci_epf_exec_iod_work(), leading to a double completion. This not
only sends two completions to the host but also corrupts the state of
the PCI NVMe target, leading to a kernel oops.
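
Concretely, in the pre-patch nvmet_pci_epf_queue_response() (first hunk
below), the check reduces to the following for such a failed command:

  /* after a failed nvmet_req_init(), iod->dma_dir is still DMA_NONE */
  if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
          /* DMA_NONE != DMA_TO_DEVICE, so this branch is taken */
          nvmet_pci_epf_complete_iod(iod); /* first completion */
          return;
  }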

This patch lets nvmet_req_init() and req->execute() complete all failed
commands, and removes the double completion case in
nvmet_pci_epf_exec_iod_work(), thereby fixing the edge cases where
double completions occurred.
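
With the patch applied, the same failure should complete the command
exactly once; roughly (based on the hunks below):

  nvmet_pci_epf_exec_iod_work()
    nvmet_req_init()                       /* fails, e.g. unsupported opcode */
      __nvmet_req_complete()
        nvmet_pci_epf_queue_response()     /* iod->status set, complete now */
          nvmet_pci_epf_complete_iod()     /* single completion */
    return                                 /* instead of goto complete */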

Fixes: 0faa0fe6f90e ("nvmet: New NVMe PCI endpoint function target driver")
Signed-off-by: Rick Wertenbroek <rick.wertenbroek@gmail.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/target/pci-epf.c

index 44288d8f5aa111416dacd6871ab5c271498cb9f0..2e78397a7373a7d8ba67150f301f392123db88d1 100644
@@ -1242,8 +1242,11 @@ static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
 
        iod->status = le16_to_cpu(req->cqe->status) >> 1;
 
-       /* If we have no data to transfer, directly complete the command. */
-       if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
+       /*
+        * If the command failed or we have no data to transfer, complete the
+        * command immediately.
+        */
+       if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
                nvmet_pci_epf_complete_iod(iod);
                return;
        }
@@ -1604,8 +1607,13 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
                goto complete;
        }
 
+       /*
+        * If nvmet_req_init() fails (e.g., unsupported opcode) it will call
+        * __nvmet_req_complete() internally which will call
+        * nvmet_pci_epf_queue_response() and will complete the command directly.
+        */
        if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
-               goto complete;
+               return;
 
        iod->data_len = nvmet_req_transfer_len(req);
        if (iod->data_len) {
@@ -1643,10 +1651,11 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
 
        wait_for_completion(&iod->done);
 
-       if (iod->status == NVME_SC_SUCCESS) {
-               WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
-               nvmet_pci_epf_transfer_iod_data(iod);
-       }
+       if (iod->status != NVME_SC_SUCCESS)
+               return;
+
+       WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
+       nvmet_pci_epf_transfer_iod_data(iod);
 
 complete:
        nvmet_pci_epf_complete_iod(iod);