nvmet-tcp: fix race between ICReq handling and queue teardown
author	Chaitanya Kulkarni <kch@nvidia.com>
	Wed, 8 Apr 2026 07:51:31 +0000 (00:51 -0700)
committer	Keith Busch <kbusch@kernel.org>
	Thu, 9 Apr 2026 14:19:32 +0000 (07:19 -0700)
nvmet_tcp_handle_icreq() updates queue->state after sending an
Initialize Connection Response (ICResp), but it does so without
serializing against target-side queue teardown.

If an NVMe/TCP host sends an Initialize Connection Request
(ICReq) and immediately closes the connection, target-side teardown
may start in softirq context before io_work drains the already
buffered ICReq. In that case, nvmet_tcp_schedule_release_queue()
sets queue->state to NVMET_TCP_Q_DISCONNECTING and drops the queue
reference under state_lock.
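
For reference, a minimal sketch of that teardown side, assuming the
mainline shape of nvmet_tcp_schedule_release_queue(); the kref_put()
call and the nvmet_tcp_release_queue release callback are illustrative
names taken from the description above, not verbatim kernel code:

	static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
	{
		spin_lock_bh(&queue->state_lock);
		/* The DISCONNECTING guard that a stale ICReq can defeat. */
		if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
			queue->state = NVMET_TCP_Q_DISCONNECTING;
			/* Drop the queue reference under state_lock. */
			kref_put(&queue->kref, nvmet_tcp_release_queue);
		}
		spin_unlock_bh(&queue->state_lock);
	}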

If io_work later processes that ICReq, nvmet_tcp_handle_icreq() can
still overwrite the state back to NVMET_TCP_Q_LIVE. That defeats the
DISCONNECTING-state guard in nvmet_tcp_schedule_release_queue() and
allows a later socket state change to re-enter teardown and issue a
second kref_put() on an already released queue.
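
Schematically, an illustrative interleaving of the two contexts:

	io_work                          socket state-change callback
	-------                          ----------------------------
	ICReq received, still buffered
	                                 nvmet_tcp_schedule_release_queue()
	                                   state = NVMET_TCP_Q_DISCONNECTING
	                                   drop queue reference
	nvmet_tcp_handle_icreq()
	  sends ICResp
	  state = NVMET_TCP_Q_LIVE  <-- clobbers DISCONNECTING
	                                 later socket state change
	                                 nvmet_tcp_schedule_release_queue()
	                                   guard sees LIVE, teardown re-runs
	                                   drop queue reference  <-- double put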

The ICResp send failure path has the same problem. If teardown has
already moved the queue to DISCONNECTING, a send error can still
overwrite the state with NVMET_TCP_Q_FAILED, again reopening the
window for a second teardown path to drop the queue reference.
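
Before this change, both stores were plain unserialized writes; this is
the pre-patch tail of nvmet_tcp_handle_icreq(), reconstructed from the
unchanged lines in the diff below:

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0) {
		queue->state = NVMET_TCP_Q_FAILED; /* can clobber DISCONNECTING */
		return ret; /* queue removal will cleanup */
	}

	queue->state = NVMET_TCP_Q_LIVE; /* can clobber DISCONNECTING */
	nvmet_prepare_receive_pdu(queue);
	return 0;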

Fix this by serializing both post-send state transitions with
state_lock and bailing out if teardown has already started.

Use -ESHUTDOWN as an internal sentinel for that bail-out path rather
than propagating it as a transport error like -ECONNRESET. Keep
nvmet_tcp_socket_error() setting rcv_state to NVMET_TCP_RECV_ERR before
honoring that sentinel so receive-side parsing stays quiesced until the
existing release path completes.
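
For context, the existing receive loop that routes these errors into
nvmet_tcp_socket_error() looks like this in current mainline (unchanged
by this patch):

	static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
			int budget, int *recvs)
	{
		int i, ret = 0;

		for (i = 0; i < budget; i++) {
			ret = nvmet_tcp_try_recv_one(queue);
			if (unlikely(ret < 0)) {
				/*
				 * -ESHUTDOWN from nvmet_tcp_handle_icreq() lands
				 * here; rcv_state goes to NVMET_TCP_RECV_ERR and
				 * the already-scheduled release path finishes the
				 * teardown.
				 */
				nvmet_tcp_socket_error(queue, ret);
				goto done;
			} else if (ret == 0) {
				break;
			}
			(*recvs)++;
		}
	done:
		return ret;
	}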

Fixes: c46a6465bac2 ("nvmet-tcp: add NVMe over TCP target driver")
Cc: stable@vger.kernel.org
Reported-by: Shivam Kumar <skumar47@syr.edu>
Tested-by: Shivam Kumar <kumar.shivam43666@gmail.com>
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/target/tcp.c

index a4c3c62e33f57dc0f49e120c070b3207fbbb32ba..164a564ba3b4e994ccbf437170e19ddac4fdc9a1 100644
@@ -394,6 +394,19 @@ static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 
 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
 {
+       /*
+        * Keep rcv_state at RECV_ERR even for the internal -ESHUTDOWN path.
+        * nvmet_tcp_handle_icreq() can return -ESHUTDOWN after the ICReq has
+        * already been consumed and queue teardown has started.
+        *
+        * If nvmet_tcp_data_ready() or nvmet_tcp_write_space() queues
+        * nvmet_tcp_io_work() again before nvmet_tcp_release_queue_work()
+        * cancels it, the queue must not keep that old receive state.
+        * Otherwise the next nvmet_tcp_io_work() run can reach
+        * nvmet_tcp_done_recv_pdu() and try to handle the same ICReq again.
+        *
+        * That is why queue->rcv_state needs to be updated before we return.
+        */
        queue->rcv_state = NVMET_TCP_RECV_ERR;
        if (status == -EPIPE || status == -ECONNRESET || !queue->nvme_sq.ctrl)
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
@@ -908,11 +921,24 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
        iov.iov_len = sizeof(*icresp);
        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
        if (ret < 0) {
+               spin_lock_bh(&queue->state_lock);
+               if (queue->state == NVMET_TCP_Q_DISCONNECTING) {
+                       spin_unlock_bh(&queue->state_lock);
+                       return -ESHUTDOWN;
+               }
                queue->state = NVMET_TCP_Q_FAILED;
+               spin_unlock_bh(&queue->state_lock);
                return ret; /* queue removal will cleanup */
        }
 
+       spin_lock_bh(&queue->state_lock);
+       if (queue->state == NVMET_TCP_Q_DISCONNECTING) {
+               spin_unlock_bh(&queue->state_lock);
+               /* Tell nvmet_tcp_socket_error() teardown is in progress. */
+               return -ESHUTDOWN;
+       }
        queue->state = NVMET_TCP_Q_LIVE;
+       spin_unlock_bh(&queue->state_lock);
        nvmet_prepare_receive_pdu(queue);
        return 0;
 }