]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
xprtrdma: Close sendctx get/put race that can block a transport
authorChuck Lever <chuck.lever@oracle.com>
Fri, 6 Mar 2026 21:56:22 +0000 (16:56 -0500)
committerTrond Myklebust <trond.myklebust@hammerspace.com>
Mon, 13 Apr 2026 18:52:49 +0000 (11:52 -0700)
rpcrdma_sendctx_get_locked() and rpcrdma_sendctx_put_locked() can
race in a way that leaves XPRT_WRITE_SPACE set permanently, blocking
all further sends on the transport:

  get_locked              put_locked (Send completion)
  ----------              --------------------------
  read rb_sc_tail
    -> ring full
                          advance rb_sc_tail
                          xprt_write_space():
                            test_bit(WRITE_SPACE)
                            -> not set, return
  set_bit(WRITE_SPACE)
  return NULL (-EAGAIN)

After the sender releases XPRT_LOCKED, the release path refuses to
wake the next task because XPRT_WRITE_SPACE is set. The sender
retries, finds XPRT_WRITE_SPACE still set, and sleeps on
xprt_sending. No further Send completions arrive to clear the flag
because no new Sends can be posted.

With nconnect, the stalled transport's share of congestion credits
is never returned, starving the remaining transports as well.

Fixes: 05eb06d86685 ("xprtrdma: Fix occasional transport deadlock")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
net/sunrpc/xprtrdma/verbs.c

index b51a162885bbc42d6a062532c46e871a532ad302..90fd83f2d8469c84e66bdefc57c6b2fd9522ef8e 100644 (file)
@@ -708,6 +708,18 @@ out_emptyq:
         */
        xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
        r_xprt->rx_stats.empty_sendctx_q++;
+
+       /* Recheck: a Send completion between the ring-empty test
+        * and the set_bit could cause its xprt_write_space() to
+        * miss, leaving XPRT_WRITE_SPACE set with a non-full ring.
+        * The smp_mb__after_atomic() pairs with smp_store_release()
+        * in rpcrdma_sendctx_put_locked().
+        */
+       smp_mb__after_atomic();
+       next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);
+       if (next_head != READ_ONCE(buf->rb_sc_tail))
+               xprt_write_space(&r_xprt->rx_xprt);
+
        return NULL;
 }
 
@@ -739,7 +751,10 @@ static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
 
        } while (buf->rb_sc_ctxs[next_tail] != sc);
 
-       /* Paired with READ_ONCE */
+       /* Paired with READ_ONCE in rpcrdma_sendctx_get_locked():
+        * both the fast-path ring-full test and the post-set_bit
+        * recheck in the slow path depend on this store-release.
+        */
        smp_store_release(&buf->rb_sc_tail, next_tail);
 
        xprt_write_space(&r_xprt->rx_xprt);