From afcae7d7b8a278a6c29e064f99e5bafd4ac1fb37 Mon Sep 17 00:00:00 2001
From: Chuck Lever
Date: Tue, 27 Jan 2026 19:53:59 -0500
Subject: [PATCH] RDMA/core: Add rdma_rw_max_send_wr() helper for SQ sizing

svc_rdma_accept() computes sc_sq_depth as the sum of rq_depth and the
number of rdma_rw contexts (ctxts). This value is used to allocate the
Send CQ and to initialize the sc_sq_avail credit pool. However, when
the device uses memory registration for RDMA operations,
rdma_rw_init_qp() inflates the QP's max_send_wr by three entries per
context: one for the RDMA operation and one each for the REG and INV
work requests. The Send CQ and credit pool remain sized for only one
work request per context, causing Send Queue exhaustion under heavy
NFS WRITE workloads.

Introduce rdma_rw_max_send_wr() to compute the actual number of Send
Queue entries required for a given number of rdma_rw contexts. Upper
layer protocols call this helper before creating a Queue Pair so that
their Send CQs and credit accounting match the QP's true capacity.

Update svc_rdma_accept() to use rdma_rw_max_send_wr() when computing
sc_sq_depth, ensuring the credit pool reflects the work requests that
rdma_rw_init_qp() will reserve.

Reviewed-by: Christoph Hellwig
Fixes: 00bd1439f464 ("RDMA/rw: Support threshold for registration vs scattering to local pages")
Signed-off-by: Chuck Lever
Link: https://patch.msgid.link/20260128005400.25147-5-cel@kernel.org
Signed-off-by: Leon Romanovsky
---
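Not for merging, just an illustration for reviewers of how a ULP other
than svcrdma might pair the new helper with its CQ sizing. With, say,
128 rdma_rw contexts on a device that needs MRs for RDMA Reads and
Writes, rdma_rw_max_send_wr() returns 384 (3 x 128), which is exactly
what rdma_rw_init_qp() will add to max_send_wr, so the Send CQ and any
credit pool must budget for 384 entries rather than 128. The function
and parameter names below (example_ulp_create_qp, base_send_wrs,
recv_wrs, rdma_ctxs) are made up for this sketch; only
rdma_rw_max_send_wr() comes from this patch, and the remaining calls
are existing core verbs APIs.

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

/* Hypothetical ULP setup: size the Send CQ to cover the WRs that
 * rdma_rw_init_qp() will reserve for the requested rdma_rw contexts.
 */
static int example_ulp_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                                 u32 base_send_wrs, u32 recv_wrs,
                                 u32 rdma_ctxs)
{
        struct ib_device *dev = id->device;
        struct ib_qp_init_attr qp_attr = { };
        struct ib_cq *cq;
        u32 sq_depth;
        int ret;

        /* Total SQ entries: the ULP's own Send WRs plus every WR that
         * rdma_rw_init_qp() will add for rdma_ctxs contexts.
         */
        sq_depth = base_send_wrs +
                   rdma_rw_max_send_wr(dev, id->port_num, rdma_ctxs, 0);
        sq_depth = min_t(u32, sq_depth, dev->attrs.max_qp_wr);

        /* The Send CQ can now hold a completion for every possible SQE. */
        cq = ib_alloc_cq_any(dev, NULL, sq_depth, IB_POLL_WORKQUEUE);
        if (IS_ERR(cq))
                return PTR_ERR(cq);

        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.send_cq = cq;
        qp_attr.recv_cq = cq;
        qp_attr.port_num = id->port_num;
        /* Pass only the ULP's own WRs here; rdma_rw_init_qp() grows
         * max_send_wr by the amount rdma_rw_max_send_wr() predicted.
         */
        qp_attr.cap.max_send_wr = base_send_wrs;
        qp_attr.cap.max_recv_wr = recv_wrs;
        qp_attr.cap.max_rdma_ctxs = rdma_ctxs;
        qp_attr.cap.max_send_sge = 1;
        qp_attr.cap.max_recv_sge = 1;

        ret = rdma_create_qp(id, pd, &qp_attr);
        if (ret)
                ib_free_cq(cq);
        return ret;
}

A ULP that meters its own Send WR usage, as svcrdma does with
sc_sq_avail, would seed that counter with sq_depth as well, which is
the point of the svc_rdma_accept() change below.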
 drivers/infiniband/core/rw.c             | 53 +++++++++++++++++-------
 include/rdma/rw.h                        |  2 +
 net/sunrpc/xprtrdma/svc_rdma_transport.c |  8 +++-
 3 files changed, 46 insertions(+), 17 deletions(-)

diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 2c148457b589b..518095d82d5da 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -1071,34 +1071,57 @@ unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
 }
 EXPORT_SYMBOL(rdma_rw_mr_factor);
 
+/**
+ * rdma_rw_max_send_wr - compute max Send WRs needed for RDMA R/W contexts
+ * @dev: RDMA device
+ * @port_num: port number
+ * @max_rdma_ctxs: number of rdma_rw_ctx structures
+ * @create_flags: QP create flags (pass IB_QP_CREATE_INTEGRITY_EN if
+ *	data integrity will be enabled on the QP)
+ *
+ * Returns the total number of Send Queue entries needed for
+ * @max_rdma_ctxs. The result accounts for memory registration and
+ * invalidation work requests when the device requires them.
+ *
+ * ULPs use this to size Send Queues and Send CQs before creating a
+ * Queue Pair.
+ */
+unsigned int rdma_rw_max_send_wr(struct ib_device *dev, u32 port_num,
+				 unsigned int max_rdma_ctxs, u32 create_flags)
+{
+	unsigned int factor = 1;
+	unsigned int result;
+
+	if (create_flags & IB_QP_CREATE_INTEGRITY_EN ||
+	    rdma_rw_can_use_mr(dev, port_num))
+		factor += 2;	/* reg + inv */
+
+	if (check_mul_overflow(factor, max_rdma_ctxs, &result))
+		return UINT_MAX;
+	return result;
+}
+EXPORT_SYMBOL(rdma_rw_max_send_wr);
+
 void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
 {
-	u32 factor;
+	unsigned int factor = 1;
 
 	WARN_ON_ONCE(attr->port_num == 0);
 
 	/*
-	 * Each context needs at least one RDMA READ or WRITE WR.
-	 *
-	 * For some hardware we might need more, eventually we should ask the
-	 * HCA driver for a multiplier here.
-	 */
-	factor = 1;
-
-	/*
-	 * If the device needs MRs to perform RDMA READ or WRITE operations,
-	 * we'll need two additional MRs for the registrations and the
-	 * invalidation.
+	 * If the device uses MRs to perform RDMA READ or WRITE operations,
+	 * or if data integrity is enabled, account for registration and
+	 * invalidation work requests.
 	 */
 	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
 	    rdma_rw_can_use_mr(dev, attr->port_num))
-		factor += 2;	/* inv + reg */
+		factor += 2;	/* reg + inv */
 
 	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;
 
 	/*
-	 * But maybe we were just too high in the sky and the device doesn't
-	 * even support all we need, and we'll have to live with what we get..
+	 * The device might not support all we need, and we'll have to
+	 * live with what we get.
 	 */
 	attr->cap.max_send_wr = min_t(u32, attr->cap.max_send_wr,
 				      dev->attrs.max_qp_wr);
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
index 3400c017bfb68..6a1d08614e097 100644
--- a/include/rdma/rw.h
+++ b/include/rdma/rw.h
@@ -86,6 +86,8 @@ int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
 
 unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
 		unsigned int maxpages);
+unsigned int rdma_rw_max_send_wr(struct ib_device *dev, u32 port_num,
+		unsigned int max_rdma_ctxs, u32 create_flags);
 void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
 int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
 void rdma_rw_cleanup_mrs(struct ib_qp *qp);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index b7b318ad25c42..9b623849723ed 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -462,7 +462,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		newxprt->sc_max_bc_requests = 2;
 	}
 
-	/* Arbitrary estimate of the needed number of rdma_rw contexts.
+	/* Estimate the needed number of rdma_rw contexts. The maximum
+	 * Read and Write chunks have one segment each. Each request
+	 * can involve one Read chunk and either a Write chunk or Reply
+	 * chunk; thus a factor of three.
 	 */
 	maxpayload = min(xprt->xpt_server->sv_max_payload,
 			 RPCSVC_MAXPAYLOAD_RDMA);
@@ -470,7 +473,8 @@
 		rdma_rw_mr_factor(dev, newxprt->sc_port_num,
 				  maxpayload >> PAGE_SHIFT);
 
-	newxprt->sc_sq_depth = rq_depth + ctxts;
+	newxprt->sc_sq_depth = rq_depth +
+		rdma_rw_max_send_wr(dev, newxprt->sc_port_num, ctxts, 0);
 	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr)
 		newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
 	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);
-- 
2.47.3