]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
smb: smbdirect: introduce smbdirect_connection_{create,destroy}_qp()
author Stefan Metzmacher <metze@samba.org>
Mon, 15 Sep 2025 23:10:00 +0000 (01:10 +0200)
committer Steve French <stfrench@microsoft.com>
Thu, 16 Apr 2026 02:58:19 +0000 (21:58 -0500)
smbdirect_connection_create_qp() is basically a copy of
smb_direct_create_qpair() in the server, it just adds
extra send_wr space for MR requests.

smbdirect_connection_destroy_qp() is the cleanup code
smb_direct_create_qpair() has, plus a call to
ib_drain_qp(), which will be a no-op if no requests are posted.

These additions allow the functions to be used by client and
server.

Cc: Steve French <smfrench@gmail.com>
Cc: Tom Talpey <tom@talpey.com>
Cc: Long Li <longli@microsoft.com>
Cc: Namjae Jeon <linkinjeon@kernel.org>
Cc: linux-cifs@vger.kernel.org
Cc: samba-technical@lists.samba.org
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Acked-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
fs/smb/common/smbdirect/smbdirect_connection.c
fs/smb/common/smbdirect/smbdirect_socket.h

index 5308bdd4797eb041b1afdf5bb27336723b6bfed3..3b0cbbece44caf1ebc9faef241b1667053988eec 100644 (file)
@@ -44,6 +44,220 @@ static void smbdirect_connection_qp_event_handler(struct ib_event *event, void *
        }
 }
 
+/*
+ * Estimate how many additional send WRs rdma_rw_init_qp() will reserve
+ * for attr->cap.max_rdma_ctxs RDMA READ/WRITE contexts on this device,
+ * so the caller can size the send CQ to match the adjusted QP.
+ */
+static u32 smbdirect_rdma_rw_send_wrs(struct ib_device *dev,
+                                     const struct ib_qp_init_attr *attr)
+{
+       /*
+        * This could be split out of rdma_rw_init_qp()
+        * and be a helper function next to rdma_rw_mr_factor()
+        *
+        * We can't check unlikely(rdma_rw_force_mr) here,
+        * but that is most likely 0 anyway.
+        */
+       u32 factor;
+
+       /* a valid port number is required to query the protocol below */
+       WARN_ON_ONCE(attr->port_num == 0);
+
+       /*
+        * Each context needs at least one RDMA READ or WRITE WR.
+        *
+        * For some hardware we might need more, eventually we should ask the
+        * HCA driver for a multiplier here.
+        */
+       factor = 1;
+
+       /*
+        * If the device needs MRs to perform RDMA READ or WRITE operations,
+        * we'll need two additional MRs for the registrations and the
+        * invalidation.
+        */
+       if (rdma_protocol_iwarp(dev, attr->port_num) || dev->attrs.max_sgl_rd)
+               factor += 2;    /* inv + reg */
+
+       return factor * attr->cap.max_rdma_ctxs;
+}
+
+static void smbdirect_connection_destroy_qp(struct smbdirect_socket *sc);
+
+__maybe_unused /* this is temporary while this file is included in others */
+static int smbdirect_connection_create_qp(struct smbdirect_socket *sc)
+{
+       const struct smbdirect_socket_parameters *sp = &sc->parameters;
+       struct ib_qp_init_attr qp_attr;
+       struct ib_qp_cap qp_cap;
+       u32 rdma_send_wr;
+       u32 max_send_wr;
+       int ret;
+
+       /*
+        * Note that {rdma,ib}_create_qp() will call
+        * rdma_rw_init_qp() if max_rdma_ctxs is not 0.
+        * It will adjust max_send_wr to the required
+        * number of additional WRs for the RDMA RW operations.
+        * It will cap max_send_wr to the device limit.
+        *
+        * We allocate sp->responder_resources * 2 MRs
+        * and each MR needs WRs for REG and INV, so
+        * we use '* 4'.
+        *
+        * +1 for ib_drain_qp()
+        */
+       memset(&qp_cap, 0, sizeof(qp_cap));
+       qp_cap.max_send_wr = sp->send_credit_target + sp->responder_resources * 4 + 1;
+       qp_cap.max_recv_wr = sp->recv_credit_max + 1;
+       qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
+       qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
+       qp_cap.max_inline_data = 0;
+       qp_cap.max_rdma_ctxs = sc->rw_io.credits.max;
+
+       /*
+        * Find out the number of max_send_wr
+        * after rdma_rw_init_qp() adjusted it.
+        *
+        * We only do it on a temporary variable,
+        * as rdma_create_qp() will trigger
+        * rdma_rw_init_qp() again.
+        */
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.cap = qp_cap;
+       qp_attr.port_num = sc->rdma.cm_id->port_num;
+       rdma_send_wr = smbdirect_rdma_rw_send_wrs(sc->ib.dev, &qp_attr);
+       max_send_wr = qp_cap.max_send_wr + rdma_send_wr;
+
+       if (qp_cap.max_send_wr > sc->ib.dev->attrs.max_cqe ||
+           qp_cap.max_send_wr > sc->ib.dev->attrs.max_qp_wr) {
+               pr_err("Possible CQE overrun: max_send_wr %d\n",
+                      qp_cap.max_send_wr);
+               pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
+                      IB_DEVICE_NAME_MAX,
+                      sc->ib.dev->name,
+                      sc->ib.dev->attrs.max_cqe,
+                      sc->ib.dev->attrs.max_qp_wr);
+               pr_err("consider lowering send_credit_target = %d\n",
+                      sp->send_credit_target);
+               return -EINVAL;
+       }
+
+       if (qp_cap.max_rdma_ctxs &&
+           (max_send_wr >= sc->ib.dev->attrs.max_cqe ||
+            max_send_wr >= sc->ib.dev->attrs.max_qp_wr)) {
+               pr_err("Possible CQE overrun: rdma_send_wr %d + max_send_wr %d = %d\n",
+                      rdma_send_wr, qp_cap.max_send_wr, max_send_wr);
+               pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
+                      IB_DEVICE_NAME_MAX,
+                      sc->ib.dev->name,
+                      sc->ib.dev->attrs.max_cqe,
+                      sc->ib.dev->attrs.max_qp_wr);
+               pr_err("consider lowering send_credit_target = %d, max_rdma_ctxs = %d\n",
+                      sp->send_credit_target, qp_cap.max_rdma_ctxs);
+               return -EINVAL;
+       }
+
+       if (qp_cap.max_recv_wr > sc->ib.dev->attrs.max_cqe ||
+           qp_cap.max_recv_wr > sc->ib.dev->attrs.max_qp_wr) {
+               pr_err("Possible CQE overrun: max_recv_wr %d\n",
+                      qp_cap.max_recv_wr);
+               pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
+                      IB_DEVICE_NAME_MAX,
+                      sc->ib.dev->name,
+                      sc->ib.dev->attrs.max_cqe,
+                      sc->ib.dev->attrs.max_qp_wr);
+               pr_err("consider lowering receive_credit_max = %d\n",
+                      sp->recv_credit_max);
+               return -EINVAL;
+       }
+
+       if (qp_cap.max_send_sge > sc->ib.dev->attrs.max_send_sge ||
+           qp_cap.max_recv_sge > sc->ib.dev->attrs.max_recv_sge) {
+               pr_err("device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
+                      IB_DEVICE_NAME_MAX,
+                      sc->ib.dev->name,
+                      sc->ib.dev->attrs.max_send_sge,
+                      sc->ib.dev->attrs.max_recv_sge);
+               return -EINVAL;
+       }
+
+       sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
+       if (IS_ERR(sc->ib.pd)) {
+               pr_err("Can't create RDMA PD: %pe\n", sc->ib.pd);
+               ret = PTR_ERR(sc->ib.pd);
+               sc->ib.pd = NULL;
+               return ret;
+       }
+
+       sc->ib.send_cq = ib_alloc_cq_any(sc->ib.dev, sc,
+                                        max_send_wr,
+                                        sc->ib.poll_ctx);
+       if (IS_ERR(sc->ib.send_cq)) {
+               pr_err("Can't create RDMA send CQ: %pe\n", sc->ib.send_cq);
+               ret = PTR_ERR(sc->ib.send_cq);
+               sc->ib.send_cq = NULL;
+               goto err;
+       }
+
+       sc->ib.recv_cq = ib_alloc_cq_any(sc->ib.dev, sc,
+                                        qp_cap.max_recv_wr,
+                                        sc->ib.poll_ctx);
+       if (IS_ERR(sc->ib.recv_cq)) {
+               pr_err("Can't create RDMA recv CQ: %pe\n", sc->ib.recv_cq);
+               ret = PTR_ERR(sc->ib.recv_cq);
+               sc->ib.recv_cq = NULL;
+               goto err;
+       }
+
+       /*
+        * We reset completely here!
+        * As the above use was just temporary
+        * to calc max_send_wr and rdma_send_wr.
+        *
+        * rdma_create_qp() will trigger rdma_rw_init_qp()
+        * again if max_rdma_ctxs is not 0.
+        */
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.event_handler = smbdirect_connection_qp_event_handler;
+       qp_attr.qp_context = sc;
+       qp_attr.cap = qp_cap;
+       qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+       qp_attr.qp_type = IB_QPT_RC;
+       qp_attr.send_cq = sc->ib.send_cq;
+       qp_attr.recv_cq = sc->ib.recv_cq;
+       qp_attr.port_num = ~0;
+
+       ret = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
+       if (ret) {
+               pr_err("Can't create RDMA QP: %pe\n",
+                      SMBDIRECT_DEBUG_ERR_PTR(ret));
+               goto err;
+       }
+       sc->ib.qp = sc->rdma.cm_id->qp;
+
+       return 0;
+err:
+       smbdirect_connection_destroy_qp(sc);
+       return ret;
+}
+
+/*
+ * Undo smbdirect_connection_create_qp() in reverse allocation order.
+ * Safe to call on a partially constructed state: every resource pointer
+ * is checked and NULLed, so the function is idempotent.
+ */
+static void smbdirect_connection_destroy_qp(struct smbdirect_socket *sc)
+{
+       if (sc->ib.qp) {
+               /* flush any posted WRs; a no-op if nothing is outstanding */
+               ib_drain_qp(sc->ib.qp);
+               sc->ib.qp = NULL;
+               rdma_destroy_qp(sc->rdma.cm_id);
+       }
+       if (sc->ib.recv_cq) {
+               ib_destroy_cq(sc->ib.recv_cq);
+               sc->ib.recv_cq = NULL;
+       }
+       if (sc->ib.send_cq) {
+               ib_destroy_cq(sc->ib.send_cq);
+               sc->ib.send_cq = NULL;
+       }
+       if (sc->ib.pd) {
+               ib_dealloc_pd(sc->ib.pd);
+               sc->ib.pd = NULL;
+       }
+}
+
 static void smbdirect_connection_destroy_mem_pools(struct smbdirect_socket *sc);
 
 __maybe_unused /* this is temporary while this file is included in others */
index ef0c488143116d3a60bd3f8047e3eb25b8f365f8..874d4d1a56a0f148ef61d7bc760d68f22537975f 100644 (file)
@@ -120,6 +120,7 @@ struct smbdirect_socket {
        /* IB verbs related */
        struct {
                struct ib_pd *pd;
+               enum ib_poll_context poll_ctx;
                struct ib_cq *send_cq;
                struct ib_cq *recv_cq;
 
@@ -498,6 +499,8 @@ static __always_inline void smbdirect_socket_init(struct smbdirect_socket *sc)
        INIT_WORK(&sc->disconnect_work, __smbdirect_socket_disabled_work);
        disable_work_sync(&sc->disconnect_work);
 
+       sc->ib.poll_ctx = IB_POLL_UNBOUND_WORKQUEUE;
+
        spin_lock_init(&sc->connect.lock);
        INIT_WORK(&sc->connect.work, __smbdirect_socket_disabled_work);
        disable_work_sync(&sc->connect.work);