RDMA/irdma: Support 64-byte CQEs and GEN3 CQE opcode decoding
author Shiraz Saleem <shiraz.saleem@intel.com>
Wed, 27 Aug 2025 15:25:40 +0000 (10:25 -0500)
committer Leon Romanovsky <leon@kernel.org>
Thu, 18 Sep 2025 08:48:46 +0000 (04:48 -0400)
Introduce support for 64-byte CQEs in GEN3 devices. Additionally,
implement GEN3-specific CQE opcode decoding.
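
Below is a minimal sketch (not part of the patch; the helper name
irdma_cq_ring_bytes() is hypothetical) of how the per-CQE footprint is
chosen once 64-byte CQEs are enabled, mirroring the irdma_create_cq()
hunk in verbs.c:

        /* Hypothetical helper, for illustration only: devices advertising
         * IRDMA_FEATURE_64_BYTE_CQE use the extended 64-byte CQE
         * (struct irdma_extended_cqe); all others keep struct irdma_cqe.
         * The element count is rounded up to an even number, as the patch
         * requires for the CQ size.
         */
        static u32 irdma_cq_ring_bytes(struct irdma_sc_dev *dev, u32 entries)
        {
                bool cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags &
                                      IRDMA_FEATURE_64_BYTE_CQE;

                if (entries & 1)
                        entries++;      /* cq size must be an even number */

                return entries * (cqe_64byte_ena ?
                                  sizeof(struct irdma_extended_cqe) :
                                  sizeof(struct irdma_cqe));
        }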

Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Link: https://patch.msgid.link/20250827152545.2056-12-tatyana.e.nikolova@intel.com
Tested-by: Jacob Moroni <jmoroni@google.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/main.h
drivers/infiniband/hw/irdma/utils.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/irdma/verbs.h

diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 459343ef72b9e3e709165f535160e25116e8546c..2931d1a879e9dc7892ae35794b79d02867acec62 100644
@@ -1117,13 +1117,15 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
        struct irdma_sc_dev *dev = &rf->sc_dev;
        struct irdma_ccq_init_info info = {};
        struct irdma_ccq *ccq = &rf->ccq;
+       int ccq_size;
        int status;
 
        dev->ccq = &ccq->sc_cq;
        dev->ccq->dev = dev;
        info.dev = dev;
+       ccq_size = (rf->rdma_ver >= IRDMA_GEN_3) ? IW_GEN_3_CCQ_SIZE : IW_CCQ_SIZE;
        ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
-       ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
+       ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * ccq_size,
                                 IRDMA_CQ0_ALIGNMENT);
        ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
                                            &ccq->mem_cq.pa, GFP_KERNEL);
@@ -1140,7 +1142,7 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
        /* populate the ccq init info */
        info.cq_base = ccq->mem_cq.va;
        info.cq_pa = ccq->mem_cq.pa;
-       info.num_elem = IW_CCQ_SIZE;
+       info.num_elem = ccq_size;
        info.shadow_area = ccq->shadow_area.va;
        info.shadow_area_pa = ccq->shadow_area.pa;
        info.ceqe_mask = false;
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 3d37cd12e9f6514508660e643b53da67c4fe8deb..6922cfaac6d061dcd78d54697ede6817cf275ff9 100644
@@ -66,7 +66,8 @@ extern struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv;
 #define IRDMA_MACIP_ADD                1
 #define IRDMA_MACIP_DELETE     2
 
-#define IW_CCQ_SIZE    (IRDMA_CQP_SW_SQSIZE_2048 + 1)
+#define IW_GEN_3_CCQ_SIZE  (2 * IRDMA_CQP_SW_SQSIZE_2048 + 2)
+#define IW_CCQ_SIZE    (IRDMA_CQP_SW_SQSIZE_2048 + 2)
 #define IW_CEQ_SIZE    2048
 #define IW_AEQ_SIZE    2048
 
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 1fd09e287e6f44bc5ae1cc8c12aadda7e1f41a31..0b12a875dbe9d8de3e8bc656b2c2def3c54b182e 100644
@@ -2338,7 +2338,10 @@ bool irdma_cq_empty(struct irdma_cq *iwcq)
        u8 polarity;
 
        ukcq  = &iwcq->sc_cq.cq_uk;
-       cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+       if (ukcq->avoid_mem_cflct)
+               cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
+       else
+               cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
        get_64bit_val(cqe, 24, &qword3);
        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
 
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 2857631543b707176e1fa22a5e80293f72b1d71c..da0f56e0c89747bd096767708ccc71504aacc0a5 100644
@@ -1971,8 +1971,13 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 
        if (!iwcq->user_mode) {
                entries++;
-               if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+
+               if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct &&
+                   dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
                        entries *= 2;
+
+               if (entries & 1)
+                       entries += 1; /* cq size must be an even number */
        }
 
        info.cq_size = max(entries, 4);
@@ -2115,6 +2120,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
        unsigned long flags;
        int err_code;
        int entries = attr->cqe;
+       bool cqe_64byte_ena;
 
        err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
        if (err_code)
@@ -2138,6 +2144,9 @@ static int irdma_create_cq(struct ib_cq *ibcq,
        info.dev = dev;
        ukinfo->cq_size = max(entries, 4);
        ukinfo->cq_id = cq_num;
+       cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE ?
+                        true : false;
+       ukinfo->avoid_mem_cflct = cqe_64byte_ena;
        iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
        if (attr->comp_vector < rf->ceqs_count)
                info.ceq_id = attr->comp_vector;
@@ -2213,11 +2222,18 @@ static int irdma_create_cq(struct ib_cq *ibcq,
                }
 
                entries++;
-               if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+               if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
                        entries *= 2;
+
+               if (entries & 1)
+                       entries += 1; /* cq size must be an even number */
+
                ukinfo->cq_size = entries;
 
-               rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+               if (cqe_64byte_ena)
+                       rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
+               else
+                       rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
                iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
                iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
                                                   iwcq->kmem.size,
@@ -3784,8 +3800,12 @@ static void irdma_process_cqe(struct ib_wc *entry,
        if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
                set_ib_wc_op_sq(cq_poll_info, entry);
        } else {
-               set_ib_wc_op_rq(cq_poll_info, entry,
-                               qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
+               if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+                       set_ib_wc_op_rq(cq_poll_info, entry,
+                                       qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
+                                       true : false);
+               else
+                       set_ib_wc_op_rq_gen_3(cq_poll_info, entry);
                if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
                    cq_poll_info->stag_invalid_set) {
                        entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index cfa140b36395ae9f49a9b928baa4d5a1a0aaf336..fcb163c452528db335f7d748bc7da26a572bbdbf 100644
@@ -267,6 +267,19 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
        }
 }
 
+static inline void set_ib_wc_op_rq_gen_3(struct irdma_cq_poll_info *info,
+                                        struct ib_wc *entry)
+{
+       switch (info->op_type) {
+       case IRDMA_OP_TYPE_RDMA_WRITE:
+       case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+               entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+               break;
+       default:
+               entry->opcode = IB_WC_RECV;
+       }
+}
+
 static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
                                   struct ib_wc *entry, bool send_imm_support)
 {