git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
RDMA/irdma: Add support for GEN4 hardware
author Jacob Moroni <jmoroni@google.com>
Mon, 16 Mar 2026 18:39:49 +0000 (13:39 -0500)
committer Leon Romanovsky <leon@kernel.org>
Mon, 30 Mar 2026 17:47:37 +0000 (13:47 -0400)
GEN4 hardware is similar to GEN3 and requires only a few special cases.

Signed-off-by: Jacob Moroni <jmoroni@google.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/irdma/ctrl.c
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/ig3rdma_hw.c
drivers/infiniband/hw/irdma/irdma.h

index 13820f1a48a40325cf5e1c078d7a23a596fb47f0..335ae3c82e1733bdaeefbda7807e0dabaeb3a52d 100644 (file)
@@ -6465,6 +6465,7 @@ static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
                icrdma_init_hw(dev);
                break;
        case IRDMA_GEN_3:
+       case IRDMA_GEN_4:
                ig3rdma_init_hw(dev);
                break;
        }
index 7fad9dd9c7d2354efb903733ffa3fd8ad93a502b..f9be467d137f823391b9756d74f55c9818a8fa5c 100644 (file)
@@ -1082,6 +1082,7 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
                cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
                break;
        case IRDMA_GEN_3:
+       case IRDMA_GEN_4:
                cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_3;
                cqp_init_info.ts_override = 1;
                break;
@@ -1508,7 +1509,7 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
                   hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
        aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
        /* GEN_3 does not support virtual AEQ. Cap at max Kernel alloc size */
-       if (rf->rdma_ver == IRDMA_GEN_3)
+       if (rf->rdma_ver >= IRDMA_GEN_3)
                aeq_size = min(aeq_size, (u32)((PAGE_SIZE << MAX_PAGE_ORDER) /
                               sizeof(struct irdma_sc_aeqe)));
        aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
@@ -1518,7 +1519,7 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
                                         GFP_KERNEL | __GFP_NOWARN);
        if (aeq->mem.va)
                goto skip_virt_aeq;
-       else if (rf->rdma_ver == IRDMA_GEN_3)
+       else if (rf->rdma_ver >= IRDMA_GEN_3)
                return -ENOMEM;
 
        /* physically mapped aeq failed. setup virtual aeq */
@@ -2192,8 +2193,13 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
        set_bit(2, rf->allocated_pds);
 
        INIT_LIST_HEAD(&rf->mc_qht_list.list);
-       /* stag index mask has a minimum of 14 bits */
-       mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
+
+       if (rf->rdma_ver >= IRDMA_GEN_4)
+               mrdrvbits = 24 - max(get_count_order(rf->max_mr), 16);
+       else
+               /* stag index mask has a minimum of 14 bits */
+               mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
+
        rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
 
        return 0;
index 2e8bb475e22a94a286945a08e08f82e7b38b3d33..f0361675c2de19414c9806d6a2525f445d0de37a 100644 (file)
@@ -113,7 +113,6 @@ void ig3rdma_init_hw(struct irdma_sc_dev *dev)
        dev->irq_ops = &ig3rdma_irq_ops;
        dev->hw_stats_map = ig3rdma_hw_stat_map;
 
-       dev->hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_3;
        dev->hw_attrs.uk_attrs.max_hw_wq_frags = IG3RDMA_MAX_WQ_FRAGMENT_COUNT;
        dev->hw_attrs.uk_attrs.max_hw_read_sges = IG3RDMA_MAX_SGE_RD;
        dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
index ff938a01d70c4ddbcfe288c9a5cab5f1ce265f77..b5ce515f4ee834754e3b270766aa5eb78a23a185 100644 (file)
@@ -119,6 +119,7 @@ enum irdma_vers {
        IRDMA_GEN_1,
        IRDMA_GEN_2,
        IRDMA_GEN_3,
+       IRDMA_GEN_4,
        IRDMA_GEN_NEXT,
        IRDMA_GEN_MAX = IRDMA_GEN_NEXT-1
 };