git.ipfire.org Git - thirdparty/linux.git/commitdiff
RDMA/mana_ib: Add support of 4M, 1G, and 2G pages
author: Konstantin Taranov <kotaranov@microsoft.com>
Mon, 14 Apr 2025 09:00:34 +0000 (02:00 -0700)
committer: Leon Romanovsky <leon@kernel.org>
Sun, 20 Apr 2025 10:36:26 +0000 (06:36 -0400)
Check the PF capability flag to determine whether the 4M, 1G, and 2G pages
are supported. Add these page sizes to mana_ib, if supported.

Define possible page sizes in enum gdma_page_type and
remove unused enum atb_page_size.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://patch.msgid.link/1744621234-26114-4-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mana/main.c
drivers/infiniband/hw/mana/mana_ib.h
drivers/net/ethernet/microsoft/mana/gdma_main.c
include/net/mana/gdma.h

index eda9c5b971dee6a29c653535550bb42e9a4c1665..bb0f685babe6032c3c0ead54a80d5400d9862f54 100644 (file)
@@ -479,7 +479,7 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 {
        unsigned long page_sz;
 
-       page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+       page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
        if (!page_sz) {
                ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
                return -EINVAL;
@@ -494,7 +494,7 @@ int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_ume
        unsigned long page_sz;
 
        /* Hardware requires dma region to align to chosen page size */
-       page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+       page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
        if (!page_sz) {
                ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
                return -EINVAL;
@@ -577,7 +577,7 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
 
        memset(props, 0, sizeof(*props));
        props->max_mr_size = MANA_IB_MAX_MR_SIZE;
-       props->page_size_cap = PAGE_SZ_BM;
+       props->page_size_cap = dev->adapter_caps.page_size_cap;
        props->max_qp = dev->adapter_caps.max_qp_count;
        props->max_qp_wr = dev->adapter_caps.max_qp_wr;
        props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
@@ -696,6 +696,10 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
        caps->max_recv_sge_count = resp.max_recv_sge_count;
        caps->feature_flags = resp.feature_flags;
 
+       caps->page_size_cap = PAGE_SZ_BM;
+       if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
+               caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
+
        return 0;
 }
 
index 6903946677e556a74aeb780b7f0396157574db02..f0dbd90b860076f3c0b27eff21f1ac63d0c046e6 100644 (file)
@@ -60,6 +60,7 @@ struct mana_ib_adapter_caps {
        u32 max_recv_sge_count;
        u32 max_inline_data_size;
        u64 feature_flags;
+       u64 page_size_cap;
 };
 
 struct mana_ib_queue {
index 4ffaf75888852741c1ed99201f77c730469f319c..8ee1aa3a7ec3d83d0721d28314d85e45b568cc8b 100644 (file)
@@ -964,6 +964,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
                        err, resp.hdr.status);
                return err ? err : -EPROTO;
        }
+       gc->pf_cap_flags1 = resp.pf_cap_flags1;
        if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
                err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
                if (err) {
index 239a7003255092df85ac19cc3470bcd7713fa527..ffa9820f14ba5493666aa7293199dc21f5fb8a59 100644 (file)
@@ -407,6 +407,8 @@ struct gdma_context {
 
        /* Azure RDMA adapter */
        struct gdma_dev         mana_ib;
+
+       u64 pf_cap_flags1;
 };
 
 static inline bool mana_gd_is_mana(struct gdma_dev *gd)
@@ -553,6 +555,7 @@ enum {
  */
 #define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
 #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
+#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
 #define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
 
 /* Driver can handle holes (zeros) in the device list */
@@ -707,20 +710,6 @@ struct gdma_query_hwc_timeout_resp {
        u32 reserved;
 };
 
-enum atb_page_size {
-       ATB_PAGE_SIZE_4K,
-       ATB_PAGE_SIZE_8K,
-       ATB_PAGE_SIZE_16K,
-       ATB_PAGE_SIZE_32K,
-       ATB_PAGE_SIZE_64K,
-       ATB_PAGE_SIZE_128K,
-       ATB_PAGE_SIZE_256K,
-       ATB_PAGE_SIZE_512K,
-       ATB_PAGE_SIZE_1M,
-       ATB_PAGE_SIZE_2M,
-       ATB_PAGE_SIZE_MAX,
-};
-
 enum gdma_mr_access_flags {
        GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
        GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),