net/mlx5: Expand mkey page size to support 6 bits
author     Michael Guralnik <michaelgur@nvidia.com>
           Mon, 9 Sep 2024 10:04:57 +0000 (13:04 +0300)
committer  Leon Romanovsky <leon@kernel.org>
           Wed, 11 Sep 2024 11:56:07 +0000 (14:56 +0300)
Expand the mkc log_page_size field from 5 to 6 bits so larger page sizes can
be expressed. Protect the usage of the 6th bit with the relevant capability to
ensure the new page sizes are only used with FW that supports the bit
extension.
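
For illustration only (not part of the patch): a standalone sketch of the
page-size arithmetic the wider field enables. The helper below mirrors
__mlx5_log_page_size_to_bitmap() with pgsz_shift = 0; it assumes a 64-bit
unsigned long, and its names are hypothetical and exist only in this example.

    #include <stdio.h>

    /* With an N-bit log_page_size field the largest encodable page shift is
     * (1 << N) - 1, capped at 63; the bitmap is GENMASK(largest, 0). */
    static unsigned long log_page_size_to_bitmap(unsigned int log_pgsz_bits)
    {
            unsigned long largest = (1UL << log_pgsz_bits) - 1;

            if (largest > 63)
                    largest = 63;
            return ~0UL >> (63 - largest);
    }

    int main(void)
    {
            /* 5-bit field: page shifts 0..31, page sizes up to 2 GiB. */
            printf("5 bits -> %#lx\n", log_page_size_to_bitmap(5));
            /* 6-bit field: page shifts 0..63. */
            printf("6 bits -> %#lx\n", log_page_size_to_bitmap(6));
            return 0;
    }

On a 64-bit build this prints 0xffffffff for the 5-bit case and
0xffffffffffffffff for the 6-bit case, matching GENMASK(31, 0) and
GENMASK(63, 0).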

Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Link: https://patch.msgid.link/20240909100504.29797-2-michaelgur@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
include/linux/mlx5/mlx5_ifc.h

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 5505eb70939bd7dd2b3e1fad5e9dec6f3c8b90ee..1c96f209cda65d3d79b55983f87ea954343cef3e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -63,17 +63,6 @@ __mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
        return GENMASK(largest_pg_shift, pgsz_shift);
 }
 
-/*
- * For mkc users, instead of a page_offset the command has a start_iova which
- * specifies both the page_offset and the on-the-wire IOVA
- */
-#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova)    \
-       ib_umem_find_best_pgsz(umem,                                           \
-                              __mlx5_log_page_size_to_bitmap(                 \
-                                      __mlx5_bit_sz(typ, log_pgsz_fld),       \
-                                      pgsz_shift),                            \
-                              iova)
-
 static __always_inline unsigned long
 __mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
                              unsigned int offset_shift)
@@ -1724,4 +1713,20 @@ static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
        return (port - 1) / dev->num_ports + 1;
 }
 
+/*
+ * For mkc users, instead of a page_offset the command has a start_iova which
+ * specifies both the page_offset and the on-the-wire IOVA
+ */
+static __always_inline unsigned long
+mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+                            u64 iova)
+{
+       int page_size_bits =
+               MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5) ? 6 : 5;
+       unsigned long bitmap =
+               __mlx5_log_page_size_to_bitmap(page_size_bits, 0);
+
+       return ib_umem_find_best_pgsz(umem, bitmap, iova);
+}
+
 #endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index d5b5cd73e20c066fb658ea019e6f78d7c48139bf..45d9dc9c6c8fdaa30718cdf7e6ba6613f53931fa 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1120,8 +1120,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
        if (umem->is_dmabuf)
                page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
        else
-               page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
-                                                    0, iova);
+               page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
        if (WARN_ON(!page_size))
                return ERR_PTR(-EINVAL);
 
@@ -1426,8 +1425,8 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
                mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
                                        MLX5_MKC_ACCESS_MODE_MTT);
        } else {
-               unsigned int page_size = mlx5_umem_find_best_pgsz(
-                       umem, mkc, log_page_size, 0, iova);
+               unsigned int page_size =
+                       mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
 
                mutex_lock(&dev->slow_path_mutex);
                mr = reg_create(pd, umem, iova, access_flags, page_size,
@@ -1745,8 +1744,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
        if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
                return false;
 
-       *page_size =
-               mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
+       *page_size = mlx5_umem_mkc_find_best_pgsz(dev, new_umem, iova);
        if (WARN_ON(!*page_size))
                return false;
        return (mr->mmkey.cache_ent->rb_key.ndescs) >=
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 44a3428ea34243b51d77ea5134a44ec82f1a6cd3..221820874e7a6cea1a98ed22ec6d3bc1afc86ffa 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -693,7 +693,7 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
        struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
        u32 xlt_flags = 0;
        int err;
-       unsigned int page_size;
+       unsigned long page_size;
 
        if (flags & MLX5_PF_FLAGS_ENABLE)
                xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 970c9d8473eff2254ea2ba8b068c1787362959cc..ec1117d4e44198df15708fd47bff189b1185c0ba 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1988,7 +1988,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
        u8         migratable[0x1];
        u8         reserved_at_81[0x11];
        u8         query_vuid[0x1];
-       u8         reserved_at_93[0xd];
+       u8         reserved_at_93[0x5];
+       u8         umr_log_entity_size_5[0x1];
+       u8         reserved_at_99[0x7];
 
        u8         max_reformat_insert_size[0x8];
        u8         max_reformat_insert_offset[0x8];
@@ -4212,8 +4214,7 @@ struct mlx5_ifc_mkc_bits {
 
        u8         reserved_at_1c0[0x19];
        u8         relaxed_ordering_read[0x1];
-       u8         reserved_at_1d9[0x1];
-       u8         log_page_size[0x5];
+       u8         log_page_size[0x6];
 
        u8         reserved_at_1e0[0x20];
 };