net/mlx5: Expose HW bits for Memory scheme ODP
author	Michael Guralnik <michaelgur@nvidia.com>
	Mon, 9 Sep 2024 10:04:58 +0000 (13:04 +0300)
committer	Leon Romanovsky <leon@kernel.org>
	Wed, 11 Sep 2024 11:56:12 +0000 (14:56 +0300)
Expose the IFC bits needed to support the new memory scheme for on-demand
paging. Change the macro that reads ODP capabilities so it can read from
the new IFC layout, and adjust the code in the upper layers so it still
compiles against it.
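
For illustration only (these lines are not part of the patch): the macro
change mirrors what internal_fill_odp_caps() does below, where per-transport
fields that used to be read straight from odp_cap are now read through the
transport_page_fault_scheme_cap sub-struct via the new helper:

        /* Old layout: per-transport caps sit at the top level of odp_cap. */
        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

        /* New layout: the same field, now nested under the transport
         * page-fault scheme and read via MLX5_CAP_ODP_SCHEME().
         */
        if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.write))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;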

Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Link: https://patch.msgid.link/20240909100504.29797-3-michaelgur@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mlx5/odp.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
include/linux/mlx5/device.h
include/linux/mlx5/mlx5_ifc.h

index 221820874e7a6cea1a98ed22ec6d3bc1afc86ffa..300504bf79d7ab5428089f51cf62879c8ff21ba7 100644 (file)
@@ -332,46 +332,46 @@ static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
        else
                dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
 
-       if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.send))
                caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
 
-       if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.srq_receive))
                caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.send))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.receive))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.write))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.read))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.atomic))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.srq_receive))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.send))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.receive))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.write))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.read))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.atomic))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.srq_receive))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
 
        if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
@@ -388,13 +388,17 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
        int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
                     pfault->wqe.wq_num : pfault->token;
        u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
+       void *info;
        int err;
 
        MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
-       MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
-       MLX5_SET(page_fault_resume_in, in, token, pfault->token);
-       MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
-       MLX5_SET(page_fault_resume_in, in, error, !!error);
+
+       info = MLX5_ADDR_OF(page_fault_resume_in, in,
+                           page_fault_info.trans_page_fault_info);
+       MLX5_SET(trans_page_fault_info, info, page_fault_type, pfault->type);
+       MLX5_SET(trans_page_fault_info, info, fault_token, pfault->token);
+       MLX5_SET(trans_page_fault_info, info, wq_number, wq_num);
+       MLX5_SET(trans_page_fault_info, info, error, !!error);
 
        err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
        if (err)
index 5b7e6f4b5c7ea10efd4b47eb621726482e83bb72..cc2aa46cff0477fdbbe492924d38407818eae808 100644 (file)
@@ -479,20 +479,20 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
                }                                                              \
        } while (0)
 
-       ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
-       ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.ud_odp_caps.srq_receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.rc_odp_caps.srq_receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.srq_receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.send);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.write);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.read);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.atomic);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.srq_receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.send);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.write);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.read);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.atomic);
 
        if (!do_set)
                return 0;
index ba875a619b971c03ac5ccc090a8a761de869f82a..bd081f276654ecd390754470c5a9d916100472b6 100644 (file)
@@ -1369,6 +1369,10 @@ enum mlx5_qcam_feature_groups {
 #define MLX5_CAP_ODP(mdev, cap)\
        MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
 
+#define MLX5_CAP_ODP_SCHEME(mdev, cap)                       \
+       MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+                transport_page_fault_scheme_cap.cap)
+
 #define MLX5_CAP_ODP_MAX(mdev, cap)\
        MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
 
index ec1117d4e44198df15708fd47bff189b1185c0ba..3e3336bb9191d0b5fa072136fdf19586ac52d1f8 100644 (file)
@@ -1326,11 +1326,13 @@ struct mlx5_ifc_atomic_caps_bits {
        u8         reserved_at_e0[0x720];
 };
 
-struct mlx5_ifc_odp_cap_bits {
+struct mlx5_ifc_odp_scheme_cap_bits {
        u8         reserved_at_0[0x40];
 
        u8         sig[0x1];
-       u8         reserved_at_41[0x1f];
+       u8         reserved_at_41[0x4];
+       u8         page_prefetch[0x1];
+       u8         reserved_at_46[0x1a];
 
        u8         reserved_at_60[0x20];
 
@@ -1344,7 +1346,20 @@ struct mlx5_ifc_odp_cap_bits {
 
        struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
 
-       u8         reserved_at_120[0x6E0];
+       u8         reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+       struct mlx5_ifc_odp_scheme_cap_bits transport_page_fault_scheme_cap;
+
+       struct mlx5_ifc_odp_scheme_cap_bits memory_page_fault_scheme_cap;
+
+       u8         reserved_at_400[0x200];
+
+       u8         mem_page_fault[0x1];
+       u8         reserved_at_601[0x1f];
+
+       u8         reserved_at_620[0x1e0];
 };
 
 struct mlx5_ifc_tls_cap_bits {
@@ -2034,7 +2049,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
        u8         min_mkey_log_entity_size_fixed_buffer[0x5];
        u8         ec_vf_vport_base[0x10];
 
-       u8         reserved_at_3a0[0x10];
+       u8         reserved_at_3a0[0xa];
+       u8         max_mkey_log_entity_size_mtt[0x6];
        u8         max_rqt_vhca_id[0x10];
 
        u8         reserved_at_3c0[0x20];
@@ -7258,6 +7274,30 @@ struct mlx5_ifc_qp_2err_in_bits {
        u8         reserved_at_60[0x20];
 };
 
+struct mlx5_ifc_trans_page_fault_info_bits {
+       u8         error[0x1];
+       u8         reserved_at_1[0x4];
+       u8         page_fault_type[0x3];
+       u8         wq_number[0x18];
+
+       u8         reserved_at_20[0x8];
+       u8         fault_token[0x18];
+};
+
+struct mlx5_ifc_mem_page_fault_info_bits {
+       u8          error[0x1];
+       u8          reserved_at_1[0xf];
+       u8          fault_token_47_32[0x10];
+
+       u8          fault_token_31_0[0x20];
+};
+
+union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits {
+       struct mlx5_ifc_trans_page_fault_info_bits trans_page_fault_info;
+       struct mlx5_ifc_mem_page_fault_info_bits mem_page_fault_info;
+       u8          reserved_at_0[0x40];
+};
+
 struct mlx5_ifc_page_fault_resume_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
@@ -7274,13 +7314,8 @@ struct mlx5_ifc_page_fault_resume_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         error[0x1];
-       u8         reserved_at_41[0x4];
-       u8         page_fault_type[0x3];
-       u8         wq_number[0x18];
-
-       u8         reserved_at_60[0x8];
-       u8         token[0x18];
+       union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits
+               page_fault_info;
 };
 
 struct mlx5_ifc_nop_out_bits {
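
A minimal consumer-side sketch, for illustration only and not part of this
patch (the helper name is hypothetical; later patches in the series add the
real users). The new mem_page_fault bit and the memory_page_fault_scheme_cap
sub-struct live directly in odp_cap, so they are reachable through the
existing MLX5_CAP_ODP() macro, while transport-scheme fields go through the
new MLX5_CAP_ODP_SCHEME() wrapper:

/* Hypothetical helper showing how the new capability bits could be queried. */
static bool mlx5_mem_scheme_odp_supported(struct mlx5_core_dev *mdev)
{
        /* Global bit gating the memory page-fault scheme. */
        if (!MLX5_CAP_ODP(mdev, mem_page_fault))
                return false;

        /* Memory-scheme fields are nested one level down in odp_cap;
         * this example additionally requires page prefetch support.
         */
        return MLX5_CAP_ODP(mdev, memory_page_fault_scheme_cap.page_prefetch);
}

A transport-scheme check keeps using the new wrapper instead, e.g.
MLX5_CAP_ODP_SCHEME(mdev, ud_odp_caps.srq_receive).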