net/mlx5: Update relaxed ordering read HCA capabilities
author    Avihai Horon <avihaih@nvidia.com>
          Mon, 10 Apr 2023 13:07:52 +0000 (16:07 +0300)
committer Leon Romanovsky <leon@kernel.org>
          Sun, 16 Apr 2023 10:29:19 +0000 (13:29 +0300)
Rename existing HCA capability relaxed_ordering_read to
relaxed_ordering_read_pci_enabled. This is in accordance with a recent PRM
change to better describe the capability, as it's set only if both the
device supports relaxed ordering (RO) read and RO is enabled in PCI
config space.

In addition, add new HCA capability relaxed_ordering_read which is set
if the device supports RO read, regardless of RO in PCI config space.
This will be used in the following patch to allow RO in VFs and VMs.
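
To make the distinction concrete, here is a minimal sketch (not part of
this patch) of how a consumer could query the two bits once this change
is applied. The helper names are hypothetical; only MLX5_CAP_GEN() and
the capability names are taken from the code below:

#include <linux/mlx5/driver.h>

/*
 * Illustrative only. relaxed_ordering_read_pci_enabled keeps the old
 * semantics: set when the device supports RO reads *and* RO is enabled
 * in PCI config space. The new relaxed_ordering_read bit reports device
 * support alone, which is what a VF/VM flow can rely on.
 */
static bool mlx5_ro_read_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, relaxed_ordering_read);
}

static bool mlx5_ro_read_pci_enabled(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, relaxed_ordering_read_pci_enabled);
}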

Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Link: https://lore.kernel.org/r/caa0002fd8135086357dfcc368e2f5cc73b08480.1681131553.git.leon@kernel.org
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/umr.h
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
include/linux/mlx5/mlx5_ifc.h

drivers/infiniband/hw/mlx5/mr.c
index bd0a818ba1cd87dccd8bda1a9b1bb10ff8ce33a4..6a3a8e00bfaaf6fd364c1c597cc010b3eef04ea2 100644
@@ -70,7 +70,8 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
        if (acc & IB_ACCESS_RELAXED_ORDERING) {
                if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
                        MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
-               if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+               if (MLX5_CAP_GEN(dev->mdev,
+                                relaxed_ordering_read_pci_enabled) &&
                    pcie_relaxed_ordering_enabled(dev->mdev->pdev))
                        MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
        }
@@ -791,7 +792,7 @@ static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
                ret |= IB_ACCESS_RELAXED_ORDERING;
 
        if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
-           MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+           MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled) &&
            !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
                ret |= IB_ACCESS_RELAXED_ORDERING;
 
drivers/infiniband/hw/mlx5/umr.h
index c9d0021381a2806c4bf2c07719dcb3b32e025e14..e12ecd7e079cf206fbd43a2f35bfd71d58db68ad 100644
@@ -62,7 +62,7 @@ static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
                return false;
 
        if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
-           MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+           MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled) &&
            !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
                return false;
 
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 993af4c12d909d2723f6d7421cadad5b8aef3ddf..3c765a1f91a57b5bfd72181154d43f52dcb81440 100644
@@ -41,7 +41,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
 {
        bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
        bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
-       bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
+       bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read_pci_enabled);
 
        MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
        MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
include/linux/mlx5/mlx5_ifc.h
index e4306cd87cd77bf9ecbb29cdc2b2223b46712d77..b54339a1b1c61c2352b7c9447a40cb060de4a815 100644
@@ -1511,7 +1511,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 
        u8         log_max_eq_sz[0x8];
        u8         relaxed_ordering_write[0x1];
-       u8         relaxed_ordering_read[0x1];
+       u8         relaxed_ordering_read_pci_enabled[0x1];
        u8         log_max_mkey[0x6];
        u8         reserved_at_f0[0x6];
        u8         terminate_scatter_list_mkey[0x1];
@@ -1727,7 +1727,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 
        u8         reserved_at_320[0x3];
        u8         log_max_transport_domain[0x5];
-       u8         reserved_at_328[0x3];
+       u8         reserved_at_328[0x2];
+       u8         relaxed_ordering_read[0x1];
        u8         log_max_pd[0x5];
        u8         reserved_at_330[0x9];
        u8         q_counter_aggregation[0x1];