RDMA/mlx5: Allow relaxed ordering read in VFs and VMs
author     Avihai Horon <avihaih@nvidia.com>
           Mon, 10 Apr 2023 13:07:53 +0000 (16:07 +0300)
committer  Leon Romanovsky <leon@kernel.org>
           Sun, 16 Apr 2023 10:29:26 +0000 (13:29 +0300)
According to the PCIe spec, the Enable Relaxed Ordering value in a VF's PCI
config space is hardwired to 0, and the PF's relaxed ordering (RO) setting
should be applied to the VF. In QEMU (and possibly other hypervisors), when
assigning VFs, the RO bit in PCI config space is not emulated properly and
is always set to 0.

Therefore, pcie_relaxed_ordering_enabled() always returns 0 for VFs and
VMs, and thus MKeys can't be created with RO read even if the PF supports
it.
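
For reference, the gate that fails here is tiny: pcie_relaxed_ordering_enabled()
just reports the Enable Relaxed Ordering bit of the PCIe Device Control
register, which is exactly the bit a VF hardwires to 0. A simplified sketch of
that helper (see drivers/pci/pci.c for the authoritative version):

/* Simplified sketch of pcie_relaxed_ordering_enabled(): report the
 * Enable Relaxed Ordering bit of the PCIe Device Control register. */
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
{
	u16 devctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &devctl);
	return !!(devctl & PCI_EXP_DEVCTL_RELAX_EN);
}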

The pcie_relaxed_ordering_enabled() check was added to avoid a syndrome when
creating an MKey with relaxed ordering (RO) enabled while the driver's
relaxed_ordering_read_pci_enabled HCA capability is out of sync with the FW.
With the new relaxed_ordering_read capability this can't happen, as it is
set regardless of the RO value in PCI config space and thus can't change
at runtime.

Hence, to allow RO read in VFs and VMs, use the new HCA capability
relaxed_ordering_read without checking pcie_relaxed_ordering_enabled().
The old capability checks are kept for backward compatibility with older
FWs.
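
Put differently, after this patch the RO-read decision for an MKey condenses
to the check below. This is a sketch for illustration only (the helper name
mkey_allows_ro_read() does not exist in the driver; the real checks are
open-coded at each call site in the diff):

/* Illustrative-only helper; the real checks are open-coded per call
 * site. RO read is granted either by the new capability, or by the
 * legacy capability combined with the PCI config space RO bit. */
static bool mkey_allows_ro_read(struct mlx5_core_dev *mdev)
{
	/* New FW capability: set regardless of the PCI config space RO
	 * bit, so it also covers VFs and VMs. */
	if (MLX5_CAP_GEN(mdev, relaxed_ordering_read))
		return true;

	/* Older FW: fall back to the PCI-config-dependent capability. */
	return MLX5_CAP_GEN(mdev, relaxed_ordering_read_pci_enabled) &&
	       pcie_relaxed_ordering_enabled(mdev->pdev);
}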

Allowing RO in VFs and VMs is valuable since it can greatly improve
performance on some setups. For example, testing the throughput of a VF on
an AMD EPYC 7763 and ConnectX-6 Dx setup showed a performance improvement
of roughly 60%.

Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Aya Levin <ayal@nvidia.com>
Link: https://lore.kernel.org/r/e7048640d66c341a8fa0465e099926e7989184bc.1681131553.git.leon@kernel.org
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/umr.c
drivers/infiniband/hw/mlx5/umr.h
drivers/net/ethernet/mellanox/mlx5/core/en_common.c

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 6a3a8e00bfaaf6fd364c1c597cc010b3eef04ea2..2017ede100a62d793f161761fede1a73d104246f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -70,9 +70,11 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
        if (acc & IB_ACCESS_RELAXED_ORDERING) {
                if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
                        MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
-               if (MLX5_CAP_GEN(dev->mdev,
-                                relaxed_ordering_read_pci_enabled) &&
-                   pcie_relaxed_ordering_enabled(dev->mdev->pdev))
+
+               if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
+                   (MLX5_CAP_GEN(dev->mdev,
+                                 relaxed_ordering_read_pci_enabled) &&
+                    pcie_relaxed_ordering_enabled(dev->mdev->pdev)))
                        MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
        }
 
@@ -792,7 +794,8 @@ static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
                ret |= IB_ACCESS_RELAXED_ORDERING;
 
        if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
-           MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled) &&
+           (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
+            MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
            !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
                ret |= IB_ACCESS_RELAXED_ORDERING;
 
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index c9e176e8ced4631c84ac8d10e3786604e97b0ed4..234bf30db7319aa63aac50aebab90ca88685f1be 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -381,7 +381,8 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
                                       unsigned int access_flags)
 {
        bool ro_read = (access_flags & IB_ACCESS_RELAXED_ORDERING) &&
-                      pcie_relaxed_ordering_enabled(dev->mdev->pdev);
+                      (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
+                       pcie_relaxed_ordering_enabled(dev->mdev->pdev));
 
        MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
        MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
index e12ecd7e079cf206fbd43a2f35bfd71d58db68ad..3799bb758e490a103438b602968fb2cd39c95181 100644
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -62,7 +62,8 @@ static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
                return false;
 
        if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
-           MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled) &&
+           (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
+            MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
            !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
                return false;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3c765a1f91a57b5bfd72181154d43f52dcb81440..1f90594499c6098d06f37f1beed887d200b1a1a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
 
 void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
 {
-       bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
        bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
-       bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read_pci_enabled);
+       bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read) ||
+                      (pcie_relaxed_ordering_enabled(mdev->pdev) &&
+                       MLX5_CAP_GEN(mdev, relaxed_ordering_read_pci_enabled));
 
-       MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
+       MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read);
        MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
 }
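
A note on how this path gets exercised from userspace (context, not part of
this patch): a verbs application requests relaxed ordering per memory region
via the optional IBV_ACCESS_RELAXED_ORDERING flag, which the kernel verbs
layer translates into the IB_ACCESS_RELAXED_ORDERING flag seen in the diff
above. A minimal sketch, assuming rdma-core's libibverbs:

#include <infiniband/verbs.h>

/* Sketch: register an MR with relaxed ordering requested. Whether the
 * resulting MKey really gets RO read now depends on the capability
 * checks above, not on the (VF-hardwired) PCI config space bit. */
static struct ibv_mr *reg_ro_mr(struct ibv_pd *pd, void *buf, size_t len)
{
	return ibv_reg_mr(pd, buf, len,
			  IBV_ACCESS_LOCAL_WRITE |
			  IBV_ACCESS_REMOTE_READ |
			  IBV_ACCESS_RELAXED_ORDERING);
}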