net/mlx5e: Add missing sanity checks for max TX WQE size
author     Maxim Mikityanskiy <maximmi@nvidia.com>
           Thu, 3 Nov 2022 06:55:42 +0000 (23:55 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 16 Nov 2022 09:04:00 +0000 (10:04 +0100)
[ Upstream commit f9c955b4fe5c8626d11b8a9b93ccc0ba77358fec ]

The commit cited below started using the firmware capability for the
maximum TX WQE size. This commit adds an important check to verify that
the driver doesn't attempt to exceed this capability, and also restores
another check mistakenly removed in the cited commit (a WQE must not
exceed the page size).

Fixes: c27bd1718c06 ("net/mlx5e: Read max WQEBBs on the SQ from firmware")
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

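Before reading the diff, it may help to see what the new compile-time bound works out to. The following standalone sketch recomputes MLX5E_MAX_TX_WQEBBS (introduced in txrx.h below) by hand. The constant values (MLX5_SEND_WQE_DS = 16 bytes, MLX5_SEND_WQE_BB = 64 bytes, MLX5E_TX_WQE_EMPTY_DS_COUNT = 2, sizeof(struct mlx5_wqe_inline_seg) = 4, INL_HDR_START_SZ = 2, VLAN_HLEN = 4, MAX_SKB_FRAGS = 17) are assumptions taken from mainline headers on 4K pages, not part of this patch:

/* Standalone sketch: recompute MLX5E_MAX_TX_WQEBBS under assumed constants. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        const int send_wqe_ds  = 16;      /* MLX5_SEND_WQE_DS: one data segment */
        const int wqebb_num_ds = 64 / 16; /* MLX5_SEND_WQEBB_NUM_DS: DS per 64B WQEBB */
        const int empty_ds     = 2;       /* MLX5E_TX_WQE_EMPTY_DS_COUNT: ctrl + eth segs */
        const int max_frags    = 17;      /* default MAX_SKB_FRAGS on 4K pages */
        const int inl_seg      = 4;       /* sizeof(struct mlx5_wqe_inline_seg) */

        /* ESP trailer (255B pad + pad len + next header) plus 16B ICV. */
        int ipsec_ds  = DIV_ROUND_UP(inl_seg + 255 + 1 + 1 + 16, send_wqe_ds); /* 18 */
        /* 366B of headers, minus the 2B already in the eth seg, plus VLAN. */
        int inline_ds = DIV_ROUND_UP(366 - 2 + 4, send_wqe_ds);                /* 23 */
        int wqebbs    = DIV_ROUND_UP(empty_ds + inline_ds + ipsec_ds +
                                     max_frags + 1, wqebb_num_ds);             /* 16 */

        printf("MLX5E_MAX_TX_WQEBBS ~= %d (%d bytes)\n", wqebbs, wqebbs * 64);
        return 0;
}

Under these assumptions a worst-case TX WQE is 16 WQEBBs, i.e. 1024 bytes, which fits comfortably in a 4K page; the checks added below enforce that this remains true against the firmware-reported capability.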
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index ff8ca7a7e1036c708c811abea1215b1c6dce3d9c..aeed165a2dec2eba7b81f3abd3a2616f80561cb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
+/* IPSEC inline data includes:
+ * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
+ *    next header.
+ * 2. ESP authentication data: 16 bytes for ICV.
+ */
+#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
+                                          255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
+
+/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
+ * encapsulations.
+ */
+#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
+                                           MLX5_SEND_WQE_DS)
+
+/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
+#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
+                                        MLX5E_MAX_TX_INLINE_DS + \
+                                        MLX5E_MAX_TX_IPSEC_DS + \
+                                        MAX_SKB_FRAGS + 1, \
+                                        MLX5_SEND_WQEBB_NUM_DS)
+
 #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
 
 static inline
@@ -424,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 
 static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
 {
+       WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+
        /* A WQE must not cross the page boundary, hence two conditions:
         * 1. Its size must not exceed the page size.
         * 2. If the WQE size is X, and the space remaining in a page is less
@@ -436,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_si
                  "wqe_size %u is greater than max SQ WQEBBs %u",
                  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
 
-
        return MLX5E_STOP_ROOM(wqe_size);
 }
 
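The WARN_ON_ONCE added above checks the precondition that makes the existing stop-room reservation sound. A minimal sketch of the reasoning, assuming (from mainline txrx.h) that MLX5E_STOP_ROOM(wqebbs) expands to roughly (wqebbs) * 2 - 1:

/* Sketch, not driver code. A WQE must not cross a page boundary: if only
 * r < wqe_size WQEBBs remain in the page, the driver fills them with r
 * one-WQEBB NOPs and starts the WQE on the next page. Worst case
 * r = wqe_size - 1, so reserving (wqe_size - 1) + wqe_size slots always
 * suffices -- provided wqe_size itself fits in one page, which is exactly
 * what the new WARN_ON_ONCE asserts for the firmware-reported maximum.
 */
static unsigned int stop_room_sketch(unsigned int wqe_size_wqebbs)
{
        return 2 * wqe_size_wqebbs - 1; /* mirrors MLX5E_STOP_ROOM(wqe_size) */
}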
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 02eb2f0fa2ae785c1022432bc64376a67fee249d..6cf6a81775a85ad9bc070a0b196f0d24e1d34e8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5514,6 +5514,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
        if (priv->fs)
                priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
+       /* Validate the max_wqe_size_sq capability. */
+       if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
+               mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n",
+                              mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS);
+               return -EIO;
+       }
+
        /* max number of channels may have changed */
        max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
        if (priv->channels.params.num_channels > max_nch) {
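The capability validated here comes from mlx5e_get_max_sq_wqebbs(), added by the cited commit c27bd1718c06. The following is a hedged reconstruction of that helper from the mainline tree (consult the tree for the authoritative body); MLX5_CAP_GEN, max_wqe_sz_sq and MLX5_SEND_WQE_MAX_WQEBBS are real mlx5 identifiers:

/* Reconstruction (not from this diff): firmware reports the maximum SQ WQE
 * size in bytes via the max_wqe_sz_sq capability; convert it to WQEBBs and
 * clamp to the architectural maximum.
 */
static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
{
        return min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
                     MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}

With the new check, firmware advertising a max_wqe_sz_sq smaller than the driver's worst-case WQE fails attach with -EIO instead of silently building oversized WQEs later.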
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 4d45150a3f8ef21247693ee32c99ac48f1c813db..a30e50d9969f7d0a8dec02b8a3131e8bc24037b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -304,6 +304,8 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
        u16 ds_cnt_inl = 0;
        u16 ds_cnt_ids = 0;
 
+       /* Sync the calculation with MLX5E_MAX_TX_WQEBBS. */
+
        if (attr->insz)
                ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
                                          MLX5_SEND_WQE_DS);
@@ -316,6 +318,9 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
                        inl += VLAN_HLEN;
 
                ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+               if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS))
+                       netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl,
+                                   (u16)MLX5E_MAX_TX_INLINE_DS);
                ds_cnt += ds_cnt_inl;
        }
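Taken together, the runtime accounting in mlx5e_sq_calc_wqe_attr() and the compile-time bound count the same descriptor segments. A summary of the correspondence (an informal sketch, not driver code; the real function also handles attr->insz and other cases):

/* Each term of the runtime DS count is bounded by a term of the macro:
 *
 *   empty WQE segments (ctrl + eth)  <= MLX5E_TX_WQE_EMPTY_DS_COUNT
 *   inline packet headers (+ VLAN)   <= MLX5E_MAX_TX_INLINE_DS
 *   IPsec trailer and ICV            <= MLX5E_MAX_TX_IPSEC_DS
 *   data pointers                    <= MAX_SKB_FRAGS + 1 (frags + linear)
 *
 * Hence ds_cnt <= MLX5E_MAX_TX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS, and any WQE
 * the driver builds stays within the capability validated at attach time.
 */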