git.ipfire.org Git - thirdparty/linux.git/commitdiff
net/mlx5: Verify support for scheduling element and TSAR type
author: Carolina Jubran <cjubran@nvidia.com>
Mon, 5 Aug 2024 10:13:03 +0000 (13:13 +0300)
committer: Saeed Mahameed <saeedm@nvidia.com>
Mon, 9 Sep 2024 19:39:57 +0000 (12:39 -0700)
Before creating a scheduling element in a NIC or E-Switch scheduler,
ensure that the requested element type is supported. If the element is
of type Transmit Scheduling Arbiter (TSAR), also verify that the
specific TSAR type is supported.

Fixes: 214baf22870c ("net/mlx5e: Support HTB offload")
Fixes: 85c5f7c9200e ("net/mlx5: E-switch, Create QoS on demand")
Fixes: 0fe132eac38c ("net/mlx5: E-switch, Allow to add vports to rate groups")
Signed-off-by: Carolina Jubran <cjubran@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
drivers/net/ethernet/mellanox/mlx5/core/qos.c

index 997c412a81afe816645af69f5f95a1e1f60de517..02a3563f51ad2697af9485b0e4947e750bb5d08f 100644 (file)
@@ -312,6 +312,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
        return err;
 }
 
+static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
+{
+       switch (type) {
+       case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+               return MLX5_CAP_QOS(dev, esw_element_type) &
+                      ELEMENT_TYPE_CAP_MASK_TSAR;
+       case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+               return MLX5_CAP_QOS(dev, esw_element_type) &
+                      ELEMENT_TYPE_CAP_MASK_VPORT;
+       case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+               return MLX5_CAP_QOS(dev, esw_element_type) &
+                      ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+       case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+               return MLX5_CAP_QOS(dev, esw_element_type) &
+                      ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+       }
+       return false;
+}
+
 static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
                                              struct mlx5_vport *vport,
                                              u32 max_rate, u32 bw_share)
@@ -323,6 +342,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
        void *vport_elem;
        int err;
 
+       if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
+               return -EOPNOTSUPP;
+
        parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
        MLX5_SET(scheduling_context, sched_ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
@@ -533,25 +555,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
        return err;
 }
 
-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
-{
-       switch (type) {
-       case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
-               return MLX5_CAP_QOS(dev, esw_element_type) &
-                      ELEMENT_TYPE_CAP_MASK_TSAR;
-       case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
-               return MLX5_CAP_QOS(dev, esw_element_type) &
-                      ELEMENT_TYPE_CAP_MASK_VPORT;
-       case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
-               return MLX5_CAP_QOS(dev, esw_element_type) &
-                      ELEMENT_TYPE_CAP_MASK_VPORT_TC;
-       case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
-               return MLX5_CAP_QOS(dev, esw_element_type) &
-                      ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
-       }
-       return false;
-}
-
 static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
 {
        u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
@@ -562,7 +565,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
        if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
                return -EOPNOTSUPP;
 
-       if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+       if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
+           !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
                return -EOPNOTSUPP;
 
        MLX5_SET(scheduling_context, tsar_ctx, element_type,
index 8bce730b5c5befdc4e553810eae0ab1400fe836f..db2bd3ad63ba36b5a3158f15c1932131fe8ed768 100644 (file)
@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
 {
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
 
+       if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
+               return -EOPNOTSUPP;
+
        MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
        MLX5_SET(scheduling_context, sched_ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
        void *attr;
 
+       if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
+           !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
+               return -EOPNOTSUPP;
+
        MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
        MLX5_SET(scheduling_context, sched_ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);