net/mlx5: HWS, fix counting of rules in the matcher
author     Yevgeny Kliteynik <kliteyn@nvidia.com>
           Sun, 11 May 2025 19:38:07 +0000 (22:38 +0300)
committer  Jakub Kicinski <kuba@kernel.org>
           Tue, 13 May 2025 22:30:25 +0000 (15:30 -0700)
Currently, the counter that tracks the number of rules in a matcher is
incremented only when rule insertion is completed. In a multi-threaded
use case this can lead to a scenario where many rules are in the process
of being inserted into the same matcher while none of them has completed
the insertion, so the rule counter is not updated. This results in a
rule insertion failure for many of them on the first attempt, which in
turn means all of them require a rehash and must take all the queue
locks.

Fix this by incrementing the rule counter at the beginning of the
insertion process and decrementing it in case of any failure.
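
In essence, the counting moves from "count a rule once insertion has
completed" to "reserve a slot up front and release it on any failure
path". A minimal sketch of that pattern, with hypothetical matcher/rule
types and helper functions (illustrative only, not the actual mlx5 HWS
code):

        #include <linux/atomic.h>
        #include <linux/types.h>

        /* Hypothetical types and helpers, for illustration only. */
        struct matcher {
                atomic_t num_of_rules;
        };
        struct rule;

        bool rehash_size_needed(struct matcher *matcher, int num_of_rules);
        int rehash(struct matcher *matcher);
        int do_insert(struct matcher *matcher, struct rule *rule);

        static int insert_rule(struct matcher *matcher, struct rule *rule)
        {
                int num_of_rules, ret;

                /* Reserve a slot before the (possibly slow) insertion, so
                 * that concurrent inserters see an up-to-date count and a
                 * rehash is triggered at the right time.
                 */
                num_of_rules = atomic_inc_return(&matcher->num_of_rules);

                if (rehash_size_needed(matcher, num_of_rules)) {
                        ret = rehash(matcher);
                        if (ret)
                                goto dec_counter;
                }

                ret = do_insert(matcher, rule);
                if (ret)
                        goto dec_counter;

                return 0;

        dec_counter:
                /* Insertion did not complete - release the reserved slot. */
                atomic_dec(&matcher->num_of_rules);
                return ret;
        }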

Signed-off-by: Vlad Dogaru <vdogaru@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1746992290-568936-8-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c

index dce2605fc99bae23f569025b17f25ab8bcdf3042..7d991a61eeb39073afb54794b034498033f026de 100644 (file)
@@ -341,16 +341,12 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
 {
        struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
 
-       atomic_inc(&bwc_matcher->num_of_rules);
        bwc_rule->bwc_queue_idx = idx;
        list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
 }
 
 static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
 {
-       struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-
-       atomic_dec(&bwc_matcher->num_of_rules);
        list_del_init(&bwc_rule->list_node);
 }
 
@@ -404,6 +400,7 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
        mutex_lock(queue_lock);
 
        ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+       atomic_dec(&bwc_matcher->num_of_rules);
        hws_bwc_rule_list_remove(bwc_rule);
 
        mutex_unlock(queue_lock);
@@ -840,7 +837,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
        }
 
        /* check if number of rules require rehash */
-       num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
+       num_of_rules = atomic_inc_return(&bwc_matcher->num_of_rules);
 
        if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
                mutex_unlock(queue_lock);
@@ -854,6 +851,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
                                    bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
                                    bwc_matcher->size_log,
                                    ret);
+                       atomic_dec(&bwc_matcher->num_of_rules);
                        return ret;
                }
 
@@ -887,6 +885,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
 
        if (ret) {
                mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+               atomic_dec(&bwc_matcher->num_of_rules);
                return ret;
        }
 
@@ -902,6 +901,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
        if (unlikely(ret)) {
                mutex_unlock(queue_lock);
                mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+               atomic_dec(&bwc_matcher->num_of_rules);
                return ret;
        }