git.ipfire.org Git - thirdparty/linux.git/commitdiff
net/mlx5: HWS, expose polling function in header file
authorYevgeny Kliteynik <kliteyn@nvidia.com>
Sun, 11 May 2025 19:38:03 +0000 (22:38 +0300)
committerJakub Kicinski <kuba@kernel.org>
Tue, 13 May 2025 22:30:25 +0000 (15:30 -0700)
In preparation for the complex matcher, expose the function that
polls the queue for completion (mlx5hws_bwc_queue_poll) in the header
file, so that it can be used by the complex matcher code.

Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Vlad Dogaru <vdogaru@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1746992290-568936-4-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h

index 510bfbbe599181c45fd3c403680d98bd459ee685..27b6420678d83de37d09bdb7108cfa2b1443e785 100644 (file)
@@ -223,10 +223,10 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
        return 0;
 }
 
-static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
-                             u16 queue_id,
-                             u32 *pending_rules,
-                             bool drain)
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+                          u16 queue_id,
+                          u32 *pending_rules,
+                          bool drain)
 {
        unsigned long timeout = jiffies +
                                secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
@@ -361,7 +361,8 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
        if (unlikely(ret))
                return ret;
 
-       ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+       ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+                                    &expected_completions, true);
        if (unlikely(ret))
                return ret;
 
@@ -442,9 +443,8 @@ hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
        if (unlikely(ret))
                return ret;
 
-       ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
-
-       return ret;
+       return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+                                     &expected_completions, true);
 }
 
 static int
@@ -465,7 +465,8 @@ hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
        if (unlikely(ret))
                return ret;
 
-       ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+       ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+                                    &expected_completions, true);
        if (unlikely(ret))
                mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
 
@@ -651,8 +652,10 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
                                                            &bwc_matcher->rules[i]) ?
                                               NULL : list_next_entry(bwc_rules[i], list_node);
 
-                               ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
-                                                        &pending_rules[i], false);
+                               ret = mlx5hws_bwc_queue_poll(ctx,
+                                                            rule_attr.queue_id,
+                                                            &pending_rules[i],
+                                                            false);
                                if (unlikely(ret)) {
                                        mlx5hws_err(ctx,
                                                    "Moving BWC rule failed during rehash (%d)\n",
@@ -669,8 +672,8 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
                        u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
 
                        mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
-                       ret = hws_bwc_queue_poll(ctx, queue_id,
-                                                &pending_rules[i], true);
+                       ret = mlx5hws_bwc_queue_poll(ctx, queue_id,
+                                                    &pending_rules[i], true);
                        if (unlikely(ret)) {
                                mlx5hws_err(ctx,
                                            "Moving BWC rule failed during rehash (%d)\n", ret);
index bb0cf4b922ceba21c623b63e13d70abf01595763..a2aa2d5da694ae5256d05086d3a1a372bda32bb9 100644 (file)
@@ -64,6 +64,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
                                u32 flow_source,
                                struct mlx5hws_rule_attr *rule_attr);
 
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+                          u16 queue_id,
+                          u32 *pending_rules,
+                          bool drain);
+
 static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
 {
        /* Besides the control queue, half of the queues are