git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
net/mlx5e: Move async ICOSQ to dynamic allocation
author: William Tu <witu@nvidia.com>
Wed, 14 Jan 2026 07:46:39 +0000 (09:46 +0200)
committer: Jakub Kicinski <kuba@kernel.org>
Mon, 19 Jan 2026 20:26:42 +0000 (12:26 -0800)
Dynamically allocate the async ICOSQ. ICO (Internal Communication
Operations) is used by the driver to communicate with the HW, and it's
not used for traffic. Currently the mlx5 driver has sync and async
ICO send queues. The async ICOSQ means that it's not necessarily
under NAPI context protection. This patch is in preparation for a
later patch that detects its usage and enables it when necessary.

Signed-off-by: William Tu <witu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1768376800-1607672-4-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c

index 8f0dd4ccba0f5d8ccfc653a2a6eca5385ebd2cf0..29ad846f8de7727ef91aef55a88c44b4cf8fed94 100644 (file)
@@ -782,7 +782,7 @@ struct mlx5e_channel {
        struct mlx5e_xdpsq         xsksq;
 
        /* Async ICOSQ */
-       struct mlx5e_icosq         async_icosq;
+       struct mlx5e_icosq        *async_icosq;
 
        /* data path - accessed per napi poll */
        const struct cpumask      *aff_mask;
index a59199ed590da9c5a4d9cecdc66707e8d6a9f43d..9e33156fac8adc6a2a2509db2b86194151d98711 100644 (file)
@@ -26,10 +26,12 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
                 * active and not polled by NAPI. Return 0, because the upcoming
                 * activate will trigger the IRQ for us.
                 */
-               if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->async_icosq.state)))
+               if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED,
+                                      &c->async_icosq->state)))
                        return 0;
 
-               if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
+               if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX,
+                                    &c->async_icosq->state))
                        return 0;
 
                mlx5e_trigger_napi_icosq(c);
index 8bc8231f521fa73e630bb7307e10a3f05074872c..5d8fe252799ecae90e45fe46f70d0d553e2ec074 100644 (file)
@@ -202,7 +202,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
        int err;
 
        err = 0;
-       sq = &c->async_icosq;
+       sq = c->async_icosq;
        spin_lock_bh(&sq->lock);
 
        cseg = post_static_params(sq, priv_rx);
@@ -344,7 +344,7 @@ static void resync_handle_work(struct work_struct *work)
        }
 
        c = resync->priv->channels.c[priv_rx->rxq];
-       sq = &c->async_icosq;
+       sq = c->async_icosq;
 
        if (resync_post_get_progress_params(sq, priv_rx)) {
                priv_rx->rq_stats->tls_resync_req_skip++;
@@ -371,7 +371,7 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
        struct mlx5e_icosq *sq;
        bool trigger_poll;
 
-       sq = &c->async_icosq;
+       sq = c->async_icosq;
        ktls_resync = sq->ktls_resync;
        trigger_poll = false;
 
@@ -753,7 +753,7 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
        LIST_HEAD(local_list);
        int i, j;
 
-       sq = &c->async_icosq;
+       sq = c->async_icosq;
 
        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;
index cb08799769ee19d8437a289f0cf6841b52ce0883..4022c7e78a2e8bca635be8b65ff1e1aec5ac2f14 100644 (file)
@@ -50,7 +50,8 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget);
 static inline bool
 mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
 {
-       return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
+       return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+                                 &c->async_icosq->state);
 }
 
 static inline void
index 116d46116422934e4126866e582fcbb543a45f8e..b7c0645d2b6c822ee90f851ca43268aa07c7b94c 100644 (file)
@@ -2589,6 +2589,47 @@ static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
        return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
 }
 
+static struct mlx5e_icosq *
+mlx5e_open_async_icosq(struct mlx5e_channel *c,
+                      struct mlx5e_params *params,
+                      struct mlx5e_channel_param *cparam,
+                      struct mlx5e_create_cq_param *ccp)
+{
+       struct dim_cq_moder icocq_moder = {0, 0};
+       struct mlx5e_icosq *async_icosq;
+       int err;
+
+       async_icosq = kvzalloc_node(sizeof(*async_icosq), GFP_KERNEL,
+                                   cpu_to_node(c->cpu));
+       if (!async_icosq)
+               return ERR_PTR(-ENOMEM);
+
+       err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, ccp,
+                           &async_icosq->cq);
+       if (err)
+               goto err_free_async_icosq;
+
+       err = mlx5e_open_icosq(c, params, &cparam->async_icosq, async_icosq,
+                              mlx5e_async_icosq_err_cqe_work);
+       if (err)
+               goto err_close_async_icosq_cq;
+
+       return async_icosq;
+
+err_close_async_icosq_cq:
+       mlx5e_close_cq(&async_icosq->cq);
+err_free_async_icosq:
+       kvfree(async_icosq);
+       return ERR_PTR(err);
+}
+
+static void mlx5e_close_async_icosq(struct mlx5e_icosq *async_icosq)
+{
+       mlx5e_close_icosq(async_icosq);
+       mlx5e_close_cq(&async_icosq->cq);
+       kvfree(async_icosq);
+}
+
 static int mlx5e_open_queues(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_channel_param *cparam)
@@ -2600,15 +2641,10 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 
        mlx5e_build_create_cq_param(&ccp, c);
 
-       err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp,
-                           &c->async_icosq.cq);
-       if (err)
-               return err;
-
        err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
                            &c->icosq.cq);
        if (err)
-               goto err_close_async_icosq_cq;
+               return err;
 
        err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
        if (err)
@@ -2632,10 +2668,11 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
        if (err)
                goto err_close_rx_cq;
 
-       err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
-                              mlx5e_async_icosq_err_cqe_work);
-       if (err)
+       c->async_icosq = mlx5e_open_async_icosq(c, params, cparam, &ccp);
+       if (IS_ERR(c->async_icosq)) {
+               err = PTR_ERR(c->async_icosq);
                goto err_close_rq_xdpsq_cq;
+       }
 
        mutex_init(&c->icosq_recovery_lock);
 
@@ -2671,7 +2708,7 @@ err_close_icosq:
        mlx5e_close_icosq(&c->icosq);
 
 err_close_async_icosq:
-       mlx5e_close_icosq(&c->async_icosq);
+       mlx5e_close_async_icosq(c->async_icosq);
 
 err_close_rq_xdpsq_cq:
        if (c->xdp)
@@ -2690,9 +2727,6 @@ err_close_tx_cqs:
 err_close_icosq_cq:
        mlx5e_close_cq(&c->icosq.cq);
 
-err_close_async_icosq_cq:
-       mlx5e_close_cq(&c->async_icosq.cq);
-
        return err;
 }
 
@@ -2706,7 +2740,7 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
        mlx5e_close_sqs(c);
        mlx5e_close_icosq(&c->icosq);
        mutex_destroy(&c->icosq_recovery_lock);
-       mlx5e_close_icosq(&c->async_icosq);
+       mlx5e_close_async_icosq(c->async_icosq);
        if (c->xdp)
                mlx5e_close_cq(&c->rq_xdpsq.cq);
        mlx5e_close_cq(&c->rq.cq);
@@ -2714,7 +2748,6 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
                mlx5e_close_xdpredirect_sq(c->xdpsq);
        mlx5e_close_tx_cqs(c);
        mlx5e_close_cq(&c->icosq.cq);
-       mlx5e_close_cq(&c->async_icosq.cq);
 }
 
 static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
@@ -2879,7 +2912,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_activate_txqsq(&c->sq[tc]);
        mlx5e_activate_icosq(&c->icosq);
-       mlx5e_activate_icosq(&c->async_icosq);
+       mlx5e_activate_icosq(c->async_icosq);
 
        if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
                mlx5e_activate_xsk(c);
@@ -2900,7 +2933,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
        else
                mlx5e_deactivate_rq(&c->rq);
 
-       mlx5e_deactivate_icosq(&c->async_icosq);
+       mlx5e_deactivate_icosq(c->async_icosq);
        mlx5e_deactivate_icosq(&c->icosq);
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_deactivate_txqsq(&c->sq[tc]);
index 76108299ea57deff374bc0cd1ea782c1aa668cea..57c54265dbda224354faa11d1e5e6f4c7b45d88f 100644 (file)
@@ -180,11 +180,12 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
        busy |= work_done == budget;
 
        mlx5e_poll_ico_cq(&c->icosq.cq);
-       if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
+       if (mlx5e_poll_ico_cq(&c->async_icosq->cq))
                /* Don't clear the flag if nothing was polled to prevent
                 * queueing more WQEs and overflowing the async ICOSQ.
                 */
-               clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
+               clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX,
+                         &c->async_icosq->state);
 
        /* Keep after async ICOSQ CQ poll */
        if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
@@ -236,7 +237,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 
        mlx5e_cq_arm(&rq->cq);
        mlx5e_cq_arm(&c->icosq.cq);
-       mlx5e_cq_arm(&c->async_icosq.cq);
+       mlx5e_cq_arm(&c->async_icosq->cq);
        if (c->xdpsq)
                mlx5e_cq_arm(&c->xdpsq->cq);