git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
net/mlx5e: Move async ICOSQ lock into ICOSQ struct
author: William Tu <witu@nvidia.com>
Wed, 14 Jan 2026 07:46:37 +0000 (09:46 +0200)
committer: Jakub Kicinski <kuba@kernel.org>
Mon, 19 Jan 2026 20:26:42 +0000 (12:26 -0800)
Move the async_icosq spinlock from the mlx5e_channel structure into
the mlx5e_icosq structure itself, for better encapsulation and so that
a later patch can also use it for other ICOSQ use cases.

Changes:
- Add spinlock_t lock field to struct mlx5e_icosq
- Remove async_icosq_lock field from struct mlx5e_channel
- Initialize the new lock in mlx5e_open_icosq()
- Update all lock usage in ktls_rx.c and en_main.c to use sq->lock
  instead of c->async_icosq_lock

Signed-off-by: William Tu <witu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1768376800-1607672-2-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

index ff4ab4691baf0bf2cc4b8220f566e5cee5aea6f4..ad2bdb041fa32f23c416270155c3d5a5ac846efb 100644 (file)
@@ -545,6 +545,8 @@ struct mlx5e_icosq {
        u32                        sqn;
        u16                        reserved_room;
        unsigned long              state;
+       /* icosq can be accessed from any CPU - the spinlock protects it. */
+       spinlock_t                 lock;
        struct mlx5e_ktls_resync_resp *ktls_resync;
 
        /* control path */
@@ -777,8 +779,6 @@ struct mlx5e_channel {
 
        /* Async ICOSQ */
        struct mlx5e_icosq         async_icosq;
-       /* async_icosq can be accessed from any CPU - the spinlock protects it. */
-       spinlock_t                 async_icosq_lock;
 
        /* data path - accessed per napi poll */
        const struct cpumask      *aff_mask;
index da2d1eb52c131aa4a91ea883b44615e1a050277d..8bc8231f521fa73e630bb7307e10a3f05074872c 100644 (file)
@@ -203,7 +203,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
 
        err = 0;
        sq = &c->async_icosq;
-       spin_lock_bh(&c->async_icosq_lock);
+       spin_lock_bh(&sq->lock);
 
        cseg = post_static_params(sq, priv_rx);
        if (IS_ERR(cseg))
@@ -214,7 +214,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
 
        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
 unlock:
-       spin_unlock_bh(&c->async_icosq_lock);
+       spin_unlock_bh(&sq->lock);
 
        return err;
 
@@ -277,10 +277,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
        buf->priv_rx = priv_rx;
 
-       spin_lock_bh(&sq->channel->async_icosq_lock);
+       spin_lock_bh(&sq->lock);
 
        if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
-               spin_unlock_bh(&sq->channel->async_icosq_lock);
+               spin_unlock_bh(&sq->lock);
                err = -ENOSPC;
                goto err_dma_unmap;
        }
@@ -311,7 +311,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
        icosq_fill_wi(sq, pi, &wi);
        sq->pc++;
        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
-       spin_unlock_bh(&sq->channel->async_icosq_lock);
+       spin_unlock_bh(&sq->lock);
 
        return 0;
 
@@ -413,9 +413,9 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
                return;
 
        if (!napi_if_scheduled_mark_missed(&c->napi)) {
-               spin_lock_bh(&c->async_icosq_lock);
+               spin_lock_bh(&sq->lock);
                mlx5e_trigger_irq(sq);
-               spin_unlock_bh(&c->async_icosq_lock);
+               spin_unlock_bh(&sq->lock);
        }
 }
 
@@ -772,7 +772,7 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
                clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
        spin_unlock(&ktls_resync->lock);
 
-       spin_lock(&c->async_icosq_lock);
+       spin_lock(&sq->lock);
        for (j = 0; j < i; j++) {
                struct mlx5_wqe_ctrl_seg *cseg;
 
@@ -791,7 +791,7 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
        }
        if (db_cseg)
                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
-       spin_unlock(&c->async_icosq_lock);
+       spin_unlock(&sq->lock);
 
        priv_rx->rq_stats->tls_resync_res_ok += j;
 
index 6316412c6e870f2ea15a9b0246b328c57d0d2ea7..2ca21825622b12bb0099be7705bcdfad126f0ebc 100644 (file)
@@ -2075,6 +2075,8 @@ static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params
        if (err)
                goto err_free_icosq;
 
+       spin_lock_init(&sq->lock);
+
        if (param->is_tls) {
                sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
                if (IS_ERR(sq->ktls_resync)) {
@@ -2630,8 +2632,6 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
        if (err)
                goto err_close_rx_cq;
 
-       spin_lock_init(&c->async_icosq_lock);
-
        err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
                               mlx5e_async_icosq_err_cqe_work);
        if (err)
@@ -2750,9 +2750,11 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
 
 void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
 {
-       spin_lock_bh(&c->async_icosq_lock);
-       mlx5e_trigger_irq(&c->async_icosq);
-       spin_unlock_bh(&c->async_icosq_lock);
+       struct mlx5e_icosq *async_icosq = &c->async_icosq;
+
+       spin_lock_bh(&async_icosq->lock);
+       mlx5e_trigger_irq(async_icosq);
+       spin_unlock_bh(&async_icosq->lock);
 }
 
 void mlx5e_trigger_napi_sched(struct napi_struct *napi)