u32 sqn;
u16 reserved_room;
unsigned long state;
+ /* An icosq may be accessed from any CPU - the spinlock protects it. */
+ spinlock_t lock;
struct mlx5e_ktls_resync_resp *ktls_resync;
/* control path */
/* Async ICOSQ */
struct mlx5e_icosq async_icosq;
- /* async_icosq can be accessed from any CPU - the spinlock protects it. */
- spinlock_t async_icosq_lock;
/* data path - accessed per napi poll */
const struct cpumask *aff_mask;
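With the lock embedded in struct mlx5e_icosq, any path that already holds an SQ pointer can serialize posting on the queue itself instead of reaching back through the channel. The hunks below all collapse to one pattern; here is a minimal sketch of it, using only helpers visible in this patch (the wrapper name icosq_post_locked and the post() callback are illustrative, not part of the driver):

/* Sketch only: post a single WQE under the SQ's own lock.
 * mlx5e_notify_hw() is the existing doorbell helper; post() stands in
 * for callbacks such as post_static_params() in the hunks below.
 */
static int icosq_post_locked(struct mlx5e_icosq *sq,
			     struct mlx5_wqe_ctrl_seg *(*post)(struct mlx5e_icosq *sq))
{
	struct mlx5_wqe_ctrl_seg *cseg;
	int err = 0;

	spin_lock_bh(&sq->lock);	/* _bh: the lock is also taken in softirq */
	cseg = post(sq);
	if (IS_ERR(cseg)) {
		err = PTR_ERR(cseg);
		goto unlock;
	}
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
	spin_unlock_bh(&sq->lock);
	return err;
}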
err = 0;
sq = &c->async_icosq;
- spin_lock_bh(&c->async_icosq_lock);
+ spin_lock_bh(&sq->lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg))
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
- spin_unlock_bh(&c->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
return err;
buf->priv_rx = priv_rx;
- spin_lock_bh(&sq->channel->async_icosq_lock);
+ spin_lock_bh(&sq->lock);
if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
- spin_unlock_bh(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
err = -ENOSPC;
goto err_dma_unmap;
}
icosq_fill_wi(sq, pi, &wi);
sq->pc++;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- spin_unlock_bh(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
return 0;
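One detail worth noting in the hunk above: mlx5e_icosq_can_post_wqe() is checked only after the lock is taken. The producer counter (sq->pc) advances under this same lock, so testing for room outside it could race with a concurrent poster; bailing out with -ENOSPC before any slot is consumed also keeps the error path a plain DMA unmap.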
return;
if (!napi_if_scheduled_mark_missed(&c->napi)) {
- spin_lock_bh(&c->async_icosq_lock);
+ spin_lock_bh(&sq->lock);
mlx5e_trigger_irq(sq);
- spin_unlock_bh(&c->async_icosq_lock);
+ spin_unlock_bh(&sq->lock);
}
}
clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
spin_unlock(&ktls_resync->lock);
- spin_lock(&c->async_icosq_lock);
+ spin_lock(&sq->lock);
for (j = 0; j < i; j++) {
struct mlx5_wqe_ctrl_seg *cseg;
}
if (db_cseg)
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock(&sq->lock);
priv_rx->rq_stats->tls_resync_res_ok += j;
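The resync-list handler above runs from napi poll, i.e. softirq context, which is why it can take the plain spin_lock() variant; the process-context callers in the other hunks must use spin_lock_bh() so that a softirq on the same CPU cannot interrupt them and deadlock on the held lock. A compressed sketch of the two flavors, assuming only identifiers from this patch (the function names are illustrative):

/* Sketch: the same per-SQ lock taken from two contexts. */
static void kick_from_napi(struct mlx5e_icosq *sq)
{
	spin_lock(&sq->lock);		/* softirq: BHs already disabled */
	mlx5e_trigger_irq(sq);
	spin_unlock(&sq->lock);
}

static void kick_from_process_context(struct mlx5e_icosq *sq)
{
	spin_lock_bh(&sq->lock);	/* mask BHs to avoid self-deadlock */
	mlx5e_trigger_irq(sq);
	spin_unlock_bh(&sq->lock);
}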
if (err)
goto err_free_icosq;
+ spin_lock_init(&sq->lock);
+
if (param->is_tls) {
sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
if (IS_ERR(sq->ktls_resync)) {
if (err)
goto err_close_rx_cq;
- spin_lock_init(&c->async_icosq_lock);
-
err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
mlx5e_async_icosq_err_cqe_work);
if (err)
void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
{
- spin_lock_bh(&c->async_icosq_lock);
- mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock_bh(&c->async_icosq_lock);
+ struct mlx5e_icosq *async_icosq = &c->async_icosq;
+
+ spin_lock_bh(&async_icosq->lock);
+ mlx5e_trigger_irq(async_icosq);
+ spin_unlock_bh(&async_icosq->lock);
}
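Hoisting &c->async_icosq into a local pointer here is cosmetic, but it makes the lock/unlock pair visibly operate on the same object being triggered, which is the point of the refactor: the lock now travels with the SQ, not with the channel.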
void mlx5e_trigger_napi_sched(struct napi_struct *napi)