	return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
}
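+/* Allocate the async ICOSQ, then open its CQ and the SQ itself.
+ * Returns the new ICOSQ on success; on failure, undoes the CQ open and
+ * the allocation and returns an ERR_PTR.
+ */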
+static struct mlx5e_icosq *
+mlx5e_open_async_icosq(struct mlx5e_channel *c,
+		       struct mlx5e_params *params,
+		       struct mlx5e_channel_param *cparam,
+		       struct mlx5e_create_cq_param *ccp)
+{
+	struct dim_cq_moder icocq_moder = {0, 0};
+	struct mlx5e_icosq *async_icosq;
+	int err;
+
+	async_icosq = kvzalloc_node(sizeof(*async_icosq), GFP_KERNEL,
+				    cpu_to_node(c->cpu));
+	if (!async_icosq)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, ccp,
+			    &async_icosq->cq);
+	if (err)
+		goto err_free_async_icosq;
+
+	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, async_icosq,
+			       mlx5e_async_icosq_err_cqe_work);
+	if (err)
+		goto err_close_async_icosq_cq;
+
+	return async_icosq;
+
+err_close_async_icosq_cq:
+	mlx5e_close_cq(&async_icosq->cq);
+err_free_async_icosq:
+	kvfree(async_icosq);
+	return ERR_PTR(err);
+}
+
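+/* Counterpart of mlx5e_open_async_icosq(): close the SQ and its CQ, then
+ * free the structure.
+ */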
+static void mlx5e_close_async_icosq(struct mlx5e_icosq *async_icosq)
+{
+	mlx5e_close_icosq(async_icosq);
+	mlx5e_close_cq(&async_icosq->cq);
+	kvfree(async_icosq);
+}
+
static int mlx5e_open_queues(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
	mlx5e_build_create_cq_param(&ccp, c);
-	err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp,
-			    &c->async_icosq.cq);
-	if (err)
-		return err;
-
	err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
			    &c->icosq.cq);
	if (err)
-		goto err_close_async_icosq_cq;
+		return err;
	err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
	if (err)
	if (err)
		goto err_close_rx_cq;
-	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
-			       mlx5e_async_icosq_err_cqe_work);
-	if (err)
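+	/* The helper opens both the async ICOSQ and its CQ, and cleans up
+	 * after itself on failure, so only the earlier queues need unwinding.
+	 */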
+	c->async_icosq = mlx5e_open_async_icosq(c, params, cparam, &ccp);
+	if (IS_ERR(c->async_icosq)) {
+		err = PTR_ERR(c->async_icosq);
		goto err_close_rq_xdpsq_cq;
+	}
	mutex_init(&c->icosq_recovery_lock);
	mlx5e_close_icosq(&c->icosq);
err_close_async_icosq:
-	mlx5e_close_icosq(&c->async_icosq);
+	mlx5e_close_async_icosq(c->async_icosq);
err_close_rq_xdpsq_cq:
	if (c->xdp)
err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);
-err_close_async_icosq_cq:
-	mlx5e_close_cq(&c->async_icosq.cq);
-
	return err;
}
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	mutex_destroy(&c->icosq_recovery_lock);
-	mlx5e_close_icosq(&c->async_icosq);
+	mlx5e_close_async_icosq(c->async_icosq);
	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_xdpredirect_sq(c->xdpsq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
-	mlx5e_close_cq(&c->async_icosq.cq);
}
static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_icosq(&c->icosq);
-	mlx5e_activate_icosq(&c->async_icosq);
+	mlx5e_activate_icosq(c->async_icosq);
	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_activate_xsk(c);
	else
	mlx5e_deactivate_rq(&c->rq);
-	mlx5e_deactivate_icosq(&c->async_icosq);
+	mlx5e_deactivate_icosq(c->async_icosq);
	mlx5e_deactivate_icosq(&c->icosq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
	busy |= work_done == budget;
	mlx5e_poll_ico_cq(&c->icosq.cq);
-	if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
+	if (mlx5e_poll_ico_cq(&c->async_icosq->cq))
		/* Don't clear the flag if nothing was polled to prevent
		 * queueing more WQEs and overflowing the async ICOSQ.
		 */
-		clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
+		clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX,
+			  &c->async_icosq->state);
	/* Keep after async ICOSQ CQ poll */
	if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
	mlx5e_cq_arm(&rq->cq);
	mlx5e_cq_arm(&c->icosq.cq);
-	mlx5e_cq_arm(&c->async_icosq.cq);
+	mlx5e_cq_arm(&c->async_icosq->cq);
	if (c->xdpsq)
		mlx5e_cq_arm(&c->xdpsq->cq);