static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam)
+ struct mlx5e_channel_param *cparam,
+ bool async_icosq_needed)
{
const struct net_device_ops *netdev_ops = c->netdev->netdev_ops;
struct dim_cq_moder icocq_moder = {0, 0};
if (err)
goto err_close_rx_cq;
- c->async_icosq = mlx5e_open_async_icosq(c, params, cparam, &ccp);
- if (IS_ERR(c->async_icosq)) {
- err = PTR_ERR(c->async_icosq);
- goto err_close_rq_xdpsq_cq;
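+	/* The channel struct is zero-allocated, so c->async_icosq stays NULL
+	 * when it is not opened here; all users check the pointer before use.
+	 */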
+ if (async_icosq_needed) {
+ c->async_icosq = mlx5e_open_async_icosq(c, params, cparam,
+ &ccp);
+ if (IS_ERR(c->async_icosq)) {
+ err = PTR_ERR(c->async_icosq);
+ goto err_close_rq_xdpsq_cq;
+ }
}
mutex_init(&c->icosq_recovery_lock);
mlx5e_close_icosq(&c->icosq);
err_close_async_icosq:
- mlx5e_close_async_icosq(c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_close_async_icosq(c->async_icosq);
err_close_rq_xdpsq_cq:
if (c->xdp)
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
mutex_destroy(&c->icosq_recovery_lock);
- mlx5e_close_async_icosq(c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_close_async_icosq(c->async_icosq);
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
struct mlx5e_channel_param *cparam;
struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
+ bool async_icosq_needed;
struct mlx5e_channel *c;
unsigned int irq;
int vec_ix;
netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
netif_napi_set_irq_locked(&c->napi, irq);
- err = mlx5e_open_queues(c, params, cparam);
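+	/* The async ICOSQ only serves XSK wakeups and kTLS RX resync. */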
+ async_icosq_needed = !!xsk_pool || priv->ktls_rx_was_enabled;
+ err = mlx5e_open_queues(c, params, cparam, async_icosq_needed);
if (unlikely(err))
goto err_napi_del;
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
- mlx5e_activate_icosq(c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_activate_icosq(c->async_icosq);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
else
mlx5e_deactivate_rq(&c->rq);
- mlx5e_deactivate_icosq(c->async_icosq);
+ if (c->async_icosq)
+ mlx5e_deactivate_icosq(c->async_icosq);
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
+ struct mlx5e_icosq *aicosq = c->async_icosq;
struct mlx5e_ch_stats *ch_stats = c->stats;
struct mlx5e_xdpsq *xsksq = &c->xsksq;
struct mlx5e_txqsq __rcu **qos_sqs;
busy |= work_done == budget;
mlx5e_poll_ico_cq(&c->icosq.cq);
- if (mlx5e_poll_ico_cq(&c->async_icosq->cq))
- /* Don't clear the flag if nothing was polled to prevent
- * queueing more WQEs and overflowing the async ICOSQ.
- */
- clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX,
- &c->async_icosq->state);
-
- /* Keep after async ICOSQ CQ poll */
- if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
- busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
+ if (aicosq) {
+ if (mlx5e_poll_ico_cq(&aicosq->cq))
+ /* Don't clear the flag if nothing was polled to prevent
+ * queueing more WQEs and overflowing the async ICOSQ.
+ */
+ clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX,
+ &aicosq->state);
+
+ /* Keep after async ICOSQ CQ poll */
+ if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
+ busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
+ }
busy |= INDIRECT_CALL_2(rq->post_wqes,
mlx5e_post_rx_mpwqes,
mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
- mlx5e_cq_arm(&c->async_icosq->cq);
+	if (aicosq)
+		mlx5e_cq_arm(&aicosq->cq);
if (c->xdpsq)
mlx5e_cq_arm(&c->xdpsq->cq);
	if (xsk_open) {
		mlx5e_handle_rx_dim(xskrq);
		mlx5e_cq_arm(&xsksq->cq);
		mlx5e_cq_arm(&xskrq->cq);
	}

if (unlikely(aff_change && busy_xsk)) {
mlx5e_trigger_irq(&c->icosq);
ch_stats->force_irq++;