static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{
        struct mlx5_eq_comp *eq;
+       struct mlx5e_priv *priv;
        struct mlx5e_rq *rq;
        int err;

        rq = ctx;
+       priv = rq->priv;
+
+       mutex_lock(&priv->state_lock);
+
        eq = rq->cq.mcq.eq;
        err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats);
        if (err && rq->icosq)
                clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);

+       mutex_unlock(&priv->state_lock);
+
        return err;
}
@@ ... @@
        mlx5e_reporter_rq_cqe_err(rq);
}

+static void mlx5e_rq_timeout_work(struct work_struct *timeout_work)
+{
+       struct mlx5e_rq *rq = container_of(timeout_work, struct mlx5e_rq,
+                                          rx_timeout_work);
+
+       /* Acquire netdev instance lock to synchronize with channel close and
+        * reopen flows. Either successfully obtain the lock, or detect that
+        * channels are closing for another reason, making this work no longer
+        * necessary.
+        */
+       while (!netdev_trylock(rq->netdev)) {
+               if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &rq->priv->state))
+                       return;
+               msleep(20);
+       }
+
+       mlx5e_reporter_rx_timeout(rq);
+       netdev_unlock(rq->netdev);
+}
+
static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
        rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
@@ ... @@
        rqp->wq.db_numa_node = node;
        INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
+       INIT_WORK(&rq->rx_timeout_work, mlx5e_rq_timeout_work);

        if (params->xdp_prog)
                bpf_prog_inc(params->xdp_prog);
@@ ... @@
        netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
                    rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
-       mlx5e_reporter_rx_timeout(rq);
+       queue_work(rq->priv->wq, &rq->rx_timeout_work);
+
        return -ETIMEDOUT;
}
@@ ... @@
        if (rq->dim)
                cancel_work_sync(&rq->dim->work);
        cancel_work_sync(&rq->recover_work);
+       cancel_work_sync(&rq->rx_timeout_work);
        mlx5e_destroy_rq(rq);
        mlx5e_free_rx_descs(rq);
        mlx5e_free_rq(rq);
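
Note: for the hunks above to compile, struct mlx5e_rq must also gain a work_struct backing rq->rx_timeout_work (it is passed to INIT_WORK(), cancel_work_sync() and container_of() above). That header change is not shown in this excerpt; a minimal sketch of what it presumably looks like, assuming the member is added in en.h near the existing recover_work field, is:

@@ ... @@ struct mlx5e_rq {
        /* ... other members ... */
        struct work_struct     recover_work;
+       struct work_struct     rx_timeout_work; /* assumed placement: backs the deferred RX timeout recovery */
        /* ... */
};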