#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
+#include <net/netdev_queues.h>
#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev,
struct mlx5e_rq_opt_param *rqo)
{
+ struct netdev_queue_config *qcfg = rqo ? rqo->qcfg : NULL;
struct mlx5e_xsk_param *xsk = mlx5e_rqo_xsk_param(rqo);
u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
u8 req_page_shift;
- req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
+ if (xsk)
+ req_page_shift = order_base_2(xsk->chunk_size);
+ else if (qcfg && qcfg->rx_page_size)
+ req_page_shift = order_base_2(qcfg->rx_page_size);
+ else
+ req_page_shift = PAGE_SHIFT;
/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct netdev_queue_config *qcfg,
struct mlx5e_channel_param *cparam)
{
u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
int err;
- err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
+ cparam->rq_opt.qcfg = qcfg;
+
+ err = mlx5e_build_rq_param(mdev, params, &cparam->rq_opt, &cparam->rq);
if (err)
return err;
struct mlx5e_rq_opt_param {
struct mlx5e_xsk_param *xsk;
+ /* Optional per-queue configuration; may be NULL (regular channel open
+  * passes no qcfg). When set and rx_page_size is nonzero, it overrides
+  * PAGE_SHIFT when computing the MPWRQ page shift.
+  */
+ struct netdev_queue_config *qcfg;
};
struct mlx5e_cq_param {
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct netdev_queue_config *qcfg,
struct mlx5e_channel_param *cparam);
void mlx5e_build_xsk_channel_param(struct mlx5_core_dev *mdev,
return err;
}
-static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_param)
+static int mlx5e_open_rxq_rq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param,
+ struct mlx5e_rq_opt_param *rqo)
{
u16 q_counter = c->priv->q_counter[c->sd_ix];
int err;
if (err)
return err;
- return mlx5e_open_rq(params, rq_param, NULL, cpu_to_node(c->cpu),
+ return mlx5e_open_rq(params, rq_param, rqo, cpu_to_node(c->cpu),
q_counter, &c->rq);
}
if (err)
goto err_close_icosq;
- err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
+ err = mlx5e_open_rxq_rq(c, params, &cparam->rq, &cparam->rq_opt);
if (err)
goto err_close_sqs;
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
+ struct netdev_queue_config *qcfg,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
goto err_free;
}
- err = mlx5e_build_channel_param(mdev, params, cparam);
+ err = mlx5e_build_channel_param(mdev, params, qcfg, cparam);
if (err)
goto err_free;
if (chs->params.xdp_prog)
xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
- err = mlx5e_open_channel(priv, i, &chs->params, xsk_pool, &chs->c[i]);
+ err = mlx5e_open_channel(priv, i, &chs->params, NULL,
+ xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
goto unlock;
}
- err = mlx5e_open_channel(priv, queue_index, &params, NULL, &new->c);
+ err = mlx5e_open_channel(priv, queue_index, &params, qcfg, NULL,
+ &new->c);
unlock:
mutex_unlock(&priv->state_lock);
return err;