int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_param *param)
+ struct mlx5e_rq_param *rq_param)
{
- void *rqc = param->rqc;
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ void *rqc = rq_param->rqc;
u32 lro_timeout;
int ndsegs = 1;
+ void *wq;
int err;
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
}
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
- &param->xdp_frag_size);
+ err = mlx5e_build_rq_frags_info(mdev, params, xsk,
+ &rq_param->frags_info,
+ &rq_param->xdp_frag_size);
if (err)
return err;
- ndsegs = param->frags_info.num_frags;
+ ndsegs = rq_param->frags_info.num_frags;
}
MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
+ rq_param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ mlx5e_build_rx_cq_param(mdev, params, xsk, &rq_param->cqp);
return 0;
}
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- struct mlx5e_rq_param *param)
+ struct mlx5e_rq_param *rq_param)
{
- void *rqc = param->rqc;
+ void *rqc = rq_param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ rq_param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_rq_param *rqp)
+ struct mlx5e_rq_param *rq_param)
{
u32 wqebbs, total_pages, useful_space;
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq_param *rq_param,
struct mlx5e_rq *rq,
int node)
{
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
- hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rq_param);
hd_buf_size = hd_per_wq * BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
nentries = hd_buf_size / PAGE_SIZE;
if (!nentries) {
static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq_param *rq_param,
int node, struct mlx5e_rq *rq)
{
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
struct mlx5_core_dev *mdev = rq->mdev;
- void *rqc = rqp->rqc;
- void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 pool_size;
int wq_sz;
int err;
int i;
- rqp->wq.db_numa_node = node;
+ rq_param->wq.db_numa_node = node;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
INIT_WORK(&rq->rx_timeout_work, mlx5e_rq_timeout_work);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
- &rq->wq_ctrl);
+ err = mlx5_wq_ll_create(mdev, &rq_param->wq, rqc_wq,
+ &rq->mpwqe.wq, &rq->wq_ctrl);
if (err)
goto err_rq_xdp_prog;
if (err)
goto err_rq_mkey;
- err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, node);
+ err = mlx5_rq_shampo_alloc(mdev, params, rq_param, rq, node);
if (err)
goto err_free_mpwqe_info;
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
- err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
- &rq->wq_ctrl);
+ err = mlx5_wq_cyc_create(mdev, &rq_param->wq, rqc_wq,
+ &rq->wqe.wq, &rq->wq_ctrl);
if (err)
goto err_rq_xdp_prog;
wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
- rq->wqe.info = rqp->frags_info;
+ rq->wqe.info = rq_param->frags_info;
rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
err = mlx5e_init_wqe_alloc_info(rq, node);
xdp_rxq_info_unreg(&rq->xdp_rxq);
}
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *rq_param,
+ u16 q_counter)
{
struct mlx5_core_dev *mdev = rq->mdev;
u8 ts_format;
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
- memcpy(rqc, param->rqc, sizeof(param->rqc));
+ memcpy(rqc, rq_param->rqc, sizeof(rq_param->rqc));
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
}
-int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
+int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *rq_param,
struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
struct mlx5e_rq *rq)
{
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
__set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
- err = mlx5e_alloc_rq(params, xsk, param, node, rq);
+ err = mlx5e_alloc_rq(params, xsk, rq_param, node, rq);
if (err)
return err;
- err = mlx5e_create_rq(rq, param, q_counter);
+ err = mlx5e_create_rq(rq, rq_param, q_counter);
if (err)
goto err_free_rq;
}
static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_params)
+ struct mlx5e_rq_param *rq_param)
{
u16 q_counter = c->priv->q_counter[c->sd_ix];
int err;
- err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
+ err = mlx5e_init_rxq_rq(c, params, rq_param->xdp_frag_size, &c->rq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
+ return mlx5e_open_rq(params, rq_param, NULL, cpu_to_node(c->cpu),
+ q_counter, &c->rq);
}
static struct mlx5e_icosq *
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
struct mlx5e_rq *rq,
- struct mlx5e_rq_param *param)
+ struct mlx5e_rq_param *rq_param)
{
- void *rqc = param->rqc;
- void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
int err;
- param->wq.db_numa_node = param->wq.buf_numa_node;
+ rq_param->wq.db_numa_node = rq_param->wq.buf_numa_node;
- err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
+ err = mlx5_wq_cyc_create(mdev, &rq_param->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
return err;