return 0;
}
+
+static int mlx5e_rq_alloc_mpwqe_linear_info(struct mlx5e_rq *rq, int node,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_opt_param *rqo,
+ u32 *pool_size)
+{
+ struct mlx5_core_dev *mdev = rq->mdev;
+ struct mlx5e_mpw_linear_info *li;
+
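+ /* The extra linear scratch page is only needed when an XDP program
+  * runs on a non-linear (fragmented) MPWQE RQ; linear RQs build the
+  * xdp_buff in place.
+  */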
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) ||
+ !params->xdp_prog)
+ return 0;
+
+ li = kvzalloc_node(sizeof(*li), GFP_KERNEL, node);
+ if (!li)
+ return -ENOMEM;
+
+ rq->mpwqe.linear_info = li;
+
+ /* additional page per packet for the linear part */
+ *pool_size *= 2;
+
+ return 0;
+}
+
static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
{
pool_size = rq->mpwqe.pages_per_wqe <<
	    mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo);
pool_order = rq->mpwqe.page_shift - PAGE_SHIFT;
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) &&
- params->xdp_prog)
- pool_size *= 2; /* additional page per packet for the linear part */
-
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, rqo);
if (err)
goto err_rq_mkey;
- err = mlx5_rq_shampo_alloc(mdev, params, rq_param, rq, node);
+ err = mlx5e_rq_alloc_mpwqe_linear_info(rq, node, params, rqo,
+ &pool_size);
if (err)
goto err_free_mpwqe_info;
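+ /* SHAMPO is allocated after linear_info, so its failure unwinds
+  * through err_free_mpwqe_linear_info to release that allocation.
+  */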
+ err = mlx5_rq_shampo_alloc(mdev, params, rq_param, rq, node);
+ if (err)
+ goto err_free_mpwqe_linear_info;
+
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
err = mlx5_wq_cyc_create(mdev, &rq_param->wq, rqc_wq,
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_shampo(rq);
+err_free_mpwqe_linear_info:
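+ /* kvfree(NULL) is a no-op, so this is safe when linear_info was
+  * never allocated (linear RQ or no XDP program).
+  */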
+ kvfree(rq->mpwqe.linear_info);
err_free_mpwqe_info:
kvfree(rq->mpwqe.info);
err_rq_mkey:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_shampo(rq);
+ kvfree(rq->mpwqe.linear_info);
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_frag_page *head_page = frag_page;
+ struct mlx5e_frag_page *linear_page = NULL;
struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
u32 page_size = BIT(rq->mpwqe.page_shift);
u32 frag_offset = head_offset;
if (prog) {
/* area for bpf_xdp_[store|load]_bytes */
net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+
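+ /* An XDP program on this non-linear MPWQE path implies linear_info
+  * was allocated in mlx5e_rq_alloc_mpwqe_linear_info().
+  */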
+ linear_page = &rq->mpwqe.linear_info->frag_page;
if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
- &wi->linear_page))) {
+ linear_page))) {
rq->stats->buff_alloc_err++;
return NULL;
}
- va = netmem_address(wi->linear_page.netmem);
+ va = netmem_address(linear_page->netmem);
net_prefetchw(va); /* xdp_frame data area */
linear_hr = XDP_PACKET_HEADROOM;
linear_data_len = 0;
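+ /* The linear part of the xdp_buff starts empty (headroom only); the
+  * payload stays in the striding-RQ frag pages and is attached below
+  * as xdp frags.
+  */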
for (pfp = head_page; pfp < frag_page; pfp++)
pfp->frags++;
- wi->linear_page.frags++;
+ linear_page->frags++;
}
mlx5e_page_release_fragmented(rq->page_pool,
- &wi->linear_page);
+ linear_page);
return NULL; /* page/packet was consumed by XDP */
}
mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq->page_pool,
- &wi->linear_page);
+ linear_page);
return NULL;
}
skb_mark_for_recycle(skb);
- wi->linear_page.frags++;
- mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
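+ /* frags++ keeps one page_pool fragment reference for the skb's
+  * linear data; the release below drops the RQ's remaining refs.
+  */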
+ linear_page->frags++;
+ mlx5e_page_release_fragmented(rq->page_pool, linear_page);
if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;