#define MLX5E_PAGECNT_BIAS_MAX U16_MAX
#define MLX5E_RX_MAX_HEAD (256)
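+/* Log2 of the linear slot size used to split the shared XDP page: one
+ * XDP_PACKET_HEADROOM + MLX5E_RX_MAX_HEAD frame, sized via MLX5_SKB_FRAG_SZ
+ * and rounded up to a power of two so a page holds a whole number of slots.
+ */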
+#define MLX5E_XDP_LOG_MAX_LINEAR_SZ \
+ order_base_2(MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM + MLX5E_RX_MAX_HEAD))
+
#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE \
(PAGE_SIZE >> MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
struct mlx5e_mpw_linear_info {
struct mlx5e_frag_page frag_page;
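+	/* Linear slots available per page:
+	 * BIT(page_shift - MLX5E_XDP_LOG_MAX_LINEAR_SZ).
+	 */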
+ u16 max_frags;
};
#define MLX5E_MAX_RX_FRAGS 4
bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
bool dim_enabled, bool keep_dim_state);
+void mlx5e_mpwqe_dealloc_linear_page(struct mlx5e_rq *rq);
+
struct mlx5e_sq_param;
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
static int mlx5e_rq_alloc_mpwqe_linear_info(struct mlx5e_rq *rq, int node,
struct mlx5e_params *params,
- struct mlx5e_rq_opt_param *rqo,
- u32 *pool_size)
+ struct mlx5e_rq_opt_param *rqo)
{
struct mlx5_core_dev *mdev = rq->mdev;
struct mlx5e_mpw_linear_info *li;
+ u32 linear_frag_count;
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) ||
	    !params->xdp_prog)
		return 0;

	li = kvzalloc_node(sizeof(*li), GFP_KERNEL, node);
	if (!li)
		return -ENOMEM;
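+	/* Number of linear slots carved out of one page; must fit in the
+	 * u16 max_frags field.
+	 */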
+ linear_frag_count =
+ BIT(rq->mpwqe.page_shift - MLX5E_XDP_LOG_MAX_LINEAR_SZ);
+ if (linear_frag_count > U16_MAX) {
+ netdev_warn(rq->netdev,
+ "rq %d: linear_frag_count (%u) larger than expected (%u), page_shift: %u, log_max_linear_sz: %u\n",
+ rq->ix, linear_frag_count, U16_MAX,
+ rq->mpwqe.page_shift, MLX5E_XDP_LOG_MAX_LINEAR_SZ);
+ kvfree(li);
+ return -EINVAL;
+ }
+
+ li->max_frags = linear_frag_count;
rq->mpwqe.linear_info = li;
- /* additional page per packet for the linear part */
- *pool_size *= 2;
+ /* Set to max to force allocation on first run. */
+ li->frag_page.frags = li->max_frags;
return 0;
}
if (err)
goto err_rq_mkey;
- err = mlx5e_rq_alloc_mpwqe_linear_info(rq, node, params, rqo,
- &pool_size);
+ err = mlx5e_rq_alloc_mpwqe_linear_info(rq, node, params, rqo);
if (err)
goto err_free_mpwqe_info;
mlx5_wq_ll_pop(wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
+
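+	/* Drop the shared XDP linear page together with the RQ descriptors. */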
+ mlx5e_mpwqe_dealloc_linear_page(rq);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u16 missing = mlx5_wq_cyc_missing(wq);
page_pool_put_unrefed_netmem(pp, netmem, -1, true);
}
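+/* Make sure the shared XDP linear page has a free slot: once the current page
+ * is fully consumed, release it and allocate a fresh fragmented page.
+ */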
+static int mlx5e_mpwqe_linear_page_refill(struct mlx5e_rq *rq)
+{
+ struct mlx5e_mpw_linear_info *li = rq->mpwqe.linear_info;
+
+ if (likely(li->frag_page.frags < li->max_frags))
+ return 0;
+
+ if (likely(li->frag_page.netmem)) {
+ mlx5e_page_release_fragmented(rq->page_pool, &li->frag_page);
+ li->frag_page.netmem = 0;
+ }
+
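+	/* A freshly allocated fragmented page starts with frags == 0. */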
+ return mlx5e_page_alloc_fragmented(rq->page_pool, &li->frag_page);
+}
+
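+/* Return a pointer to the next free slot in the shared linear page, or NULL
+ * if a replacement page could not be allocated. The caller bumps
+ * frag_page.frags only when the slot is actually consumed.
+ */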
+static void *mlx5e_mpwqe_get_linear_page_frag(struct mlx5e_rq *rq)
+{
+ struct mlx5e_mpw_linear_info *li = rq->mpwqe.linear_info;
+ u32 frag_offset;
+
+ if (unlikely(mlx5e_mpwqe_linear_page_refill(rq)))
+ return NULL;
+
+ frag_offset = li->frag_page.frags << MLX5E_XDP_LOG_MAX_LINEAR_SZ;
+ WARN_ON(frag_offset >= BIT(rq->mpwqe.page_shift));
+
+ return netmem_address(li->frag_page.netmem) + frag_offset;
+}
+
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *frag)
{
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
}
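+/* Release the shared XDP linear page held by the RQ, if any. */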
+void mlx5e_mpwqe_dealloc_linear_page(struct mlx5e_rq *rq)
+{
+ struct mlx5e_mpw_linear_info *li = rq->mpwqe.linear_info;
+
+ if (!li || !li->frag_page.netmem)
+ return;
+
+ mlx5e_page_release_fragmented(rq->page_pool, &li->frag_page);
+
+ /* Recovery flow can call this function and then alloc again, so leave
+ * things in a good state for re-allocation.
+ */
+ li->frag_page.netmem = 0;
+ li->frag_page.frags = li->max_frags;
+}
+
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
/* area for bpf_xdp_[store|load]_bytes */
net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
- linear_page = &rq->mpwqe.linear_info->frag_page;
- if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
- linear_page))) {
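+	/* Take the next free slot from the shared linear page instead of
+	 * allocating a full page for every packet.
+	 */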
+ va = mlx5e_mpwqe_get_linear_page_frag(rq);
+ if (!va) {
rq->stats->buff_alloc_err++;
return NULL;
}
- va = netmem_address(linear_page->netmem);
net_prefetchw(va); /* xdp_frame data area */
linear_hr = XDP_PACKET_HEADROOM;
linear_data_len = 0;
linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
+ linear_page = &rq->mpwqe.linear_info->frag_page;
} else {
skb = napi_alloc_skb(rq->cq.napi,
ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
linear_page->frags++;
}
- mlx5e_page_release_fragmented(rq->page_pool,
- linear_page);
return NULL; /* page/packet was consumed by XDP */
}
rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
mxbuf->xdp.data - mxbuf->xdp.data_meta);
- if (unlikely(!skb)) {
- mlx5e_page_release_fragmented(rq->page_pool,
- linear_page);
+ if (unlikely(!skb))
return NULL;
- }
skb_mark_for_recycle(skb);
linear_page->frags++;
- mlx5e_page_release_fragmented(rq->page_pool, linear_page);
if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;