if (ring->page_pool) {
struct page *page = virt_to_head_page(data);
struct xdp_buff xdp;
- u32 ret;
+ u32 ret, metasize;
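
/* pull a fresh buffer from the page_pool to refill this RX ring slot */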
new_data = mtk_page_pool_get_buff(ring->page_pool,
                                  &dma_addr,
                                  GFP_ATOMIC);
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
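/* meta_valid=true: allow an attached BPF program to grow a metadata
 * area in front of the packet with bpf_xdp_adjust_meta()
 */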
xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
- false);
+ true);
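/* no fragments are attached to this buffer, so clear the frags flag */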
xdp_buff_clear_frags_flag(&xdp);
ret = mtk_xdp_run(eth, ring, &xdp, netdev);
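/* verdict handling elided from this excerpt; only XDP_PASS frames reach
 * the skb path below, where the buffer has been wrapped via build_skb()
 */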
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
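/* propagate metadata written via bpf_xdp_adjust_meta() to the skb */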
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
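/* have the stack return the page to the page_pool when the skb is freed */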
skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)