/* Receive Packet Data Buffer Size.
* The Packet Data Buffer Size is defined in 128 byte units.
*/
- rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
+ rlan_ctx.dbuf = DIV_ROUND_UP(ICE_RXBUF_3072,
BIT_ULL(ICE_RLAN_CTX_DBUF_S));
/* use 32 byte descriptors */
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
- rlan_ctx.rxmax = min_t(u32, ring->max_frame,
- ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
+ rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
+ ICE_MAX_CHAINED_RX_BUFS * ICE_RXBUF_3072);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
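
Both context fields above are plain unit conversions. A minimal standalone sketch of the arithmetic, assuming ICE_RXBUF_3072 == 3072, ICE_RLAN_CTX_DBUF_S == 7 (so BIT_ULL() yields the 128 byte unit) and ICE_MAX_CHAINED_RX_BUFS == 5 per the "5 x DBUF" comment; the value used for ICE_AQ_SET_MAC_FRAME_SIZE_MAX below is assumed purely for illustration:

/* standalone sketch, not driver code: the dbuf and rxmax math */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)              (((n) + (d) - 1) / (d))
#define ICE_RXBUF_3072                  3072
#define ICE_RLAN_CTX_DBUF_S             7       /* 2^7 = 128 byte units */
#define ICE_MAX_CHAINED_RX_BUFS         5       /* "5 x DBUF" */
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX   9728    /* assumed for illustration */

int main(void)
{
    unsigned int dbuf = DIV_ROUND_UP(ICE_RXBUF_3072, 1u << ICE_RLAN_CTX_DBUF_S);
    unsigned int cap = ICE_MAX_CHAINED_RX_BUFS * ICE_RXBUF_3072;
    unsigned int max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
    unsigned int rxmax = max_frame < cap ? max_frame : cap;

    printf("dbuf  = %u units (%u bytes)\n", dbuf, dbuf * 128);  /* 24, 3072 */
    printf("rxmax = %u, chain cap = %u\n", rxmax, cap);         /* 9728, 15360 */
    return 0;
}

A 3 KiB buffer is therefore 24 dbuf units, and the five-buffer chain limit (15360 bytes) sits above the assumed maximum frame size, so rxmax ends up being the MAC frame size itself.
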
#if (PAGE_SIZE >= 8192)
frame_sz = rx_ring->rx_buf_len;
#else
- frame_sz = ice_rx_pg_size(rx_ring) / 2;
+ frame_sz = PAGE_SIZE;
#endif
return frame_sz;
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
+ u32 rx_buf_len;
int err;
if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
- ring->rx_buf_len);
+ ICE_RXBUF_3072);
if (err)
return err;
}
if (ring->xsk_pool) {
xdp_rxq_info_unreg(&ring->xdp_rxq);
- ring->rx_buf_len =
+ rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
- ring->rx_buf_len);
+ rx_buf_len);
if (err)
return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
- ring->rx_buf_len);
+ ICE_RXBUF_3072);
if (err)
return err;
}
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
{
if (!vsi->netdev) {
- ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- ring->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
} else {
- ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- ring->rx_buf_len = ICE_RXBUF_3072;
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
}
}
/* Invalidate cache lines that may have been written to by
* device so that we avoid corrupting memory.
*/
dma_sync_single_range_for_cpu(dev, rx_buf->dma,
rx_buf->page_offset,
- rx_ring->rx_buf_len,
+ ICE_RXBUF_3072,
DMA_FROM_DEVICE);
/* free resources associated with mapping */
- dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
+ dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
+ __free_page(rx_buf->page);
rx_buf->page = NULL;
rx_buf->page_offset = 0;
struct page *page = bi->page;
dma_addr_t dma;
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
-
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
if (unlikely(!page)) {
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
bi->page = page;
bi->page_offset = rx_ring->rx_offset;
page_ref_add(page, USHRT_MAX - 1);
- bi->pagecnt_bias = USHRT_MAX;
return true;
}
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset,
- rx_ring->rx_buf_len,
+ ICE_RXBUF_3072,
DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
return !!cleaned_count;
}
-/**
- * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
- * @rx_buf: Rx buffer to adjust
- * @size: Size of adjustment
- *
- * Update the offset within page so that Rx buf will be ready to be reused.
- * For systems with PAGE_SIZE < 8192 this function will flip the page offset
- * so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by "size" bytes
- */
-static void
-ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
-{
-#if (PAGE_SIZE < 8192)
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= size;
-#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += size;
-#endif
-}
-
-/**
- * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
- * @rx_buf: buffer containing the page
- *
- * If page is reusable, we have a green light for calling ice_reuse_rx_page,
- * which will assign the current buffer to the buffer that next_to_alloc is
- * pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
- */
-static bool
-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
-{
- unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
- struct page *page = rx_buf->page;
-
- /* avoid re-using remote and pfmemalloc pages */
- if (!dev_page_is_reusable(page))
- return false;
-
- /* if we are only owner of page we can reuse it */
- if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
- return false;
-#if (PAGE_SIZE >= 8192)
-#define ICE_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
- if (rx_buf->page_offset > ICE_LAST_OFFSET)
- return false;
-#endif /* PAGE_SIZE >= 8192) */
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(pagecnt_bias == 1)) {
- page_ref_add(page, USHRT_MAX - 1);
- rx_buf->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
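
For readers following the deletions: the recycling scheme removed here flipped each Rx buffer between the two halves of its page (for PAGE_SIZE < 8192) and used the pagecnt_bias trick to decide whether the driver still owned the page exclusively. A standalone sketch of that bookkeeping, with a 4 KiB page and a 2 KiB truesize assumed for illustration:

/* standalone sketch, not driver code: the page flip and ownership test
 * that the removed ice_rx_buf_adjust_pg_offset()/ice_can_reuse_rx_page()
 * implemented
 */
#include <stdio.h>

int main(void)
{
    unsigned int page_offset = 0;       /* first half of an assumed 4 KiB page */
    unsigned int truesize = 2048;       /* assumed per-buffer truesize */
    unsigned int pagecnt = 65535;       /* 1 from alloc + page_ref_add(USHRT_MAX - 1) */
    unsigned int pagecnt_bias = 65535;  /* references the driver still owns */

    /* adjust_pg_offset: XOR toggles between the two page halves */
    page_offset ^= truesize;
    printf("offset after flip: %u\n", page_offset);     /* 2048 */
    page_offset ^= truesize;
    printf("offset after flip: %u\n", page_offset);     /* back to 0 */

    /* get_rx_buf handed one buffer to the stack */
    pagecnt_bias--;

    /* can_reuse_rx_page: reuse only while at most one reference sits
     * outside the driver's bias, i.e. pagecnt - pagecnt_bias <= 1
     */
    printf("reusable: %s\n", pagecnt - pagecnt_bias > 1 ? "no" : "yes");
    return 0;
}

None of this bookkeeping survives the patch: each Rx buffer now gets its own order-0 page, and ice_put_rx_buf() simply unmaps it instead of trying to recycle it.
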
/**
* ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
* @rx_ring: Rx descriptor ring to transact packets on
return 0;
}
-/**
- * ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: Rx descriptor ring to store buffers on
- * @old_buf: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- */
-static void
-ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
-{
- u16 nta = rx_ring->next_to_alloc;
- struct ice_rx_buf *new_buf;
-
- new_buf = &rx_ring->rx_buf[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* Transfer page from old buffer to new buffer.
- * Move each member individually to avoid possible store
- * forwarding stalls and unnecessary copy of skb.
- */
- new_buf->dma = old_buf->dma;
- new_buf->page = old_buf->page;
- new_buf->page_offset = old_buf->page_offset;
- new_buf->pagecnt_bias = old_buf->pagecnt_bias;
-}
-
/**
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
rx_buf->page_offset, size,
DMA_FROM_DEVICE);
- /* We have pulled a buffer for use, so decrement pagecnt_bias */
- rx_buf->pagecnt_bias--;
-
return rx_buf;
}
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf)) {
- /* hand second half of page back to the ring */
- ice_reuse_rx_page(rx_ring, rx_buf);
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
- ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
- ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
- }
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE,
+ ICE_RX_DMA_ATTR);
/* clear contents of buffer_info */
rx_buf->page = NULL;
u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
struct ice_rx_buf *buf;
- u32 xdp_frags = 0;
- int i = 0;
-
- if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
while (idx != ntc) {
buf = &rx_ring->rx_buf[idx];
if (++idx == cnt)
idx = 0;
- /* An XDP program could release fragments from the end of the
- * buffer. For these, we need to keep the pagecnt_bias as-is.
- * To do this, only adjust pagecnt_bias for fragments up to
- * the total remaining after the XDP program has run.
- */
- if (verdict != ICE_XDP_CONSUMED)
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- else if (i++ <= xdp_frags)
- buf->pagecnt_bias++;
-
ice_put_rx_buf(rx_ring, buf);
}
#define ICE_MAX_TXQ_PER_TXQG 128
-/* Attempt to maximize the headroom available for incoming frames. We use a 2K
- * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
- * This leaves us with 512 bytes of room. From that we need to deduct the
- * space needed for the shared info and the padding needed to IP align the
- * frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define ICE_2K_TOO_SMALL_WITH_PADDING \
- ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
- SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
-
-/**
- * ice_compute_pad - compute the padding
- * @rx_buf_len: buffer length
- *
- * Figure out the size of half page based on given buffer length and
- * then subtract the skb_shared_info followed by subtraction of the
- * actual buffer length; this in turn results in the actual space that
- * is left for padding usage
- */
-static inline int ice_compute_pad(int rx_buf_len)
-{
- int half_page_size;
-
- half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
- return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
-}
-
-/**
- * ice_skb_pad - determine the padding that we can supply
- *
- * Figure out the right Rx buffer size and based on that calculate the
- * padding
- */
-static inline int ice_skb_pad(void)
-{
- int rx_buf_len;
-
- /* If a 2K buffer cannot handle a standard Ethernet frame then
- * optimize padding for a 3K buffer instead of a 1.5K buffer.
- *
- * For a 3K buffer we need to add enough padding to allow for
- * tailroom due to NET_IP_ALIGN possibly shifting us out of
- * cache-line alignment.
- */
- if (ICE_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
- else
- rx_buf_len = ICE_RXBUF_1536;
-
- /* if needed make room for NET_IP_ALIGN */
- rx_buf_len -= NET_IP_ALIGN;
-
- return ice_compute_pad(rx_buf_len);
-}
-
-#define ICE_SKB_PAD ice_skb_pad()
-#else
-#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
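
The removed #if block existed to answer one question: does a standard 1500 byte frame, plus headroom and the trailing skb_shared_info, still fit in a 2 KiB half page? A standalone sketch of that check, where NET_SKB_PAD is assumed to equal the cache line size and sizeof(struct skb_shared_info) is assumed to be 320 bytes (both are illustrative assumptions, not values taken from this patch):

/* standalone sketch, not driver code: the ICE_2K_TOO_SMALL_WITH_PADDING
 * comparison removed above, evaluated for two cache line sizes
 */
#include <stdio.h>

#define ICE_RXBUF_1536  1536
#define ICE_RXBUF_2048  2048
#define SHINFO_SIZE     320     /* assumed sizeof(struct skb_shared_info) */

static unsigned int align_up(unsigned int x, unsigned int a)
{
    return (x + a - 1) / a * a;
}

static void check(unsigned int cacheline)
{
    unsigned int net_skb_pad = cacheline;   /* assumed NET_SKB_PAD */
    unsigned int usable = ICE_RXBUF_2048 - align_up(SHINFO_SIZE, cacheline);

    printf("cacheline %3u: need %u, have %u -> 2K is %s\n",
           cacheline, net_skb_pad + ICE_RXBUF_1536, usable,
           net_skb_pad + ICE_RXBUF_1536 > usable ? "too small" : "enough");
}

int main(void)
{
    check(64);      /* typical: 1600 needed, 1728 available */
    check(256);     /* large cache lines: 1792 needed, only 1536 available */
    return 0;
}

Since the driver now always uses 3 KiB buffers on full pages, the 2 KiB corner case is gone and only the fixed NET_SKB_PAD + NET_IP_ALIGN definition remains.
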
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
struct page *page;
unsigned int page_offset;
unsigned int pgcnt;
- unsigned int pagecnt_bias;
};
struct ice_q_stats {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
- u16 max_frame;
u16 rx_buf_len;
dma_addr_t dma; /* physical address of ring */
u8 dcb_tc; /* Traffic class of ring */
static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
-#if (PAGE_SIZE < 8192)
- if (ring->rx_buf_len > (PAGE_SIZE / 2))
- return 1;
-#endif
return 0;
}
-#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
-
union ice_32b_rx_flex_desc;
void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs);