* emac_xmit_xdp_frame - transmits an XDP frame
* @emac: emac device
* @xdpf: data to transmit
- * @page: page from page pool if already DMA mapped
* @q_idx: queue id
+ * @buff_type: type of buffer to be transmitted
*
* Return: XDP state
*/
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct xdp_frame *xdpf,
- struct page *page,
- unsigned int q_idx)
+ unsigned int q_idx,
+ enum prueth_tx_buff_type buff_type)
{
struct cppi5_host_desc_t *first_desc;
struct net_device *ndev = emac->ndev;
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
struct prueth_swdata *swdata;
+ struct page *page;
u32 *epib;
int ret;
return ICSSG_XDP_CONSUMED; /* drop */
}
- if (page) { /* already DMA mapped by page_pool */
+ if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */
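+		/* XDP_TX frames come from this port's RX page_pool, so the
+		 * backing page is already DMA mapped; derive the page from
+		 * the frame's virtual address instead of taking it as an
+		 * argument.
+		 */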
+ page = virt_to_head_page(xdpf->data);
buf_dma = page_pool_get_dma_addr(page);
buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
} else { /* Map the linear buffer */
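+		/* PRUETH_TX_BUFF_TYPE_XDP_NDO: the frame arrived via
+		 * ndo_xdp_xmit() from another device and is not page_pool
+		 * backed, so it must be DMA mapped here before transmit.
+		 */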
* emac_run_xdp - run an XDP program
* @emac: emac device
* @xdp: XDP buffer containing the frame
- * @page: page with RX data if already DMA mapped
* @len: Rx descriptor packet length
*
* Return: XDP state
*/
-static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
- struct page *page, u32 *len)
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len)
{
struct net_device *ndev = emac->ndev;
struct netdev_queue *netif_txq;
q_idx = cpu % emac->tx_ch_num;
netif_txq = netdev_get_tx_queue(ndev, q_idx);
__netif_tx_lock(netif_txq, cpu);
- result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
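+		/* XDP_TX path: the frame is still in an RX page_pool buffer,
+		 * so pass PRUETH_TX_BUFF_TYPE_XDP_TX and skip re-mapping.
+		 */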
+ result = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+ PRUETH_TX_BUFF_TYPE_XDP_TX);
__netif_tx_unlock(netif_txq);
if (result == ICSSG_XDP_CONSUMED) {
ndev->stats.tx_dropped++;
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
ndev->stats.rx_dropped++;
- page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
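+		/* The page pointer is no longer passed in; recover it from
+		 * the xdp_buff before returning the buffer to the pool.
+		 */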
+ page_pool_recycle_direct(emac->rx_chns.pg_pool,
+ virt_to_head_page(xdp->data));
return ICSSG_XDP_CONSUMED;
}
}
xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
- *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
+ *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len);
if (*xdp_state != ICSSG_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
PRUETH_SWDATA_XSK,
};
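+
+/**
+ * enum prueth_tx_buff_type - source of a frame passed to emac_xmit_xdp_frame()
+ * @PRUETH_TX_BUFF_TYPE_XDP_TX: frame from this port's RX page_pool, already
+ *                              DMA mapped
+ * @PRUETH_TX_BUFF_TYPE_XDP_NDO: frame from ndo_xdp_xmit(), needs DMA mapping
+ *                               before transmit
+ */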
+enum prueth_tx_buff_type {
+ PRUETH_TX_BUFF_TYPE_XDP_TX,
+ PRUETH_TX_BUFF_TYPE_XDP_NDO,
+};
+
struct prueth_swdata {
enum prueth_swdata_type type;
union prueth_data {
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct xdp_frame *xdpf,
- struct page *page,
- unsigned int q_idx);
+ unsigned int q_idx,
+ enum prueth_tx_buff_type buff_type);
void prueth_rx_cleanup(void *data, dma_addr_t desc_dma);
void prueth_tx_cleanup(void *data, dma_addr_t desc_dma);
int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags);
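
/* For reference: a minimal sketch (not part of the diff above) of how the
 * driver's .ndo_xdp_xmit handler would pass the new buffer type. The body
 * is an assumption based on the emac_xmit_xdp_frame() signature introduced
 * here; the point is the PRUETH_TX_BUFF_TYPE_XDP_NDO usage.
 */
static int emac_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames, u32 flags)
{
	struct prueth_emac *emac = netdev_priv(dev);
	struct netdev_queue *netif_txq;
	int cpu = smp_processor_id();
	unsigned int q_idx = cpu % emac->tx_ch_num;
	int i, nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	netif_txq = netdev_get_tx_queue(dev, q_idx);
	__netif_tx_lock(netif_txq, cpu);
	for (i = 0; i < n; i++) {
		/* NDO frames are not page_pool backed, so
		 * emac_xmit_xdp_frame() DMA maps them because of the
		 * XDP_NDO buffer type.
		 */
		if (emac_xmit_xdp_frame(emac, frames[i], q_idx,
					PRUETH_TX_BUFF_TYPE_XDP_NDO) !=
		    ICSSG_XDP_TX)
			break;
		nxmit++;
	}
	__netif_tx_unlock(netif_txq);

	return nxmit;
}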