xsk_tx_metadata_to_compl(meta,
&tx_q->tx_skbuff_dma[entry].xsk_meta);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
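For reference, the helper being renamed is stmmac's descriptor-ring index macro. In stmmac.h it is defined along these lines (a sketch, not a verbatim quote); the rename changes only the name, presumably because "NEXT_ENTRY" says what the macro returns, the index after x, while "GET_ENTRY" read as if it returned x itself:

    /* Advance a DMA descriptor ring index by one, wrapping at the ring
     * size. Relies on the ring size being a power of two, so the
     * (size - 1) mask implements the modulo cheaply.
     */
    #define STMMAC_NEXT_ENTRY(x, size)	(((x) + 1) & ((size) - 1))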
u64_stats_update_begin(&txq_stats->napi_syncp);
stmmac_release_tx_desc(priv, p, priv->descriptor_mode);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
}
tx_q->dirty_tx = entry;
return false;
stmmac_set_tx_owner(priv, p);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
return true;
}
while (tmp_len > 0) {
dma_addr_t curr_addr;
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
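Each WARN_ON after an advance checks that the slot the producer just moved onto no longer holds an skb, i.e. the TX ring has not been overrun. The wraparound itself can be sanity-checked in isolation (a hypothetical stand-alone program, not driver code):

    #include <stdio.h>

    #define STMMAC_NEXT_ENTRY(x, size)	(((x) + 1) & ((size) - 1))

    int main(void)
    {
            unsigned int entry = 7;

            /* With an 8-entry ring, advancing from entry 7 wraps to 0. */
            entry = STMMAC_NEXT_ENTRY(entry, 8);
            printf("%u\n", entry);  /* prints 0 */
            return 0;
    }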
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
	  __func__);
unsigned int frag_size = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
desc = stmmac_get_tx_desc(priv, tx_q, entry);
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
+ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
rx_q->dirty_rx = entry;
stmmac_set_queue_rx_tail_ptr(priv, rx_q, queue, rx_q->dirty_rx);
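The ordering in this refill loop matters: dma_wmb() makes the descriptor fields visible before stmmac_set_rx_owner() hands the descriptor to the DMA engine, and only then does the index advance. The shape of the pattern, as a sketch with hypothetical stand-ins (fill_descriptor and give_to_hw are not driver functions):

    while (slots_to_refill > 0) {
            fill_descriptor(p);     /* buffer address, control bits */
            dma_wmb();              /* fields first ...             */
            give_to_hw(p);          /* ... ownership second         */
            entry = STMMAC_NEXT_ENTRY(entry, ring_size);
            slots_to_refill--;
    }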
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
return STMMAC_XDP_TX;
dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
+ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
if (rx_desc) {
break;
/* Prefetch the next RX descriptor */
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (unlikely(status & dma_own))
break;
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
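On the consume side the macro advances cur_rx only after the status check confirms the descriptor is no longer hardware-owned; a descriptor with dma_own still set ends the loop instead. The consumer counterpart of the refill sketch above, with the same kind of hypothetical stand-ins (hw_owns, process_descriptor):

    while (budget-- > 0) {
            if (hw_owns(p))         /* dma_own still set: stop here */
                    break;
            process_descriptor(p);
            entry = STMMAC_NEXT_ENTRY(entry, ring_size);
    }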