return ret;
}
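+/**
+ * stmmac_set_tx_dma_entry - record one TX DMA buffer in tx_skbuff_dma[]
+ * @tx_q: TX queue the descriptor belongs to
+ * @entry: index of the descriptor in the ring
+ * @type: origin of the buffer (SKB, XDP_TX, XDP_NDO or XSK_TX)
+ * @addr: DMA address of the buffer, or 0 when no unmap is needed
+ * @len: length of the buffer in bytes
+ * @map_as_page: true if the buffer must be unmapped with dma_unmap_page()
+ *
+ * Note that last_segment and is_jumbo are cleared here; callers mark the
+ * final descriptor of a frame with stmmac_set_tx_dma_last_segment().
+ */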
+static void stmmac_set_tx_dma_entry(struct stmmac_tx_queue *tx_q,
+ unsigned int entry,
+ enum stmmac_txbuf_type type,
+ dma_addr_t addr, size_t len,
+ bool map_as_page)
+{
+ tx_q->tx_skbuff_dma[entry].buf = addr;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].buf_type = type;
+ tx_q->tx_skbuff_dma[entry].map_as_page = map_as_page;
+ tx_q->tx_skbuff_dma[entry].last_segment = false;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+}
+
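+/**
+ * stmmac_set_tx_skb_dma_entry - record an SKB-backed TX DMA buffer
+ * @tx_q: TX queue the descriptor belongs to
+ * @entry: index of the descriptor in the ring
+ * @addr: DMA address of the buffer
+ * @len: length of the buffer in bytes
+ * @map_as_page: true if the buffer must be unmapped with dma_unmap_page()
+ *
+ * Shorthand for stmmac_set_tx_dma_entry() with STMMAC_TXBUF_T_SKB.
+ */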
+static void stmmac_set_tx_skb_dma_entry(struct stmmac_tx_queue *tx_q,
+ unsigned int entry, dma_addr_t addr,
+ size_t len, bool map_as_page)
+{
+ stmmac_set_tx_dma_entry(tx_q, entry, STMMAC_TXBUF_T_SKB, addr, len,
+ map_as_page);
+}
+
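+/**
+ * stmmac_set_tx_dma_last_segment - mark a descriptor as a frame's last one
+ * @tx_q: TX queue the descriptor belongs to
+ * @entry: index of the descriptor in the ring
+ */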
+static void stmmac_set_tx_dma_last_segment(struct stmmac_tx_queue *tx_q,
+ unsigned int entry)
+{
+ tx_q->tx_skbuff_dma[entry].last_segment = true;
+}
+
/**
* __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
* @priv: driver private structure
p = tx_q->dma_tx + i;
stmmac_clear_desc(priv, p);
- tx_q->tx_skbuff_dma[i].buf = 0;
- tx_q->tx_skbuff_dma[i].map_as_page = false;
- tx_q->tx_skbuff_dma[i].len = 0;
- tx_q->tx_skbuff_dma[i].last_segment = false;
+ stmmac_set_tx_skb_dma_entry(tx_q, i, 0, 0, false);
tx_q->tx_skbuff[i] = NULL;
}
meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
- tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
-
- /* To return XDP buffer to XSK pool, we simple call
+ /* To return XDP buffer to XSK pool, we simply call
 * xsk_tx_completed(), so we don't need to fill up
 * 'buf' and 'xdpf'.
 */
- tx_q->tx_skbuff_dma[entry].buf = 0;
- tx_q->xdpf[entry] = NULL;
- tx_q->tx_skbuff_dma[entry].map_as_page = false;
- tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
- tx_q->tx_skbuff_dma[entry].last_segment = true;
- tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+ stmmac_set_tx_dma_entry(tx_q, entry, STMMAC_TXBUF_T_XSK_TX,
+ 0, xdp_desc.len, false);
+ stmmac_set_tx_dma_last_segment(tx_q, entry);
+ tx_q->xdpf[entry] = NULL;
stmmac_set_desc_addr(priv, tx_desc, dma_addr);
tx_q->tx_skbuff_dma[entry].map_as_page = false;
}
+ /* stmmac_clean_desc3() consults the is_jumbo and last_segment flags
+ * of tx_q->tx_skbuff_dma[tx_q->dirty_tx].
+ */
stmmac_clean_desc3(priv, tx_q, p);
tx_q->tx_skbuff_dma[entry].last_segment = false;
* this DMA buffer right after the DMA engine completely finishes the
* full buffer transmission.
*/
- tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
- tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
- tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
- tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+ stmmac_set_tx_skb_dma_entry(tx_q, tx_q->cur_tx, des, skb_headlen(skb),
+ false);
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
stmmac_tso_allocator(priv, des, skb_frag_size(frag),
(i == nfrags - 1), queue);
- tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
- tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
- tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
- tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+ stmmac_set_tx_skb_dma_entry(tx_q, tx_q->cur_tx, des,
+ skb_frag_size(frag), true);
}
- tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
+ stmmac_set_tx_dma_last_segment(tx_q, tx_q->cur_tx);
/* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[tx_q->cur_tx] = skb;
- tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
/* Manage tx mitigation */
tx_packets = CIRC_CNT(tx_q->cur_tx + 1, first_tx,
if (dma_mapping_error(priv->device, des))
goto dma_map_err; /* should reuse desc w/o issues */
- tx_q->tx_skbuff_dma[entry].buf = des;
-
+ stmmac_set_tx_skb_dma_entry(tx_q, entry, des, len, true);
stmmac_set_desc_addr(priv, desc, des);
- tx_q->tx_skbuff_dma[entry].map_as_page = true;
- tx_q->tx_skbuff_dma[entry].len = len;
- tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
- tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
-
/* Prepare the descriptor and set the own bit too */
stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
priv->mode, 1, last_segment, skb->len);
}
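+ /* stmmac_set_tx_skb_dma_entry() cleared last_segment on each written
+ * entry, so explicitly flag the descriptor that closes the frame.
+ */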
+ stmmac_set_tx_dma_last_segment(tx_q, entry);
+
/* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[entry] = skb;
- tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
/* According to the coalesce parameter the IC bit for the latest
* segment is reset and the timer re-started to clean the tx status.
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- tx_q->tx_skbuff_dma[first_entry].buf = des;
- tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
- tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
+ stmmac_set_tx_skb_dma_entry(tx_q, first_entry, des, nopaged_len,
+ false);
stmmac_set_desc_addr(priv, first, des);
- tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
- tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
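+ /* The helper above cleared last_segment for first_entry; set it again
+ * only when the head descriptor ends the frame.
+ */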
+ if (last_segment)
+ stmmac_set_tx_dma_last_segment(tx_q, first_entry);
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
unsigned int entry = tx_q->cur_tx;
+ enum stmmac_txbuf_type buf_type;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
bool set_ic;
if (dma_mapping_error(priv->device, dma_addr))
return STMMAC_XDP_CONSUMED;
- tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
+ buf_type = STMMAC_TXBUF_T_XDP_NDO;
} else {
struct page *page = virt_to_page(xdpf->data);
dma_sync_single_for_device(priv->device, dma_addr,
xdpf->len, DMA_BIDIRECTIONAL);
- tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
+ buf_type = STMMAC_TXBUF_T_XDP_TX;
}
- tx_q->tx_skbuff_dma[entry].buf = dma_addr;
- tx_q->tx_skbuff_dma[entry].map_as_page = false;
- tx_q->tx_skbuff_dma[entry].len = xdpf->len;
- tx_q->tx_skbuff_dma[entry].last_segment = true;
- tx_q->tx_skbuff_dma[entry].is_jumbo = false;
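+ /* buf_type differs between the ndo_xmit and XDP_TX paths, so the
+ * entry is filled in only after the branch above has chosen it.
+ */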
+ stmmac_set_tx_dma_entry(tx_q, entry, buf_type, dma_addr, xdpf->len,
+ false);
+ stmmac_set_tx_dma_last_segment(tx_q, entry);
tx_q->xdpf[entry] = xdpf;