priv->dma_conf.dma_tx_size);
}
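+/* Return the basic descriptor at @index in @tx_q, taking into account
+ * whether the queue uses extended descriptors, enhanced (TBS)
+ * descriptors or normal descriptors.
+ */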
+static struct dma_desc *stmmac_get_tx_desc(struct stmmac_priv *priv,
+ struct stmmac_tx_queue *tx_q,
+ unsigned int index)
+{
+ if (priv->extend_desc)
+ return &tx_q->dma_etx[index].basic;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ return &tx_q->dma_entx[index].basic;
+ return &tx_q->dma_tx[index];
+}
+
static size_t stmmac_get_rx_desc_size(struct stmmac_priv *priv)
{
if (priv->extend_desc)
pr_info("\tTX Queue %d rings\n", queue);
- if (priv->extend_desc) {
- head_tx = (void *)tx_q->dma_etx;
+ if (priv->extend_desc)
desc_size = sizeof(struct dma_extended_desc);
- } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
- head_tx = (void *)tx_q->dma_entx;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc_size = sizeof(struct dma_edesc);
- } else {
- head_tx = (void *)tx_q->dma_tx;
+ else
desc_size = sizeof(struct dma_desc);
- }
+
+ head_tx = stmmac_get_tx_desc(priv, tx_q, 0);
stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
tx_q->dma_tx_phy, desc_size);
int last = (i == (dma_conf->dma_tx_size - 1));
struct dma_desc *p;
- if (priv->extend_desc)
- p = &tx_q->dma_etx[i].basic;
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- p = &tx_q->dma_entx[i].basic;
- else
- p = &tx_q->dma_tx[i];
-
+ p = stmmac_get_tx_desc(priv, tx_q, i);
stmmac_init_tx_desc(priv, p, priv->mode, last);
}
}
for (i = 0; i < dma_conf->dma_tx_size; i++) {
struct dma_desc *p;
- if (priv->extend_desc)
- p = &((tx_q->dma_etx + i)->basic);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- p = &((tx_q->dma_entx + i)->basic);
- else
- p = tx_q->dma_tx + i;
-
+ p = stmmac_get_tx_desc(priv, tx_q, i);
stmmac_clear_desc(priv, p);
stmmac_set_tx_skb_dma_entry(tx_q, i, 0, 0, false);
continue;
}
- if (likely(priv->extend_desc))
- tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- tx_desc = &tx_q->dma_entx[entry].basic;
- else
- tx_desc = tx_q->dma_tx + entry;
-
+ tx_desc = stmmac_get_tx_desc(priv, tx_q, entry);
dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
skb = NULL;
}
- if (priv->extend_desc)
- p = (struct dma_desc *)(tx_q->dma_etx + entry);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- p = &tx_q->dma_entx[entry].basic;
- else
- p = tx_q->dma_tx + entry;
-
+ p = stmmac_get_tx_desc(priv, tx_q, entry);
status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
/* Check if the descriptor is owned by the DMA */
if (unlikely(status & tx_dma_own))
csum_insertion = !csum_insertion;
}
- if (likely(priv->extend_desc))
- desc = (struct dma_desc *)(tx_q->dma_etx + entry);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- desc = &tx_q->dma_entx[entry].basic;
- else
- desc = tx_q->dma_tx + entry;
-
+ desc = stmmac_get_tx_desc(priv, tx_q, entry);
first = desc;
if (has_vlan)
entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
- if (likely(priv->extend_desc))
- desc = (struct dma_desc *)(tx_q->dma_etx + entry);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- desc = &tx_q->dma_entx[entry].basic;
- else
- desc = tx_q->dma_tx + entry;
+ desc = stmmac_get_tx_desc(priv, tx_q, entry);
des = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
set_ic = false;
if (set_ic) {
- if (likely(priv->extend_desc))
- desc = &tx_q->dma_etx[entry].basic;
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- desc = &tx_q->dma_entx[entry].basic;
- else
- desc = &tx_q->dma_tx[entry];
-
+ desc = stmmac_get_tx_desc(priv, tx_q, entry);
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
}
return STMMAC_XDP_CONSUMED;
}
- if (likely(priv->extend_desc))
- tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
- tx_desc = &tx_q->dma_entx[entry].basic;
- else
- tx_desc = tx_q->dma_tx + entry;
-
+ tx_desc = stmmac_get_tx_desc(priv, tx_q, entry);
if (dma_map) {
dma_addr = dma_map_single(priv->device, xdpf->data,
xdpf->len, DMA_TO_DEVICE);