return &tx_q->dma_tx[index];
}
+/* Program the DMA TX tail pointer of channel @chan to descriptor @index of
+ * @tx_q's ring, and cache the computed address in tx_q->tx_tail_addr.
+ * Consolidates the base + index * desc_size computation previously open-coded
+ * at each call site; callers pass index 0 to reset the tail to the ring base.
+ */
+static void stmmac_set_queue_tx_tail_ptr(struct stmmac_priv *priv,
+ struct stmmac_tx_queue *tx_q,
+ unsigned int chan, unsigned int index)
+{
+ int desc_size;
+
+ /* Descriptor stride depends on the queue's descriptor format
+ * (presumably extended vs. normal descriptors — see
+ * stmmac_get_tx_desc_size).
+ */
+ desc_size = stmmac_get_tx_desc_size(priv, tx_q);
+
+ /* Tail address = ring DMA base + byte offset of descriptor @index. */
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + index * desc_size;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, chan);
+}
+
static size_t stmmac_get_rx_desc_size(struct stmmac_priv *priv)
{
if (priv->extend_desc)
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy;
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
- tx_q->tx_tail_addr, chan);
+ stmmac_set_queue_tx_tail_ptr(priv, tx_q, chan, 0);
}
return ret;
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- int desc_size;
-
- desc_size = stmmac_get_tx_desc_size(priv, tx_q);
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
*/
wmb();
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+ stmmac_set_queue_tx_tail_ptr(priv, tx_q, queue, tx_q->cur_tx);
}
/**
if (tx_q->tbs & STMMAC_TBS_AVAIL)
stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy;
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
- tx_q->tx_tail_addr, tx_q->queue_index);
+ stmmac_set_queue_tx_tail_ptr(priv, tx_q, tx_q->queue_index, 0);
stmmac_start_tx_dma(priv, queue);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy;
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
- tx_q->tx_tail_addr, chan);
+ stmmac_set_queue_tx_tail_ptr(priv, tx_q, chan, 0);
hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}