net: stmmac: Uniformize the use of dma_init_* callbacks
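This blobdiff covers the conversion of the driver's open-coded indirect calls (priv->hw->mac->..., priv->hw->desc->..., priv->hw->dma->..., priv->hw->mode->...) into the stmmac_* helpers pulled in via the new #include "hwif.h". As a rough illustration only (a minimal sketch; the real macro bodies live in hwif.h and may differ in detail), the helpers centralize the NULL check and the callback invocation so every call site collapses to one line:

/* Illustrative sketch of the hwif.h wrapper pattern, not the verbatim header. */
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
	int __result = -EINVAL; \
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \
		(__priv)->hw->__module->__cname((__arg0), ##__args); \
		__result = 0; \
	} \
	__result; \
})

/* Before: explicit guard plus long indirection at each call site:
 *	if (priv->hw->mac->set_eee_mode)
 *		priv->hw->mac->set_eee_mode(priv->hw, ...);
 *
 * After: one helper per callback, as used throughout the hunks below:
 */
#define stmmac_set_eee_mode(__priv, __args...) \
	stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)

With that pattern in place, the Synopsys core-version branches (DWMAC_CORE_4_00, DWMAC_CORE_5_10) and the per-callback existence checks at the call sites become unnecessary, which is what most of the removed lines below are.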
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b65e2d144698db0384bbd1e8ba804bea3623f997..34c1fcc23fb6599c4e252b6ce223b9c346731966 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
 #include <linux/seq_file.h>
 #endif /* CONFIG_DEBUG_FS */
 #include <linux/net_tstamp.h>
+#include <net/pkt_cls.h>
 #include "stmmac_ptp.h"
 #include "stmmac.h"
 #include <linux/reset.h>
 #include <linux/of_mdio.h>
 #include "dwmac1000.h"
+#include "hwif.h"
 
 #define STMMAC_ALIGN(x)        L1_CACHE_ALIGN(x)
 #define        TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
@@ -335,8 +337,8 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
 
        /* Check and enter in LPI mode */
        if (!priv->tx_path_in_lpi_mode)
-               priv->hw->mac->set_eee_mode(priv->hw,
-                                           priv->plat->en_tx_lpi_clockgating);
+               stmmac_set_eee_mode(priv, priv->hw,
+                               priv->plat->en_tx_lpi_clockgating);
 }
 
 /**
@@ -347,7 +349,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
  */
 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 {
-       priv->hw->mac->reset_eee_mode(priv->hw);
+       stmmac_reset_eee_mode(priv, priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
 }
@@ -410,8 +412,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                        if (priv->eee_active) {
                                netdev_dbg(priv->dev, "disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
-                               priv->hw->mac->set_eee_timer(priv->hw, 0,
-                                                            tx_lpi_timer);
+                               stmmac_set_eee_timer(priv, priv->hw, 0,
+                                               tx_lpi_timer);
                        }
                        priv->eee_active = 0;
                        spin_unlock_irqrestore(&priv->lock, flags);
@@ -426,12 +428,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));
 
-                       priv->hw->mac->set_eee_timer(priv->hw,
-                                                    STMMAC_DEFAULT_LIT_LS,
-                                                    tx_lpi_timer);
+                       stmmac_set_eee_timer(priv, priv->hw,
+                                       STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
                }
                /* Set HW EEE according to the speed */
-               priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
+               stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
 
                ret = true;
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -464,9 +465,9 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                return;
 
        /* check tx tstamp status */
-       if (priv->hw->desc->get_tx_timestamp_status(p)) {
+       if (stmmac_get_tx_timestamp_status(priv, p)) {
                /* get the valid tstamp */
-               ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
+               stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
 
                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -502,8 +503,8 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                desc = np;
 
        /* Check if timestamp is available */
-       if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
-               ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+       if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
+               stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -707,18 +708,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 
        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
-               priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
+               stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
        else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
-               priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
+               stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
 
                /* program Sub Second Increment reg */
-               sec_inc = priv->hw->ptp->config_sub_second_increment(
-                       priv->ptpaddr, priv->plat->clk_ptp_rate,
-                       priv->plat->has_gmac4);
+               stmmac_config_sub_second_increment(priv,
+                               priv->ptpaddr, priv->plat->clk_ptp_rate,
+                               priv->plat->has_gmac4, &sec_inc);
                temp = div_u64(1000000000ULL, sec_inc);
 
                /* calculate default added value:
@@ -728,15 +729,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                 */
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
-               priv->hw->ptp->config_addend(priv->ptpaddr,
-                                            priv->default_addend);
+               stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
 
                /* initialize system time */
                ktime_get_real_ts64(&now);
 
                /* lower 32 bits of tv_sec are safe until y2106 */
-               priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
-                                           now.tv_nsec);
+               stmmac_init_systime(priv, priv->ptpaddr,
+                               (u32)now.tv_sec, now.tv_nsec);
        }
 
        return copy_to_user(ifr->ifr_data, &config,
@@ -770,7 +770,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
                netdev_info(priv->dev,
                            "IEEE 1588-2008 Advanced Timestamp supported\n");
 
-       priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;
 
@@ -795,8 +794,8 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
 {
        u32 tx_cnt = priv->plat->tx_queues_to_use;
 
-       priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
-                                priv->pause, tx_cnt);
+       stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
+                       priv->pause, tx_cnt);
 }
 
 /**
@@ -1008,7 +1007,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
                        head_rx = (void *)rx_q->dma_rx;
 
                /* Display RX ring */
-               priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+               stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
        }
 }
 
@@ -1029,7 +1028,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
                else
                        head_tx = (void *)tx_q->dma_tx;
 
-               priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+               stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
        }
 }
 
@@ -1073,13 +1072,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
        /* Clear the RX descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
-                       priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
-                                                    priv->use_riwt, priv->mode,
-                                                    (i == DMA_RX_SIZE - 1));
+                       stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
+                                       priv->use_riwt, priv->mode,
+                                       (i == DMA_RX_SIZE - 1));
                else
-                       priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
-                                                    priv->use_riwt, priv->mode,
-                                                    (i == DMA_RX_SIZE - 1));
+                       stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
+                                       priv->use_riwt, priv->mode,
+                                       (i == DMA_RX_SIZE - 1));
 }
 
 /**
@@ -1097,13 +1096,11 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
        /* Clear the TX descriptors */
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
-                       priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
-                                                    priv->mode,
-                                                    (i == DMA_TX_SIZE - 1));
+                       stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
+                                       priv->mode, (i == DMA_TX_SIZE - 1));
                else
-                       priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
-                                                    priv->mode,
-                                                    (i == DMA_TX_SIZE - 1));
+                       stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
+                                       priv->mode, (i == DMA_TX_SIZE - 1));
 }
 
 /**
@@ -1159,14 +1156,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                return -EINVAL;
        }
 
-       if (priv->synopsys_id >= DWMAC_CORE_4_00)
-               p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
-       else
-               p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
+       stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
 
-       if ((priv->hw->mode->init_desc3) &&
-           (priv->dma_buf_sz == BUF_SIZE_16KiB))
-               priv->hw->mode->init_desc3(p);
+       if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+               stmmac_init_desc3(priv, p);
 
        return 0;
 }
@@ -1232,13 +1225,14 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_count = priv->plat->rx_queues_to_use;
-       unsigned int bfsize = 0;
        int ret = -ENOMEM;
+       int bfsize = 0;
        int queue;
        int i;
 
-       if (priv->hw->mode->set_16kib_bfsize)
-               bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
+       bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+       if (bfsize < 0)
+               bfsize = 0;
 
        if (bfsize < BUF_SIZE_16KiB)
                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1282,13 +1276,11 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
                /* Setup the chained descriptor addresses */
                if (priv->mode == STMMAC_CHAIN_MODE) {
                        if (priv->extend_desc)
-                               priv->hw->mode->init(rx_q->dma_erx,
-                                                    rx_q->dma_rx_phy,
-                                                    DMA_RX_SIZE, 1);
+                               stmmac_mode_init(priv, rx_q->dma_erx,
+                                               rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
                        else
-                               priv->hw->mode->init(rx_q->dma_rx,
-                                                    rx_q->dma_rx_phy,
-                                                    DMA_RX_SIZE, 0);
+                               stmmac_mode_init(priv, rx_q->dma_rx,
+                                               rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
                }
        }
 
@@ -1335,13 +1327,11 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
                /* Setup the chained descriptor addresses */
                if (priv->mode == STMMAC_CHAIN_MODE) {
                        if (priv->extend_desc)
-                               priv->hw->mode->init(tx_q->dma_etx,
-                                                    tx_q->dma_tx_phy,
-                                                    DMA_TX_SIZE, 1);
+                               stmmac_mode_init(priv, tx_q->dma_etx,
+                                               tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
                        else
-                               priv->hw->mode->init(tx_q->dma_tx,
-                                                    tx_q->dma_tx_phy,
-                                                    DMA_TX_SIZE, 0);
+                               stmmac_mode_init(priv, tx_q->dma_tx,
+                                               tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
                }
 
                for (i = 0; i < DMA_TX_SIZE; i++) {
@@ -1351,14 +1341,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
                        else
                                p = tx_q->dma_tx + i;
 
-                       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-                               p->des0 = 0;
-                               p->des1 = 0;
-                               p->des2 = 0;
-                               p->des3 = 0;
-                       } else {
-                               p->des2 = 0;
-                       }
+                       stmmac_clear_desc(priv, p);
 
                        tx_q->tx_skbuff_dma[i].buf = 0;
                        tx_q->tx_skbuff_dma[i].map_as_page = false;
@@ -1664,7 +1647,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
 
        for (queue = 0; queue < rx_queues_count; queue++) {
                mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
-               priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
+               stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
        }
 }
 
@@ -1678,7 +1661,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
 {
        netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
-       priv->hw->dma->start_rx(priv->ioaddr, chan);
+       stmmac_start_rx(priv, priv->ioaddr, chan);
 }
 
 /**
@@ -1691,7 +1674,7 @@ static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
 {
        netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
-       priv->hw->dma->start_tx(priv->ioaddr, chan);
+       stmmac_start_tx(priv, priv->ioaddr, chan);
 }
 
 /**
@@ -1704,7 +1687,7 @@ static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
 {
        netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
-       priv->hw->dma->stop_rx(priv->ioaddr, chan);
+       stmmac_stop_rx(priv, priv->ioaddr, chan);
 }
 
 /**
@@ -1717,7 +1700,7 @@ static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
 {
        netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
-       priv->hw->dma->stop_tx(priv->ioaddr, chan);
+       stmmac_stop_tx(priv, priv->ioaddr, chan);
 }
 
 /**
@@ -1804,23 +1787,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
        }
 
        /* configure all channels */
-       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-               for (chan = 0; chan < rx_channels_count; chan++) {
-                       qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+       for (chan = 0; chan < rx_channels_count; chan++) {
+               qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
 
-                       priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
-                                                  rxfifosz, qmode);
-               }
+               stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+                               rxfifosz, qmode);
+       }
 
-               for (chan = 0; chan < tx_channels_count; chan++) {
-                       qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+       for (chan = 0; chan < tx_channels_count; chan++) {
+               qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
 
-                       priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
-                                                  txfifosz, qmode);
-               }
-       } else {
-               priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
-                                       rxfifosz);
+               stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+                               txfifosz, qmode);
        }
 }
 
@@ -1851,9 +1829,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
                else
                        p = tx_q->dma_tx + entry;
 
-               status = priv->hw->desc->tx_status(&priv->dev->stats,
-                                                     &priv->xstats, p,
-                                                     priv->ioaddr);
+               status = stmmac_tx_status(priv, &priv->dev->stats,
+                               &priv->xstats, p, priv->ioaddr);
                /* Check if the descriptor is owned by the DMA */
                if (unlikely(status & tx_dma_own))
                        break;
@@ -1891,8 +1868,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
                        tx_q->tx_skbuff_dma[entry].map_as_page = false;
                }
 
-               if (priv->hw->mode->clean_desc3)
-                       priv->hw->mode->clean_desc3(tx_q, p);
+               stmmac_clean_desc3(priv, tx_q, p);
 
                tx_q->tx_skbuff_dma[entry].last_segment = false;
                tx_q->tx_skbuff_dma[entry].is_jumbo = false;
@@ -1904,7 +1880,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
                        tx_q->tx_skbuff[entry] = NULL;
                }
 
-               priv->hw->desc->release_tx_desc(p, priv->mode);
+               stmmac_release_tx_desc(priv, p, priv->mode);
 
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
        }
@@ -1929,16 +1905,6 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
        netif_tx_unlock(priv->dev);
 }
 
-static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
-{
-       priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
-}
-
-static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
-{
-       priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
-}
-
 /**
  * stmmac_tx_err - to manage the tx error
  * @priv: driver private structure
@@ -1957,13 +1923,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
        dma_free_tx_skbufs(priv, chan);
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
-                       priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
-                                                    priv->mode,
-                                                    (i == DMA_TX_SIZE - 1));
+                       stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
+                                       priv->mode, (i == DMA_TX_SIZE - 1));
                else
-                       priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
-                                                    priv->mode,
-                                                    (i == DMA_TX_SIZE - 1));
+                       stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
+                                       priv->mode, (i == DMA_TX_SIZE - 1));
        tx_q->dirty_tx = 0;
        tx_q->cur_tx = 0;
        tx_q->mss = 0;
@@ -2003,31 +1967,22 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
        rxfifosz /= rx_channels_count;
        txfifosz /= tx_channels_count;
 
-       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-               priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
-                                          rxfifosz, rxqmode);
-               priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
-                                          txfifosz, txqmode);
-       } else {
-               priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
-                                       rxfifosz);
-       }
+       stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
+       stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
 }
 
 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
 {
-       bool ret = false;
-
-       /* Safety features are only available in cores >= 5.10 */
-       if (priv->synopsys_id < DWMAC_CORE_5_10)
-               return ret;
-       if (priv->hw->mac->safety_feat_irq_status)
-               ret = priv->hw->mac->safety_feat_irq_status(priv->dev,
-                               priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
+       int ret;
 
-       if (ret)
+       ret = stmmac_safety_feat_irq_status(priv, priv->dev,
+                       priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
+       if (ret && (ret != -EINVAL)) {
                stmmac_global_err(priv);
-       return ret;
+               return true;
+       }
+
+       return false;
 }
 
 /**
@@ -2045,7 +2000,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
                                tx_channel_count : rx_channel_count;
        u32 chan;
        bool poll_scheduled = false;
-       int status[channels_to_check];
+       int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+
+       /* Make sure we never check beyond our status buffer. */
+       if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
+               channels_to_check = ARRAY_SIZE(status);
 
        /* Each DMA channel can be used for rx and tx simultaneously, yet
         * napi_struct is embedded in struct stmmac_rx_queue rather than in a
@@ -2054,16 +2013,15 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
         * all tx queues rather than just a single tx queue.
         */
        for (chan = 0; chan < channels_to_check; chan++)
-               status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
-                                                           &priv->xstats,
-                                                           chan);
+               status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+                               &priv->xstats, chan);
 
        for (chan = 0; chan < rx_channel_count; chan++) {
                if (likely(status[chan] & handle_rx)) {
                        struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
 
                        if (likely(napi_schedule_prep(&rx_q->napi))) {
-                               stmmac_disable_dma_irq(priv, chan);
+                               stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
                                __napi_schedule(&rx_q->napi);
                                poll_scheduled = true;
                        }
@@ -2084,7 +2042,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
                                        &priv->rx_queue[0];
 
                                if (likely(napi_schedule_prep(&rx_q->napi))) {
-                                       stmmac_disable_dma_irq(priv, chan);
+                                       stmmac_disable_dma_irq(priv,
+                                                       priv->ioaddr, chan);
                                        __napi_schedule(&rx_q->napi);
                                }
                                break;
@@ -2126,14 +2085,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-               priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
-               priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
-       } else {
-               priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
-               priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
-       }
-
        dwmac_mmc_intr_all_mask(priv->mmcaddr);
 
        if (priv->dma_cap.rmon) {
@@ -2143,32 +2094,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
                netdev_info(priv->dev, "No MAC Management Counters available\n");
 }
 
-/**
- * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
- * @priv: driver private structure
- * Description: select the Enhanced/Alternate or Normal descriptors.
- * In case of Enhanced/Alternate, it checks if the extended descriptors are
- * supported by the HW capability register.
- */
-static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
-{
-       if (priv->plat->enh_desc) {
-               dev_info(priv->device, "Enhanced/Alternate descriptors\n");
-
-               /* GMAC older than 3.50 has no extended descriptors */
-               if (priv->synopsys_id >= DWMAC_CORE_3_50) {
-                       dev_info(priv->device, "Enabled extended descriptors\n");
-                       priv->extend_desc = 1;
-               } else
-                       dev_warn(priv->device, "Extended descriptors not supported\n");
-
-               priv->hw->desc = &enh_desc_ops;
-       } else {
-               dev_info(priv->device, "Normal descriptors\n");
-               priv->hw->desc = &ndesc_ops;
-       }
-}
-
 /**
  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
  * @priv: driver private structure
@@ -2180,15 +2105,7 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
  */
 static int stmmac_get_hw_features(struct stmmac_priv *priv)
 {
-       u32 ret = 0;
-
-       if (priv->hw->dma->get_hw_feature) {
-               priv->hw->dma->get_hw_feature(priv->ioaddr,
-                                             &priv->dma_cap);
-               ret = 1;
-       }
-
-       return ret;
+       return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
 }
 
 /**
@@ -2201,8 +2118,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
 {
        if (!is_valid_ether_addr(priv->dev->dev_addr)) {
-               priv->hw->mac->get_umac_addr(priv->hw,
-                                            priv->dev->dev_addr, 0);
+               stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
                if (!is_valid_ether_addr(priv->dev->dev_addr))
                        eth_hw_addr_random(priv->dev);
                netdev_info(priv->dev, "device MAC address %pM\n",
@@ -2222,10 +2138,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 {
        u32 rx_channels_count = priv->plat->rx_queues_to_use;
        u32 tx_channels_count = priv->plat->tx_queues_to_use;
+       u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
        struct stmmac_rx_queue *rx_q;
        struct stmmac_tx_queue *tx_q;
-       u32 dummy_dma_rx_phy = 0;
-       u32 dummy_dma_tx_phy = 0;
        u32 chan = 0;
        int atds = 0;
        int ret = 0;
@@ -2238,59 +2153,47 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
        if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
                atds = 1;
 
-       ret = priv->hw->dma->reset(priv->ioaddr);
+       ret = stmmac_reset(priv, priv->ioaddr);
        if (ret) {
                dev_err(priv->device, "Failed to reset the dma\n");
                return ret;
        }
 
-       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-               /* DMA Configuration */
-               priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
-                                   dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
-
-               /* DMA RX Channel Configuration */
-               for (chan = 0; chan < rx_channels_count; chan++) {
-                       rx_q = &priv->rx_queue[chan];
-
-                       priv->hw->dma->init_rx_chan(priv->ioaddr,
-                                                   priv->plat->dma_cfg,
-                                                   rx_q->dma_rx_phy, chan);
-
-                       rx_q->rx_tail_addr = rx_q->dma_rx_phy +
-                                   (DMA_RX_SIZE * sizeof(struct dma_desc));
-                       priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
-                                                      rx_q->rx_tail_addr,
-                                                      chan);
-               }
-
-               /* DMA TX Channel Configuration */
-               for (chan = 0; chan < tx_channels_count; chan++) {
-                       tx_q = &priv->tx_queue[chan];
+       /* DMA RX Channel Configuration */
+       for (chan = 0; chan < rx_channels_count; chan++) {
+               rx_q = &priv->rx_queue[chan];
 
-                       priv->hw->dma->init_chan(priv->ioaddr,
-                                                priv->plat->dma_cfg,
-                                                chan);
+               stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                                   rx_q->dma_rx_phy, chan);
 
-                       priv->hw->dma->init_tx_chan(priv->ioaddr,
-                                                   priv->plat->dma_cfg,
-                                                   tx_q->dma_tx_phy, chan);
+               rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+                           (DMA_RX_SIZE * sizeof(struct dma_desc));
+               stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+                                      rx_q->rx_tail_addr, chan);
+       }
 
-                       tx_q->tx_tail_addr = tx_q->dma_tx_phy +
-                                   (DMA_TX_SIZE * sizeof(struct dma_desc));
-                       priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
-                                                      tx_q->tx_tail_addr,
-                                                      chan);
-               }
-       } else {
-               rx_q = &priv->rx_queue[chan];
+       /* DMA TX Channel Configuration */
+       for (chan = 0; chan < tx_channels_count; chan++) {
                tx_q = &priv->tx_queue[chan];
-               priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
-                                   tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
+
+               stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                                   tx_q->dma_tx_phy, chan);
+
+               tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+                           (DMA_TX_SIZE * sizeof(struct dma_desc));
+               stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+                                      tx_q->tx_tail_addr, chan);
        }
 
-       if (priv->plat->axi && priv->hw->dma->axi)
-               priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
+       /* DMA CSR Channel configuration */
+       for (chan = 0; chan < dma_csr_ch; chan++)
+               stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+       /* DMA Configuration */
+       stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+       if (priv->plat->axi)
+               stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
 
        return ret;
 }
@@ -2336,18 +2239,14 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
        u32 chan;
 
        /* set TX ring length */
-       if (priv->hw->dma->set_tx_ring_len) {
-               for (chan = 0; chan < tx_channels_count; chan++)
-                       priv->hw->dma->set_tx_ring_len(priv->ioaddr,
-                                                      (DMA_TX_SIZE - 1), chan);
-       }
+       for (chan = 0; chan < tx_channels_count; chan++)
+               stmmac_set_tx_ring_len(priv, priv->ioaddr,
+                               (DMA_TX_SIZE - 1), chan);
 
        /* set RX ring length */
-       if (priv->hw->dma->set_rx_ring_len) {
-               for (chan = 0; chan < rx_channels_count; chan++)
-                       priv->hw->dma->set_rx_ring_len(priv->ioaddr,
-                                                      (DMA_RX_SIZE - 1), chan);
-       }
+       for (chan = 0; chan < rx_channels_count; chan++)
+               stmmac_set_rx_ring_len(priv, priv->ioaddr,
+                               (DMA_RX_SIZE - 1), chan);
 }
 
 /**
@@ -2363,7 +2262,7 @@ static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
 
        for (queue = 0; queue < tx_queues_count; queue++) {
                weight = priv->plat->tx_queues_cfg[queue].weight;
-               priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
+               stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
        }
 }
 
@@ -2384,7 +2283,7 @@ static void stmmac_configure_cbs(struct stmmac_priv *priv)
                if (mode_to_use == MTL_QUEUE_DCB)
                        continue;
 
-               priv->hw->mac->config_cbs(priv->hw,
+               stmmac_config_cbs(priv, priv->hw,
                                priv->plat->tx_queues_cfg[queue].send_slope,
                                priv->plat->tx_queues_cfg[queue].idle_slope,
                                priv->plat->tx_queues_cfg[queue].high_credit,
@@ -2406,7 +2305,7 @@ static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
 
        for (queue = 0; queue < rx_queues_count; queue++) {
                chan = priv->plat->rx_queues_cfg[queue].chan;
-               priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
+               stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
        }
 }
 
@@ -2426,7 +2325,7 @@ static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
                        continue;
 
                prio = priv->plat->rx_queues_cfg[queue].prio;
-               priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
+               stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
        }
 }
 
@@ -2446,7 +2345,7 @@ static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
                        continue;
 
                prio = priv->plat->tx_queues_cfg[queue].prio;
-               priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
+               stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
        }
 }
 
@@ -2467,7 +2366,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
                        continue;
 
                packet = priv->plat->rx_queues_cfg[queue].pkt_route;
-               priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
+               stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
        }
 }
 
@@ -2481,50 +2380,47 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
        u32 rx_queues_count = priv->plat->rx_queues_to_use;
        u32 tx_queues_count = priv->plat->tx_queues_to_use;
 
-       if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
+       if (tx_queues_count > 1)
                stmmac_set_tx_queue_weight(priv);
 
        /* Configure MTL RX algorithms */
-       if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
-               priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
-                                               priv->plat->rx_sched_algorithm);
+       if (rx_queues_count > 1)
+               stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
+                               priv->plat->rx_sched_algorithm);
 
        /* Configure MTL TX algorithms */
-       if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
-               priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
-                                               priv->plat->tx_sched_algorithm);
+       if (tx_queues_count > 1)
+               stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
+                               priv->plat->tx_sched_algorithm);
 
        /* Configure CBS in AVB TX queues */
-       if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
+       if (tx_queues_count > 1)
                stmmac_configure_cbs(priv);
 
        /* Map RX MTL to DMA channels */
-       if (priv->hw->mac->map_mtl_to_dma)
-               stmmac_rx_queue_dma_chan_map(priv);
+       stmmac_rx_queue_dma_chan_map(priv);
 
        /* Enable MAC RX Queues */
-       if (priv->hw->mac->rx_queue_enable)
-               stmmac_mac_enable_rx_queues(priv);
+       stmmac_mac_enable_rx_queues(priv);
 
        /* Set RX priorities */
-       if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
+       if (rx_queues_count > 1)
                stmmac_mac_config_rx_queues_prio(priv);
 
        /* Set TX priorities */
-       if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
+       if (tx_queues_count > 1)
                stmmac_mac_config_tx_queues_prio(priv);
 
        /* Set RX routing */
-       if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
+       if (rx_queues_count > 1)
                stmmac_mac_config_rx_queues_routing(priv);
 }
 
 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
 {
-       if (priv->hw->mac->safety_feat_config && priv->dma_cap.asp) {
+       if (priv->dma_cap.asp) {
                netdev_info(priv->dev, "Enabling Safety Features\n");
-               priv->hw->mac->safety_feat_config(priv->ioaddr,
-                               priv->dma_cap.asp);
+               stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
        } else {
                netdev_info(priv->dev, "No Safety Features support found\n");
        }
@@ -2559,7 +2455,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        }
 
        /* Copy the MAC addr into the HW  */
-       priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
+       stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
 
        /* PS and related bits will be programmed according to the speed */
        if (priv->hw->pcs) {
@@ -2575,17 +2471,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        }
 
        /* Initialize the MAC Core */
-       priv->hw->mac->core_init(priv->hw, dev);
+       stmmac_core_init(priv, priv->hw, dev);
 
        /* Initialize MTL*/
-       if (priv->synopsys_id >= DWMAC_CORE_4_00)
-               stmmac_mtl_configuration(priv);
+       stmmac_mtl_configuration(priv);
 
        /* Initialize Safety Features */
-       if (priv->synopsys_id >= DWMAC_CORE_5_10)
-               stmmac_safety_feat_configuration(priv);
+       stmmac_safety_feat_configuration(priv);
 
-       ret = priv->hw->mac->rx_ipc(priv->hw);
+       ret = stmmac_rx_ipc(priv, priv->hw);
        if (!ret) {
                netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
                priv->plat->rx_coe = STMMAC_RX_COE_NONE;
@@ -2593,7 +2487,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        }
 
        /* Enable the MAC Rx/Tx */
-       priv->hw->mac->set_mac(priv->ioaddr, true);
+       stmmac_mac_set(priv, priv->ioaddr, true);
 
        /* Set the HW DMA mode and the COE */
        stmmac_dma_operation_mode(priv);
@@ -2623,13 +2517,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 
        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
-       if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
-               priv->rx_riwt = MAX_DMA_RIWT;
-               priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
+       if (priv->use_riwt) {
+               ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
+               if (!ret)
+                       priv->rx_riwt = MAX_DMA_RIWT;
        }
 
-       if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
-               priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
+       if (priv->hw->pcs)
+               stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
 
        /* set TX and RX rings length */
        stmmac_set_rings_length(priv);
@@ -2637,7 +2532,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        /* Enable TSO */
        if (priv->tso) {
                for (chan = 0; chan < tx_cnt; chan++)
-                       priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
+                       stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
        }
 
        return 0;
@@ -2808,7 +2703,7 @@ static int stmmac_release(struct net_device *dev)
        free_dma_desc_resources(priv);
 
        /* Disable the MAC Rx/Tx */
-       priv->hw->mac->set_mac(priv->ioaddr, false);
+       stmmac_mac_set(priv, priv->ioaddr, false);
 
        netif_carrier_off(dev);
 
@@ -2851,10 +2746,10 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
                buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
                            TSO_MAX_BUFF_SIZE : tmp_len;
 
-               priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
-                       0, 1,
-                       (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
-                       0, 0);
+               stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
+                               0, 1,
+                               (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
+                               0, 0);
 
                tmp_len -= TSO_MAX_BUFF_SIZE;
        }
@@ -2926,7 +2821,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        /* set new MSS value if needed */
        if (mss != tx_q->mss) {
                mss_desc = tx_q->dma_tx + tx_q->cur_tx;
-               priv->hw->desc->set_mss(mss_desc, mss);
+               stmmac_set_mss(priv, mss_desc, mss);
                tx_q->mss = mss;
                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
@@ -3012,7 +2907,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                          STMMAC_COAL_TIMER(priv->tx_coal_timer));
        } else {
                priv->tx_count_frames = 0;
-               priv->hw->desc->set_tx_ic(desc);
+               stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
        }
 
@@ -3022,11 +2917,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                     priv->hwts_tx_en)) {
                /* declare that device is doing timestamping */
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-               priv->hw->desc->enable_tx_timestamp(first);
+               stmmac_enable_tx_timestamp(priv, first);
        }
 
        /* Complete the first descriptor before granting the DMA */
-       priv->hw->desc->prepare_tso_tx_desc(first, 1,
+       stmmac_prepare_tso_tx_desc(priv, first, 1,
                        proto_hdr_len,
                        pay_len,
                        1, tx_q->tx_skbuff_dma[first_entry].last_segment,
@@ -3040,7 +2935,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                 * sure that MSS's own bit is the last thing written.
                 */
                dma_wmb();
-               priv->hw->desc->set_tx_owner(mss_desc);
+               stmmac_set_tx_owner(priv, mss_desc);
        }
 
        /* The own bit must be the latest setting done when prepare the
@@ -3054,8 +2949,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                        __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
                        tx_q->cur_tx, first, nfrags);
 
-               priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
-                                            0);
+               stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
 
                pr_info(">>> frame to be transmitted: ");
                print_pkt(skb->data, skb_headlen(skb));
@@ -3063,8 +2957,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
-       priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
-                                      queue);
+       stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
        return NETDEV_TX_OK;
 
@@ -3136,12 +3029,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        enh_desc = priv->plat->enh_desc;
        /* To program the descriptors according to the size of the frame */
        if (enh_desc)
-               is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
+               is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
 
-       if (unlikely(is_jumbo) && likely(priv->synopsys_id <
-                                        DWMAC_CORE_4_00)) {
-               entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
-               if (unlikely(entry < 0))
+       if (unlikely(is_jumbo)) {
+               entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
+               if (unlikely(entry < 0) && (entry != -EINVAL))
                        goto dma_map_err;
        }
 
@@ -3164,19 +3056,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                        goto dma_map_err; /* should reuse desc w/o issues */
 
                tx_q->tx_skbuff_dma[entry].buf = des;
-               if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
-                       desc->des0 = cpu_to_le32(des);
-               else
-                       desc->des2 = cpu_to_le32(des);
+
+               stmmac_set_desc_addr(priv, desc, des);
 
                tx_q->tx_skbuff_dma[entry].map_as_page = true;
                tx_q->tx_skbuff_dma[entry].len = len;
                tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
 
                /* Prepare the descriptor and set the own bit too */
-               priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
-                                               priv->mode, 1, last_segment,
-                                               skb->len);
+               stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
+                               priv->mode, 1, last_segment, skb->len);
        }
 
        /* Only the last descriptor gets to point to the skb. */
@@ -3203,7 +3092,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                else
                        tx_head = (void *)tx_q->dma_tx;
 
-               priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
+               stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
 
                netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
                print_pkt(skb->data, skb->len);
@@ -3223,13 +3112,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         * element in case of no SG.
         */
        priv->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+       if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
+           !priv->tx_timer_armed) {
                mod_timer(&priv->txtimer,
                          STMMAC_COAL_TIMER(priv->tx_coal_timer));
+               priv->tx_timer_armed = true;
        } else {
                priv->tx_count_frames = 0;
-               priv->hw->desc->set_tx_ic(desc);
+               stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
+               priv->tx_timer_armed = false;
        }
 
        skb_tx_timestamp(skb);
@@ -3247,10 +3139,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                        goto dma_map_err;
 
                tx_q->tx_skbuff_dma[first_entry].buf = des;
-               if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
-                       first->des0 = cpu_to_le32(des);
-               else
-                       first->des2 = cpu_to_le32(des);
+
+               stmmac_set_desc_addr(priv, first, des);
 
                tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
                tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -3259,13 +3149,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                             priv->hwts_tx_en)) {
                        /* declare that device is doing timestamping */
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-                       priv->hw->desc->enable_tx_timestamp(first);
+                       stmmac_enable_tx_timestamp(priv, first);
                }
 
                /* Prepare the first descriptor setting the OWN bit too */
-               priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
-                                               csum_insertion, priv->mode, 1,
-                                               last_segment, skb->len);
+               stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
+                               csum_insertion, priv->mode, 1, last_segment,
+                               skb->len);
 
                /* The own bit must be the latest setting done when prepare the
                 * descriptor and then barrier is needed to make sure that
@@ -3277,10 +3167,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
        if (priv->synopsys_id < DWMAC_CORE_4_00)
-               priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+               stmmac_enable_dma_transmission(priv, priv->ioaddr);
        else
-               priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
-                                              queue);
+               stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
+                               queue);
 
        return NETDEV_TX_OK;
 
@@ -3364,14 +3254,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
                                break;
                        }
 
-                       if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
-                               p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
-                               p->des1 = 0;
-                       } else {
-                               p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
-                       }
-                       if (priv->hw->mode->refill_desc3)
-                               priv->hw->mode->refill_desc3(rx_q, p);
+                       stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
+                       stmmac_refill_desc3(priv, rx_q, p);
 
                        if (rx_q->rx_zeroc_thresh > 0)
                                rx_q->rx_zeroc_thresh--;
@@ -3382,9 +3266,9 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
                dma_wmb();
 
                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
-                       priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+                       stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0);
                else
-                       priv->hw->desc->set_rx_owner(p);
+                       stmmac_set_rx_owner(priv, p);
 
                dma_wmb();
 
@@ -3418,7 +3302,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                else
                        rx_head = (void *)rx_q->dma_rx;
 
-               priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
+               stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
        }
        while (count < limit) {
                int status;
@@ -3431,8 +3315,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        p = rx_q->dma_rx + entry;
 
                /* read the status of the incoming frame */
-               status = priv->hw->desc->rx_status(&priv->dev->stats,
-                                                  &priv->xstats, p);
+               status = stmmac_rx_status(priv, &priv->dev->stats,
+                               &priv->xstats, p);
                /* check if managed by the DMA otherwise go ahead */
                if (unlikely(status & dma_own))
                        break;
@@ -3449,11 +3333,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 
                prefetch(np);
 
-               if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
-                       priv->hw->desc->rx_extended_status(&priv->dev->stats,
-                                                          &priv->xstats,
-                                                          rx_q->dma_erx +
-                                                          entry);
+               if (priv->extend_desc)
+                       stmmac_rx_extended_status(priv, &priv->dev->stats,
+                                       &priv->xstats, rx_q->dma_erx + entry);
                if (unlikely(status == discard_frame)) {
                        priv->dev->stats.rx_errors++;
                        if (priv->hwts_rx_en && !priv->extend_desc) {
@@ -3479,7 +3361,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        else
                                des = le32_to_cpu(p->des2);
 
-                       frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
+                       frame_len = stmmac_get_rx_frame_len(priv, p, coe);
 
                        /*  If frame length is greater than skb buffer size
                         *  (preallocated during init) then the packet is
@@ -3621,7 +3503,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
        work_done = stmmac_rx(priv, budget, rx_q->queue_index);
        if (work_done < budget) {
                napi_complete_done(napi, work_done);
-               stmmac_enable_dma_irq(priv, chan);
+               stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
        }
        return work_done;
 }
@@ -3654,7 +3536,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       priv->hw->mac->set_filter(priv->hw, dev);
+       stmmac_set_filter(priv, priv->hw, dev);
 }
 
 /**
@@ -3727,7 +3609,7 @@ static int stmmac_set_features(struct net_device *netdev,
        /* No check needed because rx_coe has been set before and it will be
         * fixed in case of issue.
         */
-       priv->hw->mac->rx_ipc(priv->hw);
+       stmmac_rx_ipc(priv, priv->hw);
 
        return 0;
 }
@@ -3771,8 +3653,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 
        /* To handle GMAC own interrupts */
        if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
-               int status = priv->hw->mac->host_irq_status(priv->hw,
-                                                           &priv->xstats);
+               int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
 
                if (unlikely(status)) {
                        /* For LPI we need to save the tx status */
@@ -3787,15 +3668,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
                                struct stmmac_rx_queue *rx_q =
                                &priv->rx_queue[queue];
 
-                               status |=
-                               priv->hw->mac->host_mtl_irq_status(priv->hw,
-                                                                  queue);
+                               status |= stmmac_host_mtl_irq_status(priv,
+                                               priv->hw, queue);
 
-                               if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
-                                   priv->hw->dma->set_rx_tail_ptr)
-                                       priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
-                                                               rx_q->rx_tail_addr,
-                                                               queue);
+                               if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+                                       stmmac_set_rx_tail_ptr(priv,
+                                                       priv->ioaddr,
+                                                       rx_q->rx_tail_addr,
+                                                       queue);
                        }
                }
 
@@ -3860,6 +3740,58 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        return ret;
 }
 
+static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+                                   void *cb_priv)
+{
+       struct stmmac_priv *priv = cb_priv;
+       int ret = -EOPNOTSUPP;
+
+       stmmac_disable_all_queues(priv);
+
+       switch (type) {
+       case TC_SETUP_CLSU32:
+               if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
+                       ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
+               break;
+       default:
+               break;
+       }
+
+       stmmac_enable_all_queues(priv);
+       return ret;
+}
+
+static int stmmac_setup_tc_block(struct stmmac_priv *priv,
+                                struct tc_block_offload *f)
+{
+       if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+               return -EOPNOTSUPP;
+
+       switch (f->command) {
+       case TC_BLOCK_BIND:
+               return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
+                               priv, priv);
+       case TC_BLOCK_UNBIND:
+               tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+                          void *type_data)
+{
+       struct stmmac_priv *priv = netdev_priv(ndev);
+
+       switch (type) {
+       case TC_SETUP_BLOCK:
+               return stmmac_setup_tc_block(priv, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
 {
        struct stmmac_priv *priv = netdev_priv(ndev);
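
The stmmac_setup_tc*() functions added above follow the usual tc block-offload flow: a TC_SETUP_BLOCK request with a clsact-ingress binder registers (or unregisters) one callback on the block, and every subsequent classifier offload request for that block is funnelled through that callback, with only TC_SETUP_CLSU32 handled here. The standalone model below mimics only that dispatch shape with invented fake_* names (it does not use the kernel's tcf_block_* API); -95 stands in for -EOPNOTSUPP.

#include <stdio.h>

enum fake_setup_type { FAKE_CLSU32, FAKE_CLSFLOWER };
enum fake_block_cmd { FAKE_BLOCK_BIND, FAKE_BLOCK_UNBIND };

typedef int (*fake_block_cb_t)(enum fake_setup_type type, void *type_data,
			       void *cb_priv);

struct fake_block {
	fake_block_cb_t cb;	/* single registered callback per block */
	void *cb_priv;
};

static int fake_driver_cb(enum fake_setup_type type, void *type_data, void *cb_priv)
{
	(void)type_data;
	(void)cb_priv;
	/* Only u32 classifiers are handled, mirroring TC_SETUP_CLSU32 above. */
	return (type == FAKE_CLSU32) ? 0 : -95;
}

static int fake_setup_block(struct fake_block *blk, enum fake_block_cmd cmd,
			    fake_block_cb_t cb, void *cb_priv)
{
	switch (cmd) {
	case FAKE_BLOCK_BIND:
		blk->cb = cb;
		blk->cb_priv = cb_priv;
		return 0;
	case FAKE_BLOCK_UNBIND:
		blk->cb = NULL;
		blk->cb_priv = NULL;
		return 0;
	}
	return -95;
}

int main(void)
{
	struct fake_block blk = { 0 };

	fake_setup_block(&blk, FAKE_BLOCK_BIND, fake_driver_cb, NULL);
	printf("u32 offload:    %d\n", blk.cb(FAKE_CLSU32, NULL, blk.cb_priv));
	printf("flower offload: %d\n", blk.cb(FAKE_CLSFLOWER, NULL, blk.cb_priv));
	fake_setup_block(&blk, FAKE_BLOCK_UNBIND, NULL, NULL);
	return 0;
}

Bracketing the u32 setup with stmmac_disable_all_queues()/stmmac_enable_all_queues() in the real callback presumably keeps filter programming from racing with NAPI processing; that detail is omitted from the sketch.
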
@@ -3869,7 +3801,7 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
        if (ret)
                return ret;
 
-       priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
+       stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
 
        return ret;
 }
@@ -4098,6 +4030,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
        .ndo_set_rx_mode = stmmac_set_rx_mode,
        .ndo_tx_timeout = stmmac_tx_timeout,
        .ndo_do_ioctl = stmmac_ioctl,
+       .ndo_setup_tc = stmmac_setup_tc,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = stmmac_poll_controller,
 #endif
@@ -4145,49 +4078,17 @@ static void stmmac_service_task(struct work_struct *work)
  */
 static int stmmac_hw_init(struct stmmac_priv *priv)
 {
-       struct mac_device_info *mac;
-
-       /* Identify the MAC HW device */
-       if (priv->plat->setup) {
-               mac = priv->plat->setup(priv);
-       } else if (priv->plat->has_gmac) {
-               priv->dev->priv_flags |= IFF_UNICAST_FLT;
-               mac = dwmac1000_setup(priv->ioaddr,
-                                     priv->plat->multicast_filter_bins,
-                                     priv->plat->unicast_filter_entries,
-                                     &priv->synopsys_id);
-       } else if (priv->plat->has_gmac4) {
-               priv->dev->priv_flags |= IFF_UNICAST_FLT;
-               mac = dwmac4_setup(priv->ioaddr,
-                                  priv->plat->multicast_filter_bins,
-                                  priv->plat->unicast_filter_entries,
-                                  &priv->synopsys_id);
-       } else {
-               mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
-       }
-       if (!mac)
-               return -ENOMEM;
-
-       priv->hw = mac;
+       int ret;
 
        /* dwmac-sun8i only works in chain mode */
        if (priv->plat->has_sun8i)
                chain_mode = 1;
+       priv->chain_mode = chain_mode;
 
-       /* To use the chained or ring mode */
-       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-               priv->hw->mode = &dwmac4_ring_mode_ops;
-       } else {
-               if (chain_mode) {
-                       priv->hw->mode = &chain_mode_ops;
-                       dev_info(priv->device, "Chain mode enabled\n");
-                       priv->mode = STMMAC_CHAIN_MODE;
-               } else {
-                       priv->hw->mode = &ring_mode_ops;
-                       dev_info(priv->device, "Ring mode enabled\n");
-                       priv->mode = STMMAC_RING_MODE;
-               }
-       }
+       /* Initialize HW Interface */
+       ret = stmmac_hwif_init(priv);
+       if (ret)
+               return ret;
 
        /* Get the HW capability (new GMAC newer than 3.50a) */
        priv->hw_cap_support = stmmac_get_hw_features(priv);
@@ -4221,12 +4122,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
                dev_info(priv->device, "No HW DMA feature register supported\n");
        }
 
-       /* To use alternate (extended), normal or GMAC4 descriptor structures */
-       if (priv->synopsys_id >= DWMAC_CORE_4_00)
-               priv->hw->desc = &dwmac4_desc_ops;
-       else
-               stmmac_selec_desc_mode(priv);
-
        if (priv->plat->rx_coe) {
                priv->hw->rx_csum = priv->plat->rx_coe;
                dev_info(priv->device, "RX Checksum Offload Engine supported\n");
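
The stmmac_hwif_init() call introduced above replaces the open-coded dwmac100/dwmac1000/dwmac4 setup and the descriptor/mode selection removed in these hunks. Its implementation (hwif.c) is not part of this diff, so the sketch below only illustrates the general idea, binding one ops set per core generation from a table and returning an errno when nothing matches; the fake_* names, IDs, and table contents are invented, and -22 stands in for -EINVAL.

#include <stdio.h>

struct fake_ops { const char *name; };

static const struct fake_ops gmac_ops  = { "gmac"  };
static const struct fake_ops gmac4_ops = { "gmac4" };

struct fake_hwif_entry {
	unsigned int min_id;		/* lowest core ID this entry supports */
	const struct fake_ops *ops;
};

/* Ordered from newest to oldest so the first match wins. */
static const struct fake_hwif_entry fake_hwif_table[] = {
	{ .min_id = 0x40, .ops = &gmac4_ops },
	{ .min_id = 0x00, .ops = &gmac_ops  },
};

static int fake_hwif_init(unsigned int core_id, const struct fake_ops **out)
{
	size_t i;

	for (i = 0; i < sizeof(fake_hwif_table) / sizeof(fake_hwif_table[0]); i++) {
		if (core_id >= fake_hwif_table[i].min_id) {
			*out = fake_hwif_table[i].ops;
			return 0;
		}
	}
	return -22;	/* no matching interface */
}

int main(void)
{
	const struct fake_ops *ops;

	if (!fake_hwif_init(0x42, &ops))
		printf("core 0x42 -> %s ops\n", ops->name);
	if (!fake_hwif_init(0x35, &ops))
		printf("core 0x35 -> %s ops\n", ops->name);
	return 0;
}
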
@@ -4335,6 +4230,11 @@ int stmmac_dvr_probe(struct device *device,
        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                            NETIF_F_RXCSUM;
 
+       ret = stmmac_tc_init(priv, priv);
+       if (!ret) {
+               ndev->hw_features |= NETIF_F_HW_TC;
+       }
+
        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
                ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
                priv->tso = true;
@@ -4458,7 +4358,7 @@ int stmmac_dvr_remove(struct device *dev)
 
        stmmac_stop_all_dma(priv);
 
-       priv->hw->mac->set_mac(priv->ioaddr, false);
+       stmmac_mac_set(priv, priv->ioaddr, false);
        netif_carrier_off(ndev);
        unregister_netdev(ndev);
        if (priv->plat->stmmac_rst)
@@ -4507,10 +4407,10 @@ int stmmac_suspend(struct device *dev)
 
        /* Enable Power down mode by programming the PMT regs */
        if (device_may_wakeup(priv->device)) {
-               priv->hw->mac->pmt(priv->hw, priv->wolopts);
+               stmmac_pmt(priv, priv->hw, priv->wolopts);
                priv->irq_wake = 1;
        } else {
-               priv->hw->mac->set_mac(priv->ioaddr, false);
+               stmmac_mac_set(priv, priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable clock in case PWM is off */
                clk_disable(priv->plat->pclk);
@@ -4574,7 +4474,7 @@ int stmmac_resume(struct device *dev)
         */
        if (device_may_wakeup(priv->device)) {
                spin_lock_irqsave(&priv->lock, flags);
-               priv->hw->mac->pmt(priv->hw, 0);
+               stmmac_pmt(priv, priv->hw, 0);
                spin_unlock_irqrestore(&priv->lock, flags);
                priv->irq_wake = 0;
        } else {