#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
+#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
return -EINVAL;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
- else
- p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
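+ /* Helper writes des0 on GMAC4+ cores and des2 on older ones, as above */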
+ stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
else
p = tx_q->dma_tx + i;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- p->des0 = 0;
- p->des1 = 0;
- p->des2 = 0;
- p->des3 = 0;
- } else {
- p->des2 = 0;
- }
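+ /* Core-specific callback zeroes des0..des3 (just des2 on pre-GMAC4 cores) */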
+ stmmac_clear_desc(priv, p);
tx_q->tx_skbuff_dma[i].buf = 0;
tx_q->tx_skbuff_dma[i].map_as_page = false;
}
/* configure all channels */
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- for (chan = 0; chan < rx_channels_count; chan++) {
- qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
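+ /* dma_rx_mode/dma_tx_mode dispatch per core now, so all cores take the per-channel path */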
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
- stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
- rxfifosz, qmode);
- }
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+ rxfifosz, qmode);
+ }
- for (chan = 0; chan < tx_channels_count; chan++) {
- qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
- stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
- txfifosz, qmode);
- }
- } else {
- stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+ txfifosz, qmode);
}
}
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz,
- rxqmode);
- stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz,
- txqmode);
- } else {
- stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
- }
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
- int ret = false;
+ int ret;
- /* Safety features are only available in cores >= 5.10 */
- if (priv->synopsys_id < DWMAC_CORE_5_10)
- return ret;
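+ /* -EINVAL from the wrapper only means this core has no safety IRQ callback */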
ret = stmmac_safety_feat_irq_status(priv, priv->dev,
priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
if (ret && (ret != -EINVAL)) {
tx_channel_count : rx_channel_count;
u32 chan;
bool poll_scheduled = false;
- int status[channels_to_check];
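+ /* Fixed worst-case size instead of a variable-length array */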
+ int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+
+ /* Make sure we never check beyond our status buffer. */
+ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
+ channels_to_check = ARRAY_SIZE(status);
/* Each DMA channel can be used for rx and tx simultaneously, yet
* napi_struct is embedded in struct stmmac_rx_queue rather than in a
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
- priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
- } else {
- priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
- priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
- }
-
dwmac_mmc_intr_all_mask(priv->mmcaddr);
if (priv->dma_cap.rmon) {
{
u32 rx_channels_count = priv->plat->rx_queues_to_use;
u32 tx_channels_count = priv->plat->tx_queues_to_use;
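+ /* CSR channel setup must cover whichever direction uses more channels */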
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
- u32 dummy_dma_rx_phy = 0;
- u32 dummy_dma_tx_phy = 0;
u32 chan = 0;
int atds = 0;
int ret = 0;
return ret;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- /* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
- dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
-
- /* DMA RX Channel Configuration */
- for (chan = 0; chan < rx_channels_count; chan++) {
- rx_q = &priv->rx_queue[chan];
-
- stmmac_init_rx_chan(priv, priv->ioaddr,
- priv->plat->dma_cfg, rx_q->dma_rx_phy,
- chan);
-
- rx_q->rx_tail_addr = rx_q->dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
- stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
- rx_q->rx_tail_addr, chan);
- }
-
- /* DMA TX Channel Configuration */
- for (chan = 0; chan < tx_channels_count; chan++) {
- tx_q = &priv->tx_queue[chan];
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->rx_queue[chan];
- stmmac_init_chan(priv, priv->ioaddr,
- priv->plat->dma_cfg, chan);
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
- stmmac_init_tx_chan(priv, priv->ioaddr,
- priv->plat->dma_cfg, tx_q->dma_tx_phy,
- chan);
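+ /* Tail pointer sits one full ring past the base so the DMA sees every RX descriptor as available */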
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+ }
- tx_q->tx_tail_addr = tx_q->dma_tx_phy +
- (DMA_TX_SIZE * sizeof(struct dma_desc));
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
- tx_q->tx_tail_addr, chan);
- }
- } else {
- rx_q = &priv->rx_queue[chan];
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_channels_count; chan++) {
tx_q = &priv->tx_queue[chan];
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
- tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
}
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
if (priv->plat->axi)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
stmmac_core_init(priv, priv->hw, dev);
/* Initialize MTL*/
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- stmmac_mtl_configuration(priv);
+ stmmac_mtl_configuration(priv);
/* Initialize Safety Features */
- if (priv->synopsys_id >= DWMAC_CORE_5_10)
- stmmac_safety_feat_configuration(priv);
+ stmmac_safety_feat_configuration(priv);
ret = stmmac_rx_ipc(priv, priv->hw);
if (!ret) {
if (enh_desc)
is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
- if (unlikely(is_jumbo) && likely(priv->synopsys_id <
- DWMAC_CORE_4_00)) {
+ if (unlikely(is_jumbo)) {
entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
- if (unlikely(entry < 0))
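+ /* -EINVAL just means the core has no jumbo callback; only real mapping errors abort */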
+ if (unlikely(entry < 0) && (entry != -EINVAL))
goto dma_map_err;
}
goto dma_map_err; /* should reuse desc w/o issues */
tx_q->tx_skbuff_dma[entry].buf = des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- desc->des0 = cpu_to_le32(des);
- else
- desc->des2 = cpu_to_le32(des);
+
+ stmmac_set_desc_addr(priv, desc, des);
tx_q->tx_skbuff_dma[entry].map_as_page = true;
tx_q->tx_skbuff_dma[entry].len = len;
* element in case of no SG.
*/
priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
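+ /* Arm the coalesce timer at most once; otherwise request an IRQ on this descriptor */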
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
+ !priv->tx_timer_armed) {
mod_timer(&priv->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ priv->tx_timer_armed = true;
} else {
priv->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ priv->tx_timer_armed = false;
}
skb_tx_timestamp(skb);
goto dma_map_err;
tx_q->tx_skbuff_dma[first_entry].buf = des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- first->des0 = cpu_to_le32(des);
- else
- first->des2 = cpu_to_le32(des);
+
+ stmmac_set_desc_addr(priv, first, des);
tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
break;
}
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
- p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
- p->des1 = 0;
- } else {
- p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
- }
-
+ stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
stmmac_refill_desc3(priv, rx_q, p);
if (rx_q->rx_zeroc_thresh > 0)
return ret;
}
+static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct stmmac_priv *priv = cb_priv;
+ int ret = -EOPNOTSUPP;
+
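+ /* Stop NAPI processing while the hardware filter tables are changed */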
+ stmmac_disable_all_queues(priv);
+
+ switch (type) {
+ case TC_SETUP_CLSU32:
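+ /* Offload only chain-0 rules the stack permits us to take in hardware */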
+ if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
+ ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
+ break;
+ default:
+ break;
+ }
+
+ stmmac_enable_all_queues(priv);
+ return ret;
+}
+
+static int stmmac_setup_tc_block(struct stmmac_priv *priv,
+ struct tc_block_offload *f)
+{
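+ /* Only ingress clsact block bindings are supported */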
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
+ priv, priv);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb,
+ priv);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return stmmac_setup_tc_block(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
struct stmmac_priv *priv = netdev_priv(ndev);
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
+ .ndo_setup_tc = stmmac_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
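+
+ /* Advertise NETIF_F_HW_TC only when the TC core initializes successfully */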
+ ret = stmmac_tc_init(priv, priv);
+ if (!ret)
+ ndev->hw_features |= NETIF_F_HW_TC;
+
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
priv->tso = true;