stmmac_set_queue_tx_tail_ptr(priv, tx_q, queue, tx_q->cur_tx);
}
+/* stmmac_set_gso_types - cache the set of GSO types TSO can offload
+ * @priv: driver private context
+ * @tso: true when the NETIF_F_TSO feature is enabled
+ *
+ * Precomputes the SKB_GSO_* mask checked on the xmit fast path
+ * (skb_shinfo(skb)->gso_type & priv->gso_enabled_types), replacing the
+ * per-packet core_type test that lived in the transmit function.
+ */
+static void stmmac_set_gso_types(struct stmmac_priv *priv, bool tso)
+{
+ if (!tso) {
+ priv->gso_enabled_types = 0;
+ } else {
+ /* TCPv4/v6 segmentation is offloadable on all cores; UDP L4
+ * segmentation is only supported by GMAC4 devices.
+ */
+ priv->gso_enabled_types = SKB_GSO_TCPV4 | SKB_GSO_TCPV6;
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
+ priv->gso_enabled_types |= SKB_GSO_UDP_L4;
+ }
+}
+
static size_t stmmac_tso_header_size(struct sk_buff *skb)
{
size_t size;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
unsigned int first_entry, tx_packets;
- int gso = skb_shinfo(skb)->gso_type;
struct stmmac_txq_stats *txq_stats;
struct dma_desc *desc, *first_desc;
struct stmmac_tx_queue *tx_q;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
stmmac_stop_sw_lpi(priv);
- /* Manage oversized TCP frames for GMAC4 device */
- if (skb_is_gso(skb) && priv->tso) {
- if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
- return stmmac_tso_xmit(skb, dev);
- if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
- (gso & SKB_GSO_UDP_L4))
- return stmmac_tso_xmit(skb, dev);
- }
+ if (skb_is_gso(skb) &&
+ skb_shinfo(skb)->gso_type & priv->gso_enabled_types)
+ return stmmac_tso_xmit(skb, dev);
if (priv->est && priv->est->enable &&
priv->est->max_sdu[queue]) {
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
}
- priv->tso = !!(features & NETIF_F_TSO);
+ stmmac_set_gso_types(priv, features & NETIF_F_TSO);
if (features & NETIF_F_HW_VLAN_CTAG_RX)
priv->hw->hw_vlan_en = true;
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
if (priv->plat->core_type == DWMAC_CORE_GMAC4)
ndev->hw_features |= NETIF_F_GSO_UDP_L4;
- priv->tso = true;
+ stmmac_set_gso_types(priv, true);
dev_info(priv->device, "TSO feature enabled\n");
}