git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
amd-xgbe: Fix to ensure dependent features are toggled with RX checksum offload
authorVishal Badole <Vishal.Badole@amd.com>
Thu, 24 Apr 2025 13:02:48 +0000 (18:32 +0530)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 9 May 2025 07:41:35 +0000 (09:41 +0200)
commit f04dd30f1bef1ed2e74a4050af6e5e5e3869bac3 upstream.

According to the XGMAC specification, enabling features such as Layer 3
and Layer 4 Packet Filtering, Split Header and Virtualized Network support
automatically selects the IPC Full Checksum Offload Engine on the receive
side.

When RX checksum offload is disabled, these dependent features must also
be disabled to prevent abnormal behavior caused by mismatched feature
dependencies.

Ensure that toggling RX checksum offload (disabling or enabling) properly
disables or enables all dependent features, maintaining consistent and
expected behavior in the network device.

Cc: stable@vger.kernel.org
Fixes: 1a510ccf5869 ("amd-xgbe: Add support for VXLAN offload capabilities")
Signed-off-by: Vishal Badole <Vishal.Badole@amd.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250424130248.428865-1-Vishal.Badole@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe.h

index 230726d7b74f6343e7d5262e1a02ba455fc0b713..d41b58fad37bbf216c35ed05ef0261e30080aefd 100644 (file)
@@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
        }
 
        /* Set up the header page info */
-       xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
-                            XGBE_SKB_ALLOC_SIZE);
+       if (pdata->netdev->features & NETIF_F_RXCSUM) {
+               xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+                                    XGBE_SKB_ALLOC_SIZE);
+       } else {
+               xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+                                    pdata->rx_buf_size);
+       }
 
        /* Set up the buffer page info */
        xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
index 4030d619e84f56863d0fde61692ef12b386c4c32..3cf7943b590cf08377dabfc94de75056f10ecad8 100644 (file)
@@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
 }
 
+static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
+{
+       unsigned int i;
+
+       for (i = 0; i < pdata->channel_count; i++) {
+               if (!pdata->channel[i]->rx_ring)
+                       break;
+
+               XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
+       }
+}
+
 static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
                              unsigned int index, unsigned int val)
 {
@@ -3495,8 +3507,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
        xgbe_config_tx_coalesce(pdata);
        xgbe_config_rx_buffer_size(pdata);
        xgbe_config_tso_mode(pdata);
-       xgbe_config_sph_mode(pdata);
-       xgbe_config_rss(pdata);
+
+       if (pdata->netdev->features & NETIF_F_RXCSUM) {
+               xgbe_config_sph_mode(pdata);
+               xgbe_config_rss(pdata);
+       }
+
        desc_if->wrapper_tx_desc_init(pdata);
        desc_if->wrapper_rx_desc_init(pdata);
        xgbe_enable_dma_interrupts(pdata);
@@ -3650,5 +3666,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
        hw_if->disable_vxlan = xgbe_disable_vxlan;
        hw_if->set_vxlan_id = xgbe_set_vxlan_id;
 
+       /* For Split Header*/
+       hw_if->enable_sph = xgbe_config_sph_mode;
+       hw_if->disable_sph = xgbe_disable_sph_mode;
+
        DBGPR("<--xgbe_init_function_ptrs\n");
 }
index 6b73648b3779368f8a01cbc95f8afb88bc07786d..34d45cebefb5d3b938c6be484dae80a6d0ef28f3 100644 (file)
@@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev,
        if (ret)
                return ret;
 
-       if ((features & NETIF_F_RXCSUM) && !rxcsum)
+       if ((features & NETIF_F_RXCSUM) && !rxcsum) {
+               hw_if->enable_sph(pdata);
+               hw_if->enable_vxlan(pdata);
                hw_if->enable_rx_csum(pdata);
-       else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+               schedule_work(&pdata->restart_work);
+       } else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
+               hw_if->disable_sph(pdata);
+               hw_if->disable_vxlan(pdata);
                hw_if->disable_rx_csum(pdata);
+               schedule_work(&pdata->restart_work);
+       }
 
        if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
                hw_if->enable_rx_vlan_stripping(pdata);
index 7a41367c437ddf3cc484d4650b4c0ccdb1496f37..b17c7d1dc4b00219c82bf5a618b6f8514f588c31 100644 (file)
@@ -859,6 +859,10 @@ struct xgbe_hw_if {
        void (*enable_vxlan)(struct xgbe_prv_data *);
        void (*disable_vxlan)(struct xgbe_prv_data *);
        void (*set_vxlan_id)(struct xgbe_prv_data *);
+
+       /* For Split Header */
+       void (*enable_sph)(struct xgbe_prv_data *pdata);
+       void (*disable_sph)(struct xgbe_prv_data *pdata);
 };
 
 /* This structure represents implementation specific routines for an