idpf: Fix RSS LUT NULL pointer crash on early ethtool operations
author     Sreedevi Joshi <sreedevi.joshi@intel.com>
           Mon, 24 Nov 2025 18:47:48 +0000 (12:47 -0600)
committer  Tony Nguyen <anthony.l.nguyen@intel.com>
           Tue, 6 Jan 2026 23:42:10 +0000 (15:42 -0800)
The RSS LUT is not initialized until the interface comes up, causing the
NULL pointer crash below when ethtool operations such as toggling rxhash
on/off are performed before the interface is brought up for the first time.

Move RSS LUT initialization from ndo_open to vport creation to ensure LUT
is always available. This enables RSS configuration via ethtool before
bringing the interface up. Simplify LUT management by maintaining all
changes in the driver's soft copy and programming zeros to the indirection
table when rxhash is disabled. If the interface is down when rxhash or LUT
configuration changes are made, defer HW programming until it comes up.
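
As a rough illustration of the soft-copy approach described above, here is a
small standalone userspace C sketch. It is not part of this patch: helper
names lut_fill_default() and lut_for_hw() are invented for the example, while
the driver itself uses idpf_init_rss_lut() and
idpf_send_get_set_rss_lut_msg(). The soft copy always holds the default or
user-configured LUT; only the value handed to "HW" is zeroed while rxhash is
disabled.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Fill the soft copy with the default spread across RX queues,
 * mirroring the i % num_active_rxq pattern used by the driver.
 */
static void lut_fill_default(uint32_t *lut, size_t lut_size,
			     unsigned int num_rxq)
{
	for (size_t i = 0; i < lut_size; i++)
		lut[i] = i % num_rxq;
}

/* Value programmed to "HW": zeros while rxhash is disabled, otherwise the
 * soft copy, so edits made while the interface is down survive untouched.
 */
static uint32_t lut_for_hw(const uint32_t *lut, size_t i, bool rxhash_ena)
{
	return rxhash_ena ? lut[i] : 0;
}

int main(void)
{
	const size_t lut_size = 8;
	uint32_t *lut = calloc(lut_size, sizeof(*lut));

	if (!lut)
		return 1;

	lut_fill_default(lut, lut_size, 4);	/* 4 active RX queues */

	/* rxhash off: the indirection table sent to HW is all zeros, but
	 * the soft copy still holds 0 1 2 3 0 1 2 3 for later restore.
	 */
	for (size_t i = 0; i < lut_size; i++)
		printf("%u ", (unsigned int)lut_for_hw(lut, i, false));
	printf("\n");

	free(lut);
	return 0;
}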

Steps to reproduce:
** Load idpf driver; interfaces will be created
modprobe idpf
** Before bringing the interfaces up, turn rxhash off
ethtool -K eth2 rxhash off

[89408.371875] BUG: kernel NULL pointer dereference, address: 0000000000000000
[89408.371908] #PF: supervisor read access in kernel mode
[89408.371924] #PF: error_code(0x0000) - not-present page
[89408.371940] PGD 0 P4D 0
[89408.371953] Oops: Oops: 0000 [#1] SMP NOPTI
<snip>
[89408.372052] RIP: 0010:memcpy_orig+0x16/0x130
[89408.372310] Call Trace:
[89408.372317]  <TASK>
[89408.372326]  ? idpf_set_features+0xfc/0x180 [idpf]
[89408.372363]  __netdev_update_features+0x295/0xde0
[89408.372384]  ethnl_set_features+0x15e/0x460
[89408.372406]  genl_family_rcv_msg_doit+0x11f/0x180
[89408.372429]  genl_rcv_msg+0x1ad/0x2b0
[89408.372446]  ? __pfx_ethnl_set_features+0x10/0x10
[89408.372465]  ? __pfx_genl_rcv_msg+0x10/0x10
[89408.372482]  netlink_rcv_skb+0x58/0x100
[89408.372502]  genl_rcv+0x2c/0x50
[89408.372516]  netlink_unicast+0x289/0x3e0
[89408.372533]  netlink_sendmsg+0x215/0x440
[89408.372551]  __sys_sendto+0x234/0x240
[89408.372571]  __x64_sys_sendto+0x28/0x30
[89408.372585]  x64_sys_call+0x1909/0x1da0
[89408.372604]  do_syscall_64+0x7a/0xfa0
[89408.373140]  ? clear_bhb_loop+0x60/0xb0
[89408.373647]  entry_SYSCALL_64_after_hwframe+0x76/0x7e
[89408.378887]  </TASK>
<snip>

Fixes: a251eee62133 ("idpf: add SRIOV support and other ndo_ops")
Signed-off-by: Sreedevi Joshi <sreedevi.joshi@intel.com>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Reviewed-by: Emil Tantilov <emil.s.tantilov@intel.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Samuel Salin <Samuel.salin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/idpf/idpf.h
drivers/net/ethernet/intel/idpf/idpf_lib.c
drivers/net/ethernet/intel/idpf/idpf_txrx.c
drivers/net/ethernet/intel/idpf/idpf_txrx.h
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c

diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index dab36c0c3cdc3595e53d1930ce72c404f678814b..1bf7934d4e28b9f83818d60fb4e0b607f348f2ce 100644 (file)
@@ -423,14 +423,12 @@ enum idpf_user_flags {
  * @rss_key: RSS hash key
  * @rss_lut_size: Size of RSS lookup table
  * @rss_lut: RSS lookup table
- * @cached_lut: Used to restore previously init RSS lut
  */
 struct idpf_rss_data {
        u16 rss_key_size;
        u8 *rss_key;
        u16 rss_lut_size;
        u32 *rss_lut;
-       u32 *cached_lut;
 };
 
 /**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index d56366e676cf74d6ac615cb89ab92556880af372..51716e5a84ef3176a86306d96402ce88a868e380 100644 (file)
@@ -1073,7 +1073,7 @@ static void idpf_vport_rel(struct idpf_vport *vport)
        u16 idx = vport->idx;
 
        vport_config = adapter->vport_config[vport->idx];
-       idpf_deinit_rss(vport);
+       idpf_deinit_rss_lut(vport);
        rss_data = &vport_config->user_config.rss_data;
        kfree(rss_data->rss_key);
        rss_data->rss_key = NULL;
@@ -1226,6 +1226,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
        u16 idx = adapter->next_vport;
        struct idpf_vport *vport;
        u16 num_max_q;
+       int err;
 
        if (idx == IDPF_NO_FREE_SLOT)
                return NULL;
@@ -1276,10 +1277,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
 
        idpf_vport_init(vport, max_q);
 
-       /* This alloc is done separate from the LUT because it's not strictly
-        * dependent on how many queues we have. If we change number of queues
-        * and soft reset we'll need a new LUT but the key can remain the same
-        * for as long as the vport exists.
+       /* LUT and key are both initialized here. Key is not strictly dependent
+        * on how many queues we have. If we change number of queues and soft
+        * reset is initiated, LUT will be freed and a new LUT will be allocated
+        * as per the updated number of queues during vport bringup. However,
+        * the key remains the same for as long as the vport exists.
         */
        rss_data = &adapter->vport_config[idx]->user_config.rss_data;
        rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
@@ -1289,6 +1291,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
        /* Initialize default rss key */
        netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
 
+       /* Initialize default rss LUT */
+       err = idpf_init_rss_lut(vport);
+       if (err)
+               goto free_rss_key;
+
        /* fill vport slot in the adapter struct */
        adapter->vports[idx] = vport;
        adapter->vport_ids[idx] = idpf_get_vport_id(vport);
@@ -1299,6 +1306,8 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
 
        return vport;
 
+free_rss_key:
+       kfree(rss_data->rss_key);
 free_vector_idxs:
        kfree(vport->q_vector_idxs);
 free_vport:
@@ -1476,6 +1485,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
        struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
        struct idpf_adapter *adapter = vport->adapter;
        struct idpf_vport_config *vport_config;
+       struct idpf_rss_data *rss_data;
        int err;
 
        if (test_bit(IDPF_VPORT_UP, np->state))
@@ -1570,12 +1580,21 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
        idpf_restore_features(vport);
 
        vport_config = adapter->vport_config[vport->idx];
-       if (vport_config->user_config.rss_data.rss_lut)
-               err = idpf_config_rss(vport);
-       else
-               err = idpf_init_rss(vport);
+       rss_data = &vport_config->user_config.rss_data;
+
+       if (!rss_data->rss_lut) {
+               err = idpf_init_rss_lut(vport);
+               if (err) {
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to initialize RSS LUT for vport %u: %d\n",
+                               vport->vport_id, err);
+                       goto disable_vport;
+               }
+       }
+
+       err = idpf_config_rss(vport);
        if (err) {
-               dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
+               dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
                        vport->vport_id, err);
                goto disable_vport;
        }
@@ -1584,7 +1603,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
        if (err) {
                dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
                        vport->vport_id, err);
-               goto deinit_rss;
+               goto disable_vport;
        }
 
        if (rtnl)
@@ -1592,8 +1611,6 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
 
        return 0;
 
-deinit_rss:
-       idpf_deinit_rss(vport);
 disable_vport:
        idpf_send_disable_vport_msg(vport);
 disable_queues:
@@ -2051,7 +2068,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
                idpf_vport_stop(vport, false);
        }
 
-       idpf_deinit_rss(vport);
+       idpf_deinit_rss_lut(vport);
        /* We're passing in vport here because we need its wait_queue
         * to send a message and it should be getting all the vport
         * config data out of the adapter but we need to be careful not
@@ -2219,40 +2236,6 @@ static void idpf_set_rx_mode(struct net_device *netdev)
                dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
 }
 
-/**
- * idpf_vport_manage_rss_lut - disable/enable RSS
- * @vport: the vport being changed
- *
- * In the event of disable request for RSS, this function will zero out RSS
- * LUT, while in the event of enable request for RSS, it will reconfigure RSS
- * LUT with the default LUT configuration.
- */
-static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
-{
-       bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
-       struct idpf_rss_data *rss_data;
-       u16 idx = vport->idx;
-       int lut_size;
-
-       rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
-       lut_size = rss_data->rss_lut_size * sizeof(u32);
-
-       if (ena) {
-               /* This will contain the default or user configured LUT */
-               memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
-       } else {
-               /* Save a copy of the current LUT to be restored later if
-                * requested.
-                */
-               memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
-
-               /* Zero out the current LUT to disable */
-               memset(rss_data->rss_lut, 0, lut_size);
-       }
-
-       return idpf_config_rss(vport);
-}
-
 /**
  * idpf_set_features - set the netdev feature flags
  * @netdev: ptr to the netdev being adjusted
@@ -2278,10 +2261,19 @@ static int idpf_set_features(struct net_device *netdev,
        }
 
        if (changed & NETIF_F_RXHASH) {
+               struct idpf_netdev_priv *np = netdev_priv(netdev);
+
                netdev->features ^= NETIF_F_RXHASH;
-               err = idpf_vport_manage_rss_lut(vport);
-               if (err)
-                       goto unlock_mutex;
+
+               /* If the interface is not up when changing the rxhash, update
+                * to the HW is skipped. The updated LUT will be committed to
+                * the HW when the interface is brought up.
+                */
+               if (test_bit(IDPF_VPORT_UP, np->state)) {
+                       err = idpf_config_rss(vport);
+                       if (err)
+                               goto unlock_mutex;
+               }
        }
 
        if (changed & NETIF_F_GRO_HW) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 1d91c56f7469673d375724305a1917d7ee3c281c..8991a891a440a058654508176ecadf7ae55df594 100644 (file)
@@ -4650,57 +4650,47 @@ static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
 
        rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
 
-       for (i = 0; i < rss_data->rss_lut_size; i++) {
+       for (i = 0; i < rss_data->rss_lut_size; i++)
                rss_data->rss_lut[i] = i % num_active_rxq;
-               rss_data->cached_lut[i] = rss_data->rss_lut[i];
-       }
 }
 
 /**
- * idpf_init_rss - Allocate and initialize RSS resources
+ * idpf_init_rss_lut - Allocate and initialize RSS LUT
  * @vport: virtual port
  *
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
  */
-int idpf_init_rss(struct idpf_vport *vport)
+int idpf_init_rss_lut(struct idpf_vport *vport)
 {
        struct idpf_adapter *adapter = vport->adapter;
        struct idpf_rss_data *rss_data;
-       u32 lut_size;
 
        rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+       if (!rss_data->rss_lut) {
+               u32 lut_size;
 
-       lut_size = rss_data->rss_lut_size * sizeof(u32);
-       rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
-       if (!rss_data->rss_lut)
-               return -ENOMEM;
-
-       rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
-       if (!rss_data->cached_lut) {
-               kfree(rss_data->rss_lut);
-               rss_data->rss_lut = NULL;
-
-               return -ENOMEM;
+               lut_size = rss_data->rss_lut_size * sizeof(u32);
+               rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
+               if (!rss_data->rss_lut)
+                       return -ENOMEM;
        }
 
        /* Fill the default RSS lut values */
        idpf_fill_dflt_rss_lut(vport);
 
-       return idpf_config_rss(vport);
+       return 0;
 }
 
 /**
- * idpf_deinit_rss - Release RSS resources
+ * idpf_deinit_rss_lut - Release RSS LUT
  * @vport: virtual port
  */
-void idpf_deinit_rss(struct idpf_vport *vport)
+void idpf_deinit_rss_lut(struct idpf_vport *vport)
 {
        struct idpf_adapter *adapter = vport->adapter;
        struct idpf_rss_data *rss_data;
 
        rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
-       kfree(rss_data->cached_lut);
-       rss_data->cached_lut = NULL;
        kfree(rss_data->rss_lut);
        rss_data->rss_lut = NULL;
 }
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 75b977094741fd848e11b3a2535dd453f98bf53e..7d20593bd87782e06147a61b13a9f8b079ffc17f 100644 (file)
@@ -1086,8 +1086,8 @@ void idpf_vport_intr_deinit(struct idpf_vport *vport);
 int idpf_vport_intr_init(struct idpf_vport *vport);
 void idpf_vport_intr_ena(struct idpf_vport *vport);
 int idpf_config_rss(struct idpf_vport *vport);
-int idpf_init_rss(struct idpf_vport *vport);
-void idpf_deinit_rss(struct idpf_vport *vport);
+int idpf_init_rss_lut(struct idpf_vport *vport);
+void idpf_deinit_rss_lut(struct idpf_vport *vport);
 int idpf_rx_bufs_init_all(struct idpf_vport *vport);
 
 struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 01bbd12a642a0744c7a915f42d5cb530f17c2e6e..cb702eac86c804887fac7103a85a4c2141c2b088 100644 (file)
@@ -2804,6 +2804,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
  * @vport: virtual port data structure
  * @get: flag to set or get rss look up table
  *
+ * When rxhash is disabled, RSS LUT will be configured with zeros.  If rxhash
+ * is enabled, the LUT values stored in driver's soft copy will be used to setup
+ * the HW.
+ *
  * Returns 0 on success, negative on failure.
  */
 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
@@ -2814,10 +2818,12 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
        struct idpf_rss_data *rss_data;
        int buf_size, lut_buf_size;
        ssize_t reply_sz;
+       bool rxhash_ena;
        int i;
 
        rss_data =
                &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
+       rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
        buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
        rl = kzalloc(buf_size, GFP_KERNEL);
        if (!rl)
@@ -2839,7 +2845,8 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
        } else {
                rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
                for (i = 0; i < rss_data->rss_lut_size; i++)
-                       rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
+                       rl->lut[i] = rxhash_ena ?
+                               cpu_to_le32(rss_data->rss_lut[i]) : 0;
 
                xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
        }