--- /dev/null
+From 9f9c8e9064ea8ceb13540a283f08550c097bb673 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 17 Mar 2025 10:54:25 +0100
+Subject: ASoC: ak4458: Convert to RUNTIME_PM_OPS() & co
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 9f9c8e9064ea8ceb13540a283f08550c097bb673 upstream.
+
+Use the newer RUNTIME_PM_OPS() and SYSTEM_SLEEP_PM_OPS() macros
+instead of SET_RUNTIME_PM_OPS() and SET_SYSTEM_SLEEP_PM_OPS() together
+with pm_ptr(), which allows us dropping ugly __maybe_unused attributes
+and CONFIG_PM ifdefs.
+
+This optimizes slightly when CONFIG_PM is disabled, too.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Reviewed-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://patch.msgid.link/20250317095603.20073-4-tiwai@suse.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/ak4458.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/sound/soc/codecs/ak4458.c
++++ b/sound/soc/codecs/ak4458.c
+@@ -639,8 +639,7 @@ static void ak4458_reset(struct ak4458_p
+ }
+ }
+
+-#ifdef CONFIG_PM
+-static int __maybe_unused ak4458_runtime_suspend(struct device *dev)
++static int ak4458_runtime_suspend(struct device *dev)
+ {
+ struct ak4458_priv *ak4458 = dev_get_drvdata(dev);
+
+@@ -656,7 +655,7 @@ static int __maybe_unused ak4458_runtime
+ return 0;
+ }
+
+-static int __maybe_unused ak4458_runtime_resume(struct device *dev)
++static int ak4458_runtime_resume(struct device *dev)
+ {
+ struct ak4458_priv *ak4458 = dev_get_drvdata(dev);
+ int ret;
+@@ -686,7 +685,6 @@ err:
+ regulator_bulk_disable(ARRAY_SIZE(ak4458->supplies), ak4458->supplies);
+ return ret;
+ }
+-#endif /* CONFIG_PM */
+
+ static const struct snd_soc_component_driver soc_codec_dev_ak4458 = {
+ .controls = ak4458_snd_controls,
+@@ -735,9 +733,8 @@ static const struct ak4458_drvdata ak449
+ };
+
+ static const struct dev_pm_ops ak4458_pm = {
+- SET_RUNTIME_PM_OPS(ak4458_runtime_suspend, ak4458_runtime_resume, NULL)
+- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+- pm_runtime_force_resume)
++ RUNTIME_PM_OPS(ak4458_runtime_suspend, ak4458_runtime_resume, NULL)
++ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ };
+
+ static int ak4458_i2c_probe(struct i2c_client *i2c)
+@@ -809,7 +806,7 @@ MODULE_DEVICE_TABLE(of, ak4458_of_match)
+ static struct i2c_driver ak4458_i2c_driver = {
+ .driver = {
+ .name = "ak4458",
+- .pm = &ak4458_pm,
++ .pm = pm_ptr(&ak4458_pm),
+ .of_match_table = ak4458_of_match,
+ },
+ .probe = ak4458_i2c_probe,
--- /dev/null
+From stable+bounces-230167-greg=kroah.com@vger.kernel.org Tue Mar 24 15:15:52 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Tue, 24 Mar 2026 07:04:52 -0700
+Subject: ice: Fix PTP NULL pointer dereference during VSI rebuild
+To: stable@vger.kernel.org
+Cc: Aaron Ma <aaron.ma@canonical.com>, Sunitha Mekala <sunithax.d.mekala@intel.com>, Tony Nguyen <anthony.l.nguyen@intel.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260324140456.832964-6-harshit.m.mogalapalli@oracle.com>
+
+From: Aaron Ma <aaron.ma@canonical.com>
+
+[ Upstream commit fc6f36eaaedcf4b81af6fe1a568f018ffd530660 ]
+
+Fix race condition where PTP periodic work runs while VSI is being
+rebuilt, accessing NULL vsi->rx_rings.
+
+The sequence was:
+1. ice_ptp_prepare_for_reset() cancels PTP work
+2. ice_ptp_rebuild() immediately queues PTP work
+3. VSI rebuild happens AFTER ice_ptp_rebuild()
+4. PTP work runs and accesses NULL vsi->rx_rings
+
+Fix: Keep PTP work cancelled during rebuild, only queue it after
+VSI rebuild completes in ice_rebuild().
+
+Added ice_ptp_queue_work() helper function to encapsulate the logic
+for queuing PTP work, ensuring it's only queued when PTP is supported
+and the state is ICE_PTP_READY.
+
+Error log:
+[ 121.392544] ice 0000:60:00.1: PTP reset successful
+[ 121.392692] BUG: kernel NULL pointer dereference, address: 0000000000000000
+[ 121.392712] #PF: supervisor read access in kernel mode
+[ 121.392720] #PF: error_code(0x0000) - not-present page
+[ 121.392727] PGD 0
+[ 121.392734] Oops: Oops: 0000 [#1] SMP NOPTI
+[ 121.392746] CPU: 8 UID: 0 PID: 1005 Comm: ice-ptp-0000:60 Tainted: G S 6.19.0-rc6+ #4 PREEMPT(voluntary)
+[ 121.392761] Tainted: [S]=CPU_OUT_OF_SPEC
+[ 121.392773] RIP: 0010:ice_ptp_update_cached_phctime+0xbf/0x150 [ice]
+[ 121.393042] Call Trace:
+[ 121.393047] <TASK>
+[ 121.393055] ice_ptp_periodic_work+0x69/0x180 [ice]
+[ 121.393202] kthread_worker_fn+0xa2/0x260
+[ 121.393216] ? __pfx_ice_ptp_periodic_work+0x10/0x10 [ice]
+[ 121.393359] ? __pfx_kthread_worker_fn+0x10/0x10
+[ 121.393371] kthread+0x10d/0x230
+[ 121.393382] ? __pfx_kthread+0x10/0x10
+[ 121.393393] ret_from_fork+0x273/0x2b0
+[ 121.393407] ? __pfx_kthread+0x10/0x10
+[ 121.393417] ret_from_fork_asm+0x1a/0x30
+[ 121.393432] </TASK>
+
+Fixes: 803bef817807d ("ice: factor out ice_ptp_rebuild_owner()")
+Signed-off-by: Aaron Ma <aaron.ma@canonical.com>
+Tested-by: Sunitha Mekala <sunithax.d.mekala@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+(cherry picked from commit fc6f36eaaedcf4b81af6fe1a568f018ffd530660)
+[Harshit: Backported to 6.12.y, ice_ptp_prepare_rebuild_sec() is not
+present in 6.12.y]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 3 +++
+ drivers/net/ethernet/intel/ice/ice_ptp.c | 17 ++++++++++++++---
+ drivers/net/ethernet/intel/ice/ice_ptp.h | 5 +++++
+ 3 files changed, 22 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -7792,6 +7792,9 @@ static void ice_rebuild(struct ice_pf *p
+
+ /* Restore timestamp mode settings after VSI rebuild */
+ ice_ptp_restore_timestamp_mode(pf);
++
++ /* Start PTP periodic work after VSI is fully rebuilt */
++ ice_ptp_queue_work(pf);
+ return;
+
+ err_vsi_rebuild:
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -2754,6 +2754,20 @@ static void ice_ptp_periodic_work(struct
+ }
+
+ /**
++ * ice_ptp_queue_work - Queue PTP periodic work for a PF
++ * @pf: Board private structure
++ *
++ * Helper function to queue PTP periodic work after VSI rebuild completes.
++ * This ensures that PTP work only runs when VSI structures are ready.
++ */
++void ice_ptp_queue_work(struct ice_pf *pf)
++{
++ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags) &&
++ pf->ptp.state == ICE_PTP_READY)
++ kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
++}
++
++/**
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+@@ -2888,9 +2902,6 @@ void ice_ptp_rebuild(struct ice_pf *pf,
+
+ ptp->state = ICE_PTP_READY;
+
+- /* Start periodic work going */
+- kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
+-
+ dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
+ return;
+
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
+@@ -333,6 +333,7 @@ void ice_ptp_prepare_for_reset(struct ic
+ void ice_ptp_init(struct ice_pf *pf);
+ void ice_ptp_release(struct ice_pf *pf);
+ void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
++void ice_ptp_queue_work(struct ice_pf *pf);
+ #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+ static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+ {
+@@ -384,6 +385,10 @@ static inline void ice_ptp_link_change(s
+ {
+ }
+
++static inline void ice_ptp_queue_work(struct ice_pf *pf)
++{
++}
++
+ static inline int ice_ptp_clock_index(struct ice_pf *pf)
+ {
+ return -1;
--- /dev/null
+From stable+bounces-230166-greg=kroah.com@vger.kernel.org Tue Mar 24 15:15:44 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Tue, 24 Mar 2026 07:04:50 -0700
+Subject: ice: fix using untrusted value of pkt_len in ice_vc_fdir_parse_raw()
+To: stable@vger.kernel.org
+Cc: Mateusz Polchlopek <mateusz.polchlopek@intel.com>, Przemek Kitszel <przemyslaw.kitszel@intel.com>, Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com>, Rafal Romanowski <rafal.romanowski@intel.com>, Tony Nguyen <anthony.l.nguyen@intel.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260324140456.832964-4-harshit.m.mogalapalli@oracle.com>
+
+From: Mateusz Polchlopek <mateusz.polchlopek@intel.com>
+
+[ Upstream commit 1388dd564183a5a18ec4a966748037736b5653c5 ]
+
+Fix using the untrusted value of proto->raw.pkt_len in function
+ice_vc_fdir_parse_raw() by verifying if it does not exceed the
+VIRTCHNL_MAX_SIZE_RAW_PACKET value.
+
+Fixes: 99f419df8a5c ("ice: enable FDIR filters from raw binary patterns for VFs")
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Mateusz Polchlopek <mateusz.polchlopek@intel.com>
+Signed-off-by: Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+(cherry picked from commit 1388dd564183a5a18ec4a966748037736b5653c5)
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 24 +++++++++++++--------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -832,21 +832,27 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
+ struct virtchnl_proto_hdrs *proto,
+ struct virtchnl_fdir_fltr_conf *conf)
+ {
+- u8 *pkt_buf, *msk_buf __free(kfree);
++ u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
+ struct ice_parser_result rslt;
+ struct ice_pf *pf = vf->pf;
++ u16 pkt_len, udp_port = 0;
+ struct ice_parser *psr;
+ int status = -ENOMEM;
+ struct ice_hw *hw;
+- u16 udp_port = 0;
+
+- pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+- msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
++ pkt_len = proto->raw.pkt_len;
++
++ if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
++ return -EINVAL;
++
++ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
++ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
++
+ if (!pkt_buf || !msk_buf)
+ goto err_mem_alloc;
+
+- memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
+- memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
++ memcpy(pkt_buf, proto->raw.spec, pkt_len);
++ memcpy(msk_buf, proto->raw.mask, pkt_len);
+
+ hw = &pf->hw;
+
+@@ -862,7 +868,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
+ if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
+ ice_parser_vxlan_tunnel_set(psr, udp_port, true);
+
+- status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
++ status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
+ if (status)
+ goto err_parser_destroy;
+
+@@ -876,7 +882,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
+ }
+
+ status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
+- proto->raw.pkt_len, ICE_BLK_FD,
++ pkt_len, ICE_BLK_FD,
+ conf->prof);
+ if (status)
+ goto err_parser_profile_init;
+@@ -885,7 +891,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
+ ice_parser_profile_dump(hw, conf->prof);
+
+ /* Store raw flow info into @conf */
+- conf->pkt_len = proto->raw.pkt_len;
++ conf->pkt_len = pkt_len;
+ conf->pkt_buf = pkt_buf;
+ conf->parser_ena = true;
+
--- /dev/null
+From stable+bounces-230170-greg=kroah.com@vger.kernel.org Tue Mar 24 15:06:04 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Tue, 24 Mar 2026 07:04:53 -0700
+Subject: idpf: check error for register_netdev() on init
+To: stable@vger.kernel.org
+Cc: Emil Tantilov <emil.s.tantilov@intel.com>, Tony Nguyen <anthony.l.nguyen@intel.com>, Simon Horman <horms@kernel.org>, Samuel Salin <Samuel.salin@intel.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260324140456.832964-7-harshit.m.mogalapalli@oracle.com>
+
+From: Emil Tantilov <emil.s.tantilov@intel.com>
+
+[ Upstream commit 680811c67906191b237bbafe7dabbbad64649b39 ]
+
+Current init logic ignores the error code from register_netdev(),
+which will cause WARN_ON() on attempt to unregister it, if there was one,
+and there is no info for the user that the creation of the netdev failed.
+
+WARNING: CPU: 89 PID: 6902 at net/core/dev.c:11512 unregister_netdevice_many_notify+0x211/0x1a10
+...
+[ 3707.563641] unregister_netdev+0x1c/0x30
+[ 3707.563656] idpf_vport_dealloc+0x5cf/0xce0 [idpf]
+[ 3707.563684] idpf_deinit_task+0xef/0x160 [idpf]
+[ 3707.563712] idpf_vc_core_deinit+0x84/0x320 [idpf]
+[ 3707.563739] idpf_remove+0xbf/0x780 [idpf]
+[ 3707.563769] pci_device_remove+0xab/0x1e0
+[ 3707.563786] device_release_driver_internal+0x371/0x530
+[ 3707.563803] driver_detach+0xbf/0x180
+[ 3707.563816] bus_remove_driver+0x11b/0x2a0
+[ 3707.563829] pci_unregister_driver+0x2a/0x250
+
+Introduce an error check and log the vport number and error code.
+On removal make sure to check VPORT_REG_NETDEV flag prior to calling
+unregister and free on the netdev.
+
+Add local variables for idx, vport_config and netdev for readability.
+
+Fixes: 0fe45467a104 ("idpf: add create vport and netdev configuration")
+Suggested-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+(cherry picked from commit 680811c67906191b237bbafe7dabbbad64649b39)
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_lib.c | 31 ++++++++++++++++++++---------
+ 1 file changed, 22 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -911,15 +911,19 @@ static int idpf_stop(struct net_device *
+ static void idpf_decfg_netdev(struct idpf_vport *vport)
+ {
+ struct idpf_adapter *adapter = vport->adapter;
++ u16 idx = vport->idx;
+
+ kfree(vport->rx_ptype_lkup);
+ vport->rx_ptype_lkup = NULL;
+
+- unregister_netdev(vport->netdev);
+- free_netdev(vport->netdev);
++ if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
++ adapter->vport_config[idx]->flags)) {
++ unregister_netdev(vport->netdev);
++ free_netdev(vport->netdev);
++ }
+ vport->netdev = NULL;
+
+- adapter->netdevs[vport->idx] = NULL;
++ adapter->netdevs[idx] = NULL;
+ }
+
+ /**
+@@ -1541,13 +1545,22 @@ void idpf_init_task(struct work_struct *
+ }
+
+ for (index = 0; index < adapter->max_vports; index++) {
+- if (adapter->netdevs[index] &&
+- !test_bit(IDPF_VPORT_REG_NETDEV,
+- adapter->vport_config[index]->flags)) {
+- register_netdev(adapter->netdevs[index]);
+- set_bit(IDPF_VPORT_REG_NETDEV,
+- adapter->vport_config[index]->flags);
++ struct net_device *netdev = adapter->netdevs[index];
++ struct idpf_vport_config *vport_config;
++
++ vport_config = adapter->vport_config[index];
++
++ if (!netdev ||
++ test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
++ continue;
++
++ err = register_netdev(netdev);
++ if (err) {
++ dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
++ index, ERR_PTR(err));
++ continue;
+ }
++ set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
+ }
+
+ /* As all the required vports are created, clear the reset flag
--- /dev/null
+From stable+bounces-230169-greg=kroah.com@vger.kernel.org Tue Mar 24 15:16:09 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Tue, 24 Mar 2026 07:04:54 -0700
+Subject: idpf: detach and close netdevs while handling a reset
+To: stable@vger.kernel.org
+Cc: Emil Tantilov <emil.s.tantilov@intel.com>, Madhu Chittim <madhu.chittim@intel.com>, Samuel Salin <Samuel.salin@intel.com>, Tony Nguyen <anthony.l.nguyen@intel.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260324140456.832964-8-harshit.m.mogalapalli@oracle.com>
+
+From: Emil Tantilov <emil.s.tantilov@intel.com>
+
+[ Upstream commit 2e281e1155fc476c571c0bd2ffbfe28ab829a5c3 ]
+
+Protect the reset path from callbacks by setting the netdevs to detached
+state and close any netdevs in UP state until the reset handling has
+completed. During a reset, the driver will de-allocate resources for the
+vport, and there is no guarantee that those will recover, which is why the
+existing vport_ctrl_lock does not provide sufficient protection.
+
+idpf_detach_and_close() is called right before reset handling. If the
+reset handling succeeds, the netdevs state is recovered via call to
+idpf_attach_and_open(). If the reset handling fails the netdevs remain
+down. The detach/down calls are protected with RTNL lock to avoid racing
+with callbacks. On the recovery side the attach can be done without
+holding the RTNL lock as there are no callbacks expected at that point,
+due to detach/close always being done first in that flow.
+
+The previous logic restoring the netdevs state based on the
+IDPF_VPORT_UP_REQUESTED flag in the init task is not needed anymore, hence
+the removal of idpf_set_vport_state(). The IDPF_VPORT_UP_REQUESTED is
+still being used to restore the state of the netdevs following the reset,
+but has no use outside of the reset handling flow.
+
+idpf_init_hard_reset() is converted to void, since it was used as such and
+there is no error handling being done based on its return value.
+
+Before this change, invoking hard and soft resets simultaneously will
+cause the driver to lose the vport state:
+ip -br a
+<inf> UP
+echo 1 > /sys/class/net/ens801f0/device/reset& \
+ethtool -L ens801f0 combined 8
+ip -br a
+<inf> DOWN
+ip link set <inf> up
+ip -br a
+<inf> DOWN
+
+Also in case of a failure in the reset path, the netdev is left
+exposed to external callbacks, while vport resources are not
+initialized, leading to a crash on subsequent ifup/down:
+[408471.398966] idpf 0000:83:00.0: HW reset detected
+[408471.411744] idpf 0000:83:00.0: Device HW Reset initiated
+[408472.277901] idpf 0000:83:00.0: The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x2
+[408508.125551] BUG: kernel NULL pointer dereference, address: 0000000000000078
+[408508.126112] #PF: supervisor read access in kernel mode
+[408508.126687] #PF: error_code(0x0000) - not-present page
+[408508.127256] PGD 2aae2f067 P4D 0
+[408508.127824] Oops: Oops: 0000 [#1] SMP NOPTI
+...
+[408508.130871] RIP: 0010:idpf_stop+0x39/0x70 [idpf]
+...
+[408508.139193] Call Trace:
+[408508.139637] <TASK>
+[408508.140077] __dev_close_many+0xbb/0x260
+[408508.140533] __dev_change_flags+0x1cf/0x280
+[408508.140987] netif_change_flags+0x26/0x70
+[408508.141434] dev_change_flags+0x3d/0xb0
+[408508.141878] devinet_ioctl+0x460/0x890
+[408508.142321] inet_ioctl+0x18e/0x1d0
+[408508.142762] ? _copy_to_user+0x22/0x70
+[408508.143207] sock_do_ioctl+0x3d/0xe0
+[408508.143652] sock_ioctl+0x10e/0x330
+[408508.144091] ? find_held_lock+0x2b/0x80
+[408508.144537] __x64_sys_ioctl+0x96/0xe0
+[408508.144979] do_syscall_64+0x79/0x3d0
+[408508.145415] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[408508.145860] RIP: 0033:0x7f3e0bb4caff
+
+Fixes: 0fe45467a104 ("idpf: add create vport and netdev configuration")
+Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
+Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+(cherry picked from commit 2e281e1155fc476c571c0bd2ffbfe28ab829a5c3)
+[Harshit: Backport to 6.12.y, resolve conflicts at 5 places:
+These are caused by missing non-backportable commits:
+1. commit: bd74a86bc75d ("idpf: link NAPIs to queues")
+2. commit: f4312e6bfa2a ("idpf: implement core RDMA auxiliary dev
+create, init, and destroy")
+3. commit: 8dd72ebc73f3 ("idpf: convert vport state to bitmap")
+4. commit: bf86a012e676 ("idpf: implement remaining IDC RDMA core
+ callbacks and handlers") in 6.12.y]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_lib.c | 115 +++++++++++++++++------------
+ 1 file changed, 70 insertions(+), 45 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -693,6 +693,65 @@ static int idpf_init_mac_addr(struct idp
+ return 0;
+ }
+
++static void idpf_detach_and_close(struct idpf_adapter *adapter)
++{
++ int max_vports = adapter->max_vports;
++
++ for (int i = 0; i < max_vports; i++) {
++ struct net_device *netdev = adapter->netdevs[i];
++
++ /* If the interface is in detached state, that means the
++ * previous reset was not handled successfully for this
++ * vport.
++ */
++ if (!netif_device_present(netdev))
++ continue;
++
++ /* Hold RTNL to protect racing with callbacks */
++ rtnl_lock();
++ netif_device_detach(netdev);
++ if (netif_running(netdev)) {
++ set_bit(IDPF_VPORT_UP_REQUESTED,
++ adapter->vport_config[i]->flags);
++ dev_close(netdev);
++ }
++ rtnl_unlock();
++ }
++}
++
++static void idpf_attach_and_open(struct idpf_adapter *adapter)
++{
++ int max_vports = adapter->max_vports;
++
++ for (int i = 0; i < max_vports; i++) {
++ struct idpf_vport *vport = adapter->vports[i];
++ struct idpf_vport_config *vport_config;
++ struct net_device *netdev;
++
++ /* In case of a critical error in the init task, the vport
++ * will be freed. Only continue to restore the netdevs
++ * if the vport is allocated.
++ */
++ if (!vport)
++ continue;
++
++ /* No need for RTNL on attach as this function is called
++ * following detach and dev_close(). We do take RTNL for
++ * dev_open() below as it can race with external callbacks
++ * following the call to netif_device_attach().
++ */
++ netdev = adapter->netdevs[i];
++ netif_device_attach(netdev);
++ vport_config = adapter->vport_config[vport->idx];
++ if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED,
++ vport_config->flags)) {
++ rtnl_lock();
++ dev_open(netdev, NULL);
++ rtnl_unlock();
++ }
++ }
++}
++
+ /**
+ * idpf_cfg_netdev - Allocate, configure and register a netdev
+ * @vport: main vport structure
+@@ -990,10 +1049,11 @@ static void idpf_vport_dealloc(struct id
+ unsigned int i = vport->idx;
+
+ idpf_deinit_mac_addr(vport);
+- idpf_vport_stop(vport);
+
+- if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
++ if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) {
++ idpf_vport_stop(vport);
+ idpf_decfg_netdev(vport);
++ }
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ idpf_del_all_mac_filters(vport);
+
+@@ -1471,7 +1531,6 @@ void idpf_init_task(struct work_struct *
+ struct idpf_vport_config *vport_config;
+ struct idpf_vport_max_q max_q;
+ struct idpf_adapter *adapter;
+- struct idpf_netdev_priv *np;
+ struct idpf_vport *vport;
+ u16 num_default_vports;
+ struct pci_dev *pdev;
+@@ -1528,12 +1587,6 @@ void idpf_init_task(struct work_struct *
+ if (idpf_cfg_netdev(vport))
+ goto unwind_vports;
+
+- /* Once state is put into DOWN, driver is ready for dev_open */
+- np = netdev_priv(vport->netdev);
+- np->state = __IDPF_VPORT_DOWN;
+- if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
+- idpf_vport_open(vport);
+-
+ /* Spawn and return 'idpf_init_task' work queue until all the
+ * default vports are created
+ */
+@@ -1710,27 +1763,6 @@ static int idpf_check_reset_complete(str
+ }
+
+ /**
+- * idpf_set_vport_state - Set the vport state to be after the reset
+- * @adapter: Driver specific private structure
+- */
+-static void idpf_set_vport_state(struct idpf_adapter *adapter)
+-{
+- u16 i;
+-
+- for (i = 0; i < adapter->max_vports; i++) {
+- struct idpf_netdev_priv *np;
+-
+- if (!adapter->netdevs[i])
+- continue;
+-
+- np = netdev_priv(adapter->netdevs[i]);
+- if (np->state == __IDPF_VPORT_UP)
+- set_bit(IDPF_VPORT_UP_REQUESTED,
+- adapter->vport_config[i]->flags);
+- }
+-}
+-
+-/**
+ * idpf_init_hard_reset - Initiate a hardware reset
+ * @adapter: Driver specific private structure
+ *
+@@ -1738,35 +1770,23 @@ static void idpf_set_vport_state(struct
+ * reallocate. Also reinitialize the mailbox. Return 0 on success,
+ * negative on failure.
+ */
+-static int idpf_init_hard_reset(struct idpf_adapter *adapter)
++static void idpf_init_hard_reset(struct idpf_adapter *adapter)
+ {
+ struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
+ struct device *dev = &adapter->pdev->dev;
+- struct net_device *netdev;
+ int err;
+- u16 i;
+
++ idpf_detach_and_close(adapter);
+ mutex_lock(&adapter->vport_ctrl_lock);
+
+ dev_info(dev, "Device HW Reset initiated\n");
+
+- /* Avoid TX hangs on reset */
+- for (i = 0; i < adapter->max_vports; i++) {
+- netdev = adapter->netdevs[i];
+- if (!netdev)
+- continue;
+-
+- netif_carrier_off(netdev);
+- netif_tx_disable(netdev);
+- }
+-
+ /* Prepare for reset */
+ if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
+ } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
+ bool is_reset = idpf_is_reset_detected(adapter);
+
+- idpf_set_vport_state(adapter);
+ idpf_vc_core_deinit(adapter);
+ if (!is_reset)
+ reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
+@@ -1813,7 +1833,12 @@ static int idpf_init_hard_reset(struct i
+ unlock_mutex:
+ mutex_unlock(&adapter->vport_ctrl_lock);
+
+- return err;
++ /* Attempt to restore netdevs and initialize RDMA CORE AUX device,
++ * provided vc_core_init succeeded. It is still possible that
++ * vports are not allocated at this point if the init task failed.
++ */
++ if (!err)
++ idpf_attach_and_open(adapter);
+ }
+
+ /**
--- /dev/null
+From stable+bounces-230173-greg=kroah.com@vger.kernel.org Tue Mar 24 15:11:48 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Tue, 24 Mar 2026 07:04:55 -0700
+Subject: idpf: Fix RSS LUT NULL pointer crash on early ethtool operations
+To: stable@vger.kernel.org
+Cc: Sreedevi Joshi <sreedevi.joshi@intel.com>, Sridhar Samudrala <sridhar.samudrala@intel.com>, Emil Tantilov <emil.s.tantilov@intel.com>, Aleksandr Loktionov <aleksandr.loktionov@intel.com>, Paul Menzel <pmenzel@molgen.mpg.de>, Simon Horman <horms@kernel.org>, Samuel Salin <Samuel.salin@intel.com>, Tony Nguyen <anthony.l.nguyen@intel.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260324140456.832964-9-harshit.m.mogalapalli@oracle.com>
+
+From: Sreedevi Joshi <sreedevi.joshi@intel.com>
+
+[ Upstream commit 83f38f210b85676f40ba8586b5a8edae19b56995 ]
+
+The RSS LUT is not initialized until the interface comes up, causing
+the following NULL pointer crash when ethtool operations like rxhash on/off
+are performed before the interface is brought up for the first time.
+
+Move RSS LUT initialization from ndo_open to vport creation to ensure LUT
+is always available. This enables RSS configuration via ethtool before
+bringing the interface up. Simplify LUT management by maintaining all
+changes in the driver's soft copy and programming zeros to the indirection
+table when rxhash is disabled. Defer HW programming until the interface
+comes up if it is down during rxhash and LUT configuration changes.
+
+Steps to reproduce:
+** Load idpf driver; interfaces will be created
+ modprobe idpf
+** Before bringing the interfaces up, turn rxhash off
+ ethtool -K eth2 rxhash off
+
+[89408.371875] BUG: kernel NULL pointer dereference, address: 0000000000000000
+[89408.371908] #PF: supervisor read access in kernel mode
+[89408.371924] #PF: error_code(0x0000) - not-present page
+[89408.371940] PGD 0 P4D 0
+[89408.371953] Oops: Oops: 0000 [#1] SMP NOPTI
+<snip>
+[89408.372052] RIP: 0010:memcpy_orig+0x16/0x130
+[89408.372310] Call Trace:
+[89408.372317] <TASK>
+[89408.372326] ? idpf_set_features+0xfc/0x180 [idpf]
+[89408.372363] __netdev_update_features+0x295/0xde0
+[89408.372384] ethnl_set_features+0x15e/0x460
+[89408.372406] genl_family_rcv_msg_doit+0x11f/0x180
+[89408.372429] genl_rcv_msg+0x1ad/0x2b0
+[89408.372446] ? __pfx_ethnl_set_features+0x10/0x10
+[89408.372465] ? __pfx_genl_rcv_msg+0x10/0x10
+[89408.372482] netlink_rcv_skb+0x58/0x100
+[89408.372502] genl_rcv+0x2c/0x50
+[89408.372516] netlink_unicast+0x289/0x3e0
+[89408.372533] netlink_sendmsg+0x215/0x440
+[89408.372551] __sys_sendto+0x234/0x240
+[89408.372571] __x64_sys_sendto+0x28/0x30
+[89408.372585] x64_sys_call+0x1909/0x1da0
+[89408.372604] do_syscall_64+0x7a/0xfa0
+[89408.373140] ? clear_bhb_loop+0x60/0xb0
+[89408.373647] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[89408.378887] </TASK>
+<snip>
+
+Fixes: a251eee62133 ("idpf: add SRIOV support and other ndo_ops")
+Signed-off-by: Sreedevi Joshi <sreedevi.joshi@intel.com>
+Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Reviewed-by: Emil Tantilov <emil.s.tantilov@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+(cherry picked from commit 83f38f210b85676f40ba8586b5a8edae19b56995)
+[Harshit: While this is a clean cherry-pick I had to change a line of
+code where test_bit(IDPF_VPORT_UP,..) is used because 6.12.y branch
+doesn't have commit: 8dd72ebc73f3 ("idpf: convert vport state to
+bitmap")]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf.h | 2
+ drivers/net/ethernet/intel/idpf/idpf_lib.c | 94 ++++++++++--------------
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 36 +++------
+ drivers/net/ethernet/intel/idpf/idpf_txrx.h | 4 -
+ drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 9 ++
+ 5 files changed, 66 insertions(+), 79 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf.h
++++ b/drivers/net/ethernet/intel/idpf/idpf.h
+@@ -361,14 +361,12 @@ enum idpf_user_flags {
+ * @rss_key: RSS hash key
+ * @rss_lut_size: Size of RSS lookup table
+ * @rss_lut: RSS lookup table
+- * @cached_lut: Used to restore previously init RSS lut
+ */
+ struct idpf_rss_data {
+ u16 rss_key_size;
+ u8 *rss_key;
+ u16 rss_lut_size;
+ u32 *rss_lut;
+- u32 *cached_lut;
+ };
+
+ /**
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -999,7 +999,7 @@ static void idpf_vport_rel(struct idpf_v
+ u16 idx = vport->idx;
+
+ vport_config = adapter->vport_config[vport->idx];
+- idpf_deinit_rss(vport);
++ idpf_deinit_rss_lut(vport);
+ rss_data = &vport_config->user_config.rss_data;
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = NULL;
+@@ -1148,6 +1148,7 @@ static struct idpf_vport *idpf_vport_all
+ u16 idx = adapter->next_vport;
+ struct idpf_vport *vport;
+ u16 num_max_q;
++ int err;
+
+ if (idx == IDPF_NO_FREE_SLOT)
+ return NULL;
+@@ -1198,10 +1199,11 @@ static struct idpf_vport *idpf_vport_all
+
+ idpf_vport_init(vport, max_q);
+
+- /* This alloc is done separate from the LUT because it's not strictly
+- * dependent on how many queues we have. If we change number of queues
+- * and soft reset we'll need a new LUT but the key can remain the same
+- * for as long as the vport exists.
++ /* LUT and key are both initialized here. Key is not strictly dependent
++ * on how many queues we have. If we change number of queues and soft
++ * reset is initiated, LUT will be freed and a new LUT will be allocated
++ * as per the updated number of queues during vport bringup. However,
++ * the key remains the same for as long as the vport exists.
+ */
+ rss_data = &adapter->vport_config[idx]->user_config.rss_data;
+ rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
+@@ -1211,6 +1213,11 @@ static struct idpf_vport *idpf_vport_all
+ /* Initialize default rss key */
+ netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
+
++ /* Initialize default rss LUT */
++ err = idpf_init_rss_lut(vport);
++ if (err)
++ goto free_rss_key;
++
+ /* fill vport slot in the adapter struct */
+ adapter->vports[idx] = vport;
+ adapter->vport_ids[idx] = idpf_get_vport_id(vport);
+@@ -1221,6 +1228,8 @@ static struct idpf_vport *idpf_vport_all
+
+ return vport;
+
++free_rss_key:
++ kfree(rss_data->rss_key);
+ free_vector_idxs:
+ kfree(vport->q_vector_idxs);
+ free_vport:
+@@ -1397,6 +1406,7 @@ static int idpf_vport_open(struct idpf_v
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
++ struct idpf_rss_data *rss_data;
+ int err;
+
+ if (np->state != __IDPF_VPORT_DOWN)
+@@ -1479,12 +1489,21 @@ static int idpf_vport_open(struct idpf_v
+ idpf_restore_features(vport);
+
+ vport_config = adapter->vport_config[vport->idx];
+- if (vport_config->user_config.rss_data.rss_lut)
+- err = idpf_config_rss(vport);
+- else
+- err = idpf_init_rss(vport);
++ rss_data = &vport_config->user_config.rss_data;
++
++ if (!rss_data->rss_lut) {
++ err = idpf_init_rss_lut(vport);
++ if (err) {
++ dev_err(&adapter->pdev->dev,
++ "Failed to initialize RSS LUT for vport %u: %d\n",
++ vport->vport_id, err);
++ goto disable_vport;
++ }
++ }
++
++ err = idpf_config_rss(vport);
+ if (err) {
+- dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
++ dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
+ vport->vport_id, err);
+ goto disable_vport;
+ }
+@@ -1493,13 +1512,11 @@ static int idpf_vport_open(struct idpf_v
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
+ vport->vport_id, err);
+- goto deinit_rss;
++ goto disable_vport;
+ }
+
+ return 0;
+
+-deinit_rss:
+- idpf_deinit_rss(vport);
+ disable_vport:
+ idpf_send_disable_vport_msg(vport);
+ disable_queues:
+@@ -1936,7 +1953,7 @@ int idpf_initiate_soft_reset(struct idpf
+ idpf_vport_stop(vport);
+ }
+
+- idpf_deinit_rss(vport);
++ idpf_deinit_rss_lut(vport);
+ /* We're passing in vport here because we need its wait_queue
+ * to send a message and it should be getting all the vport
+ * config data out of the adapter but we need to be careful not
+@@ -2103,40 +2120,6 @@ static void idpf_set_rx_mode(struct net_
+ }
+
+ /**
+- * idpf_vport_manage_rss_lut - disable/enable RSS
+- * @vport: the vport being changed
+- *
+- * In the event of disable request for RSS, this function will zero out RSS
+- * LUT, while in the event of enable request for RSS, it will reconfigure RSS
+- * LUT with the default LUT configuration.
+- */
+-static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
+-{
+- bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
+- struct idpf_rss_data *rss_data;
+- u16 idx = vport->idx;
+- int lut_size;
+-
+- rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
+- lut_size = rss_data->rss_lut_size * sizeof(u32);
+-
+- if (ena) {
+- /* This will contain the default or user configured LUT */
+- memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
+- } else {
+- /* Save a copy of the current LUT to be restored later if
+- * requested.
+- */
+- memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
+-
+- /* Zero out the current LUT to disable */
+- memset(rss_data->rss_lut, 0, lut_size);
+- }
+-
+- return idpf_config_rss(vport);
+-}
+-
+-/**
+ * idpf_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+@@ -2161,10 +2144,19 @@ static int idpf_set_features(struct net_
+ }
+
+ if (changed & NETIF_F_RXHASH) {
++ struct idpf_netdev_priv *np = netdev_priv(netdev);
++
+ netdev->features ^= NETIF_F_RXHASH;
+- err = idpf_vport_manage_rss_lut(vport);
+- if (err)
+- goto unlock_mutex;
++
++ /* If the interface is not up when changing the rxhash, update
++ * to the HW is skipped. The updated LUT will be committed to
++ * the HW when the interface is brought up.
++ */
++ if (np->state == __IDPF_VPORT_UP) {
++ err = idpf_config_rss(vport);
++ if (err)
++ goto unlock_mutex;
++ }
+ }
+
+ if (changed & NETIF_F_GRO_HW) {
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -4068,57 +4068,47 @@ static void idpf_fill_dflt_rss_lut(struc
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+
+- for (i = 0; i < rss_data->rss_lut_size; i++) {
++ for (i = 0; i < rss_data->rss_lut_size; i++)
+ rss_data->rss_lut[i] = i % num_active_rxq;
+- rss_data->cached_lut[i] = rss_data->rss_lut[i];
+- }
+ }
+
+ /**
+- * idpf_init_rss - Allocate and initialize RSS resources
++ * idpf_init_rss_lut - Allocate and initialize RSS LUT
+ * @vport: virtual port
+ *
+- * Return 0 on success, negative on failure
++ * Return: 0 on success, negative on failure
+ */
+-int idpf_init_rss(struct idpf_vport *vport)
++int idpf_init_rss_lut(struct idpf_vport *vport)
+ {
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_rss_data *rss_data;
+- u32 lut_size;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
++ if (!rss_data->rss_lut) {
++ u32 lut_size;
+
+- lut_size = rss_data->rss_lut_size * sizeof(u32);
+- rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
+- if (!rss_data->rss_lut)
+- return -ENOMEM;
+-
+- rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
+- if (!rss_data->cached_lut) {
+- kfree(rss_data->rss_lut);
+- rss_data->rss_lut = NULL;
+-
+- return -ENOMEM;
++ lut_size = rss_data->rss_lut_size * sizeof(u32);
++ rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
++ if (!rss_data->rss_lut)
++ return -ENOMEM;
+ }
+
+ /* Fill the default RSS lut values */
+ idpf_fill_dflt_rss_lut(vport);
+
+- return idpf_config_rss(vport);
++ return 0;
+ }
+
+ /**
+- * idpf_deinit_rss - Release RSS resources
++ * idpf_deinit_rss_lut - Release RSS LUT
+ * @vport: virtual port
+ */
+-void idpf_deinit_rss(struct idpf_vport *vport)
++void idpf_deinit_rss_lut(struct idpf_vport *vport)
+ {
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_rss_data *rss_data;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+- kfree(rss_data->cached_lut);
+- rss_data->cached_lut = NULL;
+ kfree(rss_data->rss_lut);
+ rss_data->rss_lut = NULL;
+ }
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -1018,8 +1018,8 @@ void idpf_vport_intr_deinit(struct idpf_
+ int idpf_vport_intr_init(struct idpf_vport *vport);
+ void idpf_vport_intr_ena(struct idpf_vport *vport);
+ int idpf_config_rss(struct idpf_vport *vport);
+-int idpf_init_rss(struct idpf_vport *vport);
+-void idpf_deinit_rss(struct idpf_vport *vport);
++int idpf_init_rss_lut(struct idpf_vport *vport);
++void idpf_deinit_rss_lut(struct idpf_vport *vport);
+ int idpf_rx_bufs_init_all(struct idpf_vport *vport);
+ void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
+ unsigned int size);
+--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+@@ -2341,6 +2341,10 @@ int idpf_send_get_stats_msg(struct idpf_
+ * @vport: virtual port data structure
+ * @get: flag to set or get rss look up table
+ *
++ * When rxhash is disabled, RSS LUT will be configured with zeros. If rxhash
++ * is enabled, the LUT values stored in driver's soft copy will be used to setup
++ * the HW.
++ *
+ * Returns 0 on success, negative on failure.
+ */
+ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
+@@ -2351,10 +2355,12 @@ int idpf_send_get_set_rss_lut_msg(struct
+ struct idpf_rss_data *rss_data;
+ int buf_size, lut_buf_size;
+ ssize_t reply_sz;
++ bool rxhash_ena;
+ int i;
+
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
++ rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
+ buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
+ rl = kzalloc(buf_size, GFP_KERNEL);
+ if (!rl)
+@@ -2376,7 +2382,8 @@ int idpf_send_get_set_rss_lut_msg(struct
+ } else {
+ rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
+ for (i = 0; i < rss_data->rss_lut_size; i++)
+- rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
++ rl->lut[i] = rxhash_ena ?
++ cpu_to_le32(rss_data->rss_lut[i]) : 0;
+
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
+ }
--- /dev/null
+From stable+bounces-230172-greg=kroah.com@vger.kernel.org Tue Mar 24 15:16:35 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Tue, 24 Mar 2026 07:04:56 -0700
+Subject: idpf: Fix RSS LUT NULL ptr issue after soft reset
+To: stable@vger.kernel.org
+Cc: Sreedevi Joshi <sreedevi.joshi@intel.com>, Aleksandr Loktionov <aleksandr.loktionov@intel.com>, Sridhar Samudrala <sridhar.samudrala@intel.com>, Emil Tantilov <emil.s.tantilov@intel.com>, Simon Horman <horms@kernel.org>, Samuel Salin <Samuel.salin@intel.com>, Tony Nguyen <anthony.l.nguyen@intel.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260324140456.832964-10-harshit.m.mogalapalli@oracle.com>
+
+From: Sreedevi Joshi <sreedevi.joshi@intel.com>
+
+[ Upstream commit ebecca5b093895da801b3eba1a55b4ec4027d196 ]
+
+During soft reset, the RSS LUT is freed and not restored unless the
+interface is up. If an ethtool command that accesses the rss lut is
+attempted immediately after reset, it will result in NULL ptr
+dereference. Also, there is no need to reset the rss lut if the soft reset
+does not involve queue count change.
+
+After soft reset, set the RSS LUT to default values based on the updated
+queue count only if the reset was a result of a queue count change and
+the LUT was not configured by the user. In all other cases, don't touch
+the LUT.
+
+Steps to reproduce:
+
+** Bring the interface down (if up)
+ifconfig eth1 down
+
+** update the queue count (eg., 27->20)
+ethtool -L eth1 combined 20
+
+** display the RSS LUT
+ethtool -x eth1
+
+[82375.558338] BUG: kernel NULL pointer dereference, address: 0000000000000000
+[82375.558373] #PF: supervisor read access in kernel mode
+[82375.558391] #PF: error_code(0x0000) - not-present page
+[82375.558408] PGD 0 P4D 0
+[82375.558421] Oops: Oops: 0000 [#1] SMP NOPTI
+<snip>
+[82375.558516] RIP: 0010:idpf_get_rxfh+0x108/0x150 [idpf]
+[82375.558786] Call Trace:
+[82375.558793] <TASK>
+[82375.558804] rss_prepare.isra.0+0x187/0x2a0
+[82375.558827] rss_prepare_data+0x3a/0x50
+[82375.558845] ethnl_default_doit+0x13d/0x3e0
+[82375.558863] genl_family_rcv_msg_doit+0x11f/0x180
+[82375.558886] genl_rcv_msg+0x1ad/0x2b0
+[82375.558902] ? __pfx_ethnl_default_doit+0x10/0x10
+[82375.558920] ? __pfx_genl_rcv_msg+0x10/0x10
+[82375.558937] netlink_rcv_skb+0x58/0x100
+[82375.558957] genl_rcv+0x2c/0x50
+[82375.558971] netlink_unicast+0x289/0x3e0
+[82375.558988] netlink_sendmsg+0x215/0x440
+[82375.559005] __sys_sendto+0x234/0x240
+[82375.559555] __x64_sys_sendto+0x28/0x30
+[82375.560068] x64_sys_call+0x1909/0x1da0
+[82375.560576] do_syscall_64+0x7a/0xfa0
+[82375.561076] ? clear_bhb_loop+0x60/0xb0
+[82375.561567] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+<snip>
+
+Fixes: 02cbfba1add5 ("idpf: add ethtool callbacks")
+Signed-off-by: Sreedevi Joshi <sreedevi.joshi@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Reviewed-by: Emil Tantilov <emil.s.tantilov@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+(cherry picked from commit ebecca5b093895da801b3eba1a55b4ec4027d196)
+[Harshit: backport to 6.12.y, conflicts due to missing commits:
+8dd72ebc73f3 ("idpf: convert vport state to bitmap") and bd74a86bc75d
+("idpf: link NAPIs to queues") which change the idpf_vport_open/stop()
+APIs to also take an rtnl argument]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_lib.c | 20 ++++----------------
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 2 +-
+ drivers/net/ethernet/intel/idpf/idpf_txrx.h | 1 +
+ 3 files changed, 6 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -1405,8 +1405,6 @@ static int idpf_vport_open(struct idpf_v
+ {
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_adapter *adapter = vport->adapter;
+- struct idpf_vport_config *vport_config;
+- struct idpf_rss_data *rss_data;
+ int err;
+
+ if (np->state != __IDPF_VPORT_DOWN)
+@@ -1488,19 +1486,6 @@ static int idpf_vport_open(struct idpf_v
+
+ idpf_restore_features(vport);
+
+- vport_config = adapter->vport_config[vport->idx];
+- rss_data = &vport_config->user_config.rss_data;
+-
+- if (!rss_data->rss_lut) {
+- err = idpf_init_rss_lut(vport);
+- if (err) {
+- dev_err(&adapter->pdev->dev,
+- "Failed to initialize RSS LUT for vport %u: %d\n",
+- vport->vport_id, err);
+- goto disable_vport;
+- }
+- }
+-
+ err = idpf_config_rss(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
+@@ -1953,7 +1938,6 @@ int idpf_initiate_soft_reset(struct idpf
+ idpf_vport_stop(vport);
+ }
+
+- idpf_deinit_rss_lut(vport);
+ /* We're passing in vport here because we need its wait_queue
+ * to send a message and it should be getting all the vport
+ * config data out of the adapter but we need to be careful not
+@@ -1979,6 +1963,10 @@ int idpf_initiate_soft_reset(struct idpf
+ if (err)
+ goto err_open;
+
++ if (reset_cause == IDPF_SR_Q_CHANGE &&
++ !netif_is_rxfh_configured(vport->netdev))
++ idpf_fill_dflt_rss_lut(vport);
++
+ if (current_state == __IDPF_VPORT_UP)
+ err = idpf_vport_open(vport);
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -4059,7 +4059,7 @@ int idpf_config_rss(struct idpf_vport *v
+ * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
+ * @vport: virtual port structure
+ */
+-static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
++void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+ {
+ struct idpf_adapter *adapter = vport->adapter;
+ u16 num_active_rxq = vport->num_rxq;
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -1017,6 +1017,7 @@ void idpf_vport_intr_update_itr_ena_irq(
+ void idpf_vport_intr_deinit(struct idpf_vport *vport);
+ int idpf_vport_intr_init(struct idpf_vport *vport);
+ void idpf_vport_intr_ena(struct idpf_vport *vport);
++void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
+ int idpf_config_rss(struct idpf_vport *vport);
+ int idpf_init_rss_lut(struct idpf_vport *vport);
+ void idpf_deinit_rss_lut(struct idpf_vport *vport);
--- /dev/null
+From stable+bounces-230168-greg=kroah.com@vger.kernel.org Tue Mar 24 15:11:12 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Tue, 24 Mar 2026 07:04:49 -0700
+Subject: landlock: Fix handling of disconnected directories
+To: stable@vger.kernel.org
+Cc: "Mickaël Salaün" <mic@digikod.net>, "Christian Brauner" <brauner@kernel.org>, "Günther Noack" <gnoack@google.com>, "Song Liu" <song@kernel.org>, "Tingmao Wang" <m@maowtm.org>, "Harshit Mogalapalli" <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260324140456.832964-3-harshit.m.mogalapalli@oracle.com>
+
+From: Mickaël Salaün <mic@digikod.net>
+
+[ Upstream commit 49c9e09d961025b22e61ef9ad56aa1c21b6ce2f1 ]
+
+Disconnected files or directories can appear when they are visible and
+opened from a bind mount, but have been renamed or moved from the source
+of the bind mount in a way that makes them inaccessible from the mount
+point (i.e. out of scope).
+
+Previously, access rights tied to files or directories opened through a
+disconnected directory were collected by walking the related hierarchy
+down to the root of the filesystem, without taking into account the
+mount point because it couldn't be found. This could lead to
+inconsistent access results, potential access right widening, and
+hard-to-debug renames, especially since such paths cannot be printed.
+
+For a sandboxed task to create a disconnected directory, it needs to
+have write access (i.e. FS_MAKE_REG, FS_REMOVE_FILE, and FS_REFER) to
+the underlying source of the bind mount, and read access to the related
+mount point. Because a sandboxed task cannot acquire more access
+rights than those defined by its Landlock domain, this could lead to
+inconsistent access rights due to missing permissions that should be
+inherited from the mount point hierarchy, while inheriting permissions
+from the filesystem hierarchy hidden by this mount point instead.
+
+Landlock now handles files and directories opened from disconnected
+directories by taking into account the filesystem hierarchy when the
+mount point is not found in the hierarchy walk, and also always taking
+into account the mount point from which these disconnected directories
+were opened. This ensures that a rename is not allowed if it would
+widen access rights [1].
+
+The rationale is that, even if disconnected hierarchies might not be
+visible or accessible to a sandboxed task, relying on the collected
+access rights from them improves the guarantee that access rights will
+not be widened during a rename because of the access right comparison
+between the source and the destination (see LANDLOCK_ACCESS_FS_REFER).
+It may look like this would grant more access on disconnected files and
+directories, but the security policies are always enforced for all the
+evaluated hierarchies. This new behavior should be less surprising to
+users and safer from an access control perspective.
+
+Remove a wrong WARN_ON_ONCE() canary in collect_domain_accesses() and
+fix the related comment.
+
+Because opened files have their access rights stored in the related file
+security properties, there is no impact for disconnected or unlinked
+files.
+
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Günther Noack <gnoack@google.com>
+Cc: Song Liu <song@kernel.org>
+Reported-by: Tingmao Wang <m@maowtm.org>
+Closes: https://lore.kernel.org/r/027d5190-b37a-40a8-84e9-4ccbc352bcdf@maowtm.org
+Closes: https://lore.kernel.org/r/09b24128f86973a6022e6aa8338945fcfb9a33e4.1749925391.git.m@maowtm.org
+Fixes: b91c3e4ea756 ("landlock: Add support for file reparenting with LANDLOCK_ACCESS_FS_REFER")
+Fixes: cb2c7d1a1776 ("landlock: Support filesystem access-control")
+Link: https://lore.kernel.org/r/b0f46246-f2c5-42ca-93ce-0d629702a987@maowtm.org [1]
+Reviewed-by: Tingmao Wang <m@maowtm.org>
+Link: https://lore.kernel.org/r/20251128172200.760753-2-mic@digikod.net
+Signed-off-by: Mickaël Salaün <mic@digikod.net>
+(cherry picked from commit 49c9e09d961025b22e61ef9ad56aa1c21b6ce2f1)
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/landlock/errata/abi-1.h | 16 +++++++++++++++
+ security/landlock/fs.c | 40 +++++++++++++++++++++++++++------------
+ 2 files changed, 44 insertions(+), 12 deletions(-)
+ create mode 100644 security/landlock/errata/abi-1.h
+
+--- /dev/null
++++ b/security/landlock/errata/abi-1.h
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++/**
++ * DOC: erratum_3
++ *
++ * Erratum 3: Disconnected directory handling
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This fix addresses an issue with disconnected directories that occur when a
++ * directory is moved outside the scope of a bind mount. The change ensures
++ * that evaluated access rights include both those from the disconnected file
++ * hierarchy down to its filesystem root and those from the related mount point
++ * hierarchy. This prevents access right widening through rename or link
++ * actions.
++ */
++LANDLOCK_ERRATUM(3)
+--- a/security/landlock/fs.c
++++ b/security/landlock/fs.c
+@@ -899,21 +899,31 @@ jump_up:
+ break;
+ }
+ }
++
+ if (unlikely(IS_ROOT(walker_path.dentry))) {
+- /*
+- * Stops at disconnected root directories. Only allows
+- * access to internal filesystems (e.g. nsfs, which is
+- * reachable through /proc/<pid>/ns/<namespace>).
+- */
+- if (walker_path.mnt->mnt_flags & MNT_INTERNAL) {
++ if (likely(walker_path.mnt->mnt_flags & MNT_INTERNAL)) {
++ /*
++ * Stops and allows access when reaching disconnected root
++ * directories that are part of internal filesystems (e.g. nsfs,
++ * which is reachable through /proc/<pid>/ns/<namespace>).
++ */
+ allowed_parent1 = true;
+ allowed_parent2 = true;
++ break;
+ }
+- break;
++
++ /*
++ * We reached a disconnected root directory from a bind mount.
++ * Let's continue the walk with the mount point we missed.
++ */
++ dput(walker_path.dentry);
++ walker_path.dentry = walker_path.mnt->mnt_root;
++ dget(walker_path.dentry);
++ } else {
++ parent_dentry = dget_parent(walker_path.dentry);
++ dput(walker_path.dentry);
++ walker_path.dentry = parent_dentry;
+ }
+- parent_dentry = dget_parent(walker_path.dentry);
+- dput(walker_path.dentry);
+- walker_path.dentry = parent_dentry;
+ }
+ path_put(&walker_path);
+
+@@ -990,6 +1000,9 @@ static access_mask_t maybe_remove(const
+ * file. While walking from @dir to @mnt_root, we record all the domain's
+ * allowed accesses in @layer_masks_dom.
+ *
++ * Because of disconnected directories, this walk may not reach @mnt_dir. In
++ * this case, the walk will continue to @mnt_dir after this call.
++ *
+ * This is similar to is_access_to_paths_allowed() but much simpler because it
+ * only handles walking on the same mount point and only checks one set of
+ * accesses.
+@@ -1031,8 +1044,11 @@ static bool collect_domain_accesses(
+ break;
+ }
+
+- /* We should not reach a root other than @mnt_root. */
+- if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
++ /*
++ * Stops at the mount point or the filesystem root for a disconnected
++ * directory.
++ */
++ if (dir == mnt_root || unlikely(IS_ROOT(dir)))
+ break;
+
+ parent_dentry = dget_parent(dir);
--- /dev/null
+From stable+bounces-230165-greg=kroah.com@vger.kernel.org Tue Mar 24 15:10:48 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Tue, 24 Mar 2026 07:04:48 -0700
+Subject: landlock: Optimize file path walks and prepare for audit support
+To: stable@vger.kernel.org
+Cc: "Mickaël Salaün" <mic@digikod.net>, "Günther Noack" <gnoack@google.com>, "Harshit Mogalapalli" <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260324140456.832964-2-harshit.m.mogalapalli@oracle.com>
+
+From: Mickaël Salaün <mic@digikod.net>
+
+[ Upstream commit d617f0d72d8041c7099fd04a62db0f0fa5331c1a ]
+
+Always synchronize access_masked_parent* with access_request_parent*
+according to allowed_parent*. This is required for audit support to be
+able to get back to the reason of denial.
+
+In a rename/link action, instead of always checking a rule two times for
+the same parent directory of the source and the destination files, only
+check it when an action on a child was not already allowed. This also
+enables us to keep consistent allowed_parent* status, which is required
+to get back to the reason of denial.
+
+For internal mount points, only upgrade allowed_parent* to true but do
+not wrongfully set both of them to false otherwise. This is also
+required to get back to the reason of denial.
+
+This does not impact the current behavior but slightly optimize code and
+prepare for audit support that needs to know the exact reason why an
+access was denied.
+
+Cc: Günther Noack <gnoack@google.com>
+Link: https://lore.kernel.org/r/20250108154338.1129069-14-mic@digikod.net
+Signed-off-by: Mickaël Salaün <mic@digikod.net>
+(cherry picked from commit d617f0d72d8041c7099fd04a62db0f0fa5331c1a)
+Stable-dep-of: 49c9e09d9610 ("landlock: Fix handling of disconnected directories")
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/landlock/fs.c | 44 +++++++++++++++++++++++++++-----------------
+ 1 file changed, 27 insertions(+), 17 deletions(-)
+
+--- a/security/landlock/fs.c
++++ b/security/landlock/fs.c
+@@ -849,15 +849,6 @@ static bool is_access_to_paths_allowed(
+ child1_is_directory, layer_masks_parent2,
+ layer_masks_child2,
+ child2_is_directory))) {
+- allowed_parent1 = scope_to_request(
+- access_request_parent1, layer_masks_parent1);
+- allowed_parent2 = scope_to_request(
+- access_request_parent2, layer_masks_parent2);
+-
+- /* Stops when all accesses are granted. */
+- if (allowed_parent1 && allowed_parent2)
+- break;
+-
+ /*
+ * Now, downgrades the remaining checks from domain
+ * handled accesses to requested accesses.
+@@ -865,15 +856,32 @@ static bool is_access_to_paths_allowed(
+ is_dom_check = false;
+ access_masked_parent1 = access_request_parent1;
+ access_masked_parent2 = access_request_parent2;
++
++ allowed_parent1 =
++ allowed_parent1 ||
++ scope_to_request(access_masked_parent1,
++ layer_masks_parent1);
++ allowed_parent2 =
++ allowed_parent2 ||
++ scope_to_request(access_masked_parent2,
++ layer_masks_parent2);
++
++ /* Stops when all accesses are granted. */
++ if (allowed_parent1 && allowed_parent2)
++ break;
+ }
+
+ rule = find_rule(domain, walker_path.dentry);
+- allowed_parent1 = landlock_unmask_layers(
+- rule, access_masked_parent1, layer_masks_parent1,
+- ARRAY_SIZE(*layer_masks_parent1));
+- allowed_parent2 = landlock_unmask_layers(
+- rule, access_masked_parent2, layer_masks_parent2,
+- ARRAY_SIZE(*layer_masks_parent2));
++ allowed_parent1 = allowed_parent1 ||
++ landlock_unmask_layers(
++ rule, access_masked_parent1,
++ layer_masks_parent1,
++ ARRAY_SIZE(*layer_masks_parent1));
++ allowed_parent2 = allowed_parent2 ||
++ landlock_unmask_layers(
++ rule, access_masked_parent2,
++ layer_masks_parent2,
++ ARRAY_SIZE(*layer_masks_parent2));
+
+ /* Stops when a rule from each layer grants access. */
+ if (allowed_parent1 && allowed_parent2)
+@@ -897,8 +905,10 @@ jump_up:
+ * access to internal filesystems (e.g. nsfs, which is
+ * reachable through /proc/<pid>/ns/<namespace>).
+ */
+- allowed_parent1 = allowed_parent2 =
+- !!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
++ if (walker_path.mnt->mnt_flags & MNT_INTERNAL) {
++ allowed_parent1 = true;
++ allowed_parent2 = true;
++ }
+ break;
+ }
+ parent_dentry = dget_parent(walker_path.dentry);
--- /dev/null
+From 1bfe9fb5ed2667fb075682408b776b5273162615 Mon Sep 17 00:00:00 2001
+From: Josh Law <objecting@objecting.org>
+Date: Sat, 21 Mar 2026 10:54:25 -0700
+Subject: mm/damon/sysfs: check contexts->nr before accessing contexts_arr[0]
+
+From: Josh Law <objecting@objecting.org>
+
+commit 1bfe9fb5ed2667fb075682408b776b5273162615 upstream.
+
+Multiple sysfs command paths dereference contexts_arr[0] without first
+verifying that kdamond->contexts->nr == 1. A user can set nr_contexts to
+0 via sysfs while DAMON is running, causing NULL pointer dereferences.
+
+In more detail, the issue can be triggered by privileged users like
+below.
+
+First, start DAMON and make contexts directory empty
+(kdamond->contexts->nr == 0).
+
+ # damo start
+ # cd /sys/kernel/mm/damon/admin/kdamonds/0
+ # echo 0 > contexts/nr_contexts
+
+Then, each of below commands will cause the NULL pointer dereference.
+
+ # echo update_schemes_stats > state
+ # echo update_schemes_tried_regions > state
+ # echo update_schemes_tried_bytes > state
+ # echo update_schemes_effective_quotas > state
+ # echo update_tuned_intervals > state
+
+Guard all commands (except OFF) at the entry point of
+damon_sysfs_handle_cmd().
+
+Link: https://lkml.kernel.org/r/20260321175427.86000-3-sj@kernel.org
+Fixes: 0ac32b8affb5 ("mm/damon/sysfs: support DAMOS stats")
+Signed-off-by: Josh Law <objecting@objecting.org>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [5.18+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1548,6 +1548,9 @@ static int damon_sysfs_handle_cmd(enum d
+ {
+ bool need_wait = true;
+
++ if (cmd != DAMON_SYSFS_CMD_OFF && kdamond->contexts->nr != 1)
++ return -EINVAL;
++
+ /* Handle commands that doesn't access DAMON context-internal data */
+ switch (cmd) {
+ case DAMON_SYSFS_CMD_ON:
--- /dev/null
+From 561399680@139.com Tue Mar 31 09:12:38 2026
+From: XiaoHua Wang <561399680@139.com>
+Date: Tue, 31 Mar 2026 15:12:24 +0800
+Subject: net: add proper RCU protection to /proc/net/ptype
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: netdev@vger.kernel.org, Eric Dumazet <edumazet@google.com>, Yin Fengwei <fengwei_yin@linux.alibaba.com>, Dong Chenchen <dongchenchen2@huawei.com>, Willem de Bruijn <willemb@google.com>, Jakub Kicinski <kuba@kernel.org>, XiaoHua Wang <561399680@139.com>
+Message-ID: <20260331071224.14601-1-561399680@139.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit f613e8b4afea0cd17c7168e8b00e25bc8d33175d ]
+
+Yin Fengwei reported an RCU stall in ptype_seq_show() and provided
+a patch.
+
+Real issue is that ptype_seq_next() and ptype_seq_show() violate
+RCU rules.
+
+ptype_seq_show() runs under rcu_read_lock(), and reads pt->dev
+to get device name without any barrier.
+
+At the same time, concurrent writers can remove a packet_type structure
+(which is correctly freed after an RCU grace period) and clear pt->dev
+without an RCU grace period.
+
+Define ptype_iter_state to carry a dev pointer along seq_net_private:
+
+struct ptype_iter_state {
+ struct seq_net_private p;
+ struct net_device *dev; // added in this patch
+};
+
+We need to record the device pointer in ptype_get_idx() and
+ptype_seq_next() so that ptype_seq_show() is safe against
+concurrent pt->dev changes.
+
+We also need to add full RCU protection in ptype_seq_next().
+(Missing READ_ONCE() when reading list.next values)
+
+Many thanks to Dong Chenchen for providing a repro.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Fixes: 1d10f8a1f40b ("net-procfs: show net devices bound packet types")
+Fixes: c353e8983e0d ("net: introduce per netns packet chains")
+Reported-by: Yin Fengwei <fengwei_yin@linux.alibaba.com>
+Reported-by: Dong Chenchen <dongchenchen2@huawei.com>
+Closes: https://lore.kernel.org/netdev/CANn89iKRRKPnWjJmb-_3a=sq+9h6DvTQM4DBZHT5ZRGPMzQaiA@mail.gmail.com/T/#m7b80b9fc9b9267f90e0b7aad557595f686f9c50d
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Tested-by: Yin Fengwei <fengwei_yin@linux.alibaba.com>
+Link: https://patch.msgid.link/20260202205217.2881198-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Some adjustments have been made. ]
+Signed-off-by: XiaoHua Wang <561399680@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/net-procfs.c | 49 +++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 33 insertions(+), 16 deletions(-)
+
+--- a/net/core/net-procfs.c
++++ b/net/core/net-procfs.c
+@@ -168,8 +168,14 @@ static const struct seq_operations softn
+ .show = softnet_seq_show,
+ };
+
++struct ptype_iter_state {
++ struct seq_net_private p;
++ struct net_device *dev;
++};
++
+ static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
+ {
++ struct ptype_iter_state *iter = seq->private;
+ struct list_head *ptype_list = NULL;
+ struct packet_type *pt = NULL;
+ struct net_device *dev;
+@@ -179,12 +185,16 @@ static void *ptype_get_idx(struct seq_fi
+ for_each_netdev_rcu(seq_file_net(seq), dev) {
+ ptype_list = &dev->ptype_all;
+ list_for_each_entry_rcu(pt, ptype_list, list) {
+- if (i == pos)
++ if (i == pos) {
++ iter->dev = dev;
+ return pt;
++ }
+ ++i;
+ }
+ }
+
++ iter->dev = NULL;
++
+ list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
+ if (i == pos)
+ return pt;
+@@ -210,6 +220,7 @@ static void *ptype_seq_start(struct seq_
+
+ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
++ struct ptype_iter_state *iter = seq->private;
+ struct net_device *dev;
+ struct packet_type *pt;
+ struct list_head *nxt;
+@@ -220,20 +231,21 @@ static void *ptype_seq_next(struct seq_f
+ return ptype_get_idx(seq, 0);
+
+ pt = v;
+- nxt = pt->list.next;
+- if (pt->dev) {
+- if (nxt != &pt->dev->ptype_all)
++ nxt = READ_ONCE(pt->list.next);
++ dev = iter->dev;
++ if (dev) {
++ if (nxt != &dev->ptype_all)
+ goto found;
+
+- dev = pt->dev;
+ for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
+- if (!list_empty(&dev->ptype_all)) {
+- nxt = dev->ptype_all.next;
++ nxt = READ_ONCE(dev->ptype_all.next);
++ if (nxt != &dev->ptype_all) {
++ iter->dev = dev;
+ goto found;
+ }
+ }
+-
+- nxt = net_hotdata.ptype_all.next;
++ iter->dev = NULL;
++ nxt = READ_ONCE(net_hotdata.ptype_all.next);
+ goto ptype_all;
+ }
+
+@@ -242,14 +254,14 @@ ptype_all:
+ if (nxt != &net_hotdata.ptype_all)
+ goto found;
+ hash = 0;
+- nxt = ptype_base[0].next;
++ nxt = READ_ONCE(ptype_base[0].next);
+ } else
+ hash = ntohs(pt->type) & PTYPE_HASH_MASK;
+
+ while (nxt == &ptype_base[hash]) {
+ if (++hash >= PTYPE_HASH_SIZE)
+ return NULL;
+- nxt = ptype_base[hash].next;
++ nxt = READ_ONCE(ptype_base[hash].next);
+ }
+ found:
+ return list_entry(nxt, struct packet_type, list);
+@@ -263,19 +275,24 @@ static void ptype_seq_stop(struct seq_fi
+
+ static int ptype_seq_show(struct seq_file *seq, void *v)
+ {
++ struct ptype_iter_state *iter = seq->private;
+ struct packet_type *pt = v;
++ struct net_device *dev;
+
+- if (v == SEQ_START_TOKEN)
++ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Type Device Function\n");
+- else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
+- (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
++ return 0;
++ }
++ dev = iter->dev;
++ if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
++ (!dev || net_eq(dev_net(dev), seq_file_net(seq)))) {
+ if (pt->type == htons(ETH_P_ALL))
+ seq_puts(seq, "ALL ");
+ else
+ seq_printf(seq, "%04x", ntohs(pt->type));
+
+ seq_printf(seq, " %-8s %ps\n",
+- pt->dev ? pt->dev->name : "", pt->func);
++ dev ? dev->name : "", pt->func);
+ }
+
+ return 0;
+@@ -299,7 +316,7 @@ static int __net_init dev_proc_net_init(
+ &softnet_seq_ops))
+ goto out_dev;
+ if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
+- sizeof(struct seq_net_private)))
++ sizeof(struct ptype_iter_state)))
+ goto out_softnet;
+
+ if (wext_proc_init(net))
--- /dev/null
+From 1f6ee9be92f8df85a8c9a5a78c20fd39c0c21a95 Mon Sep 17 00:00:00 2001
+From: Fei Lv <feilv@asrmicro.com>
+Date: Mon, 22 Jul 2024 18:14:43 +0800
+Subject: ovl: make fsync after metadata copy-up opt-in mount option
+
+From: Fei Lv <feilv@asrmicro.com>
+
+commit 1f6ee9be92f8df85a8c9a5a78c20fd39c0c21a95 upstream.
+
+Commit 7d6899fb69d25 ("ovl: fsync after metadata copy-up") was done to
+fix durability of overlayfs copy up on an upper filesystem which does
+not enforce ordering on storing of metadata changes (e.g. ubifs).
+
+In an earlier revision of the regressing commit by Fei Lv, the metadata
+fsync behavior was opt-in via a new "fsync=strict" mount option.
+We were hoping that the opt-in mount option could be avoided, so the
+change was only made to depend on metacopy=off, in the hope of not
+hurting performance of metadata heavy workloads, which are more likely
+to be using metacopy=on.
+
+This hope was proven wrong by a performance regression report from Google
+COS workload after upgrade to kernel 6.12.
+
+This is an adaptation of Fei's original "fsync=strict" mount option
+to the existing upstream code.
+
+The new mount option is mutually exclusive with the "volatile" mount
+option, so the latter is now an alias to the "fsync=volatile" mount
+option.
+
+Reported-by: Chenglong Tang <chenglongtang@google.com>
+Closes: https://lore.kernel.org/linux-unionfs/CAOdxtTadAFH01Vui1FvWfcmQ8jH1O45owTzUcpYbNvBxnLeM7Q@mail.gmail.com/
+Link: https://lore.kernel.org/linux-unionfs/CAOQ4uxgKC1SgjMWre=fUb00v8rxtd6sQi-S+dxR8oDzAuiGu8g@mail.gmail.com/
+Fixes: 7d6899fb69d25 ("ovl: fsync after metadata copy-up")
+Depends: 50e638beb67e0 ("ovl: Use str_on_off() helper in ovl_show_options()")
+Cc: stable@vger.kernel.org # v6.12+
+Signed-off-by: Fei Lv <feilv@asrmicro.com>
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/filesystems/overlayfs.rst | 50 ++++++++++++++++++++++++++++++++
+ fs/overlayfs/copy_up.c | 6 +--
+ fs/overlayfs/overlayfs.h | 21 +++++++++++++
+ fs/overlayfs/ovl_entry.h | 7 ----
+ fs/overlayfs/params.c | 33 +++++++++++++++++----
+ fs/overlayfs/super.c | 2 -
+ 6 files changed, 104 insertions(+), 15 deletions(-)
+
+--- a/Documentation/filesystems/overlayfs.rst
++++ b/Documentation/filesystems/overlayfs.rst
+@@ -745,6 +745,56 @@ controlled by the "uuid" mount option, w
+ mounted with "uuid=on".
+
+
++Durability and copy up
++----------------------
++
++The fsync(2) system call ensures that the data and metadata of a file
++are safely written to the backing storage, which is expected to
++guarantee the existence of the information post system crash.
++
++Without an fsync(2) call, there is no guarantee that the observed
++data after a system crash will be either the old or the new data, but
++in practice, the observed data after crash is often the old or new data
++or a mix of both.
++
++When an overlayfs file is modified for the first time, copy up will
++create a copy of the lower file and its parent directories in the upper
++layer. Since the Linux filesystem API does not enforce any particular
++ordering on storing changes without explicit fsync(2) calls, in case
++of a system crash, the upper file could end up with no data at all
++(i.e. zeros), which would be an unusual outcome. To avoid this
++experience, overlayfs calls fsync(2) on the upper file before completing
++data copy up with rename(2) or link(2) to make the copy up "atomic".
++
++By default, overlayfs does not explicitly call fsync(2) on copied up
++directories or on metadata-only copy up, so it provides no guarantee to
++persist the user's modification unless the user calls fsync(2).
++The fsync during copy up only guarantees that if a copy up is observed
++after a crash, the observed data is not zeroes or intermediate values
++from the copy up staging area.
++
++On traditional local filesystems with a single journal (e.g. ext4, xfs),
++fsync on a file also persists the parent directory changes, because they
++are usually modified in the same transaction, so metadata durability during
++data copy up effectively comes for free. Overlayfs further limits risk by
++disallowing network filesystems as upper layer.
++
++Overlayfs can be tuned to prefer performance or durability when storing
++to the underlying upper layer. This is controlled by the "fsync" mount
++option, which supports these values:
++
++- "auto": (default)
++ Call fsync(2) on upper file before completion of data copy up.
++ No explicit fsync(2) on directory or metadata-only copy up.
++- "strict":
++ Call fsync(2) on upper file and directories before completion of any
++ copy up.
++- "volatile": [*]
++ Prefer performance over durability (see `Volatile mount`_)
++
++[*] The mount option "volatile" is an alias to "fsync=volatile".
++
++
+ Volatile mount
+ --------------
+
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -1160,15 +1160,15 @@ static int ovl_copy_up_one(struct dentry
+ return -EOVERFLOW;
+
+ /*
+- * With metacopy disabled, we fsync after final metadata copyup, for
++ * With "fsync=strict", we fsync after final metadata copyup, for
+ * both regular files and directories to get atomic copyup semantics
+ * on filesystems that do not use strict metadata ordering (e.g. ubifs).
+ *
+- * With metacopy enabled we want to avoid fsync on all meta copyup
++ * By default, we want to avoid fsync on all meta copyup, because
+ * that will hurt performance of workloads such as chown -R, so we
+ * only fsync on data copyup as legacy behavior.
+ */
+- ctx.metadata_fsync = !OVL_FS(dentry->d_sb)->config.metacopy &&
++ ctx.metadata_fsync = ovl_should_sync_metadata(OVL_FS(dentry->d_sb)) &&
+ (S_ISREG(ctx.stat.mode) || S_ISDIR(ctx.stat.mode));
+ ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
+
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -99,6 +99,12 @@ enum {
+ OVL_VERITY_REQUIRE,
+ };
+
++enum {
++ OVL_FSYNC_VOLATILE,
++ OVL_FSYNC_AUTO,
++ OVL_FSYNC_STRICT,
++};
++
+ /*
+ * The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
+ * where:
+@@ -618,6 +624,21 @@ static inline bool ovl_xino_warn(struct
+ return ofs->config.xino == OVL_XINO_ON;
+ }
+
++static inline bool ovl_should_sync(struct ovl_fs *ofs)
++{
++ return ofs->config.fsync_mode != OVL_FSYNC_VOLATILE;
++}
++
++static inline bool ovl_should_sync_metadata(struct ovl_fs *ofs)
++{
++ return ofs->config.fsync_mode == OVL_FSYNC_STRICT;
++}
++
++static inline bool ovl_is_volatile(struct ovl_config *config)
++{
++ return config->fsync_mode == OVL_FSYNC_VOLATILE;
++}
++
+ /*
+ * To avoid regressions in existing setups with overlay lower offline changes,
+ * we allow lower changes only if none of the new features are used.
+--- a/fs/overlayfs/ovl_entry.h
++++ b/fs/overlayfs/ovl_entry.h
+@@ -18,7 +18,7 @@ struct ovl_config {
+ int xino;
+ bool metacopy;
+ bool userxattr;
+- bool ovl_volatile;
++ int fsync_mode;
+ };
+
+ struct ovl_sb {
+@@ -118,11 +118,6 @@ static inline struct ovl_fs *OVL_FS(stru
+ return (struct ovl_fs *)sb->s_fs_info;
+ }
+
+-static inline bool ovl_should_sync(struct ovl_fs *ofs)
+-{
+- return !ofs->config.ovl_volatile;
+-}
+-
+ static inline unsigned int ovl_numlower(struct ovl_entry *oe)
+ {
+ return oe ? oe->__numlower : 0;
+--- a/fs/overlayfs/params.c
++++ b/fs/overlayfs/params.c
+@@ -58,6 +58,7 @@ enum ovl_opt {
+ Opt_xino,
+ Opt_metacopy,
+ Opt_verity,
++ Opt_fsync,
+ Opt_volatile,
+ };
+
+@@ -139,6 +140,23 @@ static int ovl_verity_mode_def(void)
+ return OVL_VERITY_OFF;
+ }
+
++static const struct constant_table ovl_parameter_fsync[] = {
++ { "volatile", OVL_FSYNC_VOLATILE },
++ { "auto", OVL_FSYNC_AUTO },
++ { "strict", OVL_FSYNC_STRICT },
++ {}
++};
++
++static const char *ovl_fsync_mode(struct ovl_config *config)
++{
++ return ovl_parameter_fsync[config->fsync_mode].name;
++}
++
++static int ovl_fsync_mode_def(void)
++{
++ return OVL_FSYNC_AUTO;
++}
++
+ const struct fs_parameter_spec ovl_parameter_spec[] = {
+ fsparam_string_empty("lowerdir", Opt_lowerdir),
+ fsparam_string("lowerdir+", Opt_lowerdir_add),
+@@ -154,6 +172,7 @@ const struct fs_parameter_spec ovl_param
+ fsparam_enum("xino", Opt_xino, ovl_parameter_xino),
+ fsparam_enum("metacopy", Opt_metacopy, ovl_parameter_bool),
+ fsparam_enum("verity", Opt_verity, ovl_parameter_verity),
++ fsparam_enum("fsync", Opt_fsync, ovl_parameter_fsync),
+ fsparam_flag("volatile", Opt_volatile),
+ {}
+ };
+@@ -590,8 +609,11 @@ static int ovl_parse_param(struct fs_con
+ case Opt_verity:
+ config->verity_mode = result.uint_32;
+ break;
++ case Opt_fsync:
++ config->fsync_mode = result.uint_32;
++ break;
+ case Opt_volatile:
+- config->ovl_volatile = true;
++ config->fsync_mode = OVL_FSYNC_VOLATILE;
+ break;
+ case Opt_userxattr:
+ config->userxattr = true;
+@@ -702,6 +724,7 @@ int ovl_init_fs_context(struct fs_contex
+ ofs->config.nfs_export = ovl_nfs_export_def;
+ ofs->config.xino = ovl_xino_def();
+ ofs->config.metacopy = ovl_metacopy_def;
++ ofs->config.fsync_mode = ovl_fsync_mode_def();
+
+ fc->s_fs_info = ofs;
+ fc->fs_private = ctx;
+@@ -770,9 +793,9 @@ int ovl_fs_params_verify(const struct ov
+ config->index = false;
+ }
+
+- if (!config->upperdir && config->ovl_volatile) {
++ if (!config->upperdir && ovl_is_volatile(config)) {
+ pr_info("option \"volatile\" is meaningless in a non-upper mount, ignoring it.\n");
+- config->ovl_volatile = false;
++ config->fsync_mode = ovl_fsync_mode_def();
+ }
+
+ if (!config->upperdir && config->uuid == OVL_UUID_ON) {
+@@ -997,8 +1020,8 @@ int ovl_show_options(struct seq_file *m,
+ seq_printf(m, ",xino=%s", ovl_xino_mode(&ofs->config));
+ if (ofs->config.metacopy != ovl_metacopy_def)
+ seq_printf(m, ",metacopy=%s", str_on_off(ofs->config.metacopy));
+- if (ofs->config.ovl_volatile)
+- seq_puts(m, ",volatile");
++ if (ofs->config.fsync_mode != ovl_fsync_mode_def())
++ seq_printf(m, ",fsync=%s", ovl_fsync_mode(&ofs->config));
+ if (ofs->config.userxattr)
+ seq_puts(m, ",userxattr");
+ if (ofs->config.verity_mode != ovl_verity_mode_def())
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -744,7 +744,7 @@ static int ovl_make_workdir(struct super
+ * For volatile mount, create a incompat/volatile/dirty file to keep
+ * track of it.
+ */
+- if (ofs->config.ovl_volatile) {
++ if (ovl_is_volatile(&ofs->config)) {
+ err = ovl_create_volatile_dirty(ofs);
+ if (err < 0) {
+ pr_err("Failed to create volatile/dirty file.\n");
--- /dev/null
+From 50e638beb67e020a9124d77bd8a88bde3cd380e3 Mon Sep 17 00:00:00 2001
+From: Thorsten Blum <thorsten.blum@linux.dev>
+Date: Mon, 14 Apr 2025 22:54:08 +0200
+Subject: ovl: Use str_on_off() helper in ovl_show_options()
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+commit 50e638beb67e020a9124d77bd8a88bde3cd380e3 upstream.
+
+Remove hard-coded strings by using the str_on_off() helper function.
+
+Acked-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/overlayfs/params.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/fs/overlayfs/params.c
++++ b/fs/overlayfs/params.c
+@@ -987,17 +987,16 @@ int ovl_show_options(struct seq_file *m,
+ seq_printf(m, ",redirect_dir=%s",
+ ovl_redirect_mode(&ofs->config));
+ if (ofs->config.index != ovl_index_def)
+- seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off");
++ seq_printf(m, ",index=%s", str_on_off(ofs->config.index));
+ if (ofs->config.uuid != ovl_uuid_def())
+ seq_printf(m, ",uuid=%s", ovl_uuid_mode(&ofs->config));
+ if (ofs->config.nfs_export != ovl_nfs_export_def)
+- seq_printf(m, ",nfs_export=%s", ofs->config.nfs_export ?
+- "on" : "off");
++ seq_printf(m, ",nfs_export=%s",
++ str_on_off(ofs->config.nfs_export));
+ if (ofs->config.xino != ovl_xino_def() && !ovl_same_fs(ofs))
+ seq_printf(m, ",xino=%s", ovl_xino_mode(&ofs->config));
+ if (ofs->config.metacopy != ovl_metacopy_def)
+- seq_printf(m, ",metacopy=%s",
+- ofs->config.metacopy ? "on" : "off");
++ seq_printf(m, ",metacopy=%s", str_on_off(ofs->config.metacopy));
+ if (ofs->config.ovl_volatile)
+ seq_puts(m, ",volatile");
+ if (ofs->config.userxattr)
--- /dev/null
+From 42415d163e5df6db799c7de6262d707e402c2c7e Mon Sep 17 00:00:00 2001
+From: Benno Lossin <lossin@kernel.org>
+Date: Fri, 5 Sep 2025 16:00:46 +0200
+Subject: rust: pin-init: add references to previously initialized fields
+
+From: Benno Lossin <lossin@kernel.org>
+
+commit 42415d163e5df6db799c7de6262d707e402c2c7e upstream.
+
+After initializing a field in an initializer macro, create a variable
+holding a reference that points at that field. The type is either
+`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
+kind.
+
+[ Applied fixes to devres and rust_driver_pci sample - Benno]
+Reviewed-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+[ Removed the devres changes, because devres is not present in 6.12.y and
+ earlier. Also adjusted paths in the macro to account for the fact that
+ pin-init is part of the kernel crate in 6.12.y and earlier. - Benno ]
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/init/macros.rs | 149 ++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 115 insertions(+), 34 deletions(-)
+
+--- a/rust/kernel/init/macros.rs
++++ b/rust/kernel/init/macros.rs
+@@ -985,38 +985,56 @@ macro_rules! __pin_data {
+ @pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
+ @not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
+ ) => {
+- // For every field, we create a projection function according to its projection type. If a
+- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+- // structurally pinned, then it can be initialized via `Init`.
+- //
+- // The functions are `unsafe` to prevent accidentally calling them.
+- #[allow(dead_code)]
+- #[expect(clippy::missing_safety_doc)]
+- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+- where $($whr)*
+- {
+- $(
+- $(#[$($p_attr)*])*
+- $pvis unsafe fn $p_field<E>(
+- self,
+- slot: *mut $p_type,
+- init: impl $crate::init::PinInit<$p_type, E>,
+- ) -> ::core::result::Result<(), E> {
+- // SAFETY: TODO.
+- unsafe { $crate::init::PinInit::__pinned_init(init, slot) }
+- }
+- )*
+- $(
+- $(#[$($attr)*])*
+- $fvis unsafe fn $field<E>(
+- self,
+- slot: *mut $type,
+- init: impl $crate::init::Init<$type, E>,
+- ) -> ::core::result::Result<(), E> {
+- // SAFETY: TODO.
+- unsafe { $crate::init::Init::__init(init, slot) }
+- }
+- )*
++ $crate::macros::paste! {
++ // For every field, we create a projection function according to its projection type. If a
++ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
++ // structurally pinned, then it can be initialized via `Init`.
++ //
++ // The functions are `unsafe` to prevent accidentally calling them.
++ #[allow(dead_code, non_snake_case)]
++ #[expect(clippy::missing_safety_doc)]
++ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
++ where $($whr)*
++ {
++ $(
++ $(#[$($p_attr)*])*
++ $pvis unsafe fn $p_field<E>(
++ self,
++ slot: *mut $p_type,
++ init: impl $crate::init::PinInit<$p_type, E>,
++ ) -> ::core::result::Result<(), E> {
++ // SAFETY: TODO.
++ unsafe { $crate::init::PinInit::__pinned_init(init, slot) }
++ }
++
++ $(#[$($p_attr)*])*
++ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
++ self,
++ slot: &'__slot mut $p_type,
++ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
++ unsafe { ::core::pin::Pin::new_unchecked(slot) }
++ }
++ )*
++ $(
++ $(#[$($attr)*])*
++ $fvis unsafe fn $field<E>(
++ self,
++ slot: *mut $type,
++ init: impl $crate::init::Init<$type, E>,
++ ) -> ::core::result::Result<(), E> {
++ // SAFETY: TODO.
++ unsafe { $crate::init::Init::__init(init, slot) }
++ }
++
++ $(#[$($attr)*])*
++ $fvis unsafe fn [<__project_ $field>]<'__slot>(
++ self,
++ slot: &'__slot mut $type,
++ ) -> &'__slot mut $type {
++ slot
++ }
++ )*
++ }
+ }
+ };
+ }
+@@ -1213,6 +1231,13 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
++ // SAFETY:
++ // - the project function does the correct field projection,
++ // - the field has been initialized,
++ // - the reference is only valid until the end of the initializer.
++ #[allow(unused_variables, unused_assignments)]
++ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
++
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+@@ -1244,6 +1269,14 @@ macro_rules! __init_internal {
+ // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
+ // return when an error/panic occurs.
+ unsafe { $crate::init::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
++
++ // SAFETY:
++ // - the field is not structurally pinned, since the line above must compile,
++ // - the field has been initialized,
++ // - the reference is only valid until the end of the initializer.
++ #[allow(unused_variables, unused_assignments)]
++ let $field = unsafe { &mut (*$slot).$field };
++
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+@@ -1262,7 +1295,7 @@ macro_rules! __init_internal {
+ );
+ }
+ };
+- (init_slot($($use_data:ident)?):
++ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+@@ -1276,6 +1309,15 @@ macro_rules! __init_internal {
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
++
++ #[allow(unused_variables, unused_assignments)]
++ // SAFETY:
++ // - the field is not structurally pinned, since no `use_data` was required to create this
++ // initializer,
++ // - the field has been initialized,
++ // - the reference is only valid until the end of the initializer.
++ let $field = unsafe { &mut (*$slot).$field };
++
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+@@ -1286,7 +1328,46 @@ macro_rules! __init_internal {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+- $crate::__init_internal!(init_slot($($use_data)?):
++ $crate::__init_internal!(init_slot():
++ @data($data),
++ @slot($slot),
++ @guards([< __ $field _guard >], $($guards,)*),
++ @munch_fields($($rest)*),
++ );
++ }
++ };
++ (init_slot($use_data:ident):
++ @data($data:ident),
++ @slot($slot:ident),
++ @guards($($guards:ident,)*),
++ // Init by-value.
++ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
++ ) => {
++ {
++ $(let $field = $val;)?
++ // Initialize the field.
++ //
++ // SAFETY: The memory at `slot` is uninitialized.
++ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
++ }
++ // SAFETY:
++ // - the project function does the correct field projection,
++ // - the field has been initialized,
++ // - the reference is only valid until the end of the initializer.
++ #[allow(unused_variables, unused_assignments)]
++ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
++
++ // Create the drop guard:
++ //
++ // We rely on macro hygiene to make it impossible for users to access this local variable.
++ // We use `paste!` to create new hygiene for `$field`.
++ $crate::macros::paste! {
++ // SAFETY: We forget the guard later when initialization has succeeded.
++ let [< __ $field _guard >] = unsafe {
++ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
++ };
++
++ $crate::__init_internal!(init_slot($use_data):
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
--- /dev/null
+From 580cc37b1de4fcd9997c48d7080e744533f09f36 Mon Sep 17 00:00:00 2001
+From: Benno Lossin <lossin@kernel.org>
+Date: Mon, 2 Mar 2026 15:04:15 +0100
+Subject: rust: pin-init: internal: init: document load-bearing fact of field accessors
+
+From: Benno Lossin <lossin@kernel.org>
+
+commit 580cc37b1de4fcd9997c48d7080e744533f09f36 upstream.
+
+The functions `[Pin]Init::__[pinned_]init` and `ptr::write` called from
+the `init!` macro require the passed pointer to be aligned. This fact is
+ensured by the creation of field accessors to previously initialized
+fields.
+
+Since we missed this very important fact from the beginning [1],
+document it in the code.
+
+Link: https://rust-for-linux.zulipchat.com/#narrow/channel/561532-pin-init/topic/initialized.20field.20accessor.20detection/with/576210658 [1]
+Fixes: 90e53c5e70a6 ("rust: add pin-init API core")
+Cc: <stable@vger.kernel.org> # 6.6.y, 6.12.y: 42415d163e5d: rust: pin-init: add references to previously initialized fields
+Cc: <stable@vger.kernel.org> # 6.6.y, 6.12.y, 6.18.y, 6.19.y
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Reviewed-by: Gary Guo <gary@garyguo.net>
+Link: https://patch.msgid.link/20260302140424.4097655-2-lossin@kernel.org
+[ Updated Cc: stable@ tags as discussed. - Miguel ]
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+[ Moved changes to the declarative macro, because 6.19.y and earlier do not
+ have `syn`. Also duplicated the comment for all field accessor creations.
+ - Benno ]
+Signed-off-by: Benno Lossin <lossin@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/init/macros.rs | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/rust/kernel/init/macros.rs
++++ b/rust/kernel/init/macros.rs
+@@ -1231,6 +1231,10 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+@@ -1270,6 +1274,10 @@ macro_rules! __init_internal {
+ // return when an error/panic occurs.
+ unsafe { $crate::init::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+@@ -1310,6 +1318,10 @@ macro_rules! __init_internal {
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ #[allow(unused_variables, unused_assignments)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+@@ -1350,6 +1362,10 @@ macro_rules! __init_internal {
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
++ // NOTE: the field accessor ensures that the initialized field is properly aligned.
++ // Unaligned fields will cause the compiler to emit E0793. We do not support
++ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
++ // `ptr::write` below has the same requirement.
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
loongarch-vdso-emit-gnu_eh_frame-correctly.patch
spi-tegra210-quad-protect-curr_xfer-check-in-irq-handler.patch
media-nxp-imx8-isi-fix-streaming-cleanup-on-release.patch
+mm-damon-sysfs-check-contexts-nr-before-accessing-contexts_arr.patch
+rust-pin-init-add-references-to-previously-initialized-fields.patch
+rust-pin-init-internal-init-document-load-bearing-fact-of-field-accessors.patch
+ovl-use-str_on_off-helper-in-ovl_show_options.patch
+ovl-make-fsync-after-metadata-copy-up-opt-in-mount-option.patch
+xfs-avoid-dereferencing-log-items-after-push-callbacks.patch
+virt-tdx-guest-fix-handling-of-host-controlled-quote-buffer-length.patch
+net-add-proper-rcu-protection-to-proc-net-ptype.patch
+landlock-optimize-file-path-walks-and-prepare-for-audit-support.patch
+landlock-fix-handling-of-disconnected-directories.patch
+ice-fix-using-untrusted-value-of-pkt_len-in-ice_vc_fdir_parse_raw.patch
+ice-fix-ptp-null-pointer-dereference-during-vsi-rebuild.patch
+idpf-check-error-for-register_netdev-on-init.patch
+idpf-detach-and-close-netdevs-while-handling-a-reset.patch
+idpf-fix-rss-lut-null-pointer-crash-on-early-ethtool-operations.patch
+idpf-fix-rss-lut-null-ptr-issue-after-soft-reset.patch
+asoc-ak4458-convert-to-runtime_pm_ops-co.patch
--- /dev/null
+From stable+bounces-230979-greg=kroah.com@vger.kernel.org Sun Mar 29 23:02:29 2026
+From: Zubin Mithra <zsm@google.com>
+Date: Sun, 29 Mar 2026 21:02:20 +0000
+Subject: virt: tdx-guest: Fix handling of host controlled 'quote' buffer length
+To: stable@vger.kernel.org
+Cc: Zubin Mithra <zsm@google.com>, Dan Williams <dan.j.williams@intel.com>, "Kiryl Shutsemau (Meta)" <kas@kernel.org>, Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+Message-ID: <20260329210220.152814-1-zsm@google.com>
+
+From: Zubin Mithra <zsm@google.com>
+
+commit c3fd16c3b98ed726294feab2f94f876290bf7b61 upstream.
+
+Validate host controlled value `quote_buf->out_len` that determines how
+many bytes of the quote are copied out to guest userspace. In TDX
+environments with remote attestation, quotes are not considered private,
+and can be forwarded to an attestation server.
+
+Catch scenarios where the host specifies a response length larger than
+the guest's allocation, or otherwise races modifying the response while
+the guest consumes it.
+
+This prevents contents beyond the pages allocated for `quote_buf`
+(up to TSM_REPORT_OUTBLOB_MAX) from being read out to guest userspace,
+and possibly forwarded in attestation requests.
+
+Recall that some deployments want per-container configs-tsm-report
+interfaces, so the leak may cross container protection boundaries, not
+just local root.
+
+Fixes: f4738f56d1dc ("virt: tdx-guest: Add Quote generation support using TSM_REPORTS")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zubin Mithra <zsm@google.com>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Reviewed-by: Kiryl Shutsemau (Meta) <kas@kernel.org>
+Reviewed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Zubin Mithra <zsm@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/virt/coco/tdx-guest/tdx-guest.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
++++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
+@@ -35,6 +35,8 @@
+ #define GET_QUOTE_SUCCESS 0
+ #define GET_QUOTE_IN_FLIGHT 0xffffffffffffffff
+
++#define TDX_QUOTE_MAX_LEN (GET_QUOTE_BUF_SIZE - sizeof(struct tdx_quote_buf))
++
+ /* struct tdx_quote_buf: Format of Quote request buffer.
+ * @version: Quote format version, filled by TD.
+ * @status: Status code of Quote request, filled by VMM.
+@@ -162,6 +164,7 @@ static int tdx_report_new(struct tsm_rep
+ u8 *buf, *reportdata = NULL, *tdreport = NULL;
+ struct tdx_quote_buf *quote_buf = quote_data;
+ struct tsm_desc *desc = &report->desc;
++ u32 out_len;
+ int ret;
+ u64 err;
+
+@@ -226,14 +229,21 @@ static int tdx_report_new(struct tsm_rep
+ goto done;
+ }
+
+- buf = kvmemdup(quote_buf->data, quote_buf->out_len, GFP_KERNEL);
++ out_len = READ_ONCE(quote_buf->out_len);
++
++ if (out_len > TDX_QUOTE_MAX_LEN) {
++ ret = -EFBIG;
++ goto done;
++ }
++
++ buf = kvmemdup(quote_buf->data, out_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ report->outblob = buf;
+- report->outblob_len = quote_buf->out_len;
++ report->outblob_len = out_len;
+
+ /*
+ * TODO: parse the PEM-formatted cert chain out of the quote buffer when
--- /dev/null
+From 79ef34ec0554ec04bdbafafbc9836423734e1bd6 Mon Sep 17 00:00:00 2001
+From: Yuto Ohnuki <ytohnuki@amazon.com>
+Date: Tue, 10 Mar 2026 18:38:38 +0000
+Subject: xfs: avoid dereferencing log items after push callbacks
+
+From: Yuto Ohnuki <ytohnuki@amazon.com>
+
+commit 79ef34ec0554ec04bdbafafbc9836423734e1bd6 upstream.
+
+After xfsaild_push_item() calls iop_push(), the log item may have been
+freed if the AIL lock was dropped during the push. Background inode
+reclaim or the dquot shrinker can free the log item while the AIL lock
+is not held, and the tracepoints in the switch statement dereference
+the log item after iop_push() returns.
+
+Fix this by capturing the log item type, flags, and LSN before calling
+xfsaild_push_item(), and introducing a new xfs_ail_push_class trace
+event class that takes these pre-captured values and the ailp pointer
+instead of the log item pointer.
+
+Reported-by: syzbot+652af2b3c5569c4ab63c@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=652af2b3c5569c4ab63c
+Fixes: 90c60e164012 ("xfs: xfs_iflush() is no longer necessary")
+Cc: stable@vger.kernel.org # v5.9
+Signed-off-by: Yuto Ohnuki <ytohnuki@amazon.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_trace.h | 36 ++++++++++++++++++++++++++++++++----
+ fs/xfs/xfs_trans_ail.c | 26 +++++++++++++++++++-------
+ 2 files changed, 51 insertions(+), 11 deletions(-)
+
+--- a/fs/xfs/xfs_trace.h
++++ b/fs/xfs/xfs_trace.h
+@@ -52,6 +52,7 @@
+ #include <linux/tracepoint.h>
+
+ struct xfs_agf;
++struct xfs_ail;
+ struct xfs_alloc_arg;
+ struct xfs_attr_list_context;
+ struct xfs_buf_log_item;
+@@ -1351,14 +1352,41 @@ TRACE_EVENT(xfs_log_force,
+ DEFINE_EVENT(xfs_log_item_class, name, \
+ TP_PROTO(struct xfs_log_item *lip), \
+ TP_ARGS(lip))
+-DEFINE_LOG_ITEM_EVENT(xfs_ail_push);
+-DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned);
+-DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);
+-DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing);
+ DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_mark);
+ DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_skip);
+ DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_unpin);
+
++DECLARE_EVENT_CLASS(xfs_ail_push_class,
++ TP_PROTO(struct xfs_ail *ailp, uint type, unsigned long flags, xfs_lsn_t lsn),
++ TP_ARGS(ailp, type, flags, lsn),
++ TP_STRUCT__entry(
++ __field(dev_t, dev)
++ __field(uint, type)
++ __field(unsigned long, flags)
++ __field(xfs_lsn_t, lsn)
++ ),
++ TP_fast_assign(
++ __entry->dev = ailp->ail_log->l_mp->m_super->s_dev;
++ __entry->type = type;
++ __entry->flags = flags;
++ __entry->lsn = lsn;
++ ),
++ TP_printk("dev %d:%d lsn %d/%d type %s flags %s",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
++ __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
++ __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
++)
++
++#define DEFINE_AIL_PUSH_EVENT(name) \
++DEFINE_EVENT(xfs_ail_push_class, name, \
++ TP_PROTO(struct xfs_ail *ailp, uint type, unsigned long flags, xfs_lsn_t lsn), \
++ TP_ARGS(ailp, type, flags, lsn))
++DEFINE_AIL_PUSH_EVENT(xfs_ail_push);
++DEFINE_AIL_PUSH_EVENT(xfs_ail_pinned);
++DEFINE_AIL_PUSH_EVENT(xfs_ail_locked);
++DEFINE_AIL_PUSH_EVENT(xfs_ail_flushing);
++
+ DECLARE_EVENT_CLASS(xfs_ail_class,
+ TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn),
+ TP_ARGS(lip, old_lsn, new_lsn),
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -370,6 +370,12 @@ xfsaild_resubmit_item(
+ return XFS_ITEM_SUCCESS;
+ }
+
++/*
++ * Push a single log item from the AIL.
++ *
++ * @lip may have been released and freed by the time this function returns,
++ * so callers must not dereference the log item afterwards.
++ */
+ static inline uint
+ xfsaild_push_item(
+ struct xfs_ail *ailp,
+@@ -510,7 +516,10 @@ xfsaild_push(
+
+ lsn = lip->li_lsn;
+ while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
+- int lock_result;
++ int lock_result;
++ uint type = lip->li_type;
++ unsigned long flags = lip->li_flags;
++ xfs_lsn_t item_lsn = lip->li_lsn;
+
+ if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
+ goto next_item;
+@@ -519,14 +528,17 @@ xfsaild_push(
+ * Note that iop_push may unlock and reacquire the AIL lock. We
+ * rely on the AIL cursor implementation to be able to deal with
+ * the dropped lock.
++ *
++ * The log item may have been freed by the push, so it must not
++ * be accessed or dereferenced below this line.
+ */
+ lock_result = xfsaild_push_item(ailp, lip);
+ switch (lock_result) {
+ case XFS_ITEM_SUCCESS:
+ XFS_STATS_INC(mp, xs_push_ail_success);
+- trace_xfs_ail_push(lip);
++ trace_xfs_ail_push(ailp, type, flags, item_lsn);
+
+- ailp->ail_last_pushed_lsn = lsn;
++ ailp->ail_last_pushed_lsn = item_lsn;
+ break;
+
+ case XFS_ITEM_FLUSHING:
+@@ -542,22 +554,22 @@ xfsaild_push(
+ * AIL is being flushed.
+ */
+ XFS_STATS_INC(mp, xs_push_ail_flushing);
+- trace_xfs_ail_flushing(lip);
++ trace_xfs_ail_flushing(ailp, type, flags, item_lsn);
+
+ flushing++;
+- ailp->ail_last_pushed_lsn = lsn;
++ ailp->ail_last_pushed_lsn = item_lsn;
+ break;
+
+ case XFS_ITEM_PINNED:
+ XFS_STATS_INC(mp, xs_push_ail_pinned);
+- trace_xfs_ail_pinned(lip);
++ trace_xfs_ail_pinned(ailp, type, flags, item_lsn);
+
+ stuck++;
+ ailp->ail_log_flush++;
+ break;
+ case XFS_ITEM_LOCKED:
+ XFS_STATS_INC(mp, xs_push_ail_locked);
+- trace_xfs_ail_locked(lip);
++ trace_xfs_ail_locked(ailp, type, flags, item_lsn);
+
+ stuck++;
+ break;