--- /dev/null
+From b763d1f890ea78c4ce997ae25bc6250440f7de7c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Aug 2024 14:41:52 -0400
+Subject: Bluetooth: hci_core: Fix not handling hibernation actions
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit 18b3256db76bd1130965acd99fbd38f87c3e6950 ]
+
+Handle hibernation actions in the suspend notifier so that they are
+treated in the same way as regular suspend actions.
+
+Fixes: 9952d90ea288 ("Bluetooth: Handle PM_SUSPEND_PREPARE and PM_POST_SUSPEND")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_core.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 210e03a3609d4..dc19a0b1a2f6d 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2405,10 +2405,16 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
+ /* To avoid a potential race with hci_unregister_dev. */
+ hci_dev_hold(hdev);
+
+- if (action == PM_SUSPEND_PREPARE)
++ switch (action) {
++ case PM_HIBERNATION_PREPARE:
++ case PM_SUSPEND_PREPARE:
+ ret = hci_suspend_dev(hdev);
+- else if (action == PM_POST_SUSPEND)
++ break;
++ case PM_POST_HIBERNATION:
++ case PM_POST_SUSPEND:
+ ret = hci_resume_dev(hdev);
++ break;
++ }
+
+ if (ret)
+ bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
+--
+2.43.0
+
--- /dev/null
+From 93b317770da95f2f07036a0f5a6d5f13525f7710 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 06:10:54 +0300
+Subject: bonding: implement xdo_dev_state_free and call it after deletion
+
+From: Jianbo Liu <jianbol@nvidia.com>
+
+[ Upstream commit ec13009472f4a756288eb4e18e20a7845da98d10 ]
+
+Add this implementation for bonding, so hardware resources can be
+freed from the active slave after the xfrm state is deleted. The netdev
+used to invoke the xdo_dev_state_free callback is saved in the xfrm
+state (xs->xso.real_dev), which is also the bond's active slave. To
+prevent it from being freed, acquire a netdev reference before leaving
+the RCU read-side critical section, and release it after the callback
+is done.
+
+Also call it when deleting all SAs from the old active real interface
+while switching the current active slave.
+
+Fixes: 9a5605505d9c ("bonding: Add struct bond_ipesc to manage SA")
+Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
+Acked-by: Jay Vosburgh <jv@jvosburgh.net>
+Link: https://patch.msgid.link/20240823031056.110999-2-jianbol@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 36 +++++++++++++++++++++++++++++++++
+ 1 file changed, 36 insertions(+)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index c218352814430..375412ce1ea5f 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -577,12 +577,47 @@ static void bond_ipsec_del_sa_all(struct bonding *bond)
+ __func__);
+ } else {
+ slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
++ if (slave->dev->xfrmdev_ops->xdo_dev_state_free)
++ slave->dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
+ }
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+ rcu_read_unlock();
+ }
+
++static void bond_ipsec_free_sa(struct xfrm_state *xs)
++{
++ struct net_device *bond_dev = xs->xso.dev;
++ struct net_device *real_dev;
++ netdevice_tracker tracker;
++ struct bonding *bond;
++ struct slave *slave;
++
++ if (!bond_dev)
++ return;
++
++ rcu_read_lock();
++ bond = netdev_priv(bond_dev);
++ slave = rcu_dereference(bond->curr_active_slave);
++ real_dev = slave ? slave->dev : NULL;
++ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
++ rcu_read_unlock();
++
++ if (!slave)
++ goto out;
++
++ if (!xs->xso.real_dev)
++ goto out;
++
++ WARN_ON(xs->xso.real_dev != real_dev);
++
++ if (real_dev && real_dev->xfrmdev_ops &&
++ real_dev->xfrmdev_ops->xdo_dev_state_free)
++ real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
++out:
++ netdev_put(real_dev, &tracker);
++}
++
+ /**
+ * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
+ * @skb: current data packet
+@@ -623,6 +658,7 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+ static const struct xfrmdev_ops bond_xfrmdev_ops = {
+ .xdo_dev_state_add = bond_ipsec_add_sa,
+ .xdo_dev_state_delete = bond_ipsec_del_sa,
++ .xdo_dev_state_free = bond_ipsec_free_sa,
+ .xdo_dev_offload_ok = bond_ipsec_offload_ok,
+ };
+ #endif /* CONFIG_XFRM_OFFLOAD */
+--
+2.43.0
+
--- /dev/null
+From 9d44aa9080d6236867694f35eb099348ffeac338 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Aug 2024 10:50:47 +0300
+Subject: dmaengine: dw: Add memory bus width verification
+
+From: Serge Semin <fancer.lancer@gmail.com>
+
+[ Upstream commit d04b21bfa1c50a2ade4816cab6fdc91827b346b1 ]
+
+Currently, for DEV_TO_MEM or MEM_TO_DEV DMA transfers, the memory data
+width (single transfer width) is determined based on the buffer length,
+the buffer base address or the DMA master-channel max address width
+capability. That isn't enough if the channel is disabled before the
+block transfer is finished. Here is what the DW AHB DMA IP-core databook
+says regarding the port suspension (DMA-transfer pause) implementation
+in the controller:
+
+"When CTLx.SRC_TR_WIDTH < CTLx.DST_TR_WIDTH and the CFGx.CH_SUSP bit is
+high, the CFGx.FIFO_EMPTY is asserted once the contents of the FIFO do not
+permit a single word of CTLx.DST_TR_WIDTH to be formed. However, there may
+still be data in the channel FIFO, but not enough to form a single
+transfer of CTLx.DST_TR_WIDTH. In this scenario, once the channel is
+disabled, the remaining data in the channel FIFO is not transferred to the
+destination peripheral."
+
+So if the port gets suspended and then disabled, it's possible to have
+the data silently discarded even though the controller reported that the
+FIFO is empty and CTLx.BLOCK_TS indicated that the dropped data had
+already been received from the source device. This looks as if the data
+somehow got lost on its way from the peripheral device to memory and
+causes problems, for instance, in the DW APB UART driver, which pauses
+and disables the DMA-transfer as soon as the recv data timeout happens.
+Here is the way it looks:
+
+ Memory <------- DMA FIFO <------ UART FIFO <---------------- UART
+ DST_TR_WIDTH -+--------| | |
+ | | | | No more data
+ Current lvl -+--------| |---------+- DMA-burst lvl
+ | | |---------+- Leftover data
+ | | |---------+- SRC_TR_WIDTH
+ -+--------+-------+---------+
+
+In the example above: no more data is being received over the UART port
+and BLOCK_TS is not even close to being fully received; some data is left
+in the UART FIFO, but not enough to perform a bursted DMA-xfer to the DMA
+FIFO; some data is left in the DMA FIFO, but not enough to be passed
+further to system memory in a single transfer. In this situation the
+8250 UART driver catches the recv timeout interrupt, pauses the
+DMA-transfer and terminates it completely, after which the IRQ handler
+manually fetches the leftover data from the UART FIFO into the
+recv-buffer. But since the DMA-channel has been disabled with data
+left in the DMA FIFO, that data will just be discarded and the
+recv-buffer will have a gap of the "current lvl" size at the tail of the
+most recently received data portion. So the data will be lost just due to
+the misconfigured DMA transfer.
+
+Note this is only relevant for the case of transfer suspension and
+_disabling_. No problem will happen if the transfer is re-enabled
+afterwards or the block transfer is fully completed. In the latter case
+the "FIFO flush mode" will be executed at the final transfer stage in
+order to push out the data left in the DMA FIFO.
+
+In order to fix the described problem the DW AHB DMA-engine driver needs
+to make sure that the _bursted_ source transfer width is greater than or
+equal to the single destination transfer width (note the HW databook
+describes a stricter constraint than is actually required). Since the
+peripheral-device side is prescribed by the client driver logic, only the
+memory side can be used for that. The solution is easily implemented for
+DEV_TO_MEM transfers just by adjusting the memory-channel address width.
+Sadly it's not that easy for MEM_TO_DEV transfers since the mem-to-dma
+burst size is normally dynamically determined by the controller. So the
+only thing that can be done is to make sure that the memory-side address
+width is greater than the peripheral device address width.
+
+Fixes: a09820043c9e ("dw_dmac: autoconfigure data_width or get it via platform data")
+Signed-off-by: Serge Semin <fancer.lancer@gmail.com>
+Acked-by: Andy Shevchenko <andy@kernel.org>
+Link: https://lore.kernel.org/r/20240802075100.6475-3-fancer.lancer@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/dw/core.c | 51 +++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 44 insertions(+), 7 deletions(-)
+
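+A worked example of the DEV_TO_MEM adjustment added below (values assumed
+purely for illustration: an 8250 UART with src_addr_width = 1 byte and
+src_maxburst = 8, on a controller whose memory master data width is 4
+bytes):
+
+        reg_burst      = rounddown_pow_of_two(8) = 8
+        dst_addr_width = min(4, 1 * 8)           = 4 bytes
+
+so SRC_TR_WIDTH * SRC_MSIZE (8 bytes) covers DST_TR_WIDTH (4 bytes), and a
+suspended-then-disabled channel no longer strands a partial destination
+word in the DMA FIFO.
+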
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 9cafd8aff278e..66c98676e66ad 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -622,12 +622,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ struct dw_desc *prev;
+ struct dw_desc *first;
+ u32 ctllo, ctlhi;
+- u8 m_master = dwc->dws.m_master;
+- u8 lms = DWC_LLP_LMS(m_master);
++ u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
+ dma_addr_t reg;
+ unsigned int reg_width;
+ unsigned int mem_width;
+- unsigned int data_width = dw->pdata->data_width[m_master];
+ unsigned int i;
+ struct scatterlist *sg;
+ size_t total_len = 0;
+@@ -661,7 +659,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+- mem_width = __ffs(data_width | mem | len);
++ mem_width = __ffs(sconfig->src_addr_width | mem | len);
+
+ slave_sg_todev_fill_desc:
+ desc = dwc_desc_get(dwc);
+@@ -721,7 +719,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ lli_write(desc, sar, reg);
+ lli_write(desc, dar, mem);
+ lli_write(desc, ctlhi, ctlhi);
+- mem_width = __ffs(data_width | mem);
++ mem_width = __ffs(sconfig->dst_addr_width | mem);
+ lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
+ desc->len = dlen;
+
+@@ -813,6 +811,41 @@ static int dwc_verify_p_buswidth(struct dma_chan *chan)
+ return 0;
+ }
+
++static int dwc_verify_m_buswidth(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
++ u32 reg_width, reg_burst, mem_width;
++
++ mem_width = dw->pdata->data_width[dwc->dws.m_master];
++
++ /*
++ * It's possible to have a data portion locked in the DMA FIFO in case
++ * of the channel suspension. Subsequent channel disabling will cause
++ * that data silent loss. In order to prevent that maintain the src and
++ * dst transfer widths coherency by means of the relation:
++ * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH)
++ * Look for the details in the commit message that brings this change.
++ *
++ * Note the DMA configs utilized in the calculations below must have
++ * been verified to have correct values by this method call.
++ */
++ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
++ reg_width = dwc->dma_sconfig.dst_addr_width;
++ if (mem_width < reg_width)
++ return -EINVAL;
++
++ dwc->dma_sconfig.src_addr_width = mem_width;
++ } else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
++ reg_width = dwc->dma_sconfig.src_addr_width;
++ reg_burst = rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
++
++ dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
++ }
++
++ return 0;
++}
++
+ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+ {
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+@@ -822,14 +855,18 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+ memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+
+ dwc->dma_sconfig.src_maxburst =
+- clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
++ clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
+ dwc->dma_sconfig.dst_maxburst =
+- clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
++ clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);
+
+ ret = dwc_verify_p_buswidth(chan);
+ if (ret)
+ return ret;
+
++ ret = dwc_verify_m_buswidth(chan);
++ if (ret)
++ return ret;
++
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
+
+--
+2.43.0
+
--- /dev/null
+From 4b53db5b1b1854504c432127f95339e7b7c64894 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Aug 2024 10:50:46 +0300
+Subject: dmaengine: dw: Add peripheral bus width verification
+
+From: Serge Semin <fancer.lancer@gmail.com>
+
+[ Upstream commit b336268dde75cb09bd795cb24893d52152a9191f ]
+
+Currently the src_addr_width and dst_addr_width fields of the
+dma_slave_config structure are mapped to the CTLx.SRC_TR_WIDTH and
+CTLx.DST_TR_WIDTH fields of the peripheral bus side in order to have the
+properly aligned data passed to the target device. It's done just by
+converting the passed peripheral bus width to the encoded value using the
+__ffs() function. This implementation has several problematic sides:
+
+1. __ffs() is undefined if no bit is set in the passed value. Thus if the
+specified addr-width is DMA_SLAVE_BUSWIDTH_UNDEFINED, __ffs() may return
+an unexpected value depending on the platform-specific implementation.
+
+2. The DW AHB DMA-engine only permits power-of-2 transfer widths, limited
+by the DMAH_Mk_HDATA_WIDTH IP-core synthesis parameter. Specifying a
+bus-width outside that constraint will definitely cause unexpected
+results since the destination register will only be partly written,
+unlike what the client driver implied.
+
+Let's fix all of that by adding a peripheral bus width verification
+method and calling it in dwc_config(), which is supposed to be executed
+before preparing any transfer. The new method makes sure that the passed
+source or destination address width is valid and, if undefined, the
+driver just falls back to a 1-byte transfer width.
+
+Fixes: 029a40e97d0d ("dmaengine: dw: provide DMA capabilities")
+Signed-off-by: Serge Semin <fancer.lancer@gmail.com>
+Acked-by: Andy Shevchenko <andy@kernel.org>
+Link: https://lore.kernel.org/r/20240802075100.6475-2-fancer.lancer@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/dw/core.c | 38 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+
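+A minimal sketch of problem 1 above (illustration only, not driver code):
+DMA_SLAVE_BUSWIDTH_UNDEFINED is 0 and __ffs() is undefined for a zero
+argument, so the encoded transfer width becomes whatever the
+architecture-specific implementation happens to return:
+
+        u32 width = DMA_SLAVE_BUSWIDTH_UNDEFINED;      /* == 0 */
+        u32 enc   = __ffs(width);       /* undefined: no bit is set */
+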
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 97ba3bfc10b13..9cafd8aff278e 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -16,6 +16,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/log2.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -780,10 +781,43 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ }
+ EXPORT_SYMBOL_GPL(dw_dma_filter);
+
++static int dwc_verify_p_buswidth(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
++ u32 reg_width, max_width;
++
++ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
++ reg_width = dwc->dma_sconfig.dst_addr_width;
++ else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
++ reg_width = dwc->dma_sconfig.src_addr_width;
++ else /* DMA_MEM_TO_MEM */
++ return 0;
++
++ max_width = dw->pdata->data_width[dwc->dws.p_master];
++
++ /* Fall-back to 1-byte transfer width if undefined */
++ if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
++ reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++ else if (!is_power_of_2(reg_width) || reg_width > max_width)
++ return -EINVAL;
++ else /* bus width is valid */
++ return 0;
++
++ /* Update undefined addr width value */
++ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
++ dwc->dma_sconfig.dst_addr_width = reg_width;
++ else /* DMA_DEV_TO_MEM */
++ dwc->dma_sconfig.src_addr_width = reg_width;
++
++ return 0;
++}
++
+ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+ {
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
++ int ret;
+
+ memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+
+@@ -792,6 +826,10 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+ dwc->dma_sconfig.dst_maxburst =
+ clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
+
++ ret = dwc_verify_p_buswidth(chan);
++ if (ret)
++ return ret;
++
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
+
+--
+2.43.0
+
--- /dev/null
+From c185f97aaf173d3a982171567f5908275d2f7b3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Aug 2024 12:27:24 +0800
+Subject: drm/amd/display: avoid using null object of framebuffer
+
+From: Ma Ke <make24@iscas.ac.cn>
+
+[ Upstream commit 3b9a33235c773c7a3768060cf1d2cf8a9153bc37 ]
+
+Instead of using state->fb->obj[0] directly, get the object from the
+framebuffer by calling drm_gem_fb_get_obj() and return an error code
+when the object is null, to avoid dereferencing a null framebuffer
+object.
+
+Fixes: 5d945cbcd4b1 ("drm/amd/display: Create a file dedicated to planes")
+Signed-off-by: Ma Ke <make24@iscas.ac.cn>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 73dd0ad9e5dad53766ea3e631303430116f834b3)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index cd6e99cf74a06..08b10df93c317 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -28,6 +28,7 @@
+ #include <drm/drm_blend.h>
+ #include <drm/drm_gem_atomic_helper.h>
+ #include <drm/drm_plane_helper.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_fourcc.h>
+
+ #include "amdgpu.h"
+@@ -848,10 +849,14 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ }
+
+ afb = to_amdgpu_framebuffer(new_state->fb);
+- obj = new_state->fb->obj[0];
++ obj = drm_gem_fb_get_obj(new_state->fb, 0);
++ if (!obj) {
++ DRM_ERROR("Failed to get obj from framebuffer\n");
++ return -EINVAL;
++ }
++
+ rbo = gem_to_amdgpu_bo(obj);
+ adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+-
+ r = amdgpu_bo_reserve(rbo, true);
+ if (r) {
+ dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+--
+2.43.0
+
--- /dev/null
+From f560112ba29258bc2813bf66c0fb4e9c33c0f84a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 16:26:58 +1000
+Subject: ethtool: check device is present when getting link settings
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit a699781c79ecf6cfe67fb00a0331b4088c7c8466 ]
+
+A sysfs reader can race with a device reset or removal, attempting to
+read device state when the device is not actually present. eg:
+
+ [exception RIP: qed_get_current_link+17]
+ #8 [ffffb9e4f2907c48] qede_get_link_ksettings at ffffffffc07a994a [qede]
+ #9 [ffffb9e4f2907cd8] __rh_call_get_link_ksettings at ffffffff992b01a3
+ #10 [ffffb9e4f2907d38] __ethtool_get_link_ksettings at ffffffff992b04e4
+ #11 [ffffb9e4f2907d90] duplex_show at ffffffff99260300
+ #12 [ffffb9e4f2907e38] dev_attr_show at ffffffff9905a01c
+ #13 [ffffb9e4f2907e50] sysfs_kf_seq_show at ffffffff98e0145b
+ #14 [ffffb9e4f2907e68] seq_read at ffffffff98d902e3
+ #15 [ffffb9e4f2907ec8] vfs_read at ffffffff98d657d1
+ #16 [ffffb9e4f2907f00] ksys_read at ffffffff98d65c3f
+ #17 [ffffb9e4f2907f38] do_syscall_64 at ffffffff98a052fb
+
+ crash> struct net_device.state ffff9a9d21336000
+ state = 5,
+
+state 5 is __LINK_STATE_START (0b1) and __LINK_STATE_NOCARRIER (0b100).
+The device is not present, note lack of __LINK_STATE_PRESENT (0b10).
+
+This is the same sort of panic as observed in commit 4224cfd7fb65
+("net-sysfs: add check for netdevice being present to speed_show").
+
+There are many other callers of __ethtool_get_link_ksettings() which
+don't have a device presence check.
+
+Move this check into ethtool to protect all callers.
+
+Fixes: d519e17e2d01 ("net: export device speed and duplex via sysfs")
+Fixes: 4224cfd7fb65 ("net-sysfs: add check for netdevice being present to speed_show")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Link: https://patch.msgid.link/8bae218864beaa44ed01628140475b9bf641c5b0.1724393671.git.jamie.bainbridge@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/net-sysfs.c | 2 +-
+ net/ethtool/ioctl.c | 3 +++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index fdf3308b03350..8a06f97320e04 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -215,7 +215,7 @@ static ssize_t speed_show(struct device *dev,
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+- if (netif_running(netdev) && netif_device_present(netdev)) {
++ if (netif_running(netdev)) {
+ struct ethtool_link_ksettings cmd;
+
+ if (!__ethtool_get_link_ksettings(netdev, &cmd))
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index e31d1247b9f08..442c4c343e155 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -445,6 +445,9 @@ int __ethtool_get_link_ksettings(struct net_device *dev,
+ if (!dev->ethtool_ops->get_link_ksettings)
+ return -EOPNOTSUPP;
+
++ if (!netif_device_present(dev))
++ return -ENODEV;
++
+ memset(link_ksettings, 0, sizeof(*link_ksettings));
+ return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
+ }
+--
+2.43.0
+
--- /dev/null
+From 2d4d7d5fedc736253b9dc4f905dfd8632cdbc970 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 25 Aug 2024 12:16:38 -0700
+Subject: gtp: fix a potential NULL pointer dereference
+
+From: Cong Wang <cong.wang@bytedance.com>
+
+[ Upstream commit defd8b3c37b0f9cb3e0f60f47d3d78d459d57fda ]
+
+When sockfd_lookup() fails, gtp_encap_enable_socket() returns a
+NULL pointer, but its callers only check for error pointers and thus
+miss the NULL pointer case.
+
+Fix it by returning an error pointer with the error code carried from
+sockfd_lookup().
+
+(I found this bug during code inspection.)
+
+Fixes: 1e3a3abd8b28 ("gtp: make GTP sockets in gtp_newlink optional")
+Cc: Andreas Schultz <aschultz@tpip.net>
+Cc: Harald Welte <laforge@gnumonks.org>
+Signed-off-by: Cong Wang <cong.wang@bytedance.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Link: https://patch.msgid.link/20240825191638.146748-1-xiyou.wangcong@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/gtp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
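+A minimal sketch of why the bare NULL return slips past the callers'
+existing error handling (hypothetical caller shown for illustration only,
+assuming the IS_ERR()-only check described above):
+
+        struct sock *sk = gtp_encap_enable_socket(fd, type, gtp);
+
+        if (IS_ERR(sk))                 /* catches ERR_PTR(-err)...      */
+                return PTR_ERR(sk);     /* ...but IS_ERR(NULL) is false, */
+                                        /* so a NULL sk escapes and is   */
+                                        /* dereferenced later            */
+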
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 512daeb14e28b..bbe8d76b1595e 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -1219,7 +1219,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ pr_debug("gtp socket fd=%d not found\n", fd);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ sk = sock->sk;
+--
+2.43.0
+
--- /dev/null
+From bad897f7cbef53c5c34be86378cfffa7848473f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 11:45:55 -0300
+Subject: iommu: Do not return 0 from map_pages if it doesn't do anything
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+[ Upstream commit 6093cd582f8e027117a8d4ad5d129a1aacdc53d2 ]
+
+These three implementations of map_pages() all succeed if a mapping is
+requested with no read or write permission. Since they return back to
+__iommu_map() leaving the mapped output as 0, this triggers an infinite
+loop. Therefore nothing is using no-access protection bits.
+
+Further, VFIO and iommufd rely on iommu_iova_to_phys() to get back PFNs
+stored by map; if iommu_map() succeeds but iommu_iova_to_phys() fails,
+that will create serious bugs.
+
+Thus remove this never-used "nothing to do" concept and just fail the map
+immediately.
+
+Fixes: e5fc9753b1a8 ("iommu/io-pgtable: Add ARMv7 short descriptor support")
+Fixes: e1d3c0fd701d ("iommu: add ARM LPAE page table allocator")
+Fixes: 745ef1092bcf ("iommu/io-pgtable: Move Apple DART support to its own file")
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Acked-by: Will Deacon <will@kernel.org>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Link: https://lore.kernel.org/r/2-v1-1211e1294c27+4b1-iommu_no_prot_jgg@nvidia.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/io-pgtable-arm-v7s.c | 3 +--
+ drivers/iommu/io-pgtable-arm.c | 3 +--
+ drivers/iommu/io-pgtable-dart.c | 3 +--
+ 3 files changed, 3 insertions(+), 6 deletions(-)
+
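+A minimal sketch of why a "successful" zero-length mapping hangs the
+caller (loosely paraphrased from __iommu_map(); treat the exact loop shape
+as an assumption):
+
+        while (size) {
+                size_t mapped = 0;
+
+                ret = ops->map_pages(..., &mapped);     /* returns 0...  */
+                if (ret)
+                        break;
+
+                iova  += mapped;        /* ...but mapped stays 0, so no  */
+                paddr += mapped;        /* progress is ever made and the */
+                size  -= mapped;        /* loop never terminates         */
+        }
+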
+diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
+index ba3115fd0f86a..08ec39111e608 100644
+--- a/drivers/iommu/io-pgtable-arm-v7s.c
++++ b/drivers/iommu/io-pgtable-arm-v7s.c
+@@ -552,9 +552,8 @@ static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ paddr >= (1ULL << data->iop.cfg.oas)))
+ return -ERANGE;
+
+- /* If no access, then nothing to do */
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+- return 0;
++ return -EINVAL;
+
+ while (pgcount--) {
+ ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index 0ba817e863465..1e38a24eb71cb 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -480,9 +480,8 @@ static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ if (WARN_ON(iaext || paddr >> cfg->oas))
+ return -ERANGE;
+
+- /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+- return 0;
++ return -EINVAL;
+
+ prot = arm_lpae_prot_to_pte(data, iommu_prot);
+ ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
+diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
+index 74b1ef2b96bee..10811e0b773d3 100644
+--- a/drivers/iommu/io-pgtable-dart.c
++++ b/drivers/iommu/io-pgtable-dart.c
+@@ -250,9 +250,8 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ if (WARN_ON(paddr >> cfg->oas))
+ return -ERANGE;
+
+- /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+- return 0;
++ return -EINVAL;
+
+ tbl = dart_get_table(data, iova);
+
+--
+2.43.0
+
--- /dev/null
+From cca2a69fae4c54e3929bf420b85ffbf32c707969 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Aug 2024 11:49:16 +0000
+Subject: net: busy-poll: use ktime_get_ns() instead of local_clock()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 0870b0d8b393dde53106678a1e2cec9dfa52f9b7 ]
+
+Typically, busy-polling durations are below 100 usec.
+
+When/if the busy-poller thread migrates to another cpu,
+local_clock() can be off by +/-2msec or more for small
+values of HZ, depending on the platform.
+
+Use ktime_get_ns() to ensure deterministic behavior,
+which is the whole point of busy-polling.
+
+Fixes: 060212928670 ("net: add low latency socket poll")
+Fixes: 9a3c71aa8024 ("net: convert low latency sockets to sched_clock()")
+Fixes: 37089834528b ("sched, net: Fixup busy_loop_us_clock()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Mina Almasry <almasrymina@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Joe Damato <jdamato@fastly.com>
+Link: https://patch.msgid.link/20240827114916.223377-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/busy_poll.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
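+For reference, busy_loop_current_time() still returns approximate
+microseconds: the >> 10 shift divides nanoseconds by 1024 rather than
+1000 (about 2.4% low), which is close enough for comparing against the
+microsecond busy-poll budget; only the clock source changes here:
+
+        t ~= ktime_get_ns() >> 10       /* ns / 1024, roughly usec */
+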
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index f90f0021f5f2d..5387e1daa5a8b 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -63,7 +63,7 @@ static inline bool sk_can_busy_loop(struct sock *sk)
+ static inline unsigned long busy_loop_current_time(void)
+ {
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+- return (unsigned long)(local_clock() >> 10);
++ return (unsigned long)(ktime_get_ns() >> 10);
+ #else
+ return 0;
+ #endif
+--
+2.43.0
+
--- /dev/null
+From db29b4ef897dba5679d4799400f5b9bb4563491a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Aug 2024 12:45:22 +0200
+Subject: netfilter: nf_tables: restore IP sanity checks for netdev/egress
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 5fd0628918977a0afdc2e6bc562d8751b5d3b8c5 ]
+
+Subtract the network offset from skb->len before performing the IPv4
+header sanity checks, then adjust the transport offset to be an offset
+from the mac header.
+
+Jorge Ortiz says:
+
+When small UDP packets (< 4 bytes payload) are sent from eth0,
+`meta l4proto udp` condition is not met because `NFT_PKTINFO_L4PROTO` is
+not set. This happens because there is a comparison that checks if the
+transport header offset exceeds the total length. This comparison does
+not take into account the fact that the skb network offset might be
+non-zero in egress mode (e.g., 14 bytes for Ethernet header).
+
+Fixes: 0ae8e4cca787 ("netfilter: nf_tables: set transport offset from mac header for netdev/egress")
+Reported-by: Jorge Ortiz <jorge.ortiz.escribano@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables_ipv4.h | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
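+A worked example of the failing case described above (numbers assumed for
+illustration: a 2-byte UDP payload sent over Ethernet at egress):
+
+        skb->len          = 14 (eth) + 20 (ip) + 8 (udp) + 2 = 44
+        len (iph_totlen)  = 20 + 8 + 2                       = 30
+        old thoff         = network offset (14) + ihl * 4    = 34
+        old "len < thoff" = 30 < 34 -> -1, NFT_PKTINFO_L4PROTO never set
+        new thoff         = 20, new skb_len = 44 - 14 = 30 -> checks pass
+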
+diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
+index 5225d2bd1a6e9..10b0a7c9e721f 100644
+--- a/include/net/netfilter/nf_tables_ipv4.h
++++ b/include/net/netfilter/nf_tables_ipv4.h
+@@ -19,7 +19,7 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
+ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+ {
+ struct iphdr *iph, _iph;
+- u32 len, thoff;
++ u32 len, thoff, skb_len;
+
+ iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*iph), &_iph);
+@@ -30,15 +30,17 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+ return -1;
+
+ len = iph_totlen(pkt->skb, iph);
+- thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
+- if (pkt->skb->len < len)
++ thoff = iph->ihl * 4;
++ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
++
++ if (skb_len < len)
+ return -1;
+ else if (len < thoff)
+ return -1;
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = iph->protocol;
+- pkt->thoff = thoff;
++ pkt->thoff = skb_network_offset(pkt->skb) + thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 294d216ef1307ee6744a44ad6de33c29310694d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Aug 2024 15:03:23 +0200
+Subject: netfilter: nf_tables_ipv6: consider network offset in netdev/egress
+ validation
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 70c261d500951cf3ea0fcf32651aab9a65a91471 ]
+
+From netdev/egress, skb->len can include the ethernet header; therefore,
+subtract the network offset from skb->len when validating the IPv6
+packet length.
+
+Fixes: 42df6e1d221d ("netfilter: Introduce egress hook")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables_ipv6.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
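+A worked example of what the old check could let through (numbers assumed
+for illustration: an egress Ethernet frame carrying only 2 bytes of IPv6
+payload but claiming payload_len = 10):
+
+        skb->len       = 14 (eth) + 40 (ipv6) + 2 = 56
+        claimed length = payload_len (10) + 40    = 50
+        old check      : 50 > 56 ?         no  -> bogus packet accepted
+        new check      : 50 > 56 - 14 (42) ? yes -> correctly rejected
+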
+diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
+index ec7eaeaf4f04c..f1d6a65280475 100644
+--- a/include/net/netfilter/nf_tables_ipv6.h
++++ b/include/net/netfilter/nf_tables_ipv6.h
+@@ -31,8 +31,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
+ struct ipv6hdr *ip6h, _ip6h;
+ unsigned int thoff = 0;
+ unsigned short frag_off;
++ u32 pkt_len, skb_len;
+ int protohdr;
+- u32 pkt_len;
+
+ ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*ip6h), &_ip6h);
+@@ -43,7 +43,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
+ return -1;
+
+ pkt_len = ntohs(ip6h->payload_len);
+- if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
++ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
++ if (pkt_len + sizeof(*ip6h) > skb_len)
+ return -1;
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+--
+2.43.0
+
--- /dev/null
+From 937a708533e3a824c722d32ab00ff4515b6f87cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Aug 2024 11:48:22 +0300
+Subject: nfc: pn533: Add poll mod list filling check
+
+From: Aleksandr Mishin <amishin@t-argos.ru>
+
+[ Upstream commit febccb39255f9df35527b88c953b2e0deae50e53 ]
+
+If the im_protocols value is 1 and the tm_protocols value is 0, this
+combination successfully passes the check
+'if (!im_protocols && !tm_protocols)' in nfc_start_poll().
+But then, after the pn533_poll_create_mod_list() call in
+pn533_start_poll(), the poll mod list will remain empty and
+dev->poll_mod_count will remain 0, which leads to a division by zero.
+
+Normally no im protocol has value 1 in the mask, so this combination is
+not expected by the driver. But these protocol values actually come from
+userspace via the Netlink interface (NFC_CMD_START_POLL operation). So a
+broken or malicious program may pass a message containing a "bad"
+combination of protocol parameter values so that dev->poll_mod_count
+is not incremented inside pn533_poll_create_mod_list(), thus leading
+to a division by zero.
+Call trace looks like:
+nfc_genl_start_poll()
+ nfc_start_poll()
+ ->start_poll()
+ pn533_start_poll()
+
+Add poll mod list filling check.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: dfccd0f58044 ("NFC: pn533: Add some polling entropy")
+Signed-off-by: Aleksandr Mishin <amishin@t-argos.ru>
+Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://patch.msgid.link/20240827084822.18785-1-amishin@t-argos.ru
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nfc/pn533/pn533.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
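+A minimal sketch of the resulting division by zero (paraphrased from
+pn533_start_poll(); treat the exact modulo expression as an assumption):
+
+        /* im_protocols == 1 matches no supported modulation, so the
+         * list stays empty and dev->poll_mod_count stays 0 ...
+         */
+        pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
+
+        get_random_bytes(&rand_mod, sizeof(rand_mod));
+        rand_mod %= dev->poll_mod_count;        /* ... modulo by zero */
+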
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
+index f0cac19005527..2e0871409926b 100644
+--- a/drivers/nfc/pn533/pn533.c
++++ b/drivers/nfc/pn533/pn533.c
+@@ -1723,6 +1723,11 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
+ }
+
+ pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
++ if (!dev->poll_mod_count) {
++ nfc_err(dev->dev,
++ "Poll mod list is empty\n");
++ return -EINVAL;
++ }
+
+ /* Do not always start polling from the same modulation */
+ get_random_bytes(&rand_mod, sizeof(rand_mod));
+--
+2.43.0
+
--- /dev/null
+From dc3d80e264ea6e5057d0d9cd992f5d7cf389b009 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jun 2023 19:32:49 +0530
+Subject: phy: xilinx: add runtime PM support
+
+From: Piyush Mehta <piyush.mehta@amd.com>
+
+[ Upstream commit b3db66f624468ab4a0385586bc7f4221e477d6b2 ]
+
+Add runtime power management support to the Xilinx phy driver. Using the
+new DEFINE_RUNTIME_DEV_PM_OPS macro allows the compiler to remove the
+unused dev_pm_ops structure and related functions if !CONFIG_PM, without
+the need to mark the functions __maybe_unused.
+
+Signed-off-by: Piyush Mehta <piyush.mehta@amd.com>
+Link: https://lore.kernel.org/r/20230613140250.3018947-2-piyush.mehta@amd.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: 5af9b304bc60 ("phy: xilinx: phy-zynqmp: Fix SGMII linkup failure on resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/xilinx/phy-zynqmp.c | 35 ++++++++++++++++++++++++++-------
+ 1 file changed, 28 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
+index 9be9535ad7ab7..964d8087fcf46 100644
+--- a/drivers/phy/xilinx/phy-zynqmp.c
++++ b/drivers/phy/xilinx/phy-zynqmp.c
+@@ -21,6 +21,7 @@
+ #include <linux/of.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
+ #include <linux/slab.h>
+
+ #include <dt-bindings/phy/phy.h>
+@@ -821,7 +822,7 @@ static struct phy *xpsgtr_xlate(struct device *dev,
+ * Power Management
+ */
+
+-static int __maybe_unused xpsgtr_suspend(struct device *dev)
++static int xpsgtr_runtime_suspend(struct device *dev)
+ {
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ unsigned int i;
+@@ -836,7 +837,7 @@ static int __maybe_unused xpsgtr_suspend(struct device *dev)
+ return 0;
+ }
+
+-static int __maybe_unused xpsgtr_resume(struct device *dev)
++static int xpsgtr_runtime_resume(struct device *dev)
+ {
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ unsigned int icm_cfg0, icm_cfg1;
+@@ -877,10 +878,8 @@ static int __maybe_unused xpsgtr_resume(struct device *dev)
+ return err;
+ }
+
+-static const struct dev_pm_ops xpsgtr_pm_ops = {
+- SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
+-};
+-
++static DEFINE_RUNTIME_DEV_PM_OPS(xpsgtr_pm_ops, xpsgtr_runtime_suspend,
++ xpsgtr_runtime_resume, NULL);
+ /*
+ * Probe & Platform Driver
+ */
+@@ -1006,6 +1005,16 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ ret = PTR_ERR(provider);
+ goto err_clk_put;
+ }
++
++ pm_runtime_set_active(gtr_dev->dev);
++ pm_runtime_enable(gtr_dev->dev);
++
++ ret = pm_runtime_resume_and_get(gtr_dev->dev);
++ if (ret < 0) {
++ pm_runtime_disable(gtr_dev->dev);
++ goto err_clk_put;
++ }
++
+ return 0;
+
+ err_clk_put:
+@@ -1015,6 +1024,17 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++static int xpsgtr_remove(struct platform_device *pdev)
++{
++ struct xpsgtr_dev *gtr_dev = platform_get_drvdata(pdev);
++
++ pm_runtime_disable(gtr_dev->dev);
++ pm_runtime_put_noidle(gtr_dev->dev);
++ pm_runtime_set_suspended(gtr_dev->dev);
++
++ return 0;
++}
++
+ static const struct of_device_id xpsgtr_of_match[] = {
+ { .compatible = "xlnx,zynqmp-psgtr", },
+ { .compatible = "xlnx,zynqmp-psgtr-v1.1", },
+@@ -1024,10 +1044,11 @@ MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
+
+ static struct platform_driver xpsgtr_driver = {
+ .probe = xpsgtr_probe,
++ .remove = xpsgtr_remove,
+ .driver = {
+ .name = "xilinx-psgtr",
+ .of_match_table = xpsgtr_of_match,
+- .pm = &xpsgtr_pm_ops,
++ .pm = pm_ptr(&xpsgtr_pm_ops),
+ },
+ };
+
+--
+2.43.0
+
--- /dev/null
+From 877b2b5aa3f3f9b6233b21cfb56ca37a935af3e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jun 2023 19:32:50 +0530
+Subject: phy: xilinx: phy-zynqmp: dynamic clock support for power-save
+
+From: Piyush Mehta <piyush.mehta@amd.com>
+
+[ Upstream commit 25d70083351318b44ae699d92c042dcb18a738ea ]
+
+Enabling the clock for all lanes consumes power whether the PHY is
+active or inactive. To resolve this, enable/disable the clocks in
+phy_init/phy_exit.
+
+By default the clock is disabled for all lanes. Whenever phy_init is
+called from the USB, SATA, or display driver, etc., it enables the
+required clock for the requested lane. On the phy_exit cycle, it
+disables the clock for the active PHYs.
+
+During the suspend/resume cycle, each USB/SATA/display driver calls
+phy_exit/phy_init individually, disabling the clock on exit and
+enabling it on initialization for the active PHYs.
+
+Signed-off-by: Piyush Mehta <piyush.mehta@amd.com>
+Link: https://lore.kernel.org/r/20230613140250.3018947-3-piyush.mehta@amd.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: 5af9b304bc60 ("phy: xilinx: phy-zynqmp: Fix SGMII linkup failure on resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/xilinx/phy-zynqmp.c | 61 ++++++++-------------------------
+ 1 file changed, 15 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
+index 964d8087fcf46..a8782aad62ca4 100644
+--- a/drivers/phy/xilinx/phy-zynqmp.c
++++ b/drivers/phy/xilinx/phy-zynqmp.c
+@@ -573,6 +573,10 @@ static int xpsgtr_phy_init(struct phy *phy)
+
+ mutex_lock(>r_dev->gtr_mutex);
+
++ /* Configure and enable the clock when peripheral phy_init call */
++ if (clk_prepare_enable(gtr_dev->clk[gtr_phy->lane]))
++ goto out;
++
+ /* Skip initialization if not required. */
+ if (!xpsgtr_phy_init_required(gtr_phy))
+ goto out;
+@@ -617,9 +621,13 @@ static int xpsgtr_phy_init(struct phy *phy)
+ static int xpsgtr_phy_exit(struct phy *phy)
+ {
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
++ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+
+ gtr_phy->skip_phy_init = false;
+
++ /* Ensure that disable clock only, which configure for lane */
++ clk_disable_unprepare(gtr_dev->clk[gtr_phy->lane]);
++
+ return 0;
+ }
+
+@@ -825,15 +833,11 @@ static struct phy *xpsgtr_xlate(struct device *dev,
+ static int xpsgtr_runtime_suspend(struct device *dev)
+ {
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+- unsigned int i;
+
+ /* Save the snapshot ICM_CFG registers. */
+ gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+
+- for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
+- clk_disable_unprepare(gtr_dev->clk[i]);
+-
+ return 0;
+ }
+
+@@ -843,13 +847,6 @@ static int xpsgtr_runtime_resume(struct device *dev)
+ unsigned int icm_cfg0, icm_cfg1;
+ unsigned int i;
+ bool skip_phy_init;
+- int err;
+-
+- for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++) {
+- err = clk_prepare_enable(gtr_dev->clk[i]);
+- if (err)
+- goto err_clk_put;
+- }
+
+ icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+@@ -870,12 +867,6 @@ static int xpsgtr_runtime_resume(struct device *dev)
+ gtr_dev->phys[i].skip_phy_init = skip_phy_init;
+
+ return 0;
+-
+-err_clk_put:
+- while (i--)
+- clk_disable_unprepare(gtr_dev->clk[i]);
+-
+- return err;
+ }
+
+ static DEFINE_RUNTIME_DEV_PM_OPS(xpsgtr_pm_ops, xpsgtr_runtime_suspend,
+@@ -887,7 +878,6 @@ static DEFINE_RUNTIME_DEV_PM_OPS(xpsgtr_pm_ops, xpsgtr_runtime_suspend,
+ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
+ {
+ unsigned int refclk;
+- int ret;
+
+ for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
+ unsigned long rate;
+@@ -898,19 +888,14 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
+ snprintf(name, sizeof(name), "ref%u", refclk);
+ clk = devm_clk_get_optional(gtr_dev->dev, name);
+ if (IS_ERR(clk)) {
+- ret = dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
+- "Failed to get reference clock %u\n",
+- refclk);
+- goto err_clk_put;
++ return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
++ "Failed to get ref clock %u\n",
++ refclk);
+ }
+
+ if (!clk)
+ continue;
+
+- ret = clk_prepare_enable(clk);
+- if (ret)
+- goto err_clk_put;
+-
+ gtr_dev->clk[refclk] = clk;
+
+ /*
+@@ -930,18 +915,11 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
+ dev_err(gtr_dev->dev,
+ "Invalid rate %lu for reference clock %u\n",
+ rate, refclk);
+- ret = -EINVAL;
+- goto err_clk_put;
++ return -EINVAL;
+ }
+ }
+
+ return 0;
+-
+-err_clk_put:
+- while (refclk--)
+- clk_disable_unprepare(gtr_dev->clk[refclk]);
+-
+- return ret;
+ }
+
+ static int xpsgtr_probe(struct platform_device *pdev)
+@@ -950,7 +928,6 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ struct xpsgtr_dev *gtr_dev;
+ struct phy_provider *provider;
+ unsigned int port;
+- unsigned int i;
+ int ret;
+
+ gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
+@@ -990,8 +967,7 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+- ret = PTR_ERR(phy);
+- goto err_clk_put;
++ return PTR_ERR(phy);
+ }
+
+ gtr_phy->phy = phy;
+@@ -1002,8 +978,7 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(&pdev->dev, "registering provider failed\n");
+- ret = PTR_ERR(provider);
+- goto err_clk_put;
++ return PTR_ERR(provider);
+ }
+
+ pm_runtime_set_active(gtr_dev->dev);
+@@ -1012,16 +987,10 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ ret = pm_runtime_resume_and_get(gtr_dev->dev);
+ if (ret < 0) {
+ pm_runtime_disable(gtr_dev->dev);
+- goto err_clk_put;
++ return ret;
+ }
+
+ return 0;
+-
+-err_clk_put:
+- for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
+- clk_disable_unprepare(gtr_dev->clk[i]);
+-
+- return ret;
+ }
+
+ static int xpsgtr_remove(struct platform_device *pdev)
+--
+2.43.0
+
--- /dev/null
+From 0c9b0966e08a53391d7cc4ba1e642bb27fbcf0ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Aug 2024 11:29:07 +0530
+Subject: phy: xilinx: phy-zynqmp: Fix SGMII linkup failure on resume
+
+From: Piyush Mehta <piyush.mehta@amd.com>
+
+[ Upstream commit 5af9b304bc6010723c02f74de0bfd24ff19b1a10 ]
+
+On a few Kria KR260 Robotics Starter Kits the PS-GEM SGMII linkup does
+not happen after resume. This is because the serdes registers are reset
+when FPD is off (in the suspend state) and need to be reprogrammed in
+the resume path with the same default initialization as done in the
+first-stage bootloader psu_init routine.
+
+To address the failure, introduce a set of serdes registers to be saved
+in the suspend path and then restored on resume.
+
+Fixes: 4a33bea00314 ("phy: zynqmp: Add PHY driver for the Xilinx ZynqMP Gigabit Transceiver")
+Signed-off-by: Piyush Mehta <piyush.mehta@amd.com>
+Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Link: https://lore.kernel.org/r/1722837547-2578381-1-git-send-email-radhey.shyam.pandey@amd.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/xilinx/phy-zynqmp.c | 56 +++++++++++++++++++++++++++++++++
+ 1 file changed, 56 insertions(+)
+
+diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
+index a8782aad62ca4..75b0f9f31c81f 100644
+--- a/drivers/phy/xilinx/phy-zynqmp.c
++++ b/drivers/phy/xilinx/phy-zynqmp.c
+@@ -166,6 +166,24 @@
+ /* Timeout values */
+ #define TIMEOUT_US 1000
+
++/* Lane 0/1/2/3 offset */
++#define DIG_8(n) ((0x4000 * (n)) + 0x1074)
++#define ILL13(n) ((0x4000 * (n)) + 0x1994)
++#define DIG_10(n) ((0x4000 * (n)) + 0x107c)
++#define RST_DLY(n) ((0x4000 * (n)) + 0x19a4)
++#define BYP_15(n) ((0x4000 * (n)) + 0x1038)
++#define BYP_12(n) ((0x4000 * (n)) + 0x102c)
++#define MISC3(n) ((0x4000 * (n)) + 0x19ac)
++#define EQ11(n) ((0x4000 * (n)) + 0x1978)
++
++static u32 save_reg_address[] = {
++ /* Lane 0/1/2/3 Register */
++ DIG_8(0), ILL13(0), DIG_10(0), RST_DLY(0), BYP_15(0), BYP_12(0), MISC3(0), EQ11(0),
++ DIG_8(1), ILL13(1), DIG_10(1), RST_DLY(1), BYP_15(1), BYP_12(1), MISC3(1), EQ11(1),
++ DIG_8(2), ILL13(2), DIG_10(2), RST_DLY(2), BYP_15(2), BYP_12(2), MISC3(2), EQ11(2),
++ DIG_8(3), ILL13(3), DIG_10(3), RST_DLY(3), BYP_15(3), BYP_12(3), MISC3(3), EQ11(3),
++};
++
+ struct xpsgtr_dev;
+
+ /**
+@@ -214,6 +232,7 @@ struct xpsgtr_phy {
+ * @tx_term_fix: fix for GT issue
+ * @saved_icm_cfg0: stored value of ICM CFG0 register
+ * @saved_icm_cfg1: stored value of ICM CFG1 register
++ * @saved_regs: registers to be saved/restored during suspend/resume
+ */
+ struct xpsgtr_dev {
+ struct device *dev;
+@@ -226,6 +245,7 @@ struct xpsgtr_dev {
+ bool tx_term_fix;
+ unsigned int saved_icm_cfg0;
+ unsigned int saved_icm_cfg1;
++ u32 *saved_regs;
+ };
+
+ /*
+@@ -299,6 +319,32 @@ static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
+ writel((readl(addr) & ~clr) | set, addr);
+ }
+
++/**
++ * xpsgtr_save_lane_regs - Saves registers on suspend
++ * @gtr_dev: pointer to phy controller context structure
++ */
++static void xpsgtr_save_lane_regs(struct xpsgtr_dev *gtr_dev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
++ gtr_dev->saved_regs[i] = xpsgtr_read(gtr_dev,
++ save_reg_address[i]);
++}
++
++/**
++ * xpsgtr_restore_lane_regs - Restores registers on resume
++ * @gtr_dev: pointer to phy controller context structure
++ */
++static void xpsgtr_restore_lane_regs(struct xpsgtr_dev *gtr_dev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
++ xpsgtr_write(gtr_dev, save_reg_address[i],
++ gtr_dev->saved_regs[i]);
++}
++
+ /*
+ * Hardware Configuration
+ */
+@@ -838,6 +884,8 @@ static int xpsgtr_runtime_suspend(struct device *dev)
+ gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+
++ xpsgtr_save_lane_regs(gtr_dev);
++
+ return 0;
+ }
+
+@@ -848,6 +896,8 @@ static int xpsgtr_runtime_resume(struct device *dev)
+ unsigned int i;
+ bool skip_phy_init;
+
++ xpsgtr_restore_lane_regs(gtr_dev);
++
+ icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+
+@@ -990,6 +1040,12 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ gtr_dev->saved_regs = devm_kmalloc(gtr_dev->dev,
++ sizeof(save_reg_address),
++ GFP_KERNEL);
++ if (!gtr_dev->saved_regs)
++ return -ENOMEM;
++
+ return 0;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 05674588d388e339132e68b02c29e691df8422f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Aug 2024 15:07:11 +0200
+Subject: sctp: fix association labeling in the duplicate COOKIE-ECHO case
+
+From: Ondrej Mosnacek <omosnace@redhat.com>
+
+[ Upstream commit 3a0504d54b3b57f0d7bf3d9184a00c9f8887f6d7 ]
+
+sctp_sf_do_5_2_4_dupcook() currently calls security_sctp_assoc_request()
+on new_asoc, but as it turns out, this association is always discarded
+and the LSM labels never get into the final association (asoc).
+
+This can be reproduced by having two SCTP endpoints try to initiate an
+association with each other at approximately the same time and then peel
+off the association into a new socket, which exposes the unitialized
+labels and triggers SELinux denials.
+
+Fix it by calling security_sctp_assoc_request() on asoc instead of
+new_asoc. Xin Long also suggested limit calling the hook only to cases
+A, B, and D, since in cases C and E the COOKIE ECHO chunk is discarded
+and the association doesn't enter the ESTABLISHED state, so rectify that
+as well.
+
+One related caveat with SELinux and peer labeling: When an SCTP
+connection is set up simultaneously in this way, we will end up with an
+association that is initialized with security_sctp_assoc_request() on
+both sides, so the MLS component of the security context of the
+association will get swapped between the peers, instead of just one side
+setting it to the other's MLS component. However, at that point
+security_sctp_assoc_request() had already been called on both sides in
+sctp_sf_do_unexpected_init() (on a temporary association) and thus if
+the exchange didn't fail before due to MLS, it won't fail now either
+(most likely both endpoints have the same MLS range).
+
+Tested by:
+ - reproducer from https://src.fedoraproject.org/tests/selinux/pull-request/530
+ - selinux-testsuite (https://github.com/SELinuxProject/selinux-testsuite/)
+ - sctp-tests (https://github.com/sctp/sctp-tests) - no tests failed
+ that wouldn't fail also without the patch applied
+
+Fixes: c081d53f97a1 ("security: pass asoc to sctp_assoc_request and sctp_sk_clone")
+Suggested-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
+Acked-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Paul Moore <paul@paul-moore.com> (LSM/SELinux)
+Link: https://patch.msgid.link/20240826130711.141271-1-omosnace@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sctp/sm_statefuns.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 5383b6a9da61c..a56749a50e5c5 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -2261,12 +2261,6 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
+ }
+ }
+
+- /* Update socket peer label if first association. */
+- if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
+- sctp_association_free(new_asoc);
+- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+- }
+-
+ /* Set temp so that it won't be added into hashtable */
+ new_asoc->temp = 1;
+
+@@ -2275,6 +2269,22 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
+ */
+ action = sctp_tietags_compare(new_asoc, asoc);
+
++ /* In cases C and E the association doesn't enter the ESTABLISHED
++ * state, so there is no need to call security_sctp_assoc_request().
++ */
++ switch (action) {
++ case 'A': /* Association restart. */
++ case 'B': /* Collision case B. */
++ case 'D': /* Collision case D. */
++ /* Update socket peer label if first association. */
++ if (security_sctp_assoc_request((struct sctp_association *)asoc,
++ chunk->head_skb ?: chunk->skb)) {
++ sctp_association_free(new_asoc);
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++ }
++ break;
++ }
++
+ switch (action) {
+ case 'A': /* Association restart. */
+ retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
+--
+2.43.0
+
--- /dev/null
+From 4ac0a1fc98000432c11949605b8d4cc7e48f56bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Aug 2024 19:15:11 +0200
+Subject: selftests: forwarding: local_termination: Down ports on cleanup
+
+From: Petr Machata <petrm@nvidia.com>
+
+[ Upstream commit 65a3cce43d5b4c53cf16b0be1a03991f665a0806 ]
+
+This test neglects to put ports down on cleanup. Fix it.
+
+Fixes: 90b9566aa5cd ("selftests: forwarding: add a test for local_termination.sh")
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://patch.msgid.link/bf9b79f45de378f88344d44550f0a5052b386199.1724692132.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/forwarding/local_termination.sh | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
+index c5b0cbc85b3e0..9b5a63519b949 100755
+--- a/tools/testing/selftests/net/forwarding/local_termination.sh
++++ b/tools/testing/selftests/net/forwarding/local_termination.sh
+@@ -278,6 +278,10 @@ bridge()
+ cleanup()
+ {
+ pre_cleanup
++
++ ip link set $h2 down
++ ip link set $h1 down
++
+ vrf_cleanup
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 7ba587726e50974ae112c77eb514cbba9a3a172c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 18:25:37 +0200
+Subject: selftests: forwarding: no_forwarding: Down ports on cleanup
+
+From: Petr Machata <petrm@nvidia.com>
+
+[ Upstream commit e8497d6951ee8541d73784f9aac9942a7f239980 ]
+
+This test neglects to put ports down on cleanup. Fix it.
+
+Fixes: 476a4f05d9b8 ("selftests: forwarding: add a no_forwarding.sh test")
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/0baf91dc24b95ae0cadfdf5db05b74888e6a228a.1724430120.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/forwarding/no_forwarding.sh | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/tools/testing/selftests/net/forwarding/no_forwarding.sh b/tools/testing/selftests/net/forwarding/no_forwarding.sh
+index af3b398d13f01..9e677aa64a06a 100755
+--- a/tools/testing/selftests/net/forwarding/no_forwarding.sh
++++ b/tools/testing/selftests/net/forwarding/no_forwarding.sh
+@@ -233,6 +233,9 @@ cleanup()
+ {
+ pre_cleanup
+
++ ip link set dev $swp2 down
++ ip link set dev $swp1 down
++
+ h2_destroy
+ h1_destroy
+
+--
+2.43.0
+
mm-fix-missing-folio-invalidation-calls-during-trunc.patch
btrfs-fix-extent-map-use-after-free-when-adding-pages-to-compressed-bio.patch
soundwire-stream-fix-programming-slave-ports-for-non-continous-port-maps.patch
+phy-xilinx-add-runtime-pm-support.patch
+phy-xilinx-phy-zynqmp-dynamic-clock-support-for-powe.patch
+phy-xilinx-phy-zynqmp-fix-sgmii-linkup-failure-on-re.patch
+dmaengine-dw-add-peripheral-bus-width-verification.patch
+dmaengine-dw-add-memory-bus-width-verification.patch
+bluetooth-hci_core-fix-not-handling-hibernation-acti.patch
+iommu-do-not-return-0-from-map_pages-if-it-doesn-t-d.patch
+netfilter-nf_tables-restore-ip-sanity-checks-for-net.patch
+wifi-iwlwifi-fw-fix-wgds-rev-3-exact-size.patch
+ethtool-check-device-is-present-when-getting-link-se.patch
+netfilter-nf_tables_ipv6-consider-network-offset-in-.patch
+selftests-forwarding-no_forwarding-down-ports-on-cle.patch
+selftests-forwarding-local_termination-down-ports-on.patch
+bonding-implement-xdo_dev_state_free-and-call-it-aft.patch
+gtp-fix-a-potential-null-pointer-dereference.patch
+sctp-fix-association-labeling-in-the-duplicate-cooki.patch
+drm-amd-display-avoid-using-null-object-of-framebuff.patch
+net-busy-poll-use-ktime_get_ns-instead-of-local_cloc.patch
+nfc-pn533-add-poll-mod-list-filling-check.patch
--- /dev/null
+From 74838267991592d5f6d10ed1844fccbddf4edfb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 25 Aug 2024 19:17:08 +0300
+Subject: wifi: iwlwifi: fw: fix wgds rev 3 exact size
+
+From: Anjaneyulu <pagadala.yesu.anjaneyulu@intel.com>
+
+[ Upstream commit 3ee22f07a35b76939c5b8d17d6af292f5fafb509 ]
+
+Check that the size of a WGDS revision 3 table is equal to the size of 8
+entries plus some header; the size doesn't depend on the number of used
+entries. Check that the number of used entries is between min and max,
+but allow more entries to be present than are used, to fix operation with
+some BIOSes that have such data.
+
+Fixes: 97f8a3d1610b ("iwlwifi: ACPI: support revision 3 WGDS tables")
+Signed-off-by: Anjaneyulu <pagadala.yesu.anjaneyulu@intel.com>
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Link: https://patch.msgid.link/20240825191257.cc71dfc67ec3.Ic27ee15ac6128b275c210b6de88f2145bd83ca7b@changeid
+[edit commit message]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index 235963e1d7a9a..c96dfd7fd3dc8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -825,22 +825,25 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+ entry = &wifi_pkg->package.elements[entry_idx];
+ entry_idx++;
+ if (entry->type != ACPI_TYPE_INTEGER ||
+- entry->integer.value > num_profiles) {
++ entry->integer.value > num_profiles ||
++ entry->integer.value <
++ rev_data[idx].min_profiles) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+- num_profiles = entry->integer.value;
+
+ /*
+- * this also validates >= min_profiles since we
+- * otherwise wouldn't have gotten the data when
+- * looking up in ACPI
++ * Check to see if we received package count
++ * same as max # of profiles
+ */
+ if (wifi_pkg->package.count !=
+ hdr_size + profile_size * num_profiles) {
+ ret = -EINVAL;
+ goto out_free;
+ }
++
++ /* Number of valid profiles */
++ num_profiles = entry->integer.value;
+ }
+ goto read_table;
+ }
+--
+2.43.0
+