--- /dev/null
+From 81f6256e3cf5a5604977de3ffbc774716c49bf03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 10:23:59 +0200
+Subject: can: m_can: fix missed interrupts with m_can_pci
+
+From: Matthias Schiffer <matthias.schiffer@ew.tq-group.com>
+
+[ Upstream commit 743375f8deee360b0e902074bab99b0c9368d42f ]
+
+The interrupt line of PCI devices is interpreted as edge-triggered;
+however, the interrupt signal of the m_can controller integrated in Intel
+Elkhart Lake CPUs appears to be generated level-triggered.
+
+Consider the following sequence of events:
+
+- IR register is read, interrupt X is set
+- A new interrupt Y is triggered in the m_can controller
+- IR register is written to acknowledge interrupt X. Y remains set in IR
+
+As IR never becomes 0 at any point in this sequence, the m_can interrupt
+line will never become deasserted, and no edge will ever be observed to
+trigger another run of the ISR. This was observed to result in the TX
+queue of the EHL m_can getting stuck under high load, because frames were
+queued to the hardware in m_can_start_xmit(), but m_can_finish_tx() was
+never run to account for their successful transmission.
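+
+For illustration only, here is a condensed sketch of the previous
+single-read ISR shape that exhibits the problem (simplified, not the exact
+driver code):
+
+  ir = m_can_read(cdev, M_CAN_IR);  /* step 1: observes interrupt X */
+                                    /* step 2: Y becomes pending, IR = X | Y */
+  m_can_write(cdev, M_CAN_IR, ir);  /* step 3: acks X only, IR = Y != 0 */
+  /* IR never returns to 0, so the level-style interrupt output stays
+   * asserted and an edge-triggered parent (m_can_pci) never sees another
+   * edge to run the ISR again.
+   */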
+
+On an Elkhart Lake based board with the two CAN interfaces connected to
+each other, the following script can reproduce the issue:
+
+ ip link set can0 up type can bitrate 1000000
+ ip link set can1 up type can bitrate 1000000
+
+ cangen can0 -g 2 -I 000 -L 8 &
+ cangen can0 -g 2 -I 001 -L 8 &
+ cangen can0 -g 2 -I 002 -L 8 &
+ cangen can0 -g 2 -I 003 -L 8 &
+ cangen can0 -g 2 -I 004 -L 8 &
+ cangen can0 -g 2 -I 005 -L 8 &
+ cangen can0 -g 2 -I 006 -L 8 &
+ cangen can0 -g 2 -I 007 -L 8 &
+
+ cangen can1 -g 2 -I 100 -L 8 &
+ cangen can1 -g 2 -I 101 -L 8 &
+ cangen can1 -g 2 -I 102 -L 8 &
+ cangen can1 -g 2 -I 103 -L 8 &
+ cangen can1 -g 2 -I 104 -L 8 &
+ cangen can1 -g 2 -I 105 -L 8 &
+ cangen can1 -g 2 -I 106 -L 8 &
+ cangen can1 -g 2 -I 107 -L 8 &
+
+ stress-ng --matrix 0 &
+
+To fix the issue, repeatedly read and acknowledge interrupts at the
+start of the ISR until no interrupt flags are set, so the next incoming
+interrupt will also result in an edge on the interrupt line.
+
+While we have received a report that even with this patch, the TX queue
+can become stuck under certain (currently unknown) circumstances on the
+Elkhart Lake, this patch completely fixes the issue with the above
+reproducer, and it is unclear whether the remaining issue has a similar
+cause at all.
+
+Fixes: cab7ffc0324f ("can: m_can: add PCI glue driver for Intel Elkhart Lake")
+Signed-off-by: Matthias Schiffer <matthias.schiffer@ew.tq-group.com>
+Reviewed-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Link: https://patch.msgid.link/fdf0439c51bcb3a46c21e9fb21c7f1d06363be84.1728288535.git.matthias.schiffer@ew.tq-group.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 22 +++++++++++++++++-----
+ drivers/net/can/m_can/m_can.h | 1 +
+ drivers/net/can/m_can/m_can_pci.c | 1 +
+ 3 files changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 67c404fbe166..97cd8bbf2e32 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1220,20 +1220,32 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
+ static int m_can_interrupt_handler(struct m_can_classdev *cdev)
+ {
+ struct net_device *dev = cdev->net;
+- u32 ir;
++ u32 ir = 0, ir_read;
+ int ret;
+
+ if (pm_runtime_suspended(cdev->dev))
+ return IRQ_NONE;
+
+- ir = m_can_read(cdev, M_CAN_IR);
++ /* The m_can controller signals its interrupt status as a level, but
++ * depending on the integration the CPU may interpret the signal as
++ * edge-triggered (for example with m_can_pci). For these
++ * edge-triggered integrations, we must observe that IR is 0 at least
++ * once to be sure that the next interrupt will generate an edge.
++ */
++ while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) {
++ ir |= ir_read;
++
++ /* ACK all irqs */
++ m_can_write(cdev, M_CAN_IR, ir);
++
++ if (!cdev->irq_edge_triggered)
++ break;
++ }
++
+ m_can_coalescing_update(cdev, ir);
+ if (!ir)
+ return IRQ_NONE;
+
+- /* ACK all irqs */
+- m_can_write(cdev, M_CAN_IR, ir);
+-
+ if (cdev->ops->clear_interrupts)
+ cdev->ops->clear_interrupts(cdev);
+
+diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
+index 92b2bd8628e6..ef39e8e527ab 100644
+--- a/drivers/net/can/m_can/m_can.h
++++ b/drivers/net/can/m_can/m_can.h
+@@ -99,6 +99,7 @@ struct m_can_classdev {
+ int pm_clock_support;
+ int pm_wake_source;
+ int is_peripheral;
++ bool irq_edge_triggered;
+
+ // Cached M_CAN_IE register content
+ u32 active_interrupts;
+diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
+index d72fe771dfc7..9ad7419f88f8 100644
+--- a/drivers/net/can/m_can/m_can_pci.c
++++ b/drivers/net/can/m_can/m_can_pci.c
+@@ -127,6 +127,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
+ mcan_class->can.clock.freq = id->driver_data;
++ mcan_class->irq_edge_triggered = true;
+ mcan_class->ops = &m_can_pci_ops;
+
+ pci_set_drvdata(pci, mcan_class);
+--
+2.39.5
+
--- /dev/null
+From 21e74217e78b2ce3b0ca75a537742297a08b6f98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 10:23:58 +0200
+Subject: can: m_can: set init flag earlier in probe
+
+From: Matthias Schiffer <matthias.schiffer@ew.tq-group.com>
+
+[ Upstream commit fca2977629f49dee437e217c3fc423b6e0cad98c ]
+
+While an m_can controller usually already has the init flag set from a
+hardware reset, no such reset happens on the integrated m_can_pci of the
+Intel Elkhart Lake. If the CAN controller is found in an active state,
+m_can_dev_setup() would fail because m_can_niso_supported() calls
+m_can_cccr_update_bits(), which refuses to modify any other configuration
+bits when CCCR_INIT is not set.
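+
+For context, the guard being hit can be sketched roughly like this (assumed
+shape of the helper based on the description above, not its exact code):
+
+  /* m_can_cccr_update_bits(): only updates whose mask includes CCCR_INIT
+   * are allowed while the controller is still in normal (non-init) mode.
+   */
+  if (!(mask & CCCR_INIT) && !(m_can_read(cdev, M_CAN_CCCR) & CCCR_INIT))
+          return -EBUSY;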
+
+To avoid this issue, set CCCR_INIT before attempting to modify any other
+configuration flags.
+
+Fixes: cd5a46ce6fa6 ("can: m_can: don't enable transceiver when probing")
+Signed-off-by: Matthias Schiffer <matthias.schiffer@ew.tq-group.com>
+Reviewed-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Link: https://patch.msgid.link/e247f331cb72829fcbdfda74f31a59cbad1a6006.1728288535.git.matthias.schiffer@ew.tq-group.com
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 533bcb77c9f9..67c404fbe166 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1695,6 +1695,14 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
+ return -EINVAL;
+ }
+
++ /* Write the INIT bit, in case no hardware reset has happened before
++ * the probe (for example, it was observed that the Intel Elkhart Lake
++ * SoCs do not properly reset the CAN controllers on reboot)
++ */
++ err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
++ if (err)
++ return err;
++
+ if (!cdev->is_peripheral)
+ netif_napi_add(dev, &cdev->napi, m_can_poll);
+
+@@ -1746,11 +1754,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
+ return -EINVAL;
+ }
+
+- /* Forcing standby mode should be redundant, as the chip should be in
+- * standby after a reset. Write the INIT bit anyways, should the chip
+- * be configured by previous stage.
+- */
+- return m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
++ return 0;
+ }
+
+ static void m_can_stop(struct net_device *dev)
+--
+2.39.5
+
--- /dev/null
+From f7e7dd669188fb3c2985f1a231b7bc1ae97fddca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Nov 2024 09:00:32 -0800
+Subject: cxl/pci: Fix potential bogus return value upon successful probing
+
+From: Davidlohr Bueso <dave@stgolabs.net>
+
+[ Upstream commit da4d8c83358163df9a4addaeba0ef8bcb03b22e8 ]
+
+If cxl_pci_ras_unmask() returns non-zero, cxl_pci_probe() will end up
+returning that value, instead of zero.
+
+Fixes: 248529edc86f ("cxl: add RAS status unmasking for CXL")
+Reviewed-by: Fan Ni <fan.ni@samsung.com>
+Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
+Reviewed-by: Ira Weiny <ira.weiny@intel.com>
+Link: https://patch.msgid.link/20241115170032.108445-1-dave@stgolabs.net
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/pci.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
+index 188412d45e0d..6e553b5752b1 100644
+--- a/drivers/cxl/pci.c
++++ b/drivers/cxl/pci.c
+@@ -942,8 +942,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (rc)
+ return rc;
+
+- rc = cxl_pci_ras_unmask(pdev);
+- if (rc)
++ if (cxl_pci_ras_unmask(pdev))
+ dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");
+
+ pci_save_state(pdev);
+--
+2.39.5
+
--- /dev/null
+From 24789b5f9a38e1332117a489b9c143d5158e5310 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Dec 2024 15:33:02 -0800
+Subject: cxl/region: Fix region creation for greater than x2 switches
+
+From: Huaisheng Ye <huaisheng.ye@intel.com>
+
+[ Upstream commit 76467a94810c2aa4dd3096903291ac6df30c399e ]
+
+The cxl_port_setup_targets() algorithm fails to identify valid target list
+ordering in the presence of 4-way and above switches, resulting in
+'cxl create-region' failures of the form:
+
+ $ cxl create-region -d decoder0.0 -g 1024 -s 2G -t ram -w 8 -m mem4 mem1 mem6 mem3 mem2 mem5 mem7 mem0
+ cxl region: create_region: region0: failed to set target7 to mem0
+ cxl region: cmd_create_region: created 0 regions
+
+ [kernel debug message]
+ check_last_peer:1213: cxl region0: pci0000:0c:port1: cannot host mem6:decoder7.0 at 2
+ bus_remove_device:574: bus: 'cxl': remove device region0
+
+QEMU can create this failing topology:
+
+ ACPI0017:00 [root0]
+ |
+ HB_0 [port1]
+ / \
+ RP_0 RP_1
+ | |
+ USP [port2] USP [port3]
+ / / \ \ / / \ \
+ DSP DSP DSP DSP DSP DSP DSP DSP
+ | | | | | | | |
+ mem4 mem6 mem2 mem7 mem1 mem3 mem5 mem0
+ Pos: 0 2 4 6 1 3 5 7
+
+ HB: Host Bridge
+ RP: Root Port
+ USP: Upstream Port
+ DSP: Downstream Port
+
+...with the following command steps:
+
+$ qemu-system-x86_64 -machine q35,cxl=on,accel=tcg \
+ -smp cpus=8 \
+ -m 8G \
+ -hda /home/work/vm-images/centos-stream8-02.qcow2 \
+ -object memory-backend-ram,size=4G,id=m0 \
+ -object memory-backend-ram,size=4G,id=m1 \
+ -object memory-backend-ram,size=2G,id=cxl-mem0 \
+ -object memory-backend-ram,size=2G,id=cxl-mem1 \
+ -object memory-backend-ram,size=2G,id=cxl-mem2 \
+ -object memory-backend-ram,size=2G,id=cxl-mem3 \
+ -object memory-backend-ram,size=2G,id=cxl-mem4 \
+ -object memory-backend-ram,size=2G,id=cxl-mem5 \
+ -object memory-backend-ram,size=2G,id=cxl-mem6 \
+ -object memory-backend-ram,size=2G,id=cxl-mem7 \
+ -numa node,memdev=m0,cpus=0-3,nodeid=0 \
+ -numa node,memdev=m1,cpus=4-7,nodeid=1 \
+ -netdev user,id=net0,hostfwd=tcp::2222-:22 \
+ -device virtio-net-pci,netdev=net0 \
+ -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
+ -device cxl-rp,port=0,bus=cxl.1,id=root_port0,chassis=0,slot=0 \
+ -device cxl-rp,port=1,bus=cxl.1,id=root_port1,chassis=0,slot=1 \
+ -device cxl-upstream,bus=root_port0,id=us0 \
+ -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \
+ -device cxl-type3,bus=swport0,volatile-memdev=cxl-mem0,id=cxl-vmem0 \
+ -device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \
+ -device cxl-type3,bus=swport1,volatile-memdev=cxl-mem1,id=cxl-vmem1 \
+ -device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \
+ -device cxl-type3,bus=swport2,volatile-memdev=cxl-mem2,id=cxl-vmem2 \
+ -device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \
+ -device cxl-type3,bus=swport3,volatile-memdev=cxl-mem3,id=cxl-vmem3 \
+ -device cxl-upstream,bus=root_port1,id=us1 \
+ -device cxl-downstream,port=4,bus=us1,id=swport4,chassis=0,slot=8 \
+ -device cxl-type3,bus=swport4,volatile-memdev=cxl-mem4,id=cxl-vmem4 \
+ -device cxl-downstream,port=5,bus=us1,id=swport5,chassis=0,slot=9 \
+ -device cxl-type3,bus=swport5,volatile-memdev=cxl-mem5,id=cxl-vmem5 \
+ -device cxl-downstream,port=6,bus=us1,id=swport6,chassis=0,slot=10 \
+ -device cxl-type3,bus=swport6,volatile-memdev=cxl-mem6,id=cxl-vmem6 \
+ -device cxl-downstream,port=7,bus=us1,id=swport7,chassis=0,slot=11 \
+ -device cxl-type3,bus=swport7,volatile-memdev=cxl-mem7,id=cxl-vmem7 \
+ -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=32G &
+
+In Guest OS:
+$ cxl create-region -d decoder0.0 -g 1024 -s 2G -t ram -w 8 -m mem4 mem1 mem6 mem3 mem2 mem5 mem7 mem0
+
+Fix the method to calculate @distance by iteratively multiplying the
+number of targets per switch port. This also follows the algorithm
+recommended here [1].
+
+Fixes: 27b3f8d13830 ("cxl/region: Program target lists")
+Link: http://lore.kernel.org/6538824b52349_7258329466@dwillia2-xfh.jf.intel.com.notmuch [1]
+Signed-off-by: Huaisheng Ye <huaisheng.ye@intel.com>
+Tested-by: Li Zhijian <lizhijian@fujitsu.com>
+[djbw: add a comment explaining 'distance']
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Link: https://patch.msgid.link/173378716722.1270362.9546805175813426729.stgit@dwillia2-xfh.jf.intel.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/core/region.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index dff618c708dc..a0d6e8d7f42c 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1295,6 +1295,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+ struct cxl_switch_decoder *cxlsd;
++ struct cxl_port *iter = port;
+ u16 eig, peig;
+ u8 eiw, peiw;
+
+@@ -1311,16 +1312,26 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+
+ cxlsd = to_cxl_switch_decoder(&cxld->dev);
+ if (cxl_rr->nr_targets_set) {
+- int i, distance;
++ int i, distance = 1;
++ struct cxl_region_ref *cxl_rr_iter;
+
+ /*
+- * Passthrough decoders impose no distance requirements between
+- * peers
++ * The "distance" between peer downstream ports represents which
++ * endpoint positions in the region interleave a given port can
++ * host.
++ *
++ * For example, at the root of a hierarchy the distance is
++ * always 1 as every index targets a different host-bridge. At
++ * each subsequent switch level those ports map every Nth region
++ * position where N is the width of the switch == distance.
+ */
+- if (cxl_rr->nr_targets == 1)
+- distance = 0;
+- else
+- distance = p->nr_targets / cxl_rr->nr_targets;
++ do {
++ cxl_rr_iter = cxl_rr_load(iter, cxlr);
++ distance *= cxl_rr_iter->nr_targets;
++ iter = to_cxl_port(iter->dev.parent);
++ } while (!is_cxl_root(iter));
++ distance *= cxlrd->cxlsd.cxld.interleave_ways;
++
+ for (i = 0; i < cxl_rr->nr_targets_set; i++)
+ if (ep->dport == cxlsd->target[i]) {
+ rc = check_last_peer(cxled, ep, cxl_rr,
+--
+2.39.5
+
--- /dev/null
+From a77789df7706c523999eefa1c7593e1516e402d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Dec 2024 13:31:55 -0800
+Subject: ionic: Fix netdev notifier unregister on failure
+
+From: Brett Creeley <brett.creeley@amd.com>
+
+[ Upstream commit 9590d32e090ea2751e131ae5273859ca22f5ac14 ]
+
+If register_netdev() fails, then the driver leaks the netdev notifier.
+Fix this by calling ionic_lif_unregister() on register_netdev()
+failure. This will also call ionic_lif_unregister_phc() if it has
+already been registered.
+
+Fixes: 30b87ab4c0b3 ("ionic: remove lif list concept")
+Signed-off-by: Brett Creeley <brett.creeley@amd.com>
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20241212213157.12212-2-shannon.nelson@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_lif.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 40496587b2b3..3d3f936779f7 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -3869,8 +3869,8 @@ int ionic_lif_register(struct ionic_lif *lif)
+ /* only register LIF0 for now */
+ err = register_netdev(lif->netdev);
+ if (err) {
+- dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
+- ionic_lif_unregister_phc(lif);
++ dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err);
++ ionic_lif_unregister(lif);
+ return err;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From ef76e96a5e8b213bcf8941c179f6952165aab655 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Dec 2024 13:31:56 -0800
+Subject: ionic: no double destroy workqueue
+
+From: Shannon Nelson <shannon.nelson@amd.com>
+
+[ Upstream commit 746e6ae2e202b062b9deee7bd86d94937997ecd7 ]
+
+There are some FW error handling paths that can cause us to
+try to destroy the workqueue more than once, so let's be sure
+we're checking for that.
+
+The case where this popped up was in an AER event where the
+handlers got called in such a way that ionic_reset_prepare()
+and thus ionic_dev_teardown() got called twice in a row.
+The second time through the workqueue was already destroyed,
+and destroy_workqueue() choked on the bad wq pointer.
+
+We didn't hit this in AER handler testing before because at
+that time we weren't using a private workqueue. Later we
+replaced the use of the system workqueue with our own private
+workqueue but hadn't rerun the AER handler testing since then.
+
+Fixes: 9e25450da700 ("ionic: add private workqueue per-device")
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20241212213157.12212-3-shannon.nelson@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_dev.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+index 9e42d599840d..57edcde9e6f8 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+@@ -277,7 +277,10 @@ void ionic_dev_teardown(struct ionic *ionic)
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
+
+- destroy_workqueue(ionic->wq);
++ if (ionic->wq) {
++ destroy_workqueue(ionic->wq);
++ ionic->wq = NULL;
++ }
+ mutex_destroy(&idev->cmb_inuse_lock);
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 9572c5c499691daeb58a043e204e3ac987273322 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Dec 2024 13:31:57 -0800
+Subject: ionic: use ee->offset when returning sprom data
+
+From: Shannon Nelson <shannon.nelson@amd.com>
+
+[ Upstream commit b096d62ba1323391b2db98b7704e2468cf3b1588 ]
+
+Some callers of ionic_get_module_eeprom() don't request a single
+full-size buffer, but instead issue multiple calls with an offset.
+Teach our driver to use the offset correctly so we can
+respond appropriately to the caller.
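+
+As a purely hypothetical illustration of such a caller (the function name
+and chunk sizes below are made up for the example), the standard ethtool op
+may be invoked several times with increasing offsets, which is why the copy
+must start at &xcvr->sprom[ee->offset] rather than at offset 0:
+
+  static void example_dump_sprom(struct net_device *netdev, u8 *buf)
+  {
+          struct ethtool_eeprom ee = { .cmd = ETHTOOL_GMODULEEEPROM };
+
+          ee.offset = 0;
+          ee.len = 128;
+          netdev->ethtool_ops->get_module_eeprom(netdev, &ee, buf);
+
+          ee.offset = 128;
+          ee.len = 128;
+          netdev->ethtool_ops->get_module_eeprom(netdev, &ee, buf + 128);
+  }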
+
+Fixes: 4d03e00a2140 ("ionic: Add initial ethtool support")
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20241212213157.12212-4-shannon.nelson@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+index dda22fa4448c..9b7f78b6cdb1 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+@@ -961,8 +961,8 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
+ len = min_t(u32, sizeof(xcvr->sprom), ee->len);
+
+ do {
+- memcpy(data, xcvr->sprom, len);
+- memcpy(tbuf, xcvr->sprom, len);
++ memcpy(data, &xcvr->sprom[ee->offset], len);
++ memcpy(tbuf, &xcvr->sprom[ee->offset], len);
+
+ /* Let's make sure we got a consistent copy */
+ if (!memcmp(data, tbuf, len))
+--
+2.39.5
+
--- /dev/null
+From dd811788c84a14ed5440cc0b499f588494145603 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Dec 2024 17:30:53 +0000
+Subject: ipvs: Fix clamp() of ip_vs_conn_tab on small memory systems
+
+From: David Laight <David.Laight@ACULAB.COM>
+
+[ Upstream commit cf2c97423a4f89c8b798294d3f34ecfe7e7035c3 ]
+
+The 'max_avail' value is calculated from the system memory
+size using order_base_2().
+order_base_2(x) is defined as '(x) ? fn(x) : 0'.
+The compiler generates two copies of the code that follows
+and then expands clamp(max, min, PAGE_SHIFT - 12) (11 on 32bit).
+This triggers a compile-time assert since min is 5.
+
+In reality a system would have to have less than 512MB of memory
+for the bounds passed to clamp to be reversed.
+
+Swap the order of the arguments to clamp() to avoid the warning.
+
+Replace the clamp_val() on the line below with clamp().
+clamp_val() is just 'an accident waiting to happen' and not needed here.
+
+Detected by compile time checks added to clamp(), specifically:
+minmax.h: use BUILD_BUG_ON_MSG() for the lo < hi test in clamp()
+
+Reported-by: Linux Kernel Functional Testing <lkft@linaro.org>
+Closes: https://lore.kernel.org/all/CA+G9fYsT34UkGFKxus63H6UVpYi5GRZkezT9MRLfAbM3f6ke0g@mail.gmail.com/
+Fixes: 4f325e26277b ("ipvs: dynamically limit the connection hash table")
+Tested-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Reviewed-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: David Laight <david.laight@aculab.com>
+Acked-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipvs/ip_vs_conn.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index 98d7dbe3d787..c0289f83f96d 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -1495,8 +1495,8 @@ int __init ip_vs_conn_init(void)
+ max_avail -= 2; /* ~4 in hash row */
+ max_avail -= 1; /* IPVS up to 1/2 of mem */
+ max_avail -= order_base_2(sizeof(struct ip_vs_conn));
+- max = clamp(max, min, max_avail);
+- ip_vs_conn_tab_bits = clamp_val(ip_vs_conn_tab_bits, min, max);
++ max = clamp(max_avail, min, max);
++ ip_vs_conn_tab_bits = clamp(ip_vs_conn_tab_bits, min, max);
+ ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
+ ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
+
+--
+2.39.5
+
--- /dev/null
+From 849883fb1b1f441c2bfef0ecea22d964b5f60de1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Dec 2024 12:16:45 +0900
+Subject: ksmbd: count all requests in req_running counter
+
+From: Marios Makassikis <mmakassikis@freebox.fr>
+
+[ Upstream commit 83c47d9e0ce79b5d7c0b21b9f35402dbde0fa15c ]
+
+This changes the semantics of req_running to count all in-flight
+requests on a given connection, rather than the number of elements
+in the conn->request list. The latter is used only in smb2_cancel,
+and the counter is not used.
+
+Signed-off-by: Marios Makassikis <mmakassikis@freebox.fr>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 43fb7bce8866 ("ksmbd: fix broken transfers when exceeding max simultaneous operations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/connection.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index e6a72f75ab94..3980645085ed 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -120,8 +120,8 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
+ if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
+ requests_queue = &conn->requests;
+
++ atomic_inc(&conn->req_running);
+ if (requests_queue) {
+- atomic_inc(&conn->req_running);
+ spin_lock(&conn->request_lock);
+ list_add_tail(&work->request_entry, requests_queue);
+ spin_unlock(&conn->request_lock);
+@@ -132,11 +132,12 @@ void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+ {
+ struct ksmbd_conn *conn = work->conn;
+
++ atomic_dec(&conn->req_running);
++
+ if (list_empty(&work->request_entry) &&
+ list_empty(&work->async_request_entry))
+ return;
+
+- atomic_dec(&conn->req_running);
+ spin_lock(&conn->request_lock);
+ list_del_init(&work->request_entry);
+ spin_unlock(&conn->request_lock);
+--
+2.39.5
+
--- /dev/null
+From a01241b017fbf43ce3511ef7c4e5660bc57cd274 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Dec 2024 12:17:23 +0900
+Subject: ksmbd: fix broken transfers when exceeding max simultaneous
+ operations
+
+From: Marios Makassikis <mmakassikis@freebox.fr>
+
+[ Upstream commit 43fb7bce8866e793275c4f9f25af6a37745f3416 ]
+
+Since commit 0a77d947f599 ("ksmbd: check outstanding simultaneous SMB
+operations"), ksmbd enforces a maximum number of simultaneous operations
+for a connection. The problem is that reaching the limit causes ksmbd to
+close the socket, and the client has no indication that it should have
+slowed down.
+
+This behaviour can be reproduced by setting "smb2 max credits = 128" (or
+lower), and transferring a large file (25GB).
+
+smbclient fails as below:
+
+ $ smbclient //192.168.1.254/testshare -U user%pass
+ smb: \> put file.bin
+ cli_push returned NT_STATUS_USER_SESSION_DELETED
+ putting file file.bin as \file.bin smb2cli_req_compound_submit:
+ Insufficient credits. 0 available, 1 needed
+ NT_STATUS_INTERNAL_ERROR closing remote file \file.bin
+ smb: \> smb2cli_req_compound_submit: Insufficient credits. 0 available,
+ 1 needed
+
+Windows clients fail with 0x8007003b (with smaller files even).
+
+Fix this by delaying reading from the socket until there's room to
+allocate a request. This effectively applies backpressure on the client,
+so the transfer completes, albeit at a slower rate.
+
+Fixes: 0a77d947f599 ("ksmbd: check outstanding simultaneous SMB operations")
+Signed-off-by: Marios Makassikis <mmakassikis@freebox.fr>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/connection.c | 13 +++++++++++--
+ fs/smb/server/connection.h | 1 -
+ fs/smb/server/server.c | 7 +------
+ fs/smb/server/server.h | 1 +
+ fs/smb/server/transport_ipc.c | 5 ++++-
+ 5 files changed, 17 insertions(+), 10 deletions(-)
+
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index 3980645085ed..bf45822db5d5 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -70,7 +70,6 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ atomic_set(&conn->req_running, 0);
+ atomic_set(&conn->r_count, 0);
+ atomic_set(&conn->refcnt, 1);
+- atomic_set(&conn->mux_smb_requests, 0);
+ conn->total_credits = 1;
+ conn->outstanding_credits = 0;
+
+@@ -133,6 +132,8 @@ void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+ struct ksmbd_conn *conn = work->conn;
+
+ atomic_dec(&conn->req_running);
++ if (waitqueue_active(&conn->req_running_q))
++ wake_up(&conn->req_running_q);
+
+ if (list_empty(&work->request_entry) &&
+ list_empty(&work->async_request_entry))
+@@ -309,7 +310,7 @@ int ksmbd_conn_handler_loop(void *p)
+ {
+ struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
+ struct ksmbd_transport *t = conn->transport;
+- unsigned int pdu_size, max_allowed_pdu_size;
++ unsigned int pdu_size, max_allowed_pdu_size, max_req;
+ char hdr_buf[4] = {0,};
+ int size;
+
+@@ -319,6 +320,7 @@ int ksmbd_conn_handler_loop(void *p)
+ if (t->ops->prepare && t->ops->prepare(t))
+ goto out;
+
++ max_req = server_conf.max_inflight_req;
+ conn->last_active = jiffies;
+ set_freezable();
+ while (ksmbd_conn_alive(conn)) {
+@@ -328,6 +330,13 @@ int ksmbd_conn_handler_loop(void *p)
+ kvfree(conn->request_buf);
+ conn->request_buf = NULL;
+
++recheck:
++ if (atomic_read(&conn->req_running) + 1 > max_req) {
++ wait_event_interruptible(conn->req_running_q,
++ atomic_read(&conn->req_running) < max_req);
++ goto recheck;
++ }
++
+ size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
+ if (size != sizeof(hdr_buf))
+ break;
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 8ddd5a3c7baf..b379ae4fdcdf 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -107,7 +107,6 @@ struct ksmbd_conn {
+ __le16 signing_algorithm;
+ bool binding;
+ atomic_t refcnt;
+- atomic_t mux_smb_requests;
+ };
+
+ struct ksmbd_conn_ops {
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index 698af37e988d..d146b0e7c3a9 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -270,7 +270,6 @@ static void handle_ksmbd_work(struct work_struct *wk)
+
+ ksmbd_conn_try_dequeue_request(work);
+ ksmbd_free_work_struct(work);
+- atomic_dec(&conn->mux_smb_requests);
+ /*
+ * Checking waitqueue to dropping pending requests on
+ * disconnection. waitqueue_active is safe because it
+@@ -300,11 +299,6 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
+ if (err)
+ return 0;
+
+- if (atomic_inc_return(&conn->mux_smb_requests) >= conn->vals->max_credits) {
+- atomic_dec_return(&conn->mux_smb_requests);
+- return -ENOSPC;
+- }
+-
+ work = ksmbd_alloc_work_struct();
+ if (!work) {
+ pr_err("allocation for work failed\n");
+@@ -367,6 +361,7 @@ static int server_conf_init(void)
+ server_conf.auth_mechs |= KSMBD_AUTH_KRB5 |
+ KSMBD_AUTH_MSKRB5;
+ #endif
++ server_conf.max_inflight_req = SMB2_MAX_CREDITS;
+ return 0;
+ }
+
+diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
+index 4fc529335271..94187628ff08 100644
+--- a/fs/smb/server/server.h
++++ b/fs/smb/server/server.h
+@@ -42,6 +42,7 @@ struct ksmbd_server_config {
+ struct smb_sid domain_sid;
+ unsigned int auth_mechs;
+ unsigned int max_connections;
++ unsigned int max_inflight_req;
+
+ char *conf[SERVER_CONF_WORK_GROUP + 1];
+ struct task_struct *dh_task;
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 2f27afb695f6..6de351cc2b60 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -319,8 +319,11 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+ init_smb2_max_write_size(req->smb2_max_write);
+ if (req->smb2_max_trans)
+ init_smb2_max_trans_size(req->smb2_max_trans);
+- if (req->smb2_max_credits)
++ if (req->smb2_max_credits) {
+ init_smb2_max_credits(req->smb2_max_credits);
++ server_conf.max_inflight_req =
++ req->smb2_max_credits;
++ }
+ if (req->smbd_max_io_size)
+ init_smbd_max_io_size(req->smbd_max_io_size);
+
+--
+2.39.5
+
--- /dev/null
+From fbb538047e803dbd458b8c6e179016ce4316e1c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Dec 2024 15:50:59 +0200
+Subject: net: dsa: restore dsa_software_vlan_untag() ability to operate on
+ VLAN-untagged traffic
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 16f027cd40eeedd2325f7e720689462ca8d9d13e ]
+
+Robert Hodaszi reports that locally terminated traffic towards
+VLAN-unaware bridge ports is broken with ocelot-8021q. He is describing
+the same symptoms as for commit 1f9fc48fd302 ("net: dsa: sja1105: fix
+reception from VLAN-unaware bridges").
+
+For context, the set merged as "VLAN fixes for Ocelot driver":
+https://lore.kernel.org/netdev/20240815000707.2006121-1-vladimir.oltean@nxp.com/
+
+was developed in a slightly different form earlier this year, in January.
+Initially, the switch was unconditionally configured to set OCELOT_ES0_TAG
+when using ocelot-8021q, regardless of port operating mode.
+
+This led to the situation where VLAN-unaware bridge ports would always
+push their PVID - see ocelot_vlan_unaware_pvid() - a negligible value
+anyway - into RX packets. To strip this in software, we would have needed
+DSA to know what private VID the switch chose for VLAN-unaware bridge
+ports, and pushed into the packets. This was implemented downstream, and
+a remnant of it remains in the form of a comment mentioning
+ds->ops->get_private_vid(), as something which would maybe need to be
+considered in the future.
+
+However, for upstream, it was deemed inappropriate, because it would
+mean introducing yet another behavior for stripping VLAN tags from
+VLAN-unaware bridge ports, when one already existed (ds->untag_bridge_pvid).
+The latter has been marked as obsolete along with an explanation why it
+is logically broken, but still, it would have been confusing.
+
+So, for upstream, felix_update_tag_8021q_rx_rule() was developed, which
+essentially changed the state of affairs from "Felix with ocelot-8021q
+delivers all packets as VLAN-tagged towards the CPU" into "Felix with
+ocelot-8021q delivers all packets from VLAN-aware bridge ports towards
+the CPU". This was done on the premise that in VLAN-unaware mode,
+there's nothing useful in the VLAN tags, and we can avoid introducing
+ds->ops->get_private_vid() in the DSA receive path if we configure the
+switch to not push those VLAN tags into packets in the first place.
+
+Unfortunately, and this is when the trainwreck started, the selftests
+developed initially and posted with the series were not re-ran.
+dsa_software_vlan_untag() was initially written given the assumption
+that users of this feature would send _all_ traffic as VLAN-tagged.
+It was only partially adapted to the new scheme, by removing
+ds->ops->get_private_vid(), which also used to be necessary in
+standalone ports mode.
+
+Where the trainwreck became even worse is that I had a second opportunity
+to think about this, when the dsa_software_vlan_untag() logic change
+initially broke sja1105, in commit 1f9fc48fd302 ("net: dsa: sja1105: fix
+reception from VLAN-unaware bridges"). I did not connect the dots that
+it also breaks ocelot-8021q, for pretty much the same reason that not
+all received packets will be VLAN-tagged.
+
+To be compatible with the optimized Felix control path which runs
+felix_update_tag_8021q_rx_rule() to only push VLAN tags when useful (in
+VLAN-aware mode), we need to restore the old dsa_software_vlan_untag()
+logic. The blamed commit introduced the assumption that
+dsa_software_vlan_untag() will see only VLAN-tagged packets, an assumption
+which is false. What corrupts RX traffic is the fact that we call
+skb_vlan_untag() on packets which are not VLAN-tagged in the first
+place.
+
+Fixes: 93e4649efa96 ("net: dsa: provide a software untagging function on RX for VLAN-aware bridges")
+Reported-by: Robert Hodaszi <robert.hodaszi@digi.com>
+Closes: https://lore.kernel.org/netdev/20241215163334.615427-1-robert.hodaszi@digi.com/
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20241216135059.1258266-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/dsa/tag.h | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/net/dsa/tag.h b/net/dsa/tag.h
+index d5707870906b..5d80ddad4ff6 100644
+--- a/net/dsa/tag.h
++++ b/net/dsa/tag.h
+@@ -138,9 +138,10 @@ static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
+ * dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
+ * @skb: Pointer to socket buffer (packet)
+ *
+- * Receive path method for switches which cannot avoid tagging all packets
+- * towards the CPU port. Called when ds->untag_bridge_pvid (legacy) or
+- * ds->untag_vlan_aware_bridge_pvid is set to true.
++ * Receive path method for switches which send some packets as VLAN-tagged
++ * towards the CPU port (generally from VLAN-aware bridge ports) even when the
++ * packet was not tagged on the wire. Called when ds->untag_bridge_pvid
++ * (legacy) or ds->untag_vlan_aware_bridge_pvid is set to true.
+ *
+ * As a side effect of this method, any VLAN tag from the skb head is moved
+ * to hwaccel.
+@@ -149,14 +150,19 @@ static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
+ {
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+- u16 vid;
++ u16 vid, proto;
++ int err;
+
+ /* software untagging for standalone ports not yet necessary */
+ if (!br)
+ return skb;
+
++ err = br_vlan_get_proto(br, &proto);
++ if (err)
++ return skb;
++
+ /* Move VLAN tag from data to hwaccel */
+- if (!skb_vlan_tag_present(skb)) {
++ if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
+ skb = skb_vlan_untag(skb);
+ if (!skb)
+ return NULL;
+--
+2.39.5
+
--- /dev/null
+From 7aa708625bc670a11167cb3fbb2fdb3edc9ddcc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Dec 2024 10:49:12 +0900
+Subject: net: ethernet: bgmac-platform: fix an OF node reference leak
+
+From: Joe Hattori <joe@pf.is.s.u-tokyo.ac.jp>
+
+[ Upstream commit 0cb2c504d79e7caa3abade3f466750c82ad26f01 ]
+
+The OF node obtained by of_parse_phandle() is not freed. Call
+of_node_put() to balance the refcount.
+
+This bug was found by an experimental static analysis tool that I am
+developing.
+
+Fixes: 1676aba5ef7e ("net: ethernet: bgmac: device tree phy enablement")
+Signed-off-by: Joe Hattori <joe@pf.is.s.u-tokyo.ac.jp>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20241214014912.2810315-1-joe@pf.is.s.u-tokyo.ac.jp
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bgmac-platform.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
+index 77425c7a32db..78f7862ca006 100644
+--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
++++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
+@@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac)
+ static int bgmac_probe(struct platform_device *pdev)
+ {
+ struct device_node *np = pdev->dev.of_node;
++ struct device_node *phy_node;
+ struct bgmac *bgmac;
+ struct resource *regs;
+ int ret;
+@@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev)
+ bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
+ bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
+ bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
+- if (of_parse_phandle(np, "phy-handle", 0)) {
++ phy_node = of_parse_phandle(np, "phy-handle", 0);
++ if (phy_node) {
++ of_node_put(phy_node);
+ bgmac->phy_connect = platform_phy_connect;
+ } else {
+ bgmac->phy_connect = bgmac_phy_connect_direct;
+--
+2.39.5
+
--- /dev/null
+From b3c6166f35e6b02c3392e1e4e5b692f01ac925cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Dec 2024 18:01:58 +0530
+Subject: net: ethernet: oa_tc6: fix infinite loop error when tx credits
+ becomes 0
+
+From: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+
+[ Upstream commit 7d2f320e12744e5906a4fab40381060a81d22c12 ]
+
+The SPI thread wakes up to perform an SPI transfer whenever there is a TX
+skb from the n/w stack or an interrupt from the MAC-PHY. The Ethernet
+frame from the TX skb is transferred based on the availability of tx
+credits in the MAC-PHY, which is reported from the previous SPI transfer.
+Sometimes a TX skb is available to transmit but there are no tx credits
+from the MAC-PHY. In this case, there will not be any SPI transfer, but
+the thread will keep running in an endless loop until tx credits become
+available again.
+
+So checking the availability of tx credits along with the TX skb prevents
+the above infinite loop. When tx credits become available again, that is
+notified through an interrupt, which triggers the SPI transfer to fetch
+the newly available tx credits.
+
+Fixes: 53fbde8ab21e ("net: ethernet: oa_tc6: implement transmit path to transfer tx ethernet frames")
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Signed-off-by: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/oa_tc6.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
+index f9c0dcd965c2..4c8b0ca922b7 100644
+--- a/drivers/net/ethernet/oa_tc6.c
++++ b/drivers/net/ethernet/oa_tc6.c
+@@ -1111,8 +1111,9 @@ static int oa_tc6_spi_thread_handler(void *data)
+ /* This kthread will be waken up if there is a tx skb or mac-phy
+ * interrupt to perform spi transfer with tx chunks.
+ */
+- wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
+- tc6->int_flag ||
++ wait_event_interruptible(tc6->spi_wq, tc6->int_flag ||
++ (tc6->waiting_tx_skb &&
++ tc6->tx_credits) ||
+ kthread_should_stop());
+
+ if (kthread_should_stop())
+--
+2.39.5
+
--- /dev/null
+From f5f024ec450b5d75b3c00d6a519e1ebc99790c05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Dec 2024 18:01:59 +0530
+Subject: net: ethernet: oa_tc6: fix tx skb race condition between reference
+ pointers
+
+From: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+
+[ Upstream commit e592b5110b3e9393881b0a019d86832bbf71a47f ]
+
+There are two skb pointers to manage tx skbs enqueued from the n/w stack.
+The waiting_tx_skb pointer points to the tx skb which needs to be
+processed and the ongoing_tx_skb pointer points to the tx skb which is
+being processed.
+
+The SPI thread prepares the tx data chunks from the tx skb pointed to by
+the ongoing_tx_skb pointer. When the tx skb pointed to by ongoing_tx_skb
+is processed, the tx skb pointed to by waiting_tx_skb is assigned to
+ongoing_tx_skb and the waiting_tx_skb pointer is set to NULL. Whenever
+there is a new tx skb from the n/w stack, it is assigned to the
+waiting_tx_skb pointer if that pointer is NULL. Enqueuing and processing
+of a tx skb are handled in two different threads.
+
+Consider a scenario where the SPI thread has processed an ongoing_tx_skb
+and moves the next tx skb from the waiting_tx_skb pointer to the
+ongoing_tx_skb pointer without doing any NULL check. At this time, if the
+waiting_tx_skb pointer is NULL, then the ongoing_tx_skb pointer is also
+assigned NULL. After that, if a new tx skb is assigned to the
+waiting_tx_skb pointer by the n/w stack, there is a chance that the SPI
+thread overwrites that pointer with NULL. Finally, one of the tx skbs is
+left unhandled, resulting in a missing packet and a memory leak.
+
+- Consider the below scenario where the TXC reported from the previous
+transfer is 10 and ongoing_tx_skb holds a tx ethernet frame which can be
+transported in 20 TXCs and waiting_tx_skb is still NULL.
+ tx_credits = 10; /* 21 are filled in the previous transfer */
+ ongoing_tx_skb = 20;
+ waiting_tx_skb = NULL; /* Still NULL */
+- So, (tc6->ongoing_tx_skb || tc6->waiting_tx_skb) becomes true.
+- After oa_tc6_prepare_spi_tx_buf_for_tx_skbs()
+ ongoing_tx_skb = 10;
+ waiting_tx_skb = NULL; /* Still NULL */
+- Perform SPI transfer.
+- Process SPI rx buffer to get the TXC from footers.
+- Now let's assume previously filled 21 TXCs are freed so we are good to
+transport the next remaining 10 tx chunks from ongoing_tx_skb.
+ tx_credits = 21;
+ ongoing_tx_skb = 10;
+ waiting_tx_skb = NULL;
+- So, (tc6->ongoing_tx_skb || tc6->waiting_tx_skb) becomes true again.
+- In the oa_tc6_prepare_spi_tx_buf_for_tx_skbs()
+ ongoing_tx_skb = NULL;
+ waiting_tx_skb = NULL;
+
+- Now the below bad case might happen,
+
+Thread1 (oa_tc6_start_xmit) Thread2 (oa_tc6_spi_thread_handler)
+--------------------------- -----------------------------------
+- if waiting_tx_skb is NULL
+ - if ongoing_tx_skb is NULL
+ - ongoing_tx_skb = waiting_tx_skb
+- waiting_tx_skb = skb
+ - waiting_tx_skb = NULL
+ ...
+ - ongoing_tx_skb = NULL
+- if waiting_tx_skb is NULL
+- waiting_tx_skb = skb
+
+To overcome the above issue, protect both the move of the tx skb reference
+from the waiting_tx_skb pointer to the ongoing_tx_skb pointer and the
+assignment of a new tx skb to the waiting_tx_skb pointer, so that the
+other thread can't access the waiting_tx_skb pointer until the current
+thread has safely completed moving the tx skb reference.
+
+Fixes: 53fbde8ab21e ("net: ethernet: oa_tc6: implement transmit path to transfer tx ethernet frames")
+Signed-off-by: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/oa_tc6.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
+index 4c8b0ca922b7..db200e4ec284 100644
+--- a/drivers/net/ethernet/oa_tc6.c
++++ b/drivers/net/ethernet/oa_tc6.c
+@@ -113,6 +113,7 @@ struct oa_tc6 {
+ struct mii_bus *mdiobus;
+ struct spi_device *spi;
+ struct mutex spi_ctrl_lock; /* Protects spi control transfer */
++ spinlock_t tx_skb_lock; /* Protects tx skb handling */
+ void *spi_ctrl_tx_buf;
+ void *spi_ctrl_rx_buf;
+ void *spi_data_tx_buf;
+@@ -1004,8 +1005,10 @@ static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
+ for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
+ used_tx_credits++) {
+ if (!tc6->ongoing_tx_skb) {
++ spin_lock_bh(&tc6->tx_skb_lock);
+ tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
+ tc6->waiting_tx_skb = NULL;
++ spin_unlock_bh(&tc6->tx_skb_lock);
+ }
+ if (!tc6->ongoing_tx_skb)
+ break;
+@@ -1210,7 +1213,9 @@ netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
+ return NETDEV_TX_OK;
+ }
+
++ spin_lock_bh(&tc6->tx_skb_lock);
+ tc6->waiting_tx_skb = skb;
++ spin_unlock_bh(&tc6->tx_skb_lock);
+
+ /* Wake spi kthread to perform spi transfer */
+ wake_up_interruptible(&tc6->spi_wq);
+@@ -1240,6 +1245,7 @@ struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
+ tc6->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &spi->dev);
+ mutex_init(&tc6->spi_ctrl_lock);
++ spin_lock_init(&tc6->tx_skb_lock);
+
+ /* Set the SPI controller to pump at realtime priority */
+ tc6->spi->rt = true;
+--
+2.39.5
+
--- /dev/null
+From 49edd0f3cfa93dd2387b4cdd337db9f0b8ff2f19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Dec 2024 17:28:11 +0300
+Subject: net: hinic: Fix cleanup in create_rxqs/txqs()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 7203d10e93b6e6e1d19481ef7907de6a9133a467 ]
+
+There is a NULL check at the start of create_txqs() and
+create_rxqs() which tests if "nic_dev->txqs" is non-NULL. The
+intention is that if the device is already open and the queues
+are already created then we don't create them a second time.
+
+However, the bug is that if we have an error in create_txqs()
+then the pointer doesn't get set back to NULL. The NULL check
+at the start of the function will say that it's already open when
+it's not, and the device can't be used.
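+
+Sketched, with the guard paraphrased from the description above (the exact
+error code is illustrative):
+
+  static int create_txqs(struct hinic_dev *nic_dev)
+  {
+          if (nic_dev->txqs)      /* "queues already created" guard */
+                  return -EINVAL;
+          /* ... allocate and init the queues ... */
+
+          /* error path before this patch: memory freed, pointer left
+           * stale, so the guard above wrongly fires on the next attempt
+           */
+          devm_kfree(&netdev->dev, nic_dev->txqs);
+          return err;
+  }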
+
+Set ->txqs back to NULL on cleanup on error.
+
+Fixes: c3e79baf1b03 ("net-next/hinic: Add logical Txq and Rxq")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/0cc98faf-a0ed-4565-a55b-0fa2734bc205@stanley.mountain
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/huawei/hinic/hinic_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
+index 890f213da8d1..ae1f523d6841 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
+@@ -172,6 +172,7 @@ static int create_txqs(struct hinic_dev *nic_dev)
+ hinic_sq_dbgfs_uninit(nic_dev);
+
+ devm_kfree(&netdev->dev, nic_dev->txqs);
++ nic_dev->txqs = NULL;
+ return err;
+ }
+
+@@ -268,6 +269,7 @@ static int create_rxqs(struct hinic_dev *nic_dev)
+ hinic_rq_dbgfs_uninit(nic_dev);
+
+ devm_kfree(&netdev->dev, nic_dev->rxqs);
++ nic_dev->rxqs = NULL;
+ return err;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From ae8e737ca24a9a56b68a1e059490b9deb8e328c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Dec 2024 12:51:06 +0900
+Subject: net: mdiobus: fix an OF node reference leak
+
+From: Joe Hattori <joe@pf.is.s.u-tokyo.ac.jp>
+
+[ Upstream commit 572af9f284669d31d9175122bbef9bc62cea8ded ]
+
+fwnode_find_mii_timestamper() calls of_parse_phandle_with_fixed_args()
+but does not decrement the refcount of the obtained OF node. Add an
+of_node_put() call before returning from the function.
+
+This bug was detected by an experimental static analysis tool that I am
+developing.
+
+Fixes: bc1bee3b87ee ("net: mdiobus: Introduce fwnode_mdiobus_register_phy()")
+Signed-off-by: Joe Hattori <joe@pf.is.s.u-tokyo.ac.jp>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20241218035106.1436405-1-joe@pf.is.s.u-tokyo.ac.jp
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/mdio/fwnode_mdio.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
+index b156493d7084..aea0f0357568 100644
+--- a/drivers/net/mdio/fwnode_mdio.c
++++ b/drivers/net/mdio/fwnode_mdio.c
+@@ -40,6 +40,7 @@ fwnode_find_pse_control(struct fwnode_handle *fwnode)
+ static struct mii_timestamper *
+ fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
+ {
++ struct mii_timestamper *mii_ts;
+ struct of_phandle_args arg;
+ int err;
+
+@@ -53,10 +54,16 @@ fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
+ else if (err)
+ return ERR_PTR(err);
+
+- if (arg.args_count != 1)
+- return ERR_PTR(-EINVAL);
++ if (arg.args_count != 1) {
++ mii_ts = ERR_PTR(-EINVAL);
++ goto put_node;
++ }
++
++ mii_ts = register_mii_timestamper(arg.np, arg.args[0]);
+
+- return register_mii_timestamper(arg.np, arg.args[0]);
++put_node:
++ of_node_put(arg.np);
++ return mii_ts;
+ }
+
+ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+--
+2.39.5
+
--- /dev/null
+From ca3d4c17a0487e28f07c8ec7213a3f22aff2f149 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Dec 2024 18:55:45 +0200
+Subject: net: mscc: ocelot: fix incorrect IFH SRC_PORT field in
+ ocelot_ifh_set_basic()
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 2d5df3a680ffdaf606baa10636bdb1daf757832e ]
+
+Packets injected by the CPU should have a SRC_PORT field equal to the
+CPU port module index in the Analyzer block (ocelot->num_phys_ports).
+
+The blamed commit copied the ocelot_ifh_set_basic() call incorrectly
+from ocelot_xmit_common() in net/dsa/tag_ocelot.c. Instead of calling
+with "x", it calls with BIT_ULL(x), but the field is not a port mask,
+but rather a single port index.
+
+[ side note: this is the technical debt of code duplication :( ]
+
+The error used to be silent and doesn't appear to have other
+user-visible manifestations, but with new changes in the packing
+library, it now fails loudly as follows:
+
+------------[ cut here ]------------
+Cannot store 0x40 inside bits 46-43 - will truncate
+sja1105 spi2.0: xmit timed out
+WARNING: CPU: 1 PID: 102 at lib/packing.c:98 __pack+0x90/0x198
+sja1105 spi2.0: timed out polling for tstamp
+CPU: 1 UID: 0 PID: 102 Comm: felix_xmit
+Tainted: G W N 6.13.0-rc1-00372-gf706b85d972d-dirty #2605
+Call trace:
+ __pack+0x90/0x198 (P)
+ __pack+0x90/0x198 (L)
+ packing+0x78/0x98
+ ocelot_ifh_set_basic+0x260/0x368
+ ocelot_port_inject_frame+0xa8/0x250
+ felix_port_deferred_xmit+0x14c/0x258
+ kthread_worker_fn+0x134/0x350
+ kthread+0x114/0x138
+
+The code path pertains to the ocelot switchdev driver and to the felix
+secondary DSA tag protocol, ocelot-8021q. Here seen with ocelot-8021q.
+
+The messenger (packing) is not really to blame, so fix the original
+commit instead.
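+
+To make the mistake concrete (the port count is inferred from the warning
+above, so treat the numbers as an example): with 6 physical ports the CPU
+port module index is 6, but the blamed commit packed BIT_ULL(6) = 0x40
+into SRC_PORT, which is only a 4-bit field (IFH bits 46:43):
+
+  ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports)); /* 0x40: truncated */
+  ocelot_ifh_set_src(ifh, ocelot->num_phys_ports);          /* 6: fits */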
+
+Fixes: e1b9e80236c5 ("net: mscc: ocelot: fix QoS class for injected packets with "ocelot-8021q"")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20241212165546.879567-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mscc/ocelot.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 3d72aa7b1305..ef93df520887 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1432,7 +1432,7 @@ void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
+
+ memset(ifh, 0, OCELOT_TAG_LEN);
+ ocelot_ifh_set_bypass(ifh, 1);
+- ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
++ ocelot_ifh_set_src(ifh, ocelot->num_phys_ports);
+ ocelot_ifh_set_dest(ifh, BIT_ULL(port));
+ ocelot_ifh_set_qos_class(ifh, qos_class);
+ ocelot_ifh_set_tag_type(ifh, tag_type);
+--
+2.39.5
+
--- /dev/null
+From 161a729a945ac00548a45198395329845c196d73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Dec 2024 08:37:03 +0000
+Subject: net: netdevsim: fix nsim_pp_hold_write()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit b9b8301d369b4c876de5255dbf067b19ba88ac71 ]
+
+nsim_pp_hold_write() has two problems:
+
+1) It may return with rtnl held, as found by syzbot.
+
+2) Its return value does not propagate an error if any.
+
+Fixes: 1580cbcbfe77 ("net: netdevsim: add some fake page pool use")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20241216083703.1859921-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/netdevsim/netdev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
+index 017a6102be0a..1b29d1d794a2 100644
+--- a/drivers/net/netdevsim/netdev.c
++++ b/drivers/net/netdevsim/netdev.c
+@@ -596,10 +596,10 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
+ page_pool_put_full_page(ns->page->pp, ns->page, false);
+ ns->page = NULL;
+ }
+- rtnl_unlock();
+
+ exit:
+- return count;
++ rtnl_unlock();
++ return ret;
+ }
+
+ static const struct file_operations nsim_pp_hold_fops = {
+--
+2.39.5
+
--- /dev/null
+From 50c841a5685fec96476ea229b04bdc5d6fe587ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Dec 2024 11:25:58 +0500
+Subject: net: renesas: rswitch: rework ts tags management
+
+From: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+
+[ Upstream commit 922b4b955a03d19fea98938f33ef0e62d01f5159 ]
+
+The existing linked list based implementation of how ts tags are
+assigned and managed is unsafe against concurrency and corner cases:
+- element addition in tx processing can race against element removal
+ in ts queue completion,
+- element removal in ts queue completion can race against element
+ removal in device close,
+- if a large number of frames gets added to tx queue without ts queue
+ completions in between, elements with duplicate tag values can get
+ added.
+
+Use a different implementation, based on per-port used tags bitmaps and
+saved skb arrays.
+
+Safety for addition in tx processing vs removal in ts completion is
+provided by:
+
+ tag = find_first_zero_bit(...);
+ smp_mb();
+ <write rdev->ts_skb[tag]>
+ set_bit(...);
+
+ vs
+
+ <read rdev->ts_skb[tag]>
+ smp_mb();
+ clear_bit(...);
+
+Safety for removal in ts completion vs removal in device close is
+provided by using atomic read-and-clear for rdev->ts_skb[tag]:
+
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ if (ts_skb)
+ <handle it>
+
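+As an illustration only (a hypothetical userspace sketch, not the
+driver code), the same ordering contract can be written with C11
+atomics; all names and sizes below are made up, and
+atomic_thread_fence() stands in for smp_mb():
+
+ #include <stdatomic.h>
+ #include <stdbool.h>
+ #include <stdio.h>
+
+ #define NUM_TAGS 256
+
+ static _Atomic(void *) slot[NUM_TAGS]; /* stands in for rdev->ts_skb[]    */
+ static atomic_bool used[NUM_TAGS];     /* stands in for rdev->ts_skb_used */
+
+ /* tx path (single producer per port): pick a free tag, publish the
+  * payload, then mark the tag used; the fence orders the bitmap read
+  * before the slot write. */
+ static int tag_alloc(void *payload)
+ {
+         for (int tag = 0; tag < NUM_TAGS; tag++) {
+                 if (atomic_load(&used[tag]))
+                         continue;
+                 atomic_thread_fence(memory_order_seq_cst); /* ~ smp_mb() */
+                 atomic_store(&slot[tag], payload);
+                 atomic_store(&used[tag], true);
+                 return tag;
+         }
+         return -1; /* all tags in flight */
+ }
+
+ /* completion path: atomically take the payload (like xchg()), then
+  * release the tag; the fence orders the slot read before the bitmap
+  * clear, so the producer never sees a free tag with a stale slot. */
+ static void *tag_complete(int tag)
+ {
+         void *payload = atomic_exchange(&slot[tag], NULL);
+
+         atomic_thread_fence(memory_order_seq_cst); /* ~ smp_mb() */
+         atomic_store(&used[tag], false);
+         return payload; /* NULL if another path already took it */
+ }
+
+ int main(void)
+ {
+         int tag = tag_alloc("frame");
+
+         printf("tag %d -> %s\n", tag, (char *)tag_complete(tag));
+         return 0;
+ }
+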
+Fixes: 33f5d733b589 ("net: renesas: rswitch: Improve TX timestamp accuracy")
+Signed-off-by: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+Link: https://patch.msgid.link/20241212062558.436455-1-nikita.yoush@cogentembedded.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/rswitch.c | 74 ++++++++++++++------------
+ drivers/net/ethernet/renesas/rswitch.h | 13 ++---
+ 2 files changed, 42 insertions(+), 45 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 09117110e3dd..f86fcecb91a8 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -547,7 +547,6 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
+ desc = &gq->ts_ring[gq->ring_size];
+ desc->desc.die_dt = DT_LINKFIX;
+ rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
+- INIT_LIST_HEAD(&priv->gwca.ts_info_list);
+
+ return 0;
+ }
+@@ -1003,9 +1002,10 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
+ static void rswitch_ts(struct rswitch_private *priv)
+ {
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct rswitch_ts_desc *desc;
++ struct rswitch_device *rdev;
++ struct sk_buff *ts_skb;
+ struct timespec64 ts;
+ unsigned int num;
+ u32 tag, port;
+@@ -1015,23 +1015,28 @@ static void rswitch_ts(struct rswitch_private *priv)
+ dma_rmb();
+
+ port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
+- tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
+-
+- list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
+- if (!(ts_info->port == port && ts_info->tag == tag))
+- continue;
+-
+- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+- ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+- ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+- shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+- skb_tstamp_tx(ts_info->skb, &shhwtstamps);
+- dev_consume_skb_irq(ts_info->skb);
+- list_del(&ts_info->list);
+- kfree(ts_info);
+- break;
+- }
++ if (unlikely(port >= RSWITCH_NUM_PORTS))
++ goto next;
++ rdev = priv->rdev[port];
+
++ tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
++ if (unlikely(tag >= TS_TAGS_PER_PORT))
++ goto next;
++ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
++ smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
++ clear_bit(tag, rdev->ts_skb_used);
++
++ if (unlikely(!ts_skb))
++ goto next;
++
++ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
++ ts.tv_sec = __le32_to_cpu(desc->ts_sec);
++ ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
++ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
++ skb_tstamp_tx(ts_skb, &shhwtstamps);
++ dev_consume_skb_irq(ts_skb);
++
++next:
+ gq->cur = rswitch_next_queue_index(gq, true, 1);
+ desc = &gq->ts_ring[gq->cur];
+ }
+@@ -1576,8 +1581,9 @@ static int rswitch_open(struct net_device *ndev)
+ static int rswitch_stop(struct net_device *ndev)
+ {
+ struct rswitch_device *rdev = netdev_priv(ndev);
+- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
++ struct sk_buff *ts_skb;
+ unsigned long flags;
++ unsigned int tag;
+
+ netif_tx_stop_all_queues(ndev);
+
+@@ -1594,12 +1600,13 @@ static int rswitch_stop(struct net_device *ndev)
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+
+- list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
+- if (ts_info->port != rdev->port)
+- continue;
+- dev_kfree_skb_irq(ts_info->skb);
+- list_del(&ts_info->list);
+- kfree(ts_info);
++ for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
++ tag < TS_TAGS_PER_PORT;
++ tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
++ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
++ clear_bit(tag, rdev->ts_skb_used);
++ if (ts_skb)
++ dev_kfree_skb(ts_skb);
+ }
+
+ return 0;
+@@ -1612,20 +1619,17 @@ static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
+ desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
+ INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+- struct rswitch_gwca_ts_info *ts_info;
++ unsigned int tag;
+
+- ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
+- if (!ts_info)
++ tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
++ if (tag == TS_TAGS_PER_PORT)
+ return false;
++ smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
++ rdev->ts_skb[tag] = skb_get(skb);
++ set_bit(tag, rdev->ts_skb_used);
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+- rdev->ts_tag++;
+- desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
+-
+- ts_info->skb = skb_get(skb);
+- ts_info->port = rdev->port;
+- ts_info->tag = rdev->ts_tag;
+- list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
++ desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
+
+ skb_tx_timestamp(skb);
+ }
+diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
+index e020800dcc57..d8d4ed7d7f8b 100644
+--- a/drivers/net/ethernet/renesas/rswitch.h
++++ b/drivers/net/ethernet/renesas/rswitch.h
+@@ -972,14 +972,6 @@ struct rswitch_gwca_queue {
+ };
+ };
+
+-struct rswitch_gwca_ts_info {
+- struct sk_buff *skb;
+- struct list_head list;
+-
+- int port;
+- u8 tag;
+-};
+-
+ #define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
+ struct rswitch_gwca {
+ unsigned int index;
+@@ -989,7 +981,6 @@ struct rswitch_gwca {
+ struct rswitch_gwca_queue *queues;
+ int num_queues;
+ struct rswitch_gwca_queue ts_queue;
+- struct list_head ts_info_list;
+ DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
+ u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
+ u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
+@@ -997,6 +988,7 @@ struct rswitch_gwca {
+ };
+
+ #define NUM_QUEUES_PER_NDEV 2
++#define TS_TAGS_PER_PORT 256
+ struct rswitch_device {
+ struct rswitch_private *priv;
+ struct net_device *ndev;
+@@ -1004,7 +996,8 @@ struct rswitch_device {
+ void __iomem *addr;
+ struct rswitch_gwca_queue *tx_queue;
+ struct rswitch_gwca_queue *rx_queue;
+- u8 ts_tag;
++ struct sk_buff *ts_skb[TS_TAGS_PER_PORT];
++ DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
+ bool disabled;
+
+ int port;
+--
+2.39.5
+
--- /dev/null
+From e54b6d7a9d1c3806486981f1a1ab18d1942292d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2024 17:21:18 +0800
+Subject: net/smc: check iparea_offset and ipv6_prefixes_cnt when receiving
+ proposal msg
+
+From: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+
+[ Upstream commit a29e220d3c8edbf0e1beb0f028878a4a85966556 ]
+
+When the server receives a proposal msg, the fields iparea_offset
+and ipv6_prefixes_cnt in the proposal msg come from the remote
+client and cannot be fully trusted. In particular, if iparea_offset
+exceeds its maximum value, a wrong address may be accessed and a
+crash may happen.
+
+This patch checks iparea_offset and ipv6_prefixes_cnt before using them.
+
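+For illustration, a hypothetical sketch (made-up structures, not the
+actual SMC CLC layout) of the kind of bound that has to be enforced on
+a peer-supplied offset before it is used for pointer arithmetic:
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdio.h>
+
+ struct msg_hdr  { uint16_t iparea_offset; }; /* from the wire, untrusted */
+ struct optional { char body[16]; };          /* largest block the offset may skip */
+ struct prefix   { char body[8]; };
+
+ /* Reject any offset that would point past the optional block. */
+ static const struct prefix *get_prefix(const struct msg_hdr *hdr)
+ {
+         size_t offset = hdr->iparea_offset; /* assume already in host order */
+
+         if (offset > sizeof(struct optional))
+                 return NULL; /* out of range: do not even form the pointer */
+         return (const struct prefix *)((const char *)hdr + sizeof(*hdr) + offset);
+ }
+
+ int main(void)
+ {
+         static union { struct msg_hdr hdr; char raw[64]; } msg;
+
+         msg.hdr.iparea_offset = 4096; /* attacker-controlled, out of range */
+         printf("prefix %s\n", get_prefix(&msg.hdr) ? "accepted" : "rejected");
+         return 0;
+ }
+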
+Fixes: e7b7a64a8493 ("smc: support variable CLC proposal messages")
+Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+Reviewed-by: Wen Gu <guwen@linux.alibaba.com>
+Reviewed-by: D. Wythe <alibuda@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/af_smc.c | 6 +++++-
+ net/smc/smc_clc.c | 4 ++++
+ net/smc/smc_clc.h | 6 +++++-
+ 3 files changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 92448f2c362c..9a74c9693f09 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2032,6 +2032,8 @@ static int smc_listen_prfx_check(struct smc_sock *new_smc,
+ if (pclc->hdr.typev1 == SMC_TYPE_N)
+ return 0;
+ pclc_prfx = smc_clc_proposal_get_prefix(pclc);
++ if (!pclc_prfx)
++ return -EPROTO;
+ if (smc_clc_prfx_match(newclcsock, pclc_prfx))
+ return SMC_CLC_DECL_DIFFPREFIX;
+
+@@ -2221,7 +2223,9 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
+ int rc = 0;
+
+ /* check if ISM V1 is available */
+- if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
++ if (!(ini->smcd_version & SMC_V1) ||
++ !smcd_indicated(ini->smc_type_v1) ||
++ !pclc_smcd)
+ goto not_found;
+ ini->is_smcd = true; /* prepare ISM check */
+ ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index 33fa787c28eb..66a43b97eede 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -354,6 +354,10 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
+
+ v2_ext = smc_get_clc_v2_ext(pclc);
+ pclc_prfx = smc_clc_proposal_get_prefix(pclc);
++ if (!pclc_prfx ||
++ pclc_prfx->ipv6_prefixes_cnt > SMC_CLC_MAX_V6_PREFIX)
++ return false;
++
+ if (hdr->version == SMC_V1) {
+ if (hdr->typev1 == SMC_TYPE_N)
+ return false;
+diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
+index 5625fda2960b..ddad4af8e88f 100644
+--- a/net/smc/smc_clc.h
++++ b/net/smc/smc_clc.h
+@@ -336,8 +336,12 @@ struct smc_clc_msg_decline_v2 { /* clc decline message */
+ static inline struct smc_clc_msg_proposal_prefix *
+ smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
+ {
++ u16 offset = ntohs(pclc->iparea_offset);
++
++ if (offset > sizeof(struct smc_clc_msg_smcd))
++ return NULL;
+ return (struct smc_clc_msg_proposal_prefix *)
+- ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
++ ((u8 *)pclc + sizeof(*pclc) + offset);
+ }
+
+ static inline bool smcr_indicated(int smc_type)
+--
+2.39.5
+
--- /dev/null
+From 368a1b13e51f4f0d6cbfb23236ecae1972042cb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2024 17:21:21 +0800
+Subject: net/smc: check return value of sock_recvmsg when draining clc data
+
+From: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+
+[ Upstream commit c5b8ee5022a19464783058dc6042e8eefa34e8cd ]
+
+When receiving a clc msg, the length field in smc_clc_msg_hdr indicates
+how much data should be received from the network, and the value cannot
+be fully trusted as it comes from the network. Once the length exceeds
+the value of buflen in smc_clc_wait_msg(), the function may run into an
+endless loop while trying to drain the remaining data exceeding buflen.
+
+This patch checks the return value of sock_recvmsg() when draining data,
+to avoid such an endless loop.
+
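+As a hypothetical userspace sketch (plain read() instead of
+sock_recvmsg(), made-up names), the point is that a drain loop must not
+trust the advertised length and has to bail out on a failed or
+truncated read:
+
+ #include <stdio.h>
+ #include <unistd.h>
+
+ /* Drain 'datlen' announced bytes, at most buflen at a time.  Returns
+  * 0 on success, -1 if the stream errors out or ends early, instead of
+  * looping forever on data the peer never sends. */
+ static int drain(int fd, long datlen, char *buf, size_t buflen)
+ {
+         while (datlen > 0) {
+                 size_t want = datlen > (long)buflen ? buflen : (size_t)datlen;
+                 ssize_t len = read(fd, buf, want);
+
+                 if (len <= 0)
+                         return -1; /* error or EOF: stop draining */
+                 datlen -= len;
+         }
+         return 0;
+ }
+
+ int main(void)
+ {
+         char buf[64];
+
+         /* Pretend the peer announced 1 KiB more than it will ever send. */
+         return drain(0, 1024, buf, sizeof(buf)) ? 1 : 0;
+ }
+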
+Fixes: fb4f79264c0f ("net/smc: tolerate future SMCD versions")
+Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+Reviewed-by: Wen Gu <guwen@linux.alibaba.com>
+Reviewed-by: D. Wythe <alibuda@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_clc.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index f721d03efcbd..521f5df80e10 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -774,6 +774,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
+ SMC_CLC_RECV_BUF_LEN : datlen;
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
+ len = sock_recvmsg(smc->clcsock, &msg, krflags);
++ if (len < recvlen) {
++ smc->sk.sk_err = EPROTO;
++ reason_code = -EPROTO;
++ goto out;
++ }
+ datlen -= len;
+ }
+ if (clcm->type == SMC_CLC_DECLINE) {
+--
+2.39.5
+
--- /dev/null
+From 235e430d62de35b9f1d063b8c575cdcaa33c1793 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2024 17:21:20 +0800
+Subject: net/smc: check smcd_v2_ext_offset when receiving proposal msg
+
+From: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+
+[ Upstream commit 9ab332deb671d8f7e66d82a2ff2b3f715bc3a4ad ]
+
+When the server receives a proposal msg, the field smcd_v2_ext_offset
+in the proposal msg comes from the remote client and cannot be fully
+trusted. If smcd_v2_ext_offset exceeds its maximum value, a wrong
+address may be accessed and a crash may happen.
+
+This patch checks the value of smcd_v2_ext_offset before using it.
+
+Fixes: 5c21c4ccafe8 ("net/smc: determine accepted ISM devices")
+Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+Reviewed-by: Wen Gu <guwen@linux.alibaba.com>
+Reviewed-by: D. Wythe <alibuda@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/af_smc.c | 2 ++
+ net/smc/smc_clc.h | 8 +++++++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 5d96f9de5b5d..6cc7b846cff1 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2147,6 +2147,8 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
+ pclc_smcd = smc_get_clc_msg_smcd(pclc);
+ smc_v2_ext = smc_get_clc_v2_ext(pclc);
+ smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
++ if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext)
++ goto not_found;
+
+ mutex_lock(&smcd_dev_list.mutex);
+ if (pclc_smcd->ism.chid) {
+diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
+index 2ff423224a59..1a7676227f16 100644
+--- a/net/smc/smc_clc.h
++++ b/net/smc/smc_clc.h
+@@ -400,9 +400,15 @@ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
+ static inline struct smc_clc_smcd_v2_extension *
+ smc_get_clc_smcd_v2_ext(struct smc_clc_v2_extension *prop_v2ext)
+ {
++ u16 max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_smcd_v2_ext) -
++ offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
++ offsetof(struct smc_clc_v2_extension, hdr) -
++ offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset);
++
+ if (!prop_v2ext)
+ return NULL;
+- if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset))
++ if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) ||
++ ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) > max_offset)
+ return NULL;
+
+ return (struct smc_clc_smcd_v2_extension *)
+--
+2.39.5
+
--- /dev/null
+From 2c37481a212d09b54cc03b57c9529eab8d0fddfe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2024 17:21:17 +0800
+Subject: net/smc: check sndbuf_space again after NOSPACE flag is set in
+ smc_poll
+
+From: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+
+[ Upstream commit 679e9ddcf90dbdf98aaaa71a492454654b627bcb ]
+
+When an application sends more data than sndbuf_space, it may sleep in
+epoll_wait and never be woken up again. This is caused by a race
+between smc_poll and smc_cdc_tx_handler.
+
+application tasklet
+smc_tx_sendmsg(len > sndbuf_space) |
+epoll_wait for EPOLL_OUT,timeout=0 |
+ smc_poll |
+ if (!smc->conn.sndbuf_space) |
+ | smc_cdc_tx_handler
+ | atomic_add sndbuf_space
+ | smc_tx_sndbuf_nonfull
+ | if (!test_bit SOCK_NOSPACE)
+ | do not sk_write_space;
+ set_bit SOCK_NOSPACE; |
+ return mask=0; |
+
+The application will sleep in epoll_wait as smc_poll returns 0, and
+smc_cdc_tx_handler will not call sk_write_space because SOCK_NOSPACE
+has not been set. If there is no in-flight cdc msg, sk_write_space will
+never be called and the application will sleep in epoll_wait forever.
+So check sndbuf_space again after the NOSPACE flag is set to break the
+race.
+
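+A hypothetical userspace sketch of the pattern (C11 atomics standing in
+for the kernel primitives, all names made up) shows that the race
+breaker is simply a re-check of the condition after the flag has been
+made visible:
+
+ #include <stdatomic.h>
+ #include <stdbool.h>
+
+ static atomic_int  sndbuf_space; /* bytes currently available  */
+ static atomic_bool nospace;      /* stands in for SOCK_NOSPACE */
+
+ /* poll side: publish NOSPACE, then re-check the space counter, so
+  * either the waker sees NOSPACE or the poller sees the new space. */
+ static bool poll_writable(void)
+ {
+         if (atomic_load(&sndbuf_space) > 0)
+                 return true;
+         atomic_store(&nospace, true);
+         atomic_thread_fence(memory_order_seq_cst); /* ~ smp_mb__after_atomic() */
+         return atomic_load(&sndbuf_space) > 0;     /* race breaker */
+ }
+
+ /* completion side: make the new space visible, then decide whether a
+  * wake-up is needed. */
+ static bool tx_completed(int freed)
+ {
+         atomic_fetch_add(&sndbuf_space, freed);
+         atomic_thread_fence(memory_order_seq_cst);
+         return atomic_load(&nospace); /* true -> call the wake-up */
+ }
+
+ int main(void)
+ {
+         tx_completed(1024);
+         return poll_writable() ? 0 : 1;
+ }
+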
+Fixes: 8dce2786a290 ("net/smc: smc_poll improvements")
+Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/af_smc.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 9e6c69d18581..92448f2c362c 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2881,6 +2881,13 @@ __poll_t smc_poll(struct file *file, struct socket *sock,
+ } else {
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
++
++ if (sk->sk_state != SMC_INIT) {
++ /* Race breaker the same way as tcp_poll(). */
++ smp_mb__after_atomic();
++ if (atomic_read(&smc->conn.sndbuf_space))
++ mask |= EPOLLOUT | EPOLLWRNORM;
++ }
+ }
+ if (atomic_read(&smc->conn.bytes_to_rcv))
+ mask |= EPOLLIN | EPOLLRDNORM;
+--
+2.39.5
+
--- /dev/null
+From 9df8f11502325571ff7638cdcacc91e896b58176 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2024 17:21:19 +0800
+Subject: net/smc: check v2_ext_offset/eid_cnt/ism_gid_cnt when receiving
+ proposal msg
+
+From: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+
+[ Upstream commit 7863c9f3d24ba49dbead7e03dfbe40deb5888fdf ]
+
+When the server receives a proposal msg, the fields v2_ext_offset/
+eid_cnt/ism_gid_cnt in the proposal msg come from the remote client
+and cannot be fully trusted. In particular, if v2_ext_offset exceeds
+its maximum value, a wrong address may be accessed and a crash may
+happen.
+
+This patch checks the fields v2_ext_offset/eid_cnt/ism_gid_cnt
+before using them.
+
+Fixes: 8c3dca341aea ("net/smc: build and send V2 CLC proposal")
+Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+Reviewed-by: Wen Gu <guwen@linux.alibaba.com>
+Reviewed-by: D. Wythe <alibuda@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/af_smc.c | 3 ++-
+ net/smc/smc_clc.c | 8 +++++++-
+ net/smc/smc_clc.h | 8 +++++++-
+ 3 files changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 9a74c9693f09..5d96f9de5b5d 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2276,7 +2276,8 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
+ goto not_found;
+
+ smc_v2_ext = smc_get_clc_v2_ext(pclc);
+- if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
++ if (!smc_v2_ext ||
++ !smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
+ goto not_found;
+
+ /* prepare RDMA check */
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index 66a43b97eede..f721d03efcbd 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -352,7 +352,6 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
+ struct smc_clc_msg_hdr *hdr = &pclc->hdr;
+ struct smc_clc_v2_extension *v2_ext;
+
+- v2_ext = smc_get_clc_v2_ext(pclc);
+ pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (!pclc_prfx ||
+ pclc_prfx->ipv6_prefixes_cnt > SMC_CLC_MAX_V6_PREFIX)
+@@ -369,6 +368,13 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
+ sizeof(struct smc_clc_msg_trail))
+ return false;
+ } else {
++ v2_ext = smc_get_clc_v2_ext(pclc);
++ if ((hdr->typev2 != SMC_TYPE_N &&
++ (!v2_ext || v2_ext->hdr.eid_cnt > SMC_CLC_MAX_UEID)) ||
++ (smcd_indicated(hdr->typev2) &&
++ v2_ext->hdr.ism_gid_cnt > SMCD_CLC_MAX_V2_GID_ENTRIES))
++ return false;
++
+ if (ntohs(hdr->length) !=
+ sizeof(*pclc) +
+ sizeof(struct smc_clc_msg_smcd) +
+diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
+index ddad4af8e88f..2ff423224a59 100644
+--- a/net/smc/smc_clc.h
++++ b/net/smc/smc_clc.h
+@@ -380,8 +380,14 @@ static inline struct smc_clc_v2_extension *
+ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
+ {
+ struct smc_clc_msg_smcd *prop_smcd = smc_get_clc_msg_smcd(prop);
++ u16 max_offset;
+
+- if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset))
++ max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
++ offsetof(struct smc_clc_msg_proposal_area, pclc_smcd) -
++ offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
++
++ if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset) ||
++ ntohs(prop_smcd->v2_ext_offset) > max_offset)
+ return NULL;
+
+ return (struct smc_clc_v2_extension *)
+--
+2.39.5
+
--- /dev/null
+From 9904567e4a1bdf0df595e2210ac48caa32b41d0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2024 17:21:16 +0800
+Subject: net/smc: protect link down work from execute after lgr freed
+
+From: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+
+[ Upstream commit 2b33eb8f1b3e8c2f87cfdbc8cc117f6bdfabc6ec ]
+
+The link down work may be scheduled before the lgr is freed but execute
+after the lgr is freed, which may result in a crash. So it is necessary
+to hold a reference before scheduling the link down work, and to put
+the reference after the work has executed or been canceled.
+
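+A hypothetical userspace sketch of that reference discipline (made-up
+types; a boolean stands in for the workqueue's "already pending" state,
+and atomics stand in for the kernel refcount helpers):
+
+ #include <stdatomic.h>
+ #include <stdbool.h>
+ #include <stdlib.h>
+
+ struct link {
+         atomic_int  ref;
+         atomic_bool work_pending;
+ };
+
+ static void link_put(struct link *lnk)
+ {
+         if (atomic_fetch_sub(&lnk->ref, 1) == 1)
+                 free(lnk); /* last reference gone: safe to free */
+ }
+
+ /* stand-in for schedule_work(): false means it was already pending */
+ static bool schedule_link_down(struct link *lnk)
+ {
+         return !atomic_exchange(&lnk->work_pending, true);
+ }
+
+ static void link_down_cond_sched(struct link *lnk)
+ {
+         atomic_fetch_add(&lnk->ref, 1); /* hold: the work will put it */
+         if (!schedule_link_down(lnk))
+                 link_put(lnk);          /* no new execution will consume it */
+ }
+
+ static void link_down_work(struct link *lnk)
+ {
+         atomic_store(&lnk->work_pending, false);
+         /* ... the actual link-down handling would go here ... */
+         link_put(lnk);                  /* matches the hold at schedule time */
+ }
+
+ int main(void)
+ {
+         struct link *lnk = calloc(1, sizeof(*lnk));
+
+         if (!lnk)
+                 return 1;
+         atomic_store(&lnk->ref, 1);     /* the caller's own reference */
+         link_down_cond_sched(lnk);
+         link_down_work(lnk);            /* pretend the work runs now */
+         link_put(lnk);                  /* drop the caller's reference */
+         return 0;
+ }
+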
+The relevant crash call stack is as follows:
+ list_del corruption. prev->next should be ffffb638c9c0fe20,
+ but was 0000000000000000
+ ------------[ cut here ]------------
+ kernel BUG at lib/list_debug.c:51!
+ invalid opcode: 0000 [#1] SMP NOPTI
+ CPU: 6 PID: 978112 Comm: kworker/6:119 Kdump: loaded Tainted: G #1
+ Hardware name: Alibaba Cloud Alibaba Cloud ECS, BIOS 2221b89 04/01/2014
+ Workqueue: events smc_link_down_work [smc]
+ RIP: 0010:__list_del_entry_valid.cold+0x31/0x47
+ RSP: 0018:ffffb638c9c0fdd8 EFLAGS: 00010086
+ RAX: 0000000000000054 RBX: ffff942fb75e5128 RCX: 0000000000000000
+ RDX: ffff943520930aa0 RSI: ffff94352091fc80 RDI: ffff94352091fc80
+ RBP: 0000000000000000 R08: 0000000000000000 R09: ffffb638c9c0fc38
+ R10: ffffb638c9c0fc30 R11: ffffffffa015eb28 R12: 0000000000000002
+ R13: ffffb638c9c0fe20 R14: 0000000000000001 R15: ffff942f9cd051c0
+ FS: 0000000000000000(0000) GS:ffff943520900000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007f4f25214000 CR3: 000000025fbae004 CR4: 00000000007706e0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ PKRU: 55555554
+ Call Trace:
+ rwsem_down_write_slowpath+0x17e/0x470
+ smc_link_down_work+0x3c/0x60 [smc]
+ process_one_work+0x1ac/0x350
+ worker_thread+0x49/0x2f0
+ ? rescuer_thread+0x360/0x360
+ kthread+0x118/0x140
+ ? __kthread_bind_mask+0x60/0x60
+ ret_from_fork+0x1f/0x30
+
+Fixes: 541afa10c126 ("net/smc: add smcr_port_err() and smcr_link_down() processing")
+Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+Reviewed-by: Tony Lu <tonylu@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 4e694860ece4..68515a41d776 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1818,7 +1818,9 @@ void smcr_link_down_cond_sched(struct smc_link *lnk)
+ {
+ if (smc_link_downing(&lnk->state)) {
+ trace_smcr_link_down(lnk, __builtin_return_address(0));
+- schedule_work(&lnk->link_down_wrk);
++ smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */
++ if (!schedule_work(&lnk->link_down_wrk))
++ smcr_link_put(lnk);
+ }
+ }
+
+@@ -1850,11 +1852,14 @@ static void smc_link_down_work(struct work_struct *work)
+ struct smc_link_group *lgr = link->lgr;
+
+ if (list_empty(&lgr->list))
+- return;
++ goto out;
+ wake_up_all(&lgr->llc_msg_waiter);
+ down_write(&lgr->llc_conf_mutex);
+ smcr_link_down(link);
+ up_write(&lgr->llc_conf_mutex);
++
++out:
++ smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */
+ }
+
+ static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
+--
+2.39.5
+
--- /dev/null
+From 831ba8dc6aa595495a4d02cc3add677b269359b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Dec 2024 07:22:40 -0800
+Subject: netdev: fix repeated netlink messages in queue dump
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit b1f3a2f5a742c1e939a73031bd31b9e557a2d77d ]
+
+The context is supposed to record the next queue to dump, not the last
+one dumped. If the dump doesn't fit, we will restart from the
+already-dumped queue, duplicating the message.
+
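+A hypothetical sketch of the bookkeeping (not the netlink code; a print
+budget stands in for the message size limit) shows why the context has
+to hold the next index rather than the last one emitted:
+
+ #include <stdio.h>
+
+ struct dump_ctx { int next; }; /* index of the NEXT item to emit */
+
+ /* Emit up to 'budget' items; return 1 if the caller must come back. */
+ static int dump(struct dump_ctx *ctx, int nitems, int budget)
+ {
+         for (; ctx->next < nitems; ctx->next++) {
+                 if (budget-- == 0)
+                         return 1; /* resume later, starting at ctx->next */
+                 printf("item %d\n", ctx->next);
+         }
+         return 0;
+ }
+
+ int main(void)
+ {
+         struct dump_ctx ctx = { 0 };
+
+         while (dump(&ctx, 5, 2))
+                 ; /* each pass resumes exactly where the last one stopped */
+         return 0;
+ }
+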
+Before this fix and with the selftest improvements later
+in this series we see:
+
+ # ./run_kselftest.sh -t drivers/net:queues.py
+ timeout set to 45
+ selftests: drivers/net: queues.py
+ KTAP version 1
+ 1..2
+ # Check| At /root/ksft-net-drv/drivers/net/./queues.py, line 32, in get_queues:
+ # Check| ksft_eq(queues, expected)
+ # Check failed 102 != 100
+ # Check| At /root/ksft-net-drv/drivers/net/./queues.py, line 32, in get_queues:
+ # Check| ksft_eq(queues, expected)
+ # Check failed 101 != 100
+ not ok 1 queues.get_queues
+ ok 2 queues.addremove_queues
+ # Totals: pass:1 fail:1 xfail:0 xpass:0 skip:0 error:0
+ not ok 1 selftests: drivers/net: queues.py # exit=1
+
+With the fix:
+
+ # ./ksft-net-drv/run_kselftest.sh -t drivers/net:queues.py
+ timeout set to 45
+ selftests: drivers/net: queues.py
+ KTAP version 1
+ 1..2
+ ok 1 queues.get_queues
+ ok 2 queues.addremove_queues
+ # Totals: pass:2 fail:0 xfail:0 xpass:0 skip:0 error:0
+
+Fixes: 6b6171db7fc8 ("netdev-genl: Add netlink framework functions for queue")
+Reviewed-by: Joe Damato <jdamato@fastly.com>
+Link: https://patch.msgid.link/20241213152244.3080955-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/netdev-genl.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index d2baa1af9df0..71359922ae8b 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -417,24 +417,21 @@ netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
+ struct netdev_nl_dump_ctx *ctx)
+ {
+ int err = 0;
+- int i;
+
+ if (!(netdev->flags & IFF_UP))
+ return err;
+
+- for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
+- err = netdev_nl_queue_fill_one(rsp, netdev, i,
++ for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
++ err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
+ NETDEV_QUEUE_TYPE_RX, info);
+ if (err)
+ return err;
+- ctx->rxq_idx = i++;
+ }
+- for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
+- err = netdev_nl_queue_fill_one(rsp, netdev, i,
++ for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
++ err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
+ NETDEV_QUEUE_TYPE_TX, info);
+ if (err)
+ return err;
+- ctx->txq_idx = i++;
+ }
+
+ return err;
+--
+2.39.5
+
--- /dev/null
+From b38e7f1966d96563f9cbb7338e5ffa8440edf109 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Dec 2024 07:22:41 -0800
+Subject: netdev: fix repeated netlink messages in queue stats
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit ecc391a541573da46b7ccc188105efedd40aef1b ]
+
+The context is supposed to record the next queue to dump, not the last
+one dumped. If the dump doesn't fit, we will restart from the
+already-dumped queue, duplicating the message.
+
+Before this fix and with the selftest improvements later
+in this series we see:
+
+ # ./run_kselftest.sh -t drivers/net:stats.py
+ timeout set to 45
+ selftests: drivers/net: stats.py
+ KTAP version 1
+ 1..5
+ ok 1 stats.check_pause
+ ok 2 stats.check_fec
+ ok 3 stats.pkt_byte_sum
+ # Check| At /root/ksft-net-drv/drivers/net/./stats.py, line 125, in qstat_by_ifindex:
+ # Check| ksft_eq(len(queues[qtype]), len(set(queues[qtype])),
+ # Check failed 45 != 44 repeated queue keys
+ # Check| At /root/ksft-net-drv/drivers/net/./stats.py, line 127, in qstat_by_ifindex:
+ # Check| ksft_eq(len(queues[qtype]), max(queues[qtype]) + 1,
+ # Check failed 45 != 44 missing queue keys
+ # Check| At /root/ksft-net-drv/drivers/net/./stats.py, line 125, in qstat_by_ifindex:
+ # Check| ksft_eq(len(queues[qtype]), len(set(queues[qtype])),
+ # Check failed 45 != 44 repeated queue keys
+ # Check| At /root/ksft-net-drv/drivers/net/./stats.py, line 127, in qstat_by_ifindex:
+ # Check| ksft_eq(len(queues[qtype]), max(queues[qtype]) + 1,
+ # Check failed 45 != 44 missing queue keys
+ # Check| At /root/ksft-net-drv/drivers/net/./stats.py, line 125, in qstat_by_ifindex:
+ # Check| ksft_eq(len(queues[qtype]), len(set(queues[qtype])),
+ # Check failed 103 != 100 repeated queue keys
+ # Check| At /root/ksft-net-drv/drivers/net/./stats.py, line 127, in qstat_by_ifindex:
+ # Check| ksft_eq(len(queues[qtype]), max(queues[qtype]) + 1,
+ # Check failed 103 != 100 missing queue keys
+ # Check| At /root/ksft-net-drv/drivers/net/./stats.py, line 125, in qstat_by_ifindex:
+ # Check| ksft_eq(len(queues[qtype]), len(set(queues[qtype])),
+ # Check failed 102 != 100 repeated queue keys
+ # Check| At /root/ksft-net-drv/drivers/net/./stats.py, line 127, in qstat_by_ifindex:
+ # Check| ksft_eq(len(queues[qtype]), max(queues[qtype]) + 1,
+ # Check failed 102 != 100 missing queue keys
+ not ok 4 stats.qstat_by_ifindex
+ ok 5 stats.check_down
+ # Totals: pass:4 fail:1 xfail:0 xpass:0 skip:0 error:0
+
+With the fix:
+
+ # ./ksft-net-drv/run_kselftest.sh -t drivers/net:stats.py
+ timeout set to 45
+ selftests: drivers/net: stats.py
+ KTAP version 1
+ 1..5
+ ok 1 stats.check_pause
+ ok 2 stats.check_fec
+ ok 3 stats.pkt_byte_sum
+ ok 4 stats.qstat_by_ifindex
+ ok 5 stats.check_down
+ # Totals: pass:5 fail:0 xfail:0 xpass:0 skip:0 error:0
+
+Fixes: ab63a2387cb9 ("netdev: add per-queue statistics")
+Reviewed-by: Joe Damato <jdamato@fastly.com>
+Link: https://patch.msgid.link/20241213152244.3080955-3-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/netdev-genl.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index 71359922ae8b..224d1b5b79a7 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -597,7 +597,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ i, info);
+ if (err)
+ return err;
+- ctx->rxq_idx = i++;
++ ctx->rxq_idx = ++i;
+ }
+ i = ctx->txq_idx;
+ while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
+@@ -605,7 +605,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ i, info);
+ if (err)
+ return err;
+- ctx->txq_idx = i++;
++ ctx->txq_idx = ++i;
+ }
+
+ ctx->rxq_idx = 0;
+--
+2.39.5
+
--- /dev/null
+From bfded197ed6a538a4e022793e059072a3e7adaaa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Dec 2024 18:25:08 -0800
+Subject: netdev-genl: avoid empty messages in queue dump
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 5eb70dbebf32c2fd1f2814c654ae17fc47d6e859 ]
+
+Empty netlink responses from do() are not correct (as opposed to
+dump() where not dumping anything is perfectly fine).
+We should return an error if the target object does not exist,
+in this case if the netdev is down it has no queues.
+
+Fixes: 6b6171db7fc8 ("netdev-genl: Add netlink framework functions for queue")
+Reported-by: syzbot+0a884bc2d304ce4af70f@syzkaller.appspotmail.com
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Joe Damato <jdamato@fastly.com>
+Link: https://patch.msgid.link/20241218022508.815344-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/netdev-genl.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index 224d1b5b79a7..7ce22f40db5b 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -359,10 +359,10 @@ static int
+ netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
+ u32 q_type, const struct genl_info *info)
+ {
+- int err = 0;
++ int err;
+
+ if (!(netdev->flags & IFF_UP))
+- return err;
++ return -ENOENT;
+
+ err = netdev_nl_queue_validate(netdev, q_idx, q_type);
+ if (err)
+--
+2.39.5
+
--- /dev/null
+From 30517f0d96763083c53aadfab68ca54f9286da99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Dec 2024 17:25:18 +0000
+Subject: netdevsim: prevent bad user input in nsim_dev_health_break_write()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit ee76746387f6233bdfa93d7406990f923641568f ]
+
+If either a zero count or a large one is provided, the kernel can crash.
+
+Fixes: 82c93a87bf8b ("netdevsim: implement couple of testing devlink health reporters")
+Reported-by: syzbot+ea40e4294e58b0292f74@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/675c6862.050a0220.37aaf.00b1.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Joe Damato <jdamato@fastly.com>
+Link: https://patch.msgid.link/20241213172518.2415666-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/netdevsim/health.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
+index 70e8bdf34be9..688f05316b5e 100644
+--- a/drivers/net/netdevsim/health.c
++++ b/drivers/net/netdevsim/health.c
+@@ -149,6 +149,8 @@ static ssize_t nsim_dev_health_break_write(struct file *file,
+ char *break_msg;
+ int err;
+
++ if (count == 0 || count > PAGE_SIZE)
++ return -EINVAL;
+ break_msg = memdup_user_nul(data, count);
+ if (IS_ERR(break_msg))
+ return PTR_ERR(break_msg);
+--
+2.39.5
+
--- /dev/null
+From d52a75a92e78c33df99bf660d6fa10d81dacd1eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Dec 2024 20:56:55 +0100
+Subject: netfilter: ipset: Fix for recursive locking warning
+
+From: Phil Sutter <phil@nwl.cc>
+
+[ Upstream commit 70b6f46a4ed8bd56c85ffff22df91e20e8c85e33 ]
+
+With CONFIG_PROVE_LOCKING, when creating a set of type bitmap:ip, adding
+it to a set of type list:set and populating it from iptables SET target
+triggers a kernel warning:
+
+| WARNING: possible recursive locking detected
+| 6.12.0-rc7-01692-g5e9a28f41134-dirty #594 Not tainted
+| --------------------------------------------
+| ping/4018 is trying to acquire lock:
+| ffff8881094a6848 (&set->lock){+.-.}-{2:2}, at: ip_set_add+0x28c/0x360 [ip_set]
+|
+| but task is already holding lock:
+| ffff88811034c048 (&set->lock){+.-.}-{2:2}, at: ip_set_add+0x28c/0x360 [ip_set]
+
+This is a false alarm: ipset does not allow nested list:set type, so the
+loop in list_set_kadd() can never encounter the outer set itself. No
+other set type supports embedded sets, so this is the only case to
+consider.
+
+To avoid the false report, create a distinct lock class for list:set
+type ipset locks.
+
+Fixes: f830837f0eed ("netfilter: ipset: list:set set type support")
+Signed-off-by: Phil Sutter <phil@nwl.cc>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipset/ip_set_list_set.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index bfae7066936b..db794fe1300e 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -611,6 +611,8 @@ init_list_set(struct net *net, struct ip_set *set, u32 size)
+ return true;
+ }
+
++static struct lock_class_key list_set_lockdep_key;
++
+ static int
+ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
+@@ -627,6 +629,7 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ if (size < IP_SET_LIST_MIN_SIZE)
+ size = IP_SET_LIST_MIN_SIZE;
+
++ lockdep_set_class(&set->lock, &list_set_lockdep_key);
+ set->variant = &set_variant;
+ set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
+ __alignof__(struct set_elem));
+--
+2.39.5
+
--- /dev/null
+From e90106d06641796fc6024528bf66b10e9409ec91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Dec 2024 12:37:39 +0100
+Subject: psample: adjust size if rate_as_probability is set
+
+From: Adrian Moreno <amorenoz@redhat.com>
+
+[ Upstream commit 5eecd85c77a254a43bde3212da8047b001745c9f ]
+
+If the PSAMPLE_ATTR_SAMPLE_PROBABILITY flag is to be sent, the available
+size for the packet data has to be adjusted accordingly.
+
+Also, check the error code returned by nla_put_flag.
+
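+A hypothetical sketch of the sizing rule (made-up constants, not the
+psample code): every optional attribute that might be emitted, even a
+payload-less flag, has to be counted up front, otherwise the final put
+can fail for lack of room:
+
+ #include <stdbool.h>
+ #include <stddef.h>
+ #include <stdio.h>
+
+ #define MSG_HDR_SZ  16 /* made-up message header size      */
+ #define ATTR_HDR_SZ  4 /* made-up per-attribute header size */
+
+ /* A flag attribute carries no payload but still needs its header. */
+ static size_t msg_size(size_t data_len, size_t cookie_len, bool rate_as_probability)
+ {
+         return MSG_HDR_SZ +
+                ATTR_HDR_SZ + data_len +
+                (cookie_len ? ATTR_HDR_SZ + cookie_len : 0) +
+                (rate_as_probability ? ATTR_HDR_SZ : 0);
+ }
+
+ int main(void)
+ {
+         printf("without flag: %zu\n", msg_size(128, 8, false));
+         printf("with flag:    %zu\n", msg_size(128, 8, true));
+         return 0;
+ }
+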
+Fixes: 7b1b2b60c63f ("net: psample: allow using rate as probability")
+Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
+Reviewed-by: Aaron Conole <aconole@redhat.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://patch.msgid.link/20241217113739.3929300-1-amorenoz@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/psample/psample.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/net/psample/psample.c b/net/psample/psample.c
+index a0ddae8a65f9..25f92ba0840c 100644
+--- a/net/psample/psample.c
++++ b/net/psample/psample.c
+@@ -393,7 +393,9 @@ void psample_sample_packet(struct psample_group *group,
+ nla_total_size_64bit(sizeof(u64)) + /* timestamp */
+ nla_total_size(sizeof(u16)) + /* protocol */
+ (md->user_cookie_len ?
+- nla_total_size(md->user_cookie_len) : 0); /* user cookie */
++ nla_total_size(md->user_cookie_len) : 0) + /* user cookie */
++ (md->rate_as_probability ?
++ nla_total_size(0) : 0); /* rate as probability */
+
+ #ifdef CONFIG_INET
+ tun_info = skb_tunnel_info(skb);
+@@ -498,8 +500,9 @@ void psample_sample_packet(struct psample_group *group,
+ md->user_cookie))
+ goto error;
+
+- if (md->rate_as_probability)
+- nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY);
++ if (md->rate_as_probability &&
++ nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY))
++ goto error;
+
+ genlmsg_end(nl_skb, data);
+ genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
+--
+2.39.5
+
--- /dev/null
+From 93f684db7d95164206270ca840c09ef5ff4ed47b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Dec 2024 22:16:51 +0100
+Subject: selftests: openvswitch: fix tcpdump execution
+
+From: Adrian Moreno <amorenoz@redhat.com>
+
+[ Upstream commit a17975992cc11588767175247ccaae1213a8b582 ]
+
+Fix the way tcpdump is executed by:
+- Using the right variable for the namespace. Currently the use of the
+ empty "ns" makes the command fail.
+- Waiting until it starts to capture to ensure the interesting traffic
+ is caught on slow systems.
+- Using line-buffered output to ensure logs are available when the test
+ is paused with "-p". Otherwise the last chunk of data might only be
+ written when tcpdump is killed.
+
+Fixes: 74cc26f416b9 ("selftests: openvswitch: add interface support")
+Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
+Acked-by: Eelco Chaudron <echaudro@redhat.com>
+Link: https://patch.msgid.link/20241217211652.483016-1-amorenoz@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/openvswitch/openvswitch.sh | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+index cc0bfae2bafa..960e1ab4dd04 100755
+--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
++++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+@@ -171,8 +171,10 @@ ovs_add_netns_and_veths () {
+ ovs_add_if "$1" "$2" "$4" -u || return 1
+ fi
+
+- [ $TRACING -eq 1 ] && ovs_netns_spawn_daemon "$1" "$ns" \
+- tcpdump -i any -s 65535
++ if [ $TRACING -eq 1 ]; then
++ ovs_netns_spawn_daemon "$1" "$3" tcpdump -l -i any -s 6553
++ ovs_wait grep -q "listening on any" ${ovs_dir}/stderr
++ fi
+
+ return 0
+ }
+--
+2.39.5
+
xfs-fix-off-by-one-error-in-fsmap-s-end_daddr-usage.patch
xfs-fix-sb_spino_align-checks-for-large-fsblock-size.patch
xfs-fix-zero-byte-checking-in-the-superblock-scrubbe.patch
+tools-hv-change-permissions-of-networkmanager-config.patch
+cxl-pci-fix-potential-bogus-return-value-upon-succes.patch
+cxl-region-fix-region-creation-for-greater-than-x2-s.patch
+net-smc-protect-link-down-work-from-execute-after-lg.patch
+net-smc-check-sndbuf_space-again-after-nospace-flag-.patch
+net-smc-check-iparea_offset-and-ipv6_prefixes_cnt-wh.patch
+net-smc-check-v2_ext_offset-eid_cnt-ism_gid_cnt-when.patch
+net-smc-check-smcd_v2_ext_offset-when-receiving-prop.patch
+net-smc-check-return-value-of-sock_recvmsg-when-drai.patch
+net-mscc-ocelot-fix-incorrect-ifh-src_port-field-in-.patch
+netdevsim-prevent-bad-user-input-in-nsim_dev_health_.patch
+tools-net-ynl-fix-sub-message-key-lookup-for-nested-.patch
+ionic-fix-netdev-notifier-unregister-on-failure.patch
+ionic-no-double-destroy-workqueue.patch
+ionic-use-ee-offset-when-returning-sprom-data.patch
+net-renesas-rswitch-rework-ts-tags-management.patch
+ksmbd-count-all-requests-in-req_running-counter.patch
+ksmbd-fix-broken-transfers-when-exceeding-max-simult.patch
+netdev-fix-repeated-netlink-messages-in-queue-dump.patch
+netdev-fix-repeated-netlink-messages-in-queue-stats.patch
+team-fix-feature-exposure-when-no-ports-are-present.patch
+net-hinic-fix-cleanup-in-create_rxqs-txqs.patch
+net-ethernet-oa_tc6-fix-infinite-loop-error-when-tx-.patch
+net-ethernet-oa_tc6-fix-tx-skb-race-condition-betwee.patch
+net-ethernet-bgmac-platform-fix-an-of-node-reference.patch
+net-netdevsim-fix-nsim_pp_hold_write.patch
+can-m_can-set-init-flag-earlier-in-probe.patch
+can-m_can-fix-missed-interrupts-with-m_can_pci.patch
+ipvs-fix-clamp-of-ip_vs_conn_tab-on-small-memory-sys.patch
+netfilter-ipset-fix-for-recursive-locking-warning.patch
+selftests-openvswitch-fix-tcpdump-execution.patch
+net-dsa-restore-dsa_software_vlan_untag-ability-to-o.patch
+netdev-genl-avoid-empty-messages-in-queue-dump.patch
+psample-adjust-size-if-rate_as_probability-is-set.patch
+net-mdiobus-fix-an-of-node-reference-leak.patch
--- /dev/null
+From a9301f8fa44e1e8d1d33837ba73140dd2af5d459 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Dec 2024 13:36:57 +0100
+Subject: team: Fix feature exposure when no ports are present
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit e78c20f327bd94dabac68b98218dff069a8780f0 ]
+
+Small follow-up to align this to an equivalent behavior as the bond driver.
+The change in 3625920b62c3 ("teaming: fix vlan_features computing") removed
+the netdevice vlan_features when there is no team port attached, yet it
+leaves the full set of enc_features intact.
+
+Instead, leave the default features as pre 3625920b62c3, and recompute once
+we do have ports attached. Also, similarly as in bonding case, call the
+netdev_base_features() helper on the enc_features.
+
+Fixes: 3625920b62c3 ("teaming: fix vlan_features computing")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20241213123657.401868-1-daniel@iogearbox.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/team/team_core.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
+index 6ace5a74cddb..1c85dda83825 100644
+--- a/drivers/net/team/team_core.c
++++ b/drivers/net/team/team_core.c
+@@ -998,9 +998,13 @@ static void __team_compute_features(struct team *team)
+ unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
+ IFF_XMIT_DST_RELEASE_PERM;
+
++ rcu_read_lock();
++ if (list_empty(&team->port_list))
++ goto done;
++
+ vlan_features = netdev_base_features(vlan_features);
++ enc_features = netdev_base_features(enc_features);
+
+- rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ vlan_features = netdev_increment_features(vlan_features,
+ port->dev->vlan_features,
+@@ -1010,11 +1014,11 @@ static void __team_compute_features(struct team *team)
+ port->dev->hw_enc_features,
+ TEAM_ENC_FEATURES);
+
+-
+ dst_release_flag &= port->dev->priv_flags;
+ if (port->dev->hard_header_len > max_hard_header_len)
+ max_hard_header_len = port->dev->hard_header_len;
+ }
++done:
+ rcu_read_unlock();
+
+ team->dev->vlan_features = vlan_features;
+--
+2.39.5
+
--- /dev/null
+From 6b77d82d7908403d7f4f56a9cd3f968e71a1dd82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2024 16:35:10 +0200
+Subject: tools: hv: change permissions of NetworkManager configuration file
+
+From: Olaf Hering <olaf@aepfle.de>
+
+[ Upstream commit 91ae69c7ed9e262f24240c425ad1eef2cf6639b7 ]
+
+Align permissions of the resulting .nmconnection file, instead of
+the input file from hv_kvp_daemon. To avoid the tiny time frame
+where the output file is world-readable, use umask instead of chmod.
+
+Fixes: 42999c904612 ("hv/hv_kvp_daemon:Support for keyfile based connection profile")
+Signed-off-by: Olaf Hering <olaf@aepfle.de>
+Reviewed-by: Shradha Gupta <shradhagupta@linux.microsoft.com>
+Link: https://lore.kernel.org/r/20241016143521.3735-1-olaf@aepfle.de
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Message-ID: <20241016143521.3735-1-olaf@aepfle.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/hv/hv_set_ifconfig.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
+index 440a91b35823..2f8baed2b8f7 100755
+--- a/tools/hv/hv_set_ifconfig.sh
++++ b/tools/hv/hv_set_ifconfig.sh
+@@ -81,7 +81,7 @@ echo "ONBOOT=yes" >> $1
+
+ cp $1 /etc/sysconfig/network-scripts/
+
+-chmod 600 $2
++umask 0177
+ interface=$(echo $2 | awk -F - '{ print $2 }')
+ filename="${2##*/}"
+
+--
+2.39.5
+
--- /dev/null
+From 39ebbcc3090b2bce7aeacd96aac183975c9e2064 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Dec 2024 13:07:11 +0000
+Subject: tools/net/ynl: fix sub-message key lookup for nested attributes
+
+From: Donald Hunter <donald.hunter@gmail.com>
+
+[ Upstream commit 663ad7481f068057f6f692c5368c47150e855370 ]
+
+Use the correct attribute space for sub-message key lookup in nested
+attributes when adding attributes. This fixes rt_link where the "kind"
+key and "data" sub-message are nested attributes in "linkinfo".
+
+For example:
+
+./tools/net/ynl/cli.py \
+ --create \
+ --spec Documentation/netlink/specs/rt_link.yaml \
+ --do newlink \
+ --json '{"link": 99,
+ "linkinfo": { "kind": "vlan", "data": {"id": 4 } }
+ }'
+
+Signed-off-by: Donald Hunter <donald.hunter@gmail.com>
+Fixes: ab463c4342d1 ("tools/net/ynl: Add support for encoding sub-messages")
+Link: https://patch.msgid.link/20241213130711.40267-1-donald.hunter@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/net/ynl/lib/ynl.py | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
+index c22c22bf2cb7..a3f741fed0a3 100644
+--- a/tools/net/ynl/lib/ynl.py
++++ b/tools/net/ynl/lib/ynl.py
+@@ -553,10 +553,10 @@ class YnlFamily(SpecFamily):
+ if attr["type"] == 'nest':
+ nl_type |= Netlink.NLA_F_NESTED
+ attr_payload = b''
+- sub_attrs = SpaceAttrs(self.attr_sets[space], value, search_attrs)
++ sub_space = attr['nested-attributes']
++ sub_attrs = SpaceAttrs(self.attr_sets[sub_space], value, search_attrs)
+ for subname, subvalue in value.items():
+- attr_payload += self._add_attr(attr['nested-attributes'],
+- subname, subvalue, sub_attrs)
++ attr_payload += self._add_attr(sub_space, subname, subvalue, sub_attrs)
+ elif attr["type"] == 'flag':
+ if not value:
+ # If value is absent or false then skip attribute creation.
+--
+2.39.5
+