--- /dev/null
+From d730192ff0246356a2d7e63ff5bd501060670eec Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Sat, 6 Apr 2024 13:40:52 +0200
+Subject: ACPI: scan: Do not increase dep_unmet for already met dependencies
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit d730192ff0246356a2d7e63ff5bd501060670eec upstream.
+
+On the Toshiba Encore WT10-A tablet the BATC battery ACPI device depends
+on 3 other devices:
+
+ Name (_DEP, Package (0x03) // _DEP: Dependencies
+ {
+ I2C1,
+ GPO2,
+ GPO0
+ })
+
+acpi_scan_check_dep() adds all 3 of these to the acpi_dep_list and then
+before an acpi_device is created for the BATC handle (and thus before
+acpi_scan_dep_init() runs) acpi_scan_clear_dep() gets called for both
+GPIO dependencies, with free_when_met not set for the dependencies.
+
+Since there is no adev for BATC yet, there also is no dep_unmet to
+decrement. The only result of acpi_scan_clear_dep() in this case is
+dep->met getting set.
+
+Soon after acpi_scan_clear_dep() has been called for the GPIO dependencies
+the acpi_device gets created for the BATC handle and acpi_scan_dep_init()
+runs, this sees 3 dependencies on the acpi_dep_list and initializes
+dep_unmet to 3. Later when the dependency for I2C1 is met dep_unmet
+becomes 2, but since the 2 GPIO deps were already met it never becomes 0
+causing battery monitoring to not work.
+
+Fix this by modifying acpi_scan_dep_init() to not increase dep_unmet for
+dependencies which have already been marked as being met.
+
+Fixes: 3ba12d8de3fa ("ACPI: scan: Reduce overhead related to devices with dependencies")
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Cc: 6.5+ <stable@vger.kernel.org> # 6.5+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/scan.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1802,7 +1802,8 @@ static void acpi_scan_dep_init(struct ac
+ if (dep->honor_dep)
+ adev->flags.honor_deps = 1;
+
+- adev->dep_unmet++;
++ if (!dep->met)
++ adev->dep_unmet++;
+ }
+ }
+ }
--- /dev/null
+From 135f218255b28c5bbf71e9e32a49e5c734cabbe5 Mon Sep 17 00:00:00 2001
+From: Fabio Estevam <festevam@denx.de>
+Date: Thu, 28 Mar 2024 12:19:54 -0300
+Subject: ARM: dts: imx7s-warp: Pass OV2680 link-frequencies
+
+From: Fabio Estevam <festevam@denx.de>
+
+commit 135f218255b28c5bbf71e9e32a49e5c734cabbe5 upstream.
+
+Since commit 63b0cd30b78e ("media: ov2680: Add bus-cfg / endpoint
+property verification") the ov2680 no longer probes on a imx7s-warp7:
+
+ov2680 1-0036: error -EINVAL: supported link freq 330000000 not found
+ov2680 1-0036: probe with driver ov2680 failed with error -22
+
+Fix it by passing the required 'link-frequencies' property as
+recommended by:
+
+https://www.kernel.org/doc/html/v6.9-rc1/driver-api/media/camera-sensor.html#handling-clocks
+
+Cc: stable@vger.kernel.org
+Fixes: 63b0cd30b78e ("media: ov2680: Add bus-cfg / endpoint property verification")
+Signed-off-by: Fabio Estevam <festevam@denx.de>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/nxp/imx/imx7s-warp.dts | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
+@@ -210,6 +210,7 @@
+ remote-endpoint = <&mipi_from_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1>;
++ link-frequencies = /bits/ 64 <330000000>;
+ };
+ };
+ };
--- /dev/null
+From e3ba51ab24fddef79fc212f9840de54db8fd1685 Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gshan@redhat.com>
+Date: Fri, 5 Apr 2024 13:58:50 +1000
+Subject: arm64: tlb: Fix TLBI RANGE operand
+
+From: Gavin Shan <gshan@redhat.com>
+
+commit e3ba51ab24fddef79fc212f9840de54db8fd1685 upstream.
+
+KVM/arm64 relies on TLBI RANGE feature to flush TLBs when the dirty
+pages are collected by VMM and the page table entries become write
+protected during live migration. Unfortunately, the operand passed
+to the TLBI RANGE instruction isn't correctly sorted out due to the
+commit 117940aa6e5f ("KVM: arm64: Define kvm_tlb_flush_vmid_range()").
+It leads to crash on the destination VM after live migration because
+TLBs aren't flushed completely and some of the dirty pages are missed.
+
+For example, I have a VM where 8GB memory is assigned, starting from
+0x40000000 (1GB). Note that the host has 4KB as the base page size.
+In the middle of migration, kvm_tlb_flush_vmid_range() is executed
+to flush TLBs. It passes MAX_TLBI_RANGE_PAGES as the argument to
+__kvm_tlb_flush_vmid_range() and __flush_s2_tlb_range_op(). SCALE#3
+and NUM#31, corresponding to MAX_TLBI_RANGE_PAGES, aren't supported
+by __TLBI_RANGE_NUM(). In this specific case, -1 has been returned
+from __TLBI_RANGE_NUM() for SCALE#3/2/1/0 and rejected by the loop
+in the __flush_tlb_range_op() until the variable @scale underflows
+and becomes -9, 0xffff708000040000 is set as the operand. The operand
+is wrong since it's sorted out by __TLBI_VADDR_RANGE() according to
+invalid @scale and @num.
+
+Fix it by extending __TLBI_RANGE_NUM() to support the combination of
+SCALE#3 and NUM#31. With the changes, [-1 31] instead of [-1 30] can
+be returned from the macro, meaning the TLBs for 0x200000 pages in the
+above example can be flushed in one shot with SCALE#3 and NUM#31. The
+macro TLBI_RANGE_MASK is dropped since no one uses it any more. The
+comments are also adjusted accordingly.
+
+Fixes: 117940aa6e5f ("KVM: arm64: Define kvm_tlb_flush_vmid_range()")
+Cc: stable@kernel.org # v6.6+
+Reported-by: Yihuang Yu <yihyu@redhat.com>
+Suggested-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Gavin Shan <gshan@redhat.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
+Link: https://lore.kernel.org/r/20240405035852.1532010-2-gshan@redhat.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/tlbflush.h | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -161,12 +161,18 @@ static inline unsigned long get_trans_gr
+ #define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)
+
+ /*
+- * Generate 'num' values from -1 to 30 with -1 rejected by the
+- * __flush_tlb_range() loop below.
+- */
+-#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
+-#define __TLBI_RANGE_NUM(pages, scale) \
+- ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
++ * Generate 'num' values from -1 to 31 with -1 rejected by the
++ * __flush_tlb_range() loop below. Its return value is only
++ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
++ * 'pages' is more than that, you must iterate over the overall
++ * range.
++ */
++#define __TLBI_RANGE_NUM(pages, scale) \
++ ({ \
++ int __pages = min((pages), \
++ __TLBI_RANGE_PAGES(31, (scale))); \
++ (__pages >> (5 * (scale) + 1)) - 1; \
++ })
+
+ /*
+ * TLB Invalidation
+@@ -379,10 +385,6 @@ static inline void arch_tlbbatch_flush(s
+ * 3. If there is 1 page remaining, flush it through non-range operations. Range
+ * operations can only span an even number of pages. We save this for last to
+ * ensure 64KB start alignment is maintained for the LPA2 case.
+- *
+- * Note that certain ranges can be represented by either num = 31 and
+- * scale or num = 0 and scale + 1. The loop below favours the latter
+- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
+ */
+ #define __flush_tlb_range_op(op, start, pages, stride, \
+ asid, tlb_level, tlbi_user, lpa2) \
--- /dev/null
+From c0297e7dd50795d559f3534887a6de1756b35d0f Mon Sep 17 00:00:00 2001
+From: Igor Pylypiv <ipylypiv@google.com>
+Date: Thu, 11 Apr 2024 20:12:24 +0000
+Subject: ata: libata-core: Allow command duration limits detection for ACS-4 drives
+
+From: Igor Pylypiv <ipylypiv@google.com>
+
+commit c0297e7dd50795d559f3534887a6de1756b35d0f upstream.
+
+Even though the command duration limits (CDL) feature was first added
+in ACS-5 (major version 12), there are some ACS-4 (major version 11)
+drives that implement CDL as well.
+
+IDENTIFY_DEVICE, SUPPORTED_CAPABILITIES, and CURRENT_SETTINGS log pages
+are mandatory in the ACS-4 standard so it should be safe to read these
+log pages on older drives implementing the ACS-4 standard.
+
+Fixes: 62e4a60e0cdb ("scsi: ata: libata: Detect support for command duration limits")
+Cc: stable@vger.kernel.org
+Signed-off-by: Igor Pylypiv <ipylypiv@google.com>
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2539,7 +2539,7 @@ static void ata_dev_config_cdl(struct at
+ bool cdl_enabled;
+ u64 val;
+
+- if (ata_id_major_version(dev->id) < 12)
++ if (ata_id_major_version(dev->id) < 11)
+ goto not_supported;
+
+ if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
--- /dev/null
+From 79336504781e7fee5ddaf046dcc186c8dfdf60b1 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <dlemoal@kernel.org>
+Date: Fri, 12 Apr 2024 08:41:15 +0900
+Subject: ata: libata-scsi: Fix ata_scsi_dev_rescan() error path
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+commit 79336504781e7fee5ddaf046dcc186c8dfdf60b1 upstream.
+
+Commit 0c76106cb975 ("scsi: sd: Fix TCG OPAL unlock on system resume")
+incorrectly handles failures of scsi_resume_device() in
+ata_scsi_dev_rescan(), leading to a double call to
+spin_unlock_irqrestore() to unlock a device port. Fix this by redefining
+the goto labels used in case of errors and only unlock the port
+scsi_scan_mutex when scsi_resume_device() fails.
+
+Bug found with the Smatch static checker warning:
+
+ drivers/ata/libata-scsi.c:4774 ata_scsi_dev_rescan()
+ error: double unlocked 'ap->lock' (orig line 4757)
+
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Fixes: 0c76106cb975 ("scsi: sd: Fix TCG OPAL unlock on system resume")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ata/libata-scsi.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -4745,7 +4745,7 @@ void ata_scsi_dev_rescan(struct work_str
+ * bail out.
+ */
+ if (ap->pflags & ATA_PFLAG_SUSPENDED)
+- goto unlock;
++ goto unlock_ap;
+
+ if (!sdev)
+ continue;
+@@ -4758,7 +4758,7 @@ void ata_scsi_dev_rescan(struct work_str
+ if (do_resume) {
+ ret = scsi_resume_device(sdev);
+ if (ret == -EWOULDBLOCK)
+- goto unlock;
++ goto unlock_scan;
+ dev->flags &= ~ATA_DFLAG_RESUMING;
+ }
+ ret = scsi_rescan_device(sdev);
+@@ -4766,12 +4766,13 @@ void ata_scsi_dev_rescan(struct work_str
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (ret)
+- goto unlock;
++ goto unlock_ap;
+ }
+ }
+
+-unlock:
++unlock_ap:
+ spin_unlock_irqrestore(ap->lock, flags);
++unlock_scan:
+ mutex_unlock(&ap->scsi_scan_mutex);
+
+ /* Reschedule with a delay if scsi_rescan_device() returned an error */
--- /dev/null
+From b1f532a3b1e6d2e5559c7ace49322922637a28aa Mon Sep 17 00:00:00 2001
+From: Sven Eckelmann <sven@narfation.org>
+Date: Mon, 12 Feb 2024 13:58:33 +0100
+Subject: batman-adv: Avoid infinite loop trying to resize local TT
+
+From: Sven Eckelmann <sven@narfation.org>
+
+commit b1f532a3b1e6d2e5559c7ace49322922637a28aa upstream.
+
+If the MTU of one of an attached interface becomes too small to transmit
+the local translation table then it must be resized to fit inside all
+fragments (when enabled) or a single packet.
+
+But if the MTU becomes too low to transmit even the header + the VLAN
+specific part then the resizing of the local TT will never succeed. This
+can for example happen when the usable space is 110 bytes and 11 VLANs are
+on top of batman-adv. In this case, at least 116 bytes would be needed.
+There will just be an endless spam of
+
+ batman_adv: batadv0: Forced to purge local tt entries to fit new maximum fragment MTU (110)
+
+in the log but the function will never finish. Problem here is that the
+timeout will be halved all the time and will then stagnate at 0 and
+therefore never be able to reduce the table even more.
+
+There are other scenarios possible with a similar result. The number of
+BATADV_TT_CLIENT_NOPURGE entries in the local TT can for example be too
+high to fit inside a packet. Such a scenario can therefore happen also with
+only a single VLAN + 7 non-purgeable addresses - requiring at least 120
+bytes.
+
+While this should be handled proactively when:
+
+* interface with too low MTU is added
+* VLAN is added
+* non-purgeable local mac is added
+* MTU of an attached interface is reduced
+* fragmentation setting gets disabled (which most likely requires dropping
+ attached interfaces)
+
+not all of these scenarios can be prevented because batman-adv is only
+consuming events without the possibility to prevent these actions
+(non-purgeable MAC address added, MTU of an attached interface is reduced).
+It is therefore necessary to also make sure that the code is able to
+handle the situations when an incompatible system configuration is
+already present.
+
+Cc: stable@vger.kernel.org
+Fixes: a19d3d85e1b8 ("batman-adv: limit local translation table max size")
+Reported-by: syzbot+a6a4b5bb3da165594cff@syzkaller.appspotmail.com
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/translation-table.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -3948,7 +3948,7 @@ void batadv_tt_local_resize_to_mtu(struc
+
+ spin_lock_bh(&bat_priv->tt.commit_lock);
+
+- while (true) {
++ while (timeout) {
+ table_size = batadv_tt_local_table_transmit_size(bat_priv);
+ if (packet_size_max >= table_size)
+ break;
--- /dev/null
+From 45d355a926ab40f3ae7bc0b0a00cb0e3e8a5a810 Mon Sep 17 00:00:00 2001
+From: Dmitry Antipov <dmantipov@yandex.ru>
+Date: Tue, 2 Apr 2024 14:32:05 +0300
+Subject: Bluetooth: Fix memory leak in hci_req_sync_complete()
+
+From: Dmitry Antipov <dmantipov@yandex.ru>
+
+commit 45d355a926ab40f3ae7bc0b0a00cb0e3e8a5a810 upstream.
+
+In 'hci_req_sync_complete()', always free the previous sync
+request state before assigning reference to a new one.
+
+Reported-by: syzbot+39ec16ff6cc18b1d066d@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=39ec16ff6cc18b1d066d
+Cc: stable@vger.kernel.org
+Fixes: f60cb30579d3 ("Bluetooth: Convert hci_req_sync family of function to new request API")
+Signed-off-by: Dmitry Antipov <dmantipov@yandex.ru>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bluetooth/hci_request.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -105,8 +105,10 @@ void hci_req_sync_complete(struct hci_de
+ if (hdev->req_status == HCI_REQ_PEND) {
+ hdev->req_result = result;
+ hdev->req_status = HCI_REQ_DONE;
+- if (skb)
++ if (skb) {
++ kfree_skb(hdev->req_skb);
+ hdev->req_skb = skb_get(skb);
++ }
+ wake_up_interruptible(&hdev->req_wait_q);
+ }
+ }
--- /dev/null
+From b2136cc288fce2f24a92f3d656531b2d50ebec5a Mon Sep 17 00:00:00 2001
+From: David Sterba <dsterba@suse.com>
+Date: Mon, 29 Jan 2024 19:04:33 +0100
+Subject: btrfs: tests: allocate dummy fs_info and root in test_find_delalloc()
+
+From: David Sterba <dsterba@suse.com>
+
+commit b2136cc288fce2f24a92f3d656531b2d50ebec5a upstream.
+
+Allocate fs_info and root to have a valid fs_info pointer in case it's
+dereferenced by a helper outside of tests, like find_lock_delalloc_range().
+
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/tests/extent-io-tests.c | 28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+--- a/fs/btrfs/tests/extent-io-tests.c
++++ b/fs/btrfs/tests/extent-io-tests.c
+@@ -11,6 +11,7 @@
+ #include "btrfs-tests.h"
+ #include "../ctree.h"
+ #include "../extent_io.h"
++#include "../disk-io.h"
+ #include "../btrfs_inode.h"
+
+ #define PROCESS_UNLOCK (1 << 0)
+@@ -105,9 +106,11 @@ static void dump_extent_io_tree(const st
+ }
+ }
+
+-static int test_find_delalloc(u32 sectorsize)
++static int test_find_delalloc(u32 sectorsize, u32 nodesize)
+ {
+- struct inode *inode;
++ struct btrfs_fs_info *fs_info;
++ struct btrfs_root *root = NULL;
++ struct inode *inode = NULL;
+ struct extent_io_tree *tmp;
+ struct page *page;
+ struct page *locked_page = NULL;
+@@ -121,12 +124,27 @@ static int test_find_delalloc(u32 sector
+
+ test_msg("running find delalloc tests");
+
++ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
++ if (!fs_info) {
++ test_std_err(TEST_ALLOC_FS_INFO);
++ return -ENOMEM;
++ }
++
++ root = btrfs_alloc_dummy_root(fs_info);
++ if (IS_ERR(root)) {
++ test_std_err(TEST_ALLOC_ROOT);
++ ret = PTR_ERR(root);
++ goto out;
++ }
++
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto out;
+ }
+ tmp = &BTRFS_I(inode)->io_tree;
++ BTRFS_I(inode)->root = root;
+
+ /*
+ * Passing NULL as we don't have fs_info but tracepoints are not used
+@@ -316,6 +334,8 @@ out:
+ process_page_range(inode, 0, total_dirty - 1,
+ PROCESS_UNLOCK | PROCESS_RELEASE);
+ iput(inode);
++ btrfs_free_dummy_root(root);
++ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+ }
+
+@@ -794,7 +814,7 @@ int btrfs_test_extent_io(u32 sectorsize,
+
+ test_msg("running extent I/O tests");
+
+- ret = test_find_delalloc(sectorsize);
++ ret = test_find_delalloc(sectorsize, nodesize);
+ if (ret)
+ goto out;
+
--- /dev/null
+From b372e96bd0a32729d55d27f613c8bc80708a82e1 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Mon, 25 Mar 2024 09:21:20 +1100
+Subject: ceph: redirty page before returning AOP_WRITEPAGE_ACTIVATE
+
+From: NeilBrown <neilb@suse.de>
+
+commit b372e96bd0a32729d55d27f613c8bc80708a82e1 upstream.
+
+The page has been marked clean before writepage is called. If we don't
+redirty it before postponing the write, it might never get written.
+
+Cc: stable@vger.kernel.org
+Fixes: 503d4fa6ee28 ("ceph: remove reliance on bdi congestion")
+Signed-off-by: NeilBrown <neilb@suse.de>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: Xiubo Li <xiubli@redhat.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/addr.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -795,8 +795,10 @@ static int ceph_writepage(struct page *p
+ ihold(inode);
+
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+- ceph_inode_to_fs_client(inode)->write_congested)
++ ceph_inode_to_fs_client(inode)->write_congested) {
++ redirty_page_for_writepage(wbc, page);
+ return AOP_WRITEPAGE_ACTIVATE;
++ }
+
+ wait_on_page_fscache(page);
+
--- /dev/null
+From 17f8dc2db52185460f212052f3a692c1fdc167ba Mon Sep 17 00:00:00 2001
+From: Xiubo Li <xiubli@redhat.com>
+Date: Tue, 9 Apr 2024 08:56:03 +0800
+Subject: ceph: switch to use cap_delay_lock for the unlink delay list
+
+From: Xiubo Li <xiubli@redhat.com>
+
+commit 17f8dc2db52185460f212052f3a692c1fdc167ba upstream.
+
+The same list item will be used in both cap_delay_list and
+cap_unlink_delay_list, so it's buggy to use two different locks
+to protect them.
+
+Cc: stable@vger.kernel.org
+Fixes: dbc347ef7f0c ("ceph: add ceph_cap_unlink_work to fire check_caps() immediately")
+Link: https://lists.ceph.io/hyperkitty/list/ceph-users@ceph.io/thread/AODC76VXRAMXKLFDCTK4TKFDDPWUSCN5
+Reported-by: Marc Ruhmann <ruhmann@luis.uni-hannover.de>
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Tested-by: Marc Ruhmann <ruhmann@luis.uni-hannover.de>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/caps.c | 4 ++--
+ fs/ceph/mds_client.c | 9 ++++-----
+ fs/ceph/mds_client.h | 3 +--
+ 3 files changed, 7 insertions(+), 9 deletions(-)
+
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4775,13 +4775,13 @@ int ceph_drop_caps_for_unlink(struct ino
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+- spin_lock(&mdsc->cap_unlink_delay_lock);
++ spin_lock(&mdsc->cap_delay_lock);
+ ci->i_ceph_flags |= CEPH_I_FLUSH;
+ if (!list_empty(&ci->i_cap_delay_list))
+ list_del_init(&ci->i_cap_delay_list);
+ list_add_tail(&ci->i_cap_delay_list,
+ &mdsc->cap_unlink_delay_list);
+- spin_unlock(&mdsc->cap_unlink_delay_lock);
++ spin_unlock(&mdsc->cap_delay_lock);
+
+ /*
+ * Fire the work immediately, because the MDS maybe
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -2504,7 +2504,7 @@ static void ceph_cap_unlink_work(struct
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "begin\n");
+- spin_lock(&mdsc->cap_unlink_delay_lock);
++ spin_lock(&mdsc->cap_delay_lock);
+ while (!list_empty(&mdsc->cap_unlink_delay_list)) {
+ struct ceph_inode_info *ci;
+ struct inode *inode;
+@@ -2516,15 +2516,15 @@ static void ceph_cap_unlink_work(struct
+
+ inode = igrab(&ci->netfs.inode);
+ if (inode) {
+- spin_unlock(&mdsc->cap_unlink_delay_lock);
++ spin_unlock(&mdsc->cap_delay_lock);
+ doutc(cl, "on %p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH);
+ iput(inode);
+- spin_lock(&mdsc->cap_unlink_delay_lock);
++ spin_lock(&mdsc->cap_delay_lock);
+ }
+ }
+- spin_unlock(&mdsc->cap_unlink_delay_lock);
++ spin_unlock(&mdsc->cap_delay_lock);
+ doutc(cl, "done\n");
+ }
+
+@@ -5404,7 +5404,6 @@ int ceph_mdsc_init(struct ceph_fs_client
+ INIT_LIST_HEAD(&mdsc->cap_wait_list);
+ spin_lock_init(&mdsc->cap_delay_lock);
+ INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
+- spin_lock_init(&mdsc->cap_unlink_delay_lock);
+ INIT_LIST_HEAD(&mdsc->snap_flush_list);
+ spin_lock_init(&mdsc->snap_flush_lock);
+ mdsc->last_cap_flush_tid = 1;
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -461,9 +461,8 @@ struct ceph_mds_client {
+ struct delayed_work delayed_work; /* delayed work */
+ unsigned long last_renew_caps; /* last time we renewed our caps */
+ struct list_head cap_delay_list; /* caps with delayed release */
+- spinlock_t cap_delay_lock; /* protects cap_delay_list */
+ struct list_head cap_unlink_delay_list; /* caps with delayed release for unlink */
+- spinlock_t cap_unlink_delay_lock; /* protects cap_unlink_delay_list */
++ spinlock_t cap_delay_lock; /* protects cap_delay_list and cap_unlink_delay_list */
+ struct list_head snap_flush_list; /* cap_snaps ready to flush */
+ spinlock_t snap_flush_lock;
+
--- /dev/null
+From 31729e8c21ecfd671458e02b6511eb68c2225113 Mon Sep 17 00:00:00 2001
+From: Tim Huang <Tim.Huang@amd.com>
+Date: Wed, 27 Mar 2024 13:10:37 +0800
+Subject: drm/amd/pm: fixes a random hang in S4 for SMU v13.0.4/11
+
+From: Tim Huang <Tim.Huang@amd.com>
+
+commit 31729e8c21ecfd671458e02b6511eb68c2225113 upstream.
+
+While doing multiple S4 stress tests, GC/RLC/PMFW get into
+an invalid state resulting into hard hangs.
+
+Adding a GFX reset as workaround just before sending the
+MP1_UNLOAD message avoids this failure.
+
+Signed-off-by: Tim Huang <Tim.Huang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Mario Limonciello <superm1@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+@@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_c
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+- if (!en && !adev->in_s0ix)
++ if (!en && !adev->in_s0ix) {
++ /* Adds a GFX reset as workaround just before sending the
++ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
++ * an invalid state.
++ */
++ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
++ SMU_RESET_MODE_2, NULL);
++ if (ret)
++ return ret;
++
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
++ }
+
+ return ret;
+ }
--- /dev/null
+From eed14eb48ee176fe0144c6a999d00c855d0b199b Mon Sep 17 00:00:00 2001
+From: Peyton Lee <peytolee@amd.com>
+Date: Wed, 13 Mar 2024 16:53:49 +0800
+Subject: drm/amdgpu/vpe: power on vpe when hw_init
+
+From: Peyton Lee <peytolee@amd.com>
+
+commit eed14eb48ee176fe0144c6a999d00c855d0b199b upstream.
+
+To fix mode2 reset failure.
+Should power on VPE when hw_init.
+
+Signed-off-by: Peyton Lee <peytolee@amd.com>
+Reviewed-by: Lang Yu <lang.yu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: "Gong, Richard" <richard.gong@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+@@ -390,6 +390,12 @@ static int vpe_hw_init(void *handle)
+ struct amdgpu_vpe *vpe = &adev->vpe;
+ int ret;
+
++ /* Power on VPE */
++ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
++ AMD_PG_STATE_UNGATE);
++ if (ret)
++ return ret;
++
+ ret = vpe_load_microcode(vpe);
+ if (ret)
+ return ret;
--- /dev/null
+From ce5d241c3ad4568c12842168288993234345c0eb Mon Sep 17 00:00:00 2001
+From: Nini Song <nini.song@mediatek.com>
+Date: Thu, 25 Jan 2024 21:28:45 +0800
+Subject: media: cec: core: remove length check of Timer Status
+
+From: Nini Song <nini.song@mediatek.com>
+
+commit ce5d241c3ad4568c12842168288993234345c0eb upstream.
+
+The valid_la is used to check the length requirements,
+including special cases of Timer Status. If the length is
+shorter than 5, that means no Duration Available is returned,
+the message will be forced to be invalid.
+
+However, the description of Duration Available in the spec
+is that this parameter may be returned when these cases, or
+that it can be optionally return when these cases. The key
+words in the spec description are flexible choices.
+
+Remove the special length check of Timer Status to fit the
+spec which is not compulsory about that.
+
+Signed-off-by: Nini Song <nini.song@mediatek.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/cec/core/cec-adap.c | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -1151,20 +1151,6 @@ void cec_received_msg_ts(struct cec_adap
+ if (valid_la && min_len) {
+ /* These messages have special length requirements */
+ switch (cmd) {
+- case CEC_MSG_TIMER_STATUS:
+- if (msg->msg[2] & 0x10) {
+- switch (msg->msg[2] & 0xf) {
+- case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
+- case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
+- if (msg->len < 5)
+- valid_la = false;
+- break;
+- }
+- } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
+- if (msg->len < 5)
+- valid_la = false;
+- }
+- break;
+ case CEC_MSG_RECORD_ON:
+ switch (msg->msg[2]) {
+ case CEC_OP_RECORD_SRC_OWN:
--- /dev/null
+From 5e700b384ec13f5bcac9855cb28fcc674f1d3593 Mon Sep 17 00:00:00 2001
+From: Noah Loomans <noah@noahloomans.com>
+Date: Wed, 10 Apr 2024 20:26:19 +0200
+Subject: platform/chrome: cros_ec_uart: properly fix race condition
+
+From: Noah Loomans <noah@noahloomans.com>
+
+commit 5e700b384ec13f5bcac9855cb28fcc674f1d3593 upstream.
+
+The cros_ec_uart_probe() function calls devm_serdev_device_open() before
+it calls serdev_device_set_client_ops(). This can trigger a NULL pointer
+dereference:
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000000
+ ...
+ Call Trace:
+ <TASK>
+ ...
+ ? ttyport_receive_buf
+
+A simplified version of crashing code is as follows:
+
+ static inline size_t serdev_controller_receive_buf(struct serdev_controller *ctrl,
+ const u8 *data,
+ size_t count)
+ {
+ struct serdev_device *serdev = ctrl->serdev;
+
+ if (!serdev || !serdev->ops->receive_buf) // CRASH!
+ return 0;
+
+ return serdev->ops->receive_buf(serdev, data, count);
+ }
+
+It assumes that if SERPORT_ACTIVE is set and serdev exists, serdev->ops
+will also exist. This conflicts with the existing cros_ec_uart_probe()
+logic, as it first calls devm_serdev_device_open() (which sets
+SERPORT_ACTIVE), and only later sets serdev->ops via
+serdev_device_set_client_ops().
+
+Commit 01f95d42b8f4 ("platform/chrome: cros_ec_uart: fix race
+condition") attempted to fix a similar race condition, but while doing
+so, made the window of error for this race condition to happen much
+wider.
+
+Attempt to fix the race condition again, making sure we fully setup
+before calling devm_serdev_device_open().
+
+Fixes: 01f95d42b8f4 ("platform/chrome: cros_ec_uart: fix race condition")
+Cc: stable@vger.kernel.org
+Signed-off-by: Noah Loomans <noah@noahloomans.com>
+Reviewed-by: Guenter Roeck <groeck@chromium.org>
+Link: https://lore.kernel.org/r/20240410182618.169042-2-noah@noahloomans.com
+Signed-off-by: Tzung-Bi Shih <tzungbi@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/chrome/cros_ec_uart.c | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+--- a/drivers/platform/chrome/cros_ec_uart.c
++++ b/drivers/platform/chrome/cros_ec_uart.c
+@@ -263,12 +263,6 @@ static int cros_ec_uart_probe(struct ser
+ if (!ec_dev)
+ return -ENOMEM;
+
+- ret = devm_serdev_device_open(dev, serdev);
+- if (ret) {
+- dev_err(dev, "Unable to open UART device");
+- return ret;
+- }
+-
+ serdev_device_set_drvdata(serdev, ec_dev);
+ init_waitqueue_head(&ec_uart->response.wait_queue);
+
+@@ -280,14 +274,6 @@ static int cros_ec_uart_probe(struct ser
+ return ret;
+ }
+
+- ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
+- if (ret < 0) {
+- dev_err(dev, "Failed to set up host baud rate (%d)", ret);
+- return ret;
+- }
+-
+- serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
+-
+ /* Initialize ec_dev for cros_ec */
+ ec_dev->phys_name = dev_name(dev);
+ ec_dev->dev = dev;
+@@ -301,6 +287,20 @@ static int cros_ec_uart_probe(struct ser
+
+ serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
+
++ ret = devm_serdev_device_open(dev, serdev);
++ if (ret) {
++ dev_err(dev, "Unable to open UART device");
++ return ret;
++ }
++
++ ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
++ if (ret < 0) {
++ dev_err(dev, "Failed to set up host baud rate (%d)", ret);
++ return ret;
++ }
++
++ serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
++
+ return cros_ec_register(ec_dev);
+ }
+
--- /dev/null
+From 3c89a068bfd0698a5478f4cf39493595ef757d5e Mon Sep 17 00:00:00 2001
+From: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Date: Mon, 8 Apr 2024 09:02:23 +0200
+Subject: PM: s2idle: Make sure CPUs will wakeup directly on resume
+
+From: Anna-Maria Behnsen <anna-maria@linutronix.de>
+
+commit 3c89a068bfd0698a5478f4cf39493595ef757d5e upstream.
+
+s2idle works like a regular suspend with freezing processes and freezing
+devices. All CPUs except the control CPU go into idle. Once this is
+completed the control CPU kicks all other CPUs out of idle, so that they
+reenter the idle loop and then enter s2idle state. The control CPU then
+issues an swait() on the suspend state and therefore enters the idle loop
+as well.
+
+Due to being kicked out of idle, the other CPUs leave their NOHZ states,
+which means the tick is active and the corresponding hrtimer is programmed
+to the next jiffie.
+
+On entering s2idle the CPUs shut down their local clockevent device to
+prevent wakeups. The last CPU which enters s2idle shuts down its local
+clockevent and freezes timekeeping.
+
+On resume, one of the CPUs receives the wakeup interrupt, unfreezes
+timekeeping and its local clockevent and starts the resume process. At that
+point all other CPUs are still in s2idle with their clockevents switched
+off. They only resume when they are kicked by another CPU or after resuming
+devices and then receiving a device interrupt.
+
+That means there is no guarantee that all CPUs will wakeup directly on
+resume. As a consequence there is no guarantee that timers which are queued
+on those CPUs and should expire directly after resume, are handled. Also
+timer list timers which are remotely queued to one of those CPUs after
+resume will not result in a reprogramming IPI as the tick is
+active. Queueing a hrtimer will also not result in a reprogramming IPI
+because the first hrtimer event is already in the past.
+
+The recent introduction of the timer pull model (7ee988770326 ("timers:
+Implement the hierarchical pull model")) amplifies this problem, if the
+current migrator is one of the non woken up CPUs. When a non pinned timer
+list timer is queued and the queuing CPU goes idle, it relies on the still
+suspended migrator CPU to expire the timer which will happen by chance.
+
+The problem exists since commit 8d89835b0467 ("PM: suspend: Do not pause
+cpuidle in the suspend-to-idle path"). There the cpuidle_pause() call which
+in turn invoked a wakeup for all idle CPUs was moved to a later point in
+the resume process. This might not be reached or reached very late because
+it waits on a timer of a still suspended CPU.
+
+Address this by kicking all CPUs out of idle after the control CPU returns
+from swait() so that they resume their timers and restore consistent system
+state.
+
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218641
+Fixes: 8d89835b0467 ("PM: suspend: Do not pause cpuidle in the suspend-to-idle path")
+Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Mario Limonciello <mario.limonciello@amd.com>
+Cc: 5.16+ <stable@kernel.org> # 5.16+
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/suspend.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -106,6 +106,12 @@ static void s2idle_enter(void)
+ swait_event_exclusive(s2idle_wait_head,
+ s2idle_state == S2IDLE_STATE_WAKE);
+
++ /*
++ * Kick all CPUs to ensure that they resume their timers and restore
++ * consistent system state.
++ */
++ wake_up_all_idle_cpus();
++
+ cpus_read_unlock();
+
+ raw_spin_lock_irq(&s2idle_lock);
--- /dev/null
+From 19fa4f2a85d777a8052e869c1b892a2f7556569d Mon Sep 17 00:00:00 2001
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Mon, 8 Apr 2024 20:47:40 +0200
+Subject: r8169: fix LED-related deadlock on module removal
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+commit 19fa4f2a85d777a8052e869c1b892a2f7556569d upstream.
+
+Binding devm_led_classdev_register() to the netdev is problematic
+because on module removal we get a RTNL-related deadlock. Fix this
+by avoiding the device-managed LED functions.
+
+Note: We can safely call led_classdev_unregister() for a LED even
+if registering it failed, because led_classdev_unregister() detects
+this and is a no-op in this case.
+
+Fixes: 18764b883e15 ("r8169: add support for LED's on RTL8168/RTL8101")
+Cc: stable@vger.kernel.org
+Reported-by: Lukas Wunner <lukas@wunner.de>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.h | 6 ++--
+ drivers/net/ethernet/realtek/r8169_leds.c | 35 +++++++++++++++--------
+ drivers/net/ethernet/realtek/r8169_main.c | 8 ++++--
+ 3 files changed, 33 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
+index 4c043052198d..00882ffc7a02 100644
+--- a/drivers/net/ethernet/realtek/r8169.h
++++ b/drivers/net/ethernet/realtek/r8169.h
+@@ -73,6 +73,7 @@ enum mac_version {
+ };
+
+ struct rtl8169_private;
++struct r8169_led_classdev;
+
+ void r8169_apply_firmware(struct rtl8169_private *tp);
+ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
+@@ -84,7 +85,8 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
+ char *buf, int buf_len);
+ int rtl8168_get_led_mode(struct rtl8169_private *tp);
+ int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
+-void rtl8168_init_leds(struct net_device *ndev);
++struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev);
+ int rtl8125_get_led_mode(struct rtl8169_private *tp, int index);
+ int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode);
+-void rtl8125_init_leds(struct net_device *ndev);
++struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev);
++void r8169_remove_leds(struct r8169_led_classdev *leds);
+diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
+index 7c5dc9d0df85..e10bee706bc6 100644
+--- a/drivers/net/ethernet/realtek/r8169_leds.c
++++ b/drivers/net/ethernet/realtek/r8169_leds.c
+@@ -146,22 +146,22 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
+ led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
+
+ /* ignore errors */
+- devm_led_classdev_register(&ndev->dev, led_cdev);
++ led_classdev_register(&ndev->dev, led_cdev);
+ }
+
+-void rtl8168_init_leds(struct net_device *ndev)
++struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev)
+ {
+- /* bind resource mgmt to netdev */
+- struct device *dev = &ndev->dev;
+ struct r8169_led_classdev *leds;
+ int i;
+
+- leds = devm_kcalloc(dev, RTL8168_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
++ leds = kcalloc(RTL8168_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+- return;
++ return NULL;
+
+ for (i = 0; i < RTL8168_NUM_LEDS; i++)
+ rtl8168_setup_ldev(leds + i, ndev, i);
++
++ return leds;
+ }
+
+ static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev,
+@@ -245,20 +245,31 @@ static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev,
+ led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
+
+ /* ignore errors */
+- devm_led_classdev_register(&ndev->dev, led_cdev);
++ led_classdev_register(&ndev->dev, led_cdev);
+ }
+
+-void rtl8125_init_leds(struct net_device *ndev)
++struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev)
+ {
+- /* bind resource mgmt to netdev */
+- struct device *dev = &ndev->dev;
+ struct r8169_led_classdev *leds;
+ int i;
+
+- leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
++ leds = kcalloc(RTL8125_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+- return;
++ return NULL;
+
+ for (i = 0; i < RTL8125_NUM_LEDS; i++)
+ rtl8125_setup_led_ldev(leds + i, ndev, i);
++
++ return leds;
++}
++
++void r8169_remove_leds(struct r8169_led_classdev *leds)
++{
++ if (!leds)
++ return;
++
++ for (struct r8169_led_classdev *l = leds; l->ndev; l++)
++ led_classdev_unregister(&l->led);
++
++ kfree(leds);
+ }
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 6f1e6f386b7b..8a27328eae34 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -647,6 +647,8 @@ struct rtl8169_private {
+ const char *fw_name;
+ struct rtl_fw *rtl_fw;
+
++ struct r8169_led_classdev *leds;
++
+ u32 ocp_base;
+ };
+
+@@ -5044,6 +5046,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
+
+ cancel_work_sync(&tp->wk.work);
+
++ r8169_remove_leds(tp->leds);
++
+ unregister_netdev(tp->dev);
+
+ if (tp->dash_type != RTL_DASH_NONE)
+@@ -5501,9 +5505,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ if (IS_ENABLED(CONFIG_R8169_LEDS)) {
+ if (rtl_is_8125(tp))
+- rtl8125_init_leds(dev);
++ tp->leds = rtl8125_init_leds(dev);
+ else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+- rtl8168_init_leds(dev);
++ tp->leds = rtl8168_init_leds(dev);
+ }
+
+ netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
+--
+2.44.0
+
--- /dev/null
+From fcf3f7e2fc8a53a6140beee46ec782a4c88e4744 Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Fri, 8 Mar 2024 17:37:26 +0800
+Subject: raid1: fix use-after-free for original bio in raid1_write_request()
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit fcf3f7e2fc8a53a6140beee46ec782a4c88e4744 upstream.
+
+r1_bio->bios[] is used to record new bios that will be issued to
+underlying disks, however, in raid1_write_request(), r1_bio->bios[]
+will set to the original bio temporarily. Meanwhile, if blocked rdev
+is set, free_r1bio() will be called causing that all r1_bio->bios[]
+to be freed:
+
+raid1_write_request()
+ r1_bio = alloc_r1bio(mddev, bio); -> r1_bio->bios[] is NULL
+ for (i = 0; i < disks; i++) -> for each rdev in conf
+ // first rdev is normal
+ r1_bio->bios[0] = bio; -> set to original bio
+ // second rdev is blocked
+ if (test_bit(Blocked, &rdev->flags))
+ break
+
+ if (blocked_rdev)
+ free_r1bio()
+ put_all_bios()
+ bio_put(r1_bio->bios[0]) -> original bio is freed
+
+Test scripts:
+
+mdadm -CR /dev/md0 -l1 -n4 /dev/sd[abcd] --assume-clean
+fio -filename=/dev/md0 -ioengine=libaio -rw=write -bs=4k -numjobs=1 \
+ -iodepth=128 -name=test -direct=1
+echo blocked > /sys/block/md0/md/rd2/state
+
+Test result:
+
+BUG bio-264 (Not tainted): Object already free
+-----------------------------------------------------------------------------
+
+Allocated in mempool_alloc_slab+0x24/0x50 age=1 cpu=1 pid=869
+ kmem_cache_alloc+0x324/0x480
+ mempool_alloc_slab+0x24/0x50
+ mempool_alloc+0x6e/0x220
+ bio_alloc_bioset+0x1af/0x4d0
+ blkdev_direct_IO+0x164/0x8a0
+ blkdev_write_iter+0x309/0x440
+ aio_write+0x139/0x2f0
+ io_submit_one+0x5ca/0xb70
+ __do_sys_io_submit+0x86/0x270
+ __x64_sys_io_submit+0x22/0x30
+ do_syscall_64+0xb1/0x210
+ entry_SYSCALL_64_after_hwframe+0x6c/0x74
+Freed in mempool_free_slab+0x1f/0x30 age=1 cpu=1 pid=869
+ kmem_cache_free+0x28c/0x550
+ mempool_free_slab+0x1f/0x30
+ mempool_free+0x40/0x100
+ bio_free+0x59/0x80
+ bio_put+0xf0/0x220
+ free_r1bio+0x74/0xb0
+ raid1_make_request+0xadf/0x1150
+ md_handle_request+0xc7/0x3b0
+ md_submit_bio+0x76/0x130
+ __submit_bio+0xd8/0x1d0
+ submit_bio_noacct_nocheck+0x1eb/0x5c0
+ submit_bio_noacct+0x169/0xd40
+ submit_bio+0xee/0x1d0
+ blkdev_direct_IO+0x322/0x8a0
+ blkdev_write_iter+0x309/0x440
+ aio_write+0x139/0x2f0
+
+Since that bios for underlying disks are not allocated yet, fix this
+problem by using mempool_free() directly to free the r1_bio.
+
+Fixes: 992db13a4aee ("md/raid1: free the r1bio before waiting for blocked rdev")
+Cc: stable@vger.kernel.org # v6.6+
+Reported-by: Coly Li <colyli@suse.de>
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Tested-by: Coly Li <colyli@suse.de>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20240308093726.1047420-1-yukuai1@huaweicloud.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid1.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1474,7 +1474,7 @@ static void raid1_write_request(struct m
+ for (j = 0; j < i; j++)
+ if (r1_bio->bios[j])
+ rdev_dec_pending(conf->mirrors[j].rdev, mddev);
+- free_r1bio(r1_bio);
++ mempool_free(r1_bio, &conf->r1bio_pool);
+ allow_barrier(conf, bio->bi_iter.bi_sector);
+
+ if (bio->bi_opf & REQ_NOWAIT) {
--- /dev/null
+From ffe3986fece696cf65e0ef99e74c75f848be8e30 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Tue, 9 Apr 2024 15:13:09 -0400
+Subject: ring-buffer: Only update pages_touched when a new page is touched
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit ffe3986fece696cf65e0ef99e74c75f848be8e30 upstream.
+
+The "buffer_percent" logic that is used by the ring buffer splice code to
+only wake up the tasks when there's no data after the buffer is filled to
+the percentage of the "buffer_percent" file is dependent on three
+variables that determine the amount of data that is in the ring buffer:
+
+ 1) pages_read - incremented whenever a new sub-buffer is consumed
+ 2) pages_lost - incremented every time a writer overwrites a sub-buffer
+ 3) pages_touched - incremented when a write goes to a new sub-buffer
+
+The percentage is the calculation of:
+
+ (pages_touched - (pages_lost + pages_read)) / nr_pages
+
+Basically, the amount of data is the total number of sub-bufs that have been
+touched, minus the number of sub-bufs lost and sub-bufs consumed. This is
+divided by the total count to give the buffer percentage. When the
+percentage is greater than the value in the "buffer_percent" file, it
+wakes up splice readers waiting for that amount.
+
+It was observed that over time, the amount read from the splice was
+constantly decreasing the longer the trace was running. That is, if one
+asked for 60%, it would read over 60% when it first starts tracing, but
+then it would be woken up at under 60% and would slowly decrease the
+amount of data read after being woken up, where the amount becomes much
+less than the buffer percent.
+
+This was due to an accounting of the pages_touched incrementation. This
+value is incremented whenever a writer transfers to a new sub-buffer. But
+the place where it was incremented was incorrect. If a writer overflowed
+the current sub-buffer it would go to the next one. If it gets preempted
+by an interrupt at that time, and the interrupt performs a trace, it too
+will end up going to the next sub-buffer. But only one should increment
+the counter. Unfortunately, that was not the case.
+
+Change the cmpxchg() that does the real switch of the tail-page into a
+try_cmpxchg(), and on success, perform the increment of pages_touched. This
+will only increment the counter once for when the writer moves to a new
+sub-buffer, and not when there's a race and is incremented for when a
+writer and its preempting writer both move to the same new sub-buffer.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240409151309.0d0e5056@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Fixes: 2c2b0a78b3739 ("ring-buffer: Add percentage of ring buffer full to wake up reader")
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1400,7 +1400,6 @@ static void rb_tail_page_update(struct r
+ old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
+ old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
+
+- local_inc(&cpu_buffer->pages_touched);
+ /*
+ * Just make sure we have seen our old_write and synchronize
+ * with any interrupts that come in.
+@@ -1437,8 +1436,9 @@ static void rb_tail_page_update(struct r
+ */
+ local_set(&next_page->page->commit, 0);
+
+- /* Again, either we update tail_page or an interrupt does */
+- (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
++ /* Either we update tail_page or an interrupt does */
++ if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
++ local_inc(&cpu_buffer->pages_touched);
+ }
+ }
+
--- /dev/null
+smb3-fix-open-files-on-server-counter-going-negative.patch
+smb-client-instantiate-when-creating-sfu-files.patch
+ata-libata-core-allow-command-duration-limits-detection-for-acs-4-drives.patch
+ata-libata-scsi-fix-ata_scsi_dev_rescan-error-path.patch
+drm-amdgpu-vpe-power-on-vpe-when-hw_init.patch
+batman-adv-avoid-infinite-loop-trying-to-resize-local-tt.patch
+ceph-redirty-page-before-returning-aop_writepage_activate.patch
+ceph-switch-to-use-cap_delay_lock-for-the-unlink-delay-list.patch
+virtio_net-do-not-send-rss-key-if-it-is-not-supported.patch
+arm64-tlb-fix-tlbi-range-operand.patch
+arm-dts-imx7s-warp-pass-ov2680-link-frequencies.patch
+raid1-fix-use-after-free-for-original-bio-in-raid1_write_request.patch
+ring-buffer-only-update-pages_touched-when-a-new-page-is-touched.patch
+bluetooth-fix-memory-leak-in-hci_req_sync_complete.patch
+drm-amd-pm-fixes-a-random-hang-in-s4-for-smu-v13.0.4-11.patch
+platform-chrome-cros_ec_uart-properly-fix-race-condition.patch
+acpi-scan-do-not-increase-dep_unmet-for-already-met-dependencies.patch
+pm-s2idle-make-sure-cpus-will-wakeup-directly-on-resume.patch
+media-cec-core-remove-length-check-of-timer-status.patch
+btrfs-tests-allocate-dummy-fs_info-and-root-in-test_find_delalloc.patch
+r8169-fix-led-related-deadlock-on-module-removal.patch
--- /dev/null
+From c6ff459037b2e35450af2351037eac4c8aca1d6b Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 9 Apr 2024 11:28:59 -0300
+Subject: smb: client: instantiate when creating SFU files
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit c6ff459037b2e35450af2351037eac4c8aca1d6b upstream.
+
+In cifs_sfu_make_node(), on success, instantiate rather than leave it
+with dentry unhashed negative to support callers that expect mknod(2)
+to always instantiate.
+
+This fixes the following test case:
+
+ mount.cifs //srv/share /mnt -o ...,sfu
+ mkfifo /mnt/fifo
+ ./xfstests/ltp/growfiles -b -W test -e 1 -u -i 0 -L 30 /mnt/fifo
+ ...
+ BUG: unable to handle page fault for address: 000000034cec4e58
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: 0000 [#1] PREEMPT SMP PTI
+ CPU: 0 PID: 138098 Comm: growfiles Kdump: loaded Not tainted
+ 5.14.0-436.3987_1240945149.el9.x86_64 #1
+ Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
+ RIP: 0010:_raw_callee_save__kvm_vcpu_is_preempted+0x0/0x20
+ Code: e8 15 d9 61 00 e9 63 ff ff ff 41 bd ea ff ff ff e9 58 ff ff ff e8
+ d0 71 c0 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 <48> 8b 04
+ fd 60 2b c1 99 80 b8 90 50 03 00 00 0f 95 c0 c3 cc cc cc
+ RSP: 0018:ffffb6a143cf7cf8 EFLAGS: 00010206
+ RAX: ffff8a9bc30fb038 RBX: ffff8a9bc666a200 RCX: ffff8a9cc0260000
+ RDX: 00000000736f622e RSI: ffff8a9bc30fb038 RDI: 000000007665645f
+ RBP: ffffb6a143cf7d70 R08: 0000000000001000 R09: 0000000000000001
+ R10: 0000000000000001 R11: 0000000000000000 R12: ffff8a9bc666a200
+ R13: 0000559a302a12b0 R14: 0000000000001000 R15: 0000000000000000
+ FS: 00007fbed1dbb740(0000) GS:ffff8a9cf0000000(0000)
+ knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 000000034cec4e58 CR3: 0000000128ec6006 CR4: 0000000000770ef0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ PKRU: 55555554
+ Call Trace:
+ <TASK>
+ ? show_trace_log_lvl+0x1c4/0x2df
+ ? show_trace_log_lvl+0x1c4/0x2df
+ ? __mutex_lock.constprop.0+0x5f7/0x6a0
+ ? __die_body.cold+0x8/0xd
+ ? page_fault_oops+0x134/0x170
+ ? exc_page_fault+0x62/0x150
+ ? asm_exc_page_fault+0x22/0x30
+ ? _pfx_raw_callee_save__kvm_vcpu_is_preempted+0x10/0x10
+ __mutex_lock.constprop.0+0x5f7/0x6a0
+ ? __mod_memcg_lruvec_state+0x84/0xd0
+ pipe_write+0x47/0x650
+ ? do_anonymous_page+0x258/0x410
+ ? inode_security+0x22/0x60
+ ? selinux_file_permission+0x108/0x150
+ vfs_write+0x2cb/0x410
+ ksys_write+0x5f/0xe0
+ do_syscall_64+0x5c/0xf0
+ ? syscall_exit_to_user_mode+0x22/0x40
+ ? do_syscall_64+0x6b/0xf0
+ ? sched_clock_cpu+0x9/0xc0
+ ? exc_page_fault+0x62/0x150
+ entry_SYSCALL_64_after_hwframe+0x6e/0x76
+
+Cc: stable@vger.kernel.org
+Fixes: 72bc63f5e23a ("smb3: fix creating FIFOs when mounting with "sfu" mount option")
+Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2ops.c | 94 ++++++++++++++++++++++++++++--------------------
+ 1 file changed, 55 insertions(+), 39 deletions(-)
+
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -5066,68 +5066,84 @@ static int smb2_next_header(struct TCP_S
+ return 0;
+ }
+
+-int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+- struct dentry *dentry, struct cifs_tcon *tcon,
+- const char *full_path, umode_t mode, dev_t dev)
++static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev)
+ {
+- struct cifs_open_info_data buf = {};
+ struct TCP_Server_Info *server = tcon->ses->server;
+ struct cifs_open_parms oparms;
+ struct cifs_io_parms io_parms = {};
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_fid fid;
+ unsigned int bytes_written;
+- struct win_dev *pdev;
++ struct win_dev pdev = {};
+ struct kvec iov[2];
+ __u32 oplock = server->oplocks ? REQ_OPLOCK : 0;
+ int rc;
+
+- if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
++ switch (mode & S_IFMT) {
++ case S_IFCHR:
++ strscpy(pdev.type, "IntxCHR");
++ pdev.major = cpu_to_le64(MAJOR(dev));
++ pdev.minor = cpu_to_le64(MINOR(dev));
++ break;
++ case S_IFBLK:
++ strscpy(pdev.type, "IntxBLK");
++ pdev.major = cpu_to_le64(MAJOR(dev));
++ pdev.minor = cpu_to_le64(MINOR(dev));
++ break;
++ case S_IFIFO:
++ strscpy(pdev.type, "LnxFIFO");
++ break;
++ default:
+ return -EPERM;
++ }
+
+- oparms = (struct cifs_open_parms) {
+- .tcon = tcon,
+- .cifs_sb = cifs_sb,
+- .desired_access = GENERIC_WRITE,
+- .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+- CREATE_OPTION_SPECIAL),
+- .disposition = FILE_CREATE,
+- .path = full_path,
+- .fid = &fid,
+- };
++ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE,
++ FILE_CREATE, CREATE_NOT_DIR |
++ CREATE_OPTION_SPECIAL, ACL_NO_MODE);
++ oparms.fid = &fid;
+
+- rc = server->ops->open(xid, &oparms, &oplock, &buf);
++ rc = server->ops->open(xid, &oparms, &oplock, NULL);
+ if (rc)
+ return rc;
+
+- /*
+- * BB Do not bother to decode buf since no local inode yet to put
+- * timestamps in, but we can reuse it safely.
+- */
+- pdev = (struct win_dev *)&buf.fi;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = tcon;
+- io_parms.length = sizeof(*pdev);
+- iov[1].iov_base = pdev;
+- iov[1].iov_len = sizeof(*pdev);
+- if (S_ISCHR(mode)) {
+- memcpy(pdev->type, "IntxCHR", 8);
+- pdev->major = cpu_to_le64(MAJOR(dev));
+- pdev->minor = cpu_to_le64(MINOR(dev));
+- } else if (S_ISBLK(mode)) {
+- memcpy(pdev->type, "IntxBLK", 8);
+- pdev->major = cpu_to_le64(MAJOR(dev));
+- pdev->minor = cpu_to_le64(MINOR(dev));
+- } else if (S_ISFIFO(mode)) {
+- memcpy(pdev->type, "LnxFIFO", 8);
+- }
++ io_parms.length = sizeof(pdev);
++ iov[1].iov_base = &pdev;
++ iov[1].iov_len = sizeof(pdev);
+
+ rc = server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written, iov, 1);
+ server->ops->close(xid, tcon, &fid);
+- d_drop(dentry);
+- /* FIXME: add code here to set EAs */
+- cifs_free_open_info(&buf);
++ return rc;
++}
++
++int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev)
++{
++ struct inode *new = NULL;
++ int rc;
++
++ rc = __cifs_sfu_make_node(xid, inode, dentry, tcon,
++ full_path, mode, dev);
++ if (rc)
++ return rc;
++
++ if (tcon->posix_extensions) {
++ rc = smb311_posix_get_inode_info(&new, full_path, NULL,
++ inode->i_sb, xid);
++ } else if (tcon->unix_ext) {
++ rc = cifs_get_inode_info_unix(&new, full_path,
++ inode->i_sb, xid);
++ } else {
++ rc = cifs_get_inode_info(&new, full_path, NULL,
++ inode->i_sb, xid, NULL);
++ }
++ if (!rc)
++ d_instantiate(dentry, new);
+ return rc;
+ }
+
--- /dev/null
+From 28e0947651ce6a2200b9a7eceb93282e97d7e51a Mon Sep 17 00:00:00 2001
+From: Steve French <stfrench@microsoft.com>
+Date: Sat, 6 Apr 2024 23:16:08 -0500
+Subject: smb3: fix Open files on server counter going negative
+
+From: Steve French <stfrench@microsoft.com>
+
+commit 28e0947651ce6a2200b9a7eceb93282e97d7e51a upstream.
+
+We were decrementing the count of open files on server twice
+for the case where we were closing cached directories.
+
+Fixes: 8e843bf38f7b ("cifs: return a single-use cfid if we did not get a lease")
+Cc: stable@vger.kernel.org
+Acked-by: Bharath SM <bharathsm@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -433,8 +433,8 @@ smb2_close_cached_fid(struct kref *ref)
+ if (cfid->is_open) {
+ rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid);
+- if (rc != -EBUSY && rc != -EAGAIN)
+- atomic_dec(&cfid->tcon->num_remote_opens);
++ if (rc) /* should we retry on -EBUSY or -EAGAIN? */
++ cifs_dbg(VFS, "close cached dir rc %d\n", rc);
+ }
+
+ free_cached_dir(cfid);
--- /dev/null
+From 059a49aa2e25c58f90b50151f109dd3c4cdb3a47 Mon Sep 17 00:00:00 2001
+From: Breno Leitao <leitao@debian.org>
+Date: Wed, 3 Apr 2024 08:43:12 -0700
+Subject: virtio_net: Do not send RSS key if it is not supported
+
+From: Breno Leitao <leitao@debian.org>
+
+commit 059a49aa2e25c58f90b50151f109dd3c4cdb3a47 upstream.
+
+There is a bug when setting the RSS options in virtio_net that can break
+the whole machine, getting the kernel into an infinite loop.
+
+Running the following command in any QEMU virtual machine with virtionet
+will reproduce this problem:
+
+ # ethtool -X eth0 hfunc toeplitz
+
+This is how the problem happens:
+
+1) ethtool_set_rxfh() calls virtnet_set_rxfh()
+
+2) virtnet_set_rxfh() calls virtnet_commit_rss_command()
+
+3) virtnet_commit_rss_command() populates 4 entries for the rss
+scatter-gather
+
+4) Since the command above does not have a key, then the last
+scatter-gather entry will be zeroed, since rss_key_size == 0.
+sg_buf_size = vi->rss_key_size;
+
+5) This buffer is passed to qemu, but qemu is not happy with a buffer
+with zero length, and do the following in virtqueue_map_desc() (QEMU
+function):
+
+ if (!sz) {
+ virtio_error(vdev, "virtio: zero sized buffers are not allowed");
+
+6) virtio_error() (also QEMU function) set the device as broken
+
+ vdev->broken = true;
+
+7) Qemu bails out, and does not respond to this crazy kernel.
+
+8) The kernel is waiting for the response to come back (function
+virtnet_send_command())
+
+9) The kernel is waiting doing the following :
+
+ while (!virtqueue_get_buf(vi->cvq, &tmp) &&
+ !virtqueue_is_broken(vi->cvq))
+ cpu_relax();
+
+10) None of the following functions above is true, thus, the kernel
+loops here forever. Keeping in mind that virtqueue_is_broken() does
+not look at the qemu `vdev->broken`, so, it never realizes that the
+virtio is broken at QEMU side.
+
+Fix it by not sending RSS commands if the feature is not available in
+the device.
+
+Fixes: c7114b1249fa ("drivers/net/virtio_net: Added basic RSS support.")
+Cc: stable@vger.kernel.org
+Cc: qemu-devel@nongnu.org
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Heng Qi <hengqi@linux.alibaba.com>
+Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/virtio_net.c | 26 ++++++++++++++++++++++----
+ 1 file changed, 22 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3768,6 +3768,7 @@ static int virtnet_set_rxfh(struct net_d
+ struct netlink_ext_ack *extack)
+ {
+ struct virtnet_info *vi = netdev_priv(dev);
++ bool update = false;
+ int i;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+@@ -3775,13 +3776,28 @@ static int virtnet_set_rxfh(struct net_d
+ return -EOPNOTSUPP;
+
+ if (rxfh->indir) {
++ if (!vi->has_rss)
++ return -EOPNOTSUPP;
++
+ for (i = 0; i < vi->rss_indir_table_size; ++i)
+ vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
++ update = true;
+ }
+- if (rxfh->key)
++
++ if (rxfh->key) {
++ /* If either _F_HASH_REPORT or _F_RSS are negotiated, the
++ * device provides hash calculation capabilities, that is,
++ * hash_key is configured.
++ */
++ if (!vi->has_rss && !vi->has_rss_hash_report)
++ return -EOPNOTSUPP;
++
+ memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
++ update = true;
++ }
+
+- virtnet_commit_rss_command(vi);
++ if (update)
++ virtnet_commit_rss_command(vi);
+
+ return 0;
+ }
+@@ -4686,13 +4702,15 @@ static int virtnet_probe(struct virtio_d
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
+ vi->has_rss_hash_report = true;
+
+- if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
++ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
+ vi->has_rss = true;
+
+- if (vi->has_rss || vi->has_rss_hash_report) {
+ vi->rss_indir_table_size =
+ virtio_cread16(vdev, offsetof(struct virtio_net_config,
+ rss_max_indirection_table_length));
++ }
++
++ if (vi->has_rss || vi->has_rss_hash_report) {
+ vi->rss_key_size =
+ virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+