--- /dev/null
+From stable+bounces-177908-greg=kroah.com@vger.kernel.org Sat Sep 6 01:54:04 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 19:53:54 -0400
+Subject: ALSA: hda/realtek - Add new HP ZBook laptop with micmute led fixup
+To: stable@vger.kernel.org
+Cc: Chris Chiu <chris.chiu@canonical.com>, Takashi Iwai <tiwai@suse.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250905235354.3584137-1-sashal@kernel.org>
+
+From: Chris Chiu <chris.chiu@canonical.com>
+
+[ Upstream commit f709b78aecab519dbcefa9a6603b94ad18c553e3 ]
+
+New HP ZBook with Realtek HDA codec ALC3247 needs the quirk
+ALC236_FIXUP_HP_GPIO_LED to fix the micmute LED.
+
+Signed-off-by: Chris Chiu <chris.chiu@canonical.com>
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20250520132101.120685-1-chris.chiu@canonical.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -8390,6 +8390,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8e1d, "HP ZBook X Gli 16 G12", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
--- /dev/null
+From stable+bounces-177999-greg=kroah.com@vger.kernel.org Sat Sep 6 22:20:32 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 16:19:18 -0400
+Subject: cpufreq/sched: Explicitly synchronize limits_changed flag handling
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Christian Loehle <christian.loehle@arm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250906201918.261460-1-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 79443a7e9da3c9f68290a8653837e23aba0fa89f ]
+
+The handling of the limits_changed flag in struct sugov_policy needs to
+be explicitly synchronized to ensure that cpufreq policy limits updates
+will not be missed in some cases.
+
+Without that synchronization it is theoretically possible that
+the limits_changed update in sugov_should_update_freq() will be
+reordered with respect to the reads of the policy limits in
+cpufreq_driver_resolve_freq() and in that case, if the limits_changed
+update in sugov_should_update_freq() clobbers the one in sugov_limits(),
+the new policy limits may not take effect for a long time.
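+
+As a rough illustration (not part of the change itself), that first window
+can open like this:
+
+  1. CPU0, sugov_should_update_freq(): sees limits_changed == true.
+  2. CPU0: the later reads of policy->min/max in
+     cpufreq_driver_resolve_freq() are performed early, since nothing
+     orders them after the flag store, and return the old limits.
+  3. CPU1, cpufreq_set_policy(): stores the new policy->min/max.
+  4. CPU1, sugov_limits(): stores limits_changed = true.
+  5. CPU0: the store of limits_changed = false finally lands, overwriting
+     the "true" written in step 4.
+
+The frequency ends up computed from the stale limits and the flag is left
+false, so nothing triggers a retry until the next limits update.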
+
+Likewise, the limits_changed update in sugov_limits() may theoretically
+get reordered with respect to the updates of the policy limits in
+cpufreq_set_policy() and if sugov_should_update_freq() runs between
+them, the policy limits change may be missed.
+
+To ensure that the above situations will not take place, add memory
+barriers preventing the reordering in question from taking place and
+add READ_ONCE() and WRITE_ONCE() annotations around all of the
+limits_changed flag updates to prevent the compiler from messing up
+with that code.
+
+Fixes: 600f5badb78c ("cpufreq: schedutil: Don't skip freq update when limits change")
+Cc: 5.3+ <stable@vger.kernel.org> # 5.3+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Christian Loehle <christian.loehle@arm.com>
+Link: https://patch.msgid.link/3376719.44csPzL39Z@rjwysocki.net
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/cpufreq_schedutil.c | 28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -88,9 +88,20 @@ static bool sugov_should_update_freq(str
+ if (!cpufreq_this_cpu_can_update(sg_policy->policy))
+ return false;
+
+- if (unlikely(sg_policy->limits_changed)) {
+- sg_policy->limits_changed = false;
++ if (unlikely(READ_ONCE(sg_policy->limits_changed))) {
++ WRITE_ONCE(sg_policy->limits_changed, false);
+ sg_policy->need_freq_update = true;
++
++ /*
++ * The above limits_changed update must occur before the reads
++ * of policy limits in cpufreq_driver_resolve_freq() or a policy
++ * limits update might be missed, so use a memory barrier to
++ * ensure it.
++ *
++ * This pairs with the write memory barrier in sugov_limits().
++ */
++ smp_mb();
++
+ return true;
+ }
+
+@@ -444,7 +455,7 @@ static inline bool sugov_cpu_is_busy(str
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
+ {
+ if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+- sg_policy->limits_changed = true;
++ WRITE_ONCE(sg_policy->limits_changed, true);
+ }
+
+ static void sugov_update_single(struct update_util_data *hook, u64 time,
+@@ -894,7 +905,16 @@ static void sugov_limits(struct cpufreq_
+ mutex_unlock(&sg_policy->work_lock);
+ }
+
+- sg_policy->limits_changed = true;
++ /*
++ * The limits_changed update below must take place before the updates
++ * of policy limits in cpufreq_set_policy() or a policy limits update
++ * might be missed, so use a memory barrier to ensure it.
++ *
++ * This pairs with the memory barrier in sugov_should_update_freq().
++ */
++ smp_wmb();
++
++ WRITE_ONCE(sg_policy->limits_changed, true);
+ }
+
+ struct cpufreq_governor schedutil_gov = {
--- /dev/null
+From stable+bounces-177902-greg=kroah.com@vger.kernel.org Fri Sep 5 23:26:41 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 17:26:31 -0400
+Subject: dmaengine: mediatek: Fix a possible deadlock error in mtk_cqdma_tx_status()
+To: stable@vger.kernel.org
+Cc: Qiu-ji Chen <chenqiuji666@gmail.com>, AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>, Vinod Koul <vkoul@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250905212631.3474476-1-sashal@kernel.org>
+
+From: Qiu-ji Chen <chenqiuji666@gmail.com>
+
+[ Upstream commit 157ae5ffd76a2857ccb4b7ce40bc5a344ca00395 ]
+
+Fix a potential deadlock bug. Observe that in the mtk-cqdma.c
+file, functions like mtk_cqdma_issue_pending() and
+mtk_cqdma_free_active_desc() properly acquire the pc lock before the vc
+lock when handling pc and vc fields. However, mtk_cqdma_tx_status()
+violates this order by first acquiring the vc lock before invoking
+mtk_cqdma_find_active_desc(), which subsequently takes the pc lock. This
+reversed locking sequence (vc → pc) contradicts the established
+pc → vc order and creates deadlock risks.
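+
+In other words, the classic ABBA pattern (simplified, irqsave/flags
+omitted):
+
+    CPU0: mtk_cqdma_issue_pending()      CPU1: mtk_cqdma_tx_status() (old)
+          lock(pc->lock)                       lock(vc.lock)
+          lock(vc.lock)    /* waits */         lock(pc->lock)  /* waits */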
+
+Fix the issue by moving the vc lock acquisition code from
+mtk_cqdma_find_active_desc() to mtk_cqdma_tx_status(). Ensure the pc lock
+is acquired before the vc lock in the calling function to maintain correct
+locking hierarchy. Note that since mtk_cqdma_find_active_desc() is a
+static function with only one caller (mtk_cqdma_tx_status()), this
+modification safely eliminates the deadlock possibility without affecting
+other components.
+
+This possible bug is found by an experimental static analysis tool
+developed by our team. This tool analyzes the locking APIs to extract
+function pairs that can be concurrently executed, and then analyzes the
+instructions in the paired functions to identify possible concurrency bugs
+including deadlocks, data races and atomicity violations.
+
+Fixes: b1f01e48df5a ("dmaengine: mediatek: Add MediaTek Command-Queue DMA controller for MT6765 SoC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Qiu-ji Chen <chenqiuji666@gmail.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://lore.kernel.org/r/20250508073634.3719-1-chenqiuji666@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/mediatek/mtk-cqdma.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/dma/mediatek/mtk-cqdma.c
++++ b/drivers/dma/mediatek/mtk-cqdma.c
+@@ -421,15 +421,11 @@ static struct virt_dma_desc *mtk_cqdma_f
+ {
+ struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
+ struct virt_dma_desc *vd;
+- unsigned long flags;
+
+- spin_lock_irqsave(&cvc->pc->lock, flags);
+ list_for_each_entry(vd, &cvc->pc->queue, node)
+ if (vd->tx.cookie == cookie) {
+- spin_unlock_irqrestore(&cvc->pc->lock, flags);
+ return vd;
+ }
+- spin_unlock_irqrestore(&cvc->pc->lock, flags);
+
+ list_for_each_entry(vd, &cvc->vc.desc_issued, node)
+ if (vd->tx.cookie == cookie)
+@@ -453,9 +449,11 @@ static enum dma_status mtk_cqdma_tx_stat
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
++ spin_lock_irqsave(&cvc->pc->lock, flags);
+ spin_lock_irqsave(&cvc->vc.lock, flags);
+ vd = mtk_cqdma_find_active_desc(c, cookie);
+ spin_unlock_irqrestore(&cvc->vc.lock, flags);
++ spin_unlock_irqrestore(&cvc->pc->lock, flags);
+
+ if (vd) {
+ cvd = to_cqdma_vdesc(vd);
--- /dev/null
+From stable+bounces-177915-greg=kroah.com@vger.kernel.org Sat Sep 6 03:17:28 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 21:17:17 -0400
+Subject: gpio: pca953x: fix IRQ storm on system wake up
+To: stable@vger.kernel.org
+Cc: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>, Francesco Dolcini <francesco.dolcini@toradex.com>, Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Geert Uytterhoeven <geert+renesas@glider.be>, Bartosz Golaszewski <bartosz.golaszewski@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250906011717.3642558-1-sashal@kernel.org>
+
+From: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+
+[ Upstream commit 3e38f946062b4845961ab86b726651b4457b2af8 ]
+
+If an input changes state during wake-up and is used as an interrupt
+source, the IRQ handler reads the volatile input register to clear the
+interrupt mask and deassert the IRQ line. However, the IRQ handler is
+triggered before access to the register is granted, causing the read
+operation to fail.
+
+As a result, the IRQ handler enters a loop, repeatedly printing the
+"failed reading register" message, until `pca953x_resume()` is eventually
+called, which restores the driver context and enables access to
+registers.
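+
+Roughly, the sequence on wake-up before this change was (illustrative,
+simplified):
+
+    pca953x_suspend()
+        regcache_cache_only(chip->regmap, true);   /* register reads now fail */
+    /* an input toggles while waking up, the still-enabled IRQ fires */
+    pca953x_irq_handler()
+        pca953x_read_regs() fails -> "failed reading register"
+        /* IRQ line stays asserted, so the handler keeps re-running */
+    pca953x_resume()
+        regcache_cache_only(chip->regmap, false);  /* reads work again, loop ends */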
+
+Fix by disabling the IRQ line before entering suspend mode, and
+re-enabling it after the driver context is restored in `pca953x_resume()`.
+
+An IRQ can be disabled with disable_irq() and still wake the system as
+long as the IRQ has wake enabled, so the wake-up functionality is
+preserved.
+
+Fixes: b76574300504 ("gpio: pca953x: Restore registers after suspend/resume cycle")
+Cc: stable@vger.kernel.org
+Signed-off-by: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+Signed-off-by: Francesco Dolcini <francesco.dolcini@toradex.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Link: https://lore.kernel.org/r/20250512095441.31645-1-francesco@dolcini.it
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+[ Apply directly to suspend/resume functions ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-pca953x.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -1199,6 +1199,9 @@ static int pca953x_suspend(struct device
+ struct pca953x_chip *chip = dev_get_drvdata(dev);
+
+ mutex_lock(&chip->i2c_lock);
++ /* Disable IRQ to prevent early triggering while regmap "cache only" is on */
++ if (chip->client->irq > 0)
++ disable_irq(chip->client->irq);
+ regcache_cache_only(chip->regmap, true);
+ mutex_unlock(&chip->i2c_lock);
+
+@@ -1224,6 +1227,8 @@ static int pca953x_resume(struct device
+ }
+
+ mutex_lock(&chip->i2c_lock);
++ if (chip->client->irq > 0)
++ enable_irq(chip->client->irq);
+ regcache_cache_only(chip->regmap, false);
+ regcache_mark_dirty(chip->regmap);
+ ret = pca953x_regcache_sync(dev);
--- /dev/null
+From stable+bounces-177947-greg=kroah.com@vger.kernel.org Sat Sep 6 15:02:51 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 09:02:44 -0400
+Subject: iio: chemical: pms7003: use aligned_s64 for timestamp
+To: stable@vger.kernel.org
+Cc: "David Lechner" <dlechner@baylibre.com>, "Nuno Sá" <nuno.sa@analog.com>, Stable@vger.kernel.org, "Jonathan Cameron" <Jonathan.Cameron@huawei.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20250906130244.3876234-1-sashal@kernel.org>
+
+From: David Lechner <dlechner@baylibre.com>
+
+[ Upstream commit 6ffa698674053e82e811520642db2650d00d2c01 ]
+
+Follow the pattern of other drivers and use aligned_s64 for the
+timestamp. This will ensure that the timestamp is correctly aligned on
+all architectures.
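+
+For reference, aligned_s64 is simply a signed 64-bit integer carrying an
+explicit 8-byte alignment attribute, roughly:
+
+    typedef s64 __aligned(8) aligned_s64;
+
+so the "ts" member (and the struct containing it) is placed on an 8-byte
+boundary regardless of the architecture's native s64 alignment.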
+
+Also move the unaligned.h header while touching this since it was the
+only one not in alphabetical order.
+
+Fixes: 13e945631c2f ("iio:chemical:pms7003: Fix timestamp alignment and prevent data leak.")
+Signed-off-by: David Lechner <dlechner@baylibre.com>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Link: https://patch.msgid.link/20250417-iio-more-timestamp-alignment-v1-4-eafac1e22318@baylibre.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+[ linux/unaligned.h => asm/unaligned.h ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/chemical/pms7003.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/chemical/pms7003.c
++++ b/drivers/iio/chemical/pms7003.c
+@@ -5,7 +5,6 @@
+ * Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
+ */
+
+-#include <asm/unaligned.h>
+ #include <linux/completion.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
+@@ -19,6 +18,8 @@
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/serdev.h>
++#include <linux/types.h>
++#include <asm/unaligned.h>
+
+ #define PMS7003_DRIVER_NAME "pms7003"
+
+@@ -76,7 +77,7 @@ struct pms7003_state {
+ /* Used to construct scan to push to the IIO buffer */
+ struct {
+ u16 data[3]; /* PM1, PM2P5, PM10 */
+- s64 ts;
++ aligned_s64 ts;
+ } scan;
+ };
+
--- /dev/null
+From stable+bounces-177936-greg=kroah.com@vger.kernel.org Sat Sep 6 05:45:21 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 23:45:11 -0400
+Subject: iio: light: opt3001: fix deadlock due to concurrent flag access
+To: stable@vger.kernel.org
+Cc: Luca Ceresoli <luca.ceresoli@bootlin.com>, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250906034511.3693987-1-sashal@kernel.org>
+
+From: Luca Ceresoli <luca.ceresoli@bootlin.com>
+
+[ Upstream commit f063a28002e3350088b4577c5640882bf4ea17ea ]
+
+The threaded IRQ function in this driver is reading the flag twice: once to
+lock a mutex and once to unlock it. Even though the code setting the flag
+is designed to prevent it, there are subtle cases where the flag could be
+true at the mutex_lock stage and false at the mutex_unlock stage. This
+results in the mutex not being unlocked, resulting in a deadlock.
+
+Fix it by making the opt3001_irq() code generally more robust, reading the
+flag into a variable and using the variable value at both stages.
+
+Fixes: 94a9b7b1809f ("iio: light: add support for TI's opt3001 light sensor")
+Cc: stable@vger.kernel.org
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Link: https://patch.msgid.link/20250321-opt3001-irq-fix-v1-1-6c520d851562@bootlin.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/light/opt3001.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/light/opt3001.c
++++ b/drivers/iio/light/opt3001.c
+@@ -691,8 +691,9 @@ static irqreturn_t opt3001_irq(int irq,
+ struct opt3001 *opt = iio_priv(iio);
+ int ret;
+ bool wake_result_ready_queue = false;
++ bool ok_to_ignore_lock = opt->ok_to_ignore_lock;
+
+- if (!opt->ok_to_ignore_lock)
++ if (!ok_to_ignore_lock)
+ mutex_lock(&opt->lock);
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+@@ -729,7 +730,7 @@ static irqreturn_t opt3001_irq(int irq,
+ }
+
+ out:
+- if (!opt->ok_to_ignore_lock)
++ if (!ok_to_ignore_lock)
+ mutex_unlock(&opt->lock);
+
+ if (wake_result_ready_queue)
--- /dev/null
+From stable+bounces-177969-greg=kroah.com@vger.kernel.org Sat Sep 6 17:12:13 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 11:12:05 -0400
+Subject: KVM: x86: Take irqfds.lock when adding/deleting IRQ bypass producer
+To: stable@vger.kernel.org
+Cc: Sean Christopherson <seanjc@google.com>, Paolo Bonzini <pbonzini@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250906151205.82243-1-sashal@kernel.org>
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit f1fb088d9cecde5c3066d8ff8846789667519b7d ]
+
+Take irqfds.lock when adding/deleting an IRQ bypass producer to ensure
+irqfd->producer isn't modified while kvm_irq_routing_update() is running.
+The only lock held when a producer is added/removed is irqbypass's mutex.
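+
+For context (not part of the diff), the reader being protected is roughly:
+
+    kvm_irq_routing_update()
+        spin_lock_irq(&kvm->irqfds.lock);
+        list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+            if (irqfd->producer)    /* must not change under us */
+                kvm_arch_update_irqfd_routing(kvm, irqfd->producer->irq, ...);
+        spin_unlock_irq(&kvm->irqfds.lock);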
+
+Fixes: 872768800652 ("KVM: x86: select IRQ_BYPASS_MANAGER")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-ID: <20250404193923.1413163-5-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10394,11 +10394,18 @@ int kvm_arch_irq_bypass_add_producer(str
+ {
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
++ struct kvm *kvm = irqfd->kvm;
++ int ret;
+
++ spin_lock_irq(&kvm->irqfds.lock);
+ irqfd->producer = prod;
+
+- return kvm_x86_ops->update_pi_irte(irqfd->kvm,
++ ret = kvm_x86_ops->update_pi_irte(irqfd->kvm,
+ prod->irq, irqfd->gsi, 1);
++
++ spin_unlock_irq(&kvm->irqfds.lock);
++
++ return ret;
+ }
+
+ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+@@ -10407,9 +10414,9 @@ void kvm_arch_irq_bypass_del_producer(st
+ int ret;
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
++ struct kvm *kvm = irqfd->kvm;
+
+ WARN_ON(irqfd->producer != prod);
+- irqfd->producer = NULL;
+
+ /*
+ * When producer of consumer is unregistered, we change back to
+@@ -10417,10 +10424,15 @@ void kvm_arch_irq_bypass_del_producer(st
+ * when the irq is masked/disabled or the consumer side (KVM
+ * int this case doesn't want to receive the interrupts.
+ */
++ spin_lock_irq(&kvm->irqfds.lock);
++ irqfd->producer = NULL;
++
+ ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
+ if (ret)
+ printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
+ " fails: %d\n", irqfd->consumer.token, ret);
++
++ spin_unlock_irq(&kvm->irqfds.lock);
+ }
+
+ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
--- /dev/null
+From 023f47a8250c6bdb4aebe744db4bf7f73414028b Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Wed, 11 Jan 2023 14:33:51 +0100
+Subject: mm/khugepaged: fix ->anon_vma race
+
+From: Jann Horn <jannh@google.com>
+
+commit 023f47a8250c6bdb4aebe744db4bf7f73414028b upstream.
+
+If an ->anon_vma is attached to the VMA, collapse_and_free_pmd() requires
+it to be locked.
+
+Page table traversal is allowed under any one of the mmap lock, the
+anon_vma lock (if the VMA is associated with an anon_vma), and the
+mapping lock (if the VMA is associated with a mapping); and so to be
+able to remove page tables, we must hold all three of them.
+retract_page_tables() bails out if an ->anon_vma is attached, but does
+this check before holding the mmap lock (as the comment above the check
+explains).
+
+If we racily merged an existing ->anon_vma (shared with a child
+process) from a neighboring VMA, subsequent rmap traversals on pages
+belonging to the child will be able to see the page tables that we are
+concurrently removing while assuming that nothing else can access them.
+
+Repeat the ->anon_vma check once we hold the mmap lock to ensure that
+there really is no concurrent page table access.
+
+Hitting this bug causes a lockdep warning in collapse_and_free_pmd(),
+in the line "lockdep_assert_held_write(&vma->anon_vma->root->rwsem)".
+It can also lead to use-after-free access.
+
+Link: https://lore.kernel.org/linux-mm/CAG48ez3434wZBKFFbdx4M9j6eUwSUVPd4dxhzW_k_POneSDF+A@mail.gmail.com/
+Link: https://lkml.kernel.org/r/20230111133351.807024-1-jannh@google.com
+Fixes: f3f0e1d2150b ("khugepaged: add support of collapse for tmpfs/shmem pages")
+Signed-off-by: Jann Horn <jannh@google.com>
+Reported-by: Zach O'Keefe <zokeefe@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[doebel@amazon.de: Kernel 5.4 uses different control flow and locking
+ mechanism. Context adjustments.]
+Signed-off-by: Bjoern Doebel <doebel@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/khugepaged.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1476,7 +1476,7 @@ static void retract_page_tables(struct a
+ * has higher cost too. It would also probably require locking
+ * the anon_vma.
+ */
+- if (vma->anon_vma)
++ if (READ_ONCE(vma->anon_vma))
+ continue;
+ addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ if (addr & ~HPAGE_PMD_MASK)
+@@ -1498,6 +1498,18 @@ static void retract_page_tables(struct a
+ if (!khugepaged_test_exit(mm)) {
+ struct mmu_notifier_range range;
+
++ /*
++ * Re-check whether we have an ->anon_vma, because
++ * collapse_and_free_pmd() requires that either no
++ * ->anon_vma exists or the anon_vma is locked.
++ * We already checked ->anon_vma above, but that check
++ * is racy because ->anon_vma can be populated under the
++ * mmap_sem in read mode.
++ */
++ if (vma->anon_vma) {
++ up_write(&mm->mmap_sem);
++ continue;
++ }
+ mmu_notifier_range_init(&range,
+ MMU_NOTIFY_CLEAR, 0,
+ NULL, mm, addr,
--- /dev/null
+From stable+bounces-178018-greg=kroah.com@vger.kernel.org Sun Sep 7 04:38:28 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 22:38:21 -0400
+Subject: mm/slub: avoid accessing metadata when pointer is invalid in object_err()
+To: stable@vger.kernel.org
+Cc: Li Qiong <liqiong@nfschina.com>, Harry Yoo <harry.yoo@oracle.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, Vlastimil Babka <vbabka@suse.cz>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250907023821.418818-1-sashal@kernel.org>
+
+From: Li Qiong <liqiong@nfschina.com>
+
+[ Upstream commit b4efccec8d06ceb10a7d34d7b1c449c569d53770 ]
+
+object_err() reports details of an object for further debugging, such as
+the freelist pointer, redzone, etc. However, if the pointer is invalid,
+attempting to access object metadata can lead to a crash since it does
+not point to a valid object.
+
+One known path to the crash is when alloc_consistency_checks()
+determines the pointer to the allocated object is invalid because of a
+freelist corruption, and calls object_err() to report it. The debug code
+should report and handle the corruption gracefully and not crash in the
+process.
+
+In case the pointer is NULL or check_valid_pointer() returns false for
+the pointer, only print the pointer value and skip accessing metadata.
+
+Fixes: 81819f0fc828 ("SLUB core")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Li Qiong <liqiong@nfschina.com>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+[ struct page + print_page_info() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -719,7 +719,12 @@ void object_err(struct kmem_cache *s, st
+ u8 *object, char *reason)
+ {
+ slab_bug(s, "%s", reason);
+- print_trailer(s, page, object);
++ if (!object || !check_valid_pointer(s, page, object)) {
++ print_page_info(page);
++ pr_err("Invalid pointer 0x%p\n", object);
++ } else {
++ print_trailer(s, page, object);
++ }
+ }
+
+ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
--- /dev/null
+From stable+bounces-177885-greg=kroah.com@vger.kernel.org Fri Sep 5 21:23:02 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 15:22:45 -0400
+Subject: net: dsa: microchip: linearize skb for tail-tagging switches
+To: stable@vger.kernel.org
+Cc: Jakob Unterwurzacher <jakobunt@gmail.com>, Jakob Unterwurzacher <jakob.unterwurzacher@cherry.de>, Vladimir Oltean <olteanv@gmail.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250905192245.3322547-2-sashal@kernel.org>
+
+From: Jakob Unterwurzacher <jakobunt@gmail.com>
+
+[ Upstream commit ba54bce747fa9e07896c1abd9b48545f7b4b31d2 ]
+
+The pointer arithmetic for accessing the tail tag only works
+for linear skbs.
+
+For nonlinear skbs, it reads uninitialized memory inside the
+skb headroom, essentially randomizing the tag. I have observed
+it gets set to 6 most of the time.
+
+Example where ksz9477_rcv thinks that the packet from port 1 comes from port 6
+(which does not exist for the ksz9896 that's in use), dropping the packet.
+Debug prints added by me (not included in this patch):
+
+ [ 256.645337] ksz9477_rcv:323 tag0=6
+ [ 256.645349] skb len=47 headroom=78 headlen=0 tailroom=0
+ mac=(64,14) mac_len=14 net=(78,0) trans=78
+ shinfo(txflags=0 nr_frags=1 gso(size=0 type=0 segs=0))
+ csum(0x0 start=0 offset=0 ip_summed=0 complete_sw=0 valid=0 level=0)
+ hash(0x0 sw=0 l4=0) proto=0x00f8 pkttype=1 iif=3
+ priority=0x0 mark=0x0 alloc_cpu=0 vlan_all=0x0
+ encapsulation=0 inner(proto=0x0000, mac=0, net=0, trans=0)
+ [ 256.645377] dev name=end1 feat=0x0002e10200114bb3
+ [ 256.645386] skb headroom: 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ [ 256.645395] skb headroom: 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ [ 256.645403] skb headroom: 00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ [ 256.645411] skb headroom: 00000030: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ [ 256.645420] skb headroom: 00000040: ff ff ff ff ff ff 00 1c 19 f2 e2 db 08 06
+ [ 256.645428] skb frag: 00000000: 00 01 08 00 06 04 00 01 00 1c 19 f2 e2 db 0a 02
+ [ 256.645436] skb frag: 00000010: 00 83 00 00 00 00 00 00 0a 02 a0 2f 00 00 00 00
+ [ 256.645444] skb frag: 00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01
+ [ 256.645452] ksz_common_rcv:92 dsa_conduit_find_user returned NULL
+
+Call skb_linearize before trying to access the tag.
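+
+(skb_linearize() pulls any paged fragments into the skb's linear data area
+and returns 0 on success or -ENOMEM on failure, so once it succeeds the
+tail tag really does sit just below skb_tail_pointer().)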
+
+This patch fixes ksz9477_rcv which is used by the ksz9896 I have at
+hand, and also applies the same fix to ksz8795_rcv which seems to have
+the same problem.
+
+Signed-off-by: Jakob Unterwurzacher <jakob.unterwurzacher@cherry.de>
+CC: stable@vger.kernel.org
+Fixes: 016e43a26bab ("net: dsa: ksz: Add KSZ8795 tag code")
+Fixes: 8b8010fb7876 ("dsa: add support for Microchip KSZ tail tagging")
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/20250515072920.2313014-1-jakob.unterwurzacher@cherry.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dsa/tag_ksz.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+--- a/net/dsa/tag_ksz.c
++++ b/net/dsa/tag_ksz.c
+@@ -115,7 +115,12 @@ static struct sk_buff *ksz8795_xmit(stru
+ static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+ {
+- u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
++ u8 *tag;
++
++ if (skb_linearize(skb))
++ return NULL;
++
++ tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+
+ return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN);
+ }
+@@ -184,10 +189,16 @@ static struct sk_buff *ksz9477_xmit(stru
+ static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+ {
+- /* Tag decoding */
+- u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+- unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
+ unsigned int len = KSZ_EGRESS_TAG_LEN;
++ unsigned int port;
++ u8 *tag;
++
++ if (skb_linearize(skb))
++ return NULL;
++
++ /* Tag decoding */
++ tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
++ port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
+
+ /* Extra 4-bytes PTP timestamp */
+ if (tag[0] & KSZ9477_PTP_TAG_INDICATION)
--- /dev/null
+From stable+bounces-177884-greg=kroah.com@vger.kernel.org Fri Sep 5 21:23:05 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 15:22:44 -0400
+Subject: net: dsa: microchip: update tag_ksz masks for KSZ9477 family
+To: stable@vger.kernel.org
+Cc: Pieter Van Trappen <pieter.van.trappen@cern.ch>, Florian Fainelli <florian.fainelli@broadcom.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250905192245.3322547-1-sashal@kernel.org>
+
+From: Pieter Van Trappen <pieter.van.trappen@cern.ch>
+
+[ Upstream commit 3f464b193d40e49299dcd087b10cc3b77cbbea68 ]
+
+Remove magic number 7 by introducing a GENMASK macro instead.
+Remove magic number 0x80 by using the BIT macro instead.
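+
+With the standard kernel macros these expand to the same values as the
+magic numbers they replace:
+
+    KSZ9477_TAIL_TAG_EG_PORT_M  GENMASK(2, 0)   /* == 0x07 */
+    KSZ9477_PTP_TAG_INDICATION  BIT(7)          /* == 0x80 */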
+
+Signed-off-by: Pieter Van Trappen <pieter.van.trappen@cern.ch>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20240909134301.75448-1-vtpieter@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: ba54bce747fa ("net: dsa: microchip: linearize skb for tail-tagging switches")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dsa/tag_ksz.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/dsa/tag_ksz.c
++++ b/net/dsa/tag_ksz.c
+@@ -149,8 +149,9 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROT
+
+ #define KSZ9477_INGRESS_TAG_LEN 2
+ #define KSZ9477_PTP_TAG_LEN 4
+-#define KSZ9477_PTP_TAG_INDICATION 0x80
++#define KSZ9477_PTP_TAG_INDICATION BIT(7)
+
++#define KSZ9477_TAIL_TAG_EG_PORT_M GENMASK(2, 0)
+ #define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
+ #define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
+
+@@ -185,7 +186,7 @@ static struct sk_buff *ksz9477_rcv(struc
+ {
+ /* Tag decoding */
+ u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+- unsigned int port = tag[0] & 7;
++ unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
+ unsigned int len = KSZ_EGRESS_TAG_LEN;
+
+ /* Extra 4-bytes PTP timestamp */
--- /dev/null
+From stable+bounces-177852-greg=kroah.com@vger.kernel.org Fri Sep 5 18:37:01 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 12:36:53 -0400
+Subject: randstruct: gcc-plugin: Fix attribute addition
+To: stable@vger.kernel.org
+Cc: Kees Cook <kees@kernel.org>, Thiago Jung Bauermann <thiago.bauermann@linaro.org>, Ingo Saitz <ingo@hannover.ccc.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250905163653.1746262-2-sashal@kernel.org>
+
+From: Kees Cook <kees@kernel.org>
+
+[ Upstream commit f39f18f3c3531aa802b58a20d39d96e82eb96c14 ]
+
+Based on changes in the 2021 public version of the randstruct
+out-of-tree GCC plugin[1], more carefully update the attributes on
+resulting decls, to avoid tripping checks in GCC 15's
+comptypes_check_enum_int() when it has been configured with
+"--enable-checking=misc":
+
+arch/arm64/kernel/kexec_image.c:132:14: internal compiler error: in comptypes_check_enum_int, at c/c-typeck.cc:1519
+ 132 | const struct kexec_file_ops kexec_image_ops = {
+ | ^~~~~~~~~~~~~~
+ internal_error(char const*, ...), at gcc/gcc/diagnostic-global-context.cc:517
+ fancy_abort(char const*, int, char const*), at gcc/gcc/diagnostic.cc:1803
+ comptypes_check_enum_int(tree_node*, tree_node*, bool*), at gcc/gcc/c/c-typeck.cc:1519
+ ...
+
+Link: https://archive.org/download/grsecurity/grsecurity-3.1-5.10.41-202105280954.patch.gz [1]
+Reported-by: Thiago Jung Bauermann <thiago.bauermann@linaro.org>
+Closes: https://github.com/KSPP/linux/issues/367
+Closes: https://lore.kernel.org/lkml/20250530000646.104457-1-thiago.bauermann@linaro.org/
+Reported-by: Ingo Saitz <ingo@hannover.ccc.de>
+Closes: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1104745
+Fixes: 313dd1b62921 ("gcc-plugins: Add the randstruct plugin")
+Tested-by: Thiago Jung Bauermann <thiago.bauermann@linaro.org>
+Link: https://lore.kernel.org/r/20250530221824.work.623-kees@kernel.org
+Signed-off-by: Kees Cook <kees@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/gcc-plugins/gcc-common.h | 32 ++++++++++++++++++++++++++
+ scripts/gcc-plugins/randomize_layout_plugin.c | 22 ++++++++---------
+ 2 files changed, 43 insertions(+), 11 deletions(-)
+
+--- a/scripts/gcc-plugins/gcc-common.h
++++ b/scripts/gcc-plugins/gcc-common.h
+@@ -182,6 +182,38 @@ static inline tree build_const_char_stri
+ return cstr;
+ }
+
++static inline void __add_type_attr(tree type, const char *attr, tree args)
++{
++ tree oldattr;
++
++ if (type == NULL_TREE)
++ return;
++ oldattr = lookup_attribute(attr, TYPE_ATTRIBUTES(type));
++ if (oldattr != NULL_TREE) {
++ gcc_assert(TREE_VALUE(oldattr) == args || TREE_VALUE(TREE_VALUE(oldattr)) == TREE_VALUE(args));
++ return;
++ }
++
++ TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
++ TYPE_ATTRIBUTES(type) = tree_cons(get_identifier(attr), args, TYPE_ATTRIBUTES(type));
++}
++
++static inline void add_type_attr(tree type, const char *attr, tree args)
++{
++ tree main_variant = TYPE_MAIN_VARIANT(type);
++
++ __add_type_attr(TYPE_CANONICAL(type), attr, args);
++ __add_type_attr(TYPE_CANONICAL(main_variant), attr, args);
++ __add_type_attr(main_variant, attr, args);
++
++ for (type = TYPE_NEXT_VARIANT(main_variant); type; type = TYPE_NEXT_VARIANT(type)) {
++ if (!lookup_attribute(attr, TYPE_ATTRIBUTES(type)))
++ TYPE_ATTRIBUTES(type) = TYPE_ATTRIBUTES(main_variant);
++
++ __add_type_attr(TYPE_CANONICAL(type), attr, args);
++ }
++}
++
+ #define PASS_INFO(NAME, REF, ID, POS) \
+ struct register_pass_info NAME##_pass_info = { \
+ .pass = make_##NAME##_pass(), \
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -95,6 +95,9 @@ static tree handle_randomize_layout_attr
+
+ if (TYPE_P(*node)) {
+ type = *node;
++ } else if (TREE_CODE(*node) == FIELD_DECL) {
++ *no_add_attrs = false;
++ return NULL_TREE;
+ } else {
+ gcc_assert(TREE_CODE(*node) == TYPE_DECL);
+ type = TREE_TYPE(*node);
+@@ -381,15 +384,14 @@ static int relayout_struct(tree type)
+ TREE_CHAIN(newtree[i]) = newtree[i+1];
+ TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE;
+
++ add_type_attr(type, "randomize_performed", NULL_TREE);
++ add_type_attr(type, "designated_init", NULL_TREE);
++ if (has_flexarray)
++ add_type_attr(type, "has_flexarray", NULL_TREE);
++
+ main_variant = TYPE_MAIN_VARIANT(type);
+- for (variant = main_variant; variant; variant = TYPE_NEXT_VARIANT(variant)) {
++ for (variant = main_variant; variant; variant = TYPE_NEXT_VARIANT(variant))
+ TYPE_FIELDS(variant) = newtree[0];
+- TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant));
+- TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant));
+- TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("designated_init"), NULL_TREE, TYPE_ATTRIBUTES(variant));
+- if (has_flexarray)
+- TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("has_flexarray"), NULL_TREE, TYPE_ATTRIBUTES(type));
+- }
+
+ /*
+ * force a re-layout of the main variant
+@@ -457,10 +459,8 @@ static void randomize_type(tree type)
+ if (lookup_attribute("randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))) || is_pure_ops_struct(type))
+ relayout_struct(type);
+
+- for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
+- TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
+- TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("randomize_considered"), NULL_TREE, TYPE_ATTRIBUTES(type));
+- }
++ add_type_attr(type, "randomize_considered", NULL_TREE);
++
+ #ifdef __DEBUG_PLUGIN
+ fprintf(stderr, "Marking randomize_considered on struct %s\n", ORIG_TYPE_NAME(type));
+ #ifdef __DEBUG_VERBOSE
--- /dev/null
+From stable+bounces-177851-greg=kroah.com@vger.kernel.org Fri Sep 5 18:36:58 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 12:36:52 -0400
+Subject: randstruct: gcc-plugin: Remove bogus void member
+To: stable@vger.kernel.org
+Cc: Kees Cook <kees@kernel.org>, "Dr. David Alan Gilbert" <linux@treblig.org>, Mark Brown <broonie@kernel.org>, WangYuli <wangyuli@uniontech.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250905163653.1746262-1-sashal@kernel.org>
+
+From: Kees Cook <kees@kernel.org>
+
+[ Upstream commit e136a4062174a9a8d1c1447ca040ea81accfa6a8 ]
+
+When building the randomized replacement tree of struct members, the
+randstruct GCC plugin would insert, as the first member, a 0-sized void
+member. This appears as though it was done to catch non-designated
+("unnamed") static initializers, which wouldn't be stable since they
+depend on the original struct layout order.
+
+This was accomplished by having the side-effect of the "void member"
+tripping an assert in GCC internals (count_type_elements) if the member
+list ever needed to be counted (e.g. for figuring out the order of members
+during a non-designated initialization), which would catch impossible type
+(void) in the struct:
+
+security/landlock/fs.c: In function ‘hook_file_ioctl_common’:
+security/landlock/fs.c:1745:61: internal compiler error: in count_type_elements, at expr.cc:7075
+ 1745 | .u.op = &(struct lsm_ioctlop_audit) {
+ | ^
+
+static HOST_WIDE_INT
+count_type_elements (const_tree type, bool for_ctor_p)
+{
+ switch (TREE_CODE (type))
+...
+ case VOID_TYPE:
+ default:
+ gcc_unreachable ();
+ }
+}
+
+However this is a redundant safety measure since randstruct uses the
+__designated_initializer attribute both internally and within the
+__randomized_layout attribute macro so that this would be enforced
+by the compiler directly even when randstruct was not enabled (via
+-Wdesignated-init).
+
+A recent change in Landlock ended up tripping the same member counting
+routine when using a full-struct copy initializer as part of an anonymous
+initializer. This, however, is a false positive as the initializer is
+copying between identical structs (and hence identical layouts). The
+"path" member is "struct path", a randomized struct, and is being copied
+to from another "struct path", the "f_path" member:
+
+ landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_FS_ACCESS,
+ .audit = {
+ .type = LSM_AUDIT_DATA_IOCTL_OP,
+ .u.op = &(struct lsm_ioctlop_audit) {
+ .path = file->f_path,
+ .cmd = cmd,
+ },
+ },
+ ...
+
+As can be seen with the coming randstruct KUnit test, there appears to
+be no behavioral problems with this kind of initialization when the void
+member is removed from the randstruct GCC plugin, so remove it.
+
+Reported-by: "Dr. David Alan Gilbert" <linux@treblig.org>
+Closes: https://lore.kernel.org/lkml/Z_PRaKx7q70MKgCA@gallifrey/
+Reported-by: Mark Brown <broonie@kernel.org>
+Closes: https://lore.kernel.org/lkml/20250407-kbuild-disable-gcc-plugins-v1-1-5d46ae583f5e@kernel.org/
+Reported-by: WangYuli <wangyuli@uniontech.com>
+Closes: https://lore.kernel.org/lkml/337D5D4887277B27+3c677db3-a8b9-47f0-93a4-7809355f1381@uniontech.com/
+Fixes: 313dd1b62921 ("gcc-plugins: Add the randstruct plugin")
+Signed-off-by: Kees Cook <kees@kernel.org>
+Stable-dep-of: f39f18f3c353 ("randstruct: gcc-plugin: Fix attribute addition")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/gcc-plugins/randomize_layout_plugin.c | 18 +-----------------
+ 1 file changed, 1 insertion(+), 17 deletions(-)
+
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -377,29 +377,13 @@ static int relayout_struct(tree type)
+
+ shuffle(type, (tree *)newtree, shuffle_length);
+
+- /*
+- * set up a bogus anonymous struct field designed to error out on unnamed struct initializers
+- * as gcc provides no other way to detect such code
+- */
+- list = make_node(FIELD_DECL);
+- TREE_CHAIN(list) = newtree[0];
+- TREE_TYPE(list) = void_type_node;
+- DECL_SIZE(list) = bitsize_zero_node;
+- DECL_NONADDRESSABLE_P(list) = 1;
+- DECL_FIELD_BIT_OFFSET(list) = bitsize_zero_node;
+- DECL_SIZE_UNIT(list) = size_zero_node;
+- DECL_FIELD_OFFSET(list) = size_zero_node;
+- DECL_CONTEXT(list) = type;
+- // to satisfy the constify plugin
+- TREE_READONLY(list) = 1;
+-
+ for (i = 0; i < num_fields - 1; i++)
+ TREE_CHAIN(newtree[i]) = newtree[i+1];
+ TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE;
+
+ main_variant = TYPE_MAIN_VARIANT(type);
+ for (variant = main_variant; variant; variant = TYPE_NEXT_VARIANT(variant)) {
+- TYPE_FIELDS(variant) = list;
++ TYPE_FIELDS(variant) = newtree[0];
+ TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant));
+ TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant));
+ TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("designated_init"), NULL_TREE, TYPE_ATTRIBUTES(variant));
--- /dev/null
+From stable+bounces-178020-greg=kroah.com@vger.kernel.org Sun Sep 7 05:18:52 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Sep 2025 23:18:45 -0400
+Subject: scsi: lpfc: Fix buffer free/clear order in deferred receive path
+To: stable@vger.kernel.org
+Cc: John Evans <evans1210144@gmail.com>, Justin Tee <justin.tee@broadcom.com>, "Martin K. Petersen" <martin.petersen@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250907031845.445653-1-sashal@kernel.org>
+
+From: John Evans <evans1210144@gmail.com>
+
+[ Upstream commit 9dba9a45c348e8460da97c450cddf70b2056deb3 ]
+
+Fix a use-after-free window by correcting the buffer release sequence in
+the deferred receive path. The code freed the RQ buffer first and only
+then cleared the context pointer under the lock. Concurrent paths (e.g.,
+ABTS and the repost path) also inspect and release the same pointer under
+the lock, so the old order could lead to double-free/UAF.
+
+Note that the repost path already uses the correct pattern: detach the
+pointer under the lock, then free it after dropping the lock. The
+deferred path should do the same.
+
+Fixes: 472e146d1cf3 ("scsi: lpfc: Correct upcalling nvmet_fc transport during io done downcall")
+Cc: stable@vger.kernel.org
+Signed-off-by: John Evans <evans1210144@gmail.com>
+Link: https://lore.kernel.org/r/20250828044008.743-1-evans1210144@gmail.com
+Reviewed-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/lpfc/lpfc_nvmet.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_nvmet.c
++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
+@@ -1172,7 +1172,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_tar
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_nvmet_rcv_ctx *ctxp =
+ container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+- struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
++ struct rqb_dmabuf *nvmebuf;
+ struct lpfc_hba *phba = ctxp->phba;
+ unsigned long iflag;
+
+@@ -1180,13 +1180,18 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_tar
+ lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+ ctxp->oxid, ctxp->size, raw_smp_processor_id());
+
++ spin_lock_irqsave(&ctxp->ctxlock, iflag);
++ nvmebuf = ctxp->rqb_buffer;
+ if (!nvmebuf) {
++ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6425 Defer rcv: no buffer oxid x%x: "
+ "flg %x ste %x\n",
+ ctxp->oxid, ctxp->flag, ctxp->state);
+ return;
+ }
++ ctxp->rqb_buffer = NULL;
++ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+ tgtp = phba->targetport->private;
+ if (tgtp)
+@@ -1194,9 +1199,6 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_tar
+
+ /* Free the nvmebuf since a new buffer already replaced it */
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+- spin_lock_irqsave(&ctxp->ctxlock, iflag);
+- ctxp->rqb_buffer = NULL;
+- spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ }
+
+ static void
drm-amdgpu-drop-hw-access-in-non-dc-audio-fini.patch
batman-adv-fix-oob-read-write-in-network-coding-decode.patch
e1000e-fix-heap-overflow-in-e1000_set_eeprom.patch
+mm-khugepaged-fix-anon_vma-race.patch
+scsi-lpfc-fix-buffer-free-clear-order-in-deferred-receive-path.patch
+mm-slub-avoid-accessing-metadata-when-pointer-is-invalid-in-object_err.patch
+cpufreq-sched-explicitly-synchronize-limits_changed-flag-handling.patch
+kvm-x86-take-irqfds.lock-when-adding-deleting-irq-bypass-producer.patch
+iio-chemical-pms7003-use-aligned_s64-for-timestamp.patch
+iio-light-opt3001-fix-deadlock-due-to-concurrent-flag-access.patch
+gpio-pca953x-fix-irq-storm-on-system-wake-up.patch
+alsa-hda-realtek-add-new-hp-zbook-laptop-with-micmute-led-fixup.patch
+dmaengine-mediatek-fix-a-possible-deadlock-error-in-mtk_cqdma_tx_status.patch
+net-dsa-microchip-update-tag_ksz-masks-for-ksz9477-family.patch
+net-dsa-microchip-linearize-skb-for-tail-tagging-switches.patch
+vmxnet3-update-mtu-after-device-quiesce.patch
+randstruct-gcc-plugin-remove-bogus-void-member.patch
+randstruct-gcc-plugin-fix-attribute-addition.patch
--- /dev/null
+From stable+bounces-177873-greg=kroah.com@vger.kernel.org Fri Sep 5 20:23:17 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 14:23:12 -0400
+Subject: vmxnet3: update MTU after device quiesce
+To: stable@vger.kernel.org
+Cc: Ronak Doshi <ronak.doshi@broadcom.com>, Guolin Yang <guolin.yang@broadcom.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250905182312.3024955-1-sashal@kernel.org>
+
+From: Ronak Doshi <ronak.doshi@broadcom.com>
+
+[ Upstream commit 43f0999af011fba646e015f0bb08b6c3002a0170 ]
+
+Currently, when device mtu is updated, vmxnet3 updates netdev mtu, quiesces
+the device and then reactivates it for the ESXi to know about the new mtu.
+So, technically the OS stack can start using the new mtu before ESXi knows
+about the new mtu.
+
+This can lead to issues for TSO packets which use mss as per the new mtu
+configured. This patch fixes this issue by moving the mtu write after
+device quiesce.
+
+Cc: stable@vger.kernel.org
+Fixes: d1a890fa37f2 ("net: VMware virtual Ethernet NIC driver: vmxnet3")
+Signed-off-by: Ronak Doshi <ronak.doshi@broadcom.com>
+Acked-by: Guolin Yang <guolin.yang@broadcom.com>
+Changes v1-> v2:
+ Moved MTU write after destroy of rx rings
+Link: https://patch.msgid.link/20250515190457.8597-1-ronak.doshi@broadcom.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ no WRITE_ONCE() in older trees ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vmxnet3/vmxnet3_drv.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -2998,8 +2998,6 @@ vmxnet3_change_mtu(struct net_device *ne
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+- netdev->mtu = new_mtu;
+-
+ /*
+ * Reset_work may be in the middle of resetting the device, wait for its
+ * completion.
+@@ -3013,6 +3011,7 @@ vmxnet3_change_mtu(struct net_device *ne
+
+ /* we need to re-create the rx queue based on the new mtu */
+ vmxnet3_rq_destroy_all(adapter);
++ netdev->mtu = new_mtu;
+ vmxnet3_adjust_rx_ring_size(adapter);
+ err = vmxnet3_rq_create_all(adapter);
+ if (err) {
+@@ -3029,6 +3028,8 @@ vmxnet3_change_mtu(struct net_device *ne
+ "Closing it\n", err);
+ goto out;
+ }
++ } else {
++ netdev->mtu = new_mtu;
+ }
+
+ out: