--- /dev/null
+From 8d59fba49362c65332395789fd82771f1028d87e Mon Sep 17 00:00:00 2001
+From: Ilia Gavrilov <Ilia.Gavrilov@infotecs.ru>
+Date: Mon, 20 Oct 2025 15:12:55 +0000
+Subject: Bluetooth: MGMT: Fix OOB access in parse_adv_monitor_pattern()
+
+From: Ilia Gavrilov <Ilia.Gavrilov@infotecs.ru>
+
+commit 8d59fba49362c65332395789fd82771f1028d87e upstream.
+
+In the parse_adv_monitor_pattern() function, the value of
+the 'length' variable is currently limited to HCI_MAX_EXT_AD_LENGTH (251).
+The size of the 'value' array in the mgmt_adv_pattern structure is 31.
+If 'patterns[i].length' is set from user space to a value
+exceeding 31, the 'patterns[i].value' array can be accessed
+out of bounds when copied.
+
+Increasing the size of the 'value' array in
+the 'mgmt_adv_pattern' structure would break userspace.
+Considering this, and to avoid the OOB access, revert the limits for
+'offset' and 'length' back to HCI_MAX_AD_LENGTH.
+
+Found by InfoTeCS on behalf of Linux Verification Center
+(linuxtesting.org) with SVACE.
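+
+Illustrative sketch of the problematic pattern (not the exact upstream
+code): the copy takes 'length' bytes from the 31-byte user-supplied
+array, so any length in the 32..251 range reads past the end of
+'patterns[i].value':
+
+    /* struct mgmt_adv_pattern carries __u8 value[31] from user space */
+    length = patterns[i].length;            /* may be up to 251 */
+    memcpy(p->value, patterns[i].value, length); /* OOB if length > 31 */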
+
+Fixes: db08722fc7d4 ("Bluetooth: hci_core: Fix missing instances using HCI_MAX_AD_LENGTH")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ilia Gavrilov <Ilia.Gavrilov@infotecs.ru>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/bluetooth/mgmt.h | 2 +-
+ net/bluetooth/mgmt.c | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/net/bluetooth/mgmt.h
++++ b/include/net/bluetooth/mgmt.h
+@@ -774,7 +774,7 @@ struct mgmt_adv_pattern {
+ __u8 ad_type;
+ __u8 offset;
+ __u8 length;
+- __u8 value[31];
++ __u8 value[HCI_MAX_AD_LENGTH];
+ } __packed;
+
+ #define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -5358,9 +5358,9 @@ static u8 parse_adv_monitor_pattern(stru
+ for (i = 0; i < pattern_count; i++) {
+ offset = patterns[i].offset;
+ length = patterns[i].length;
+- if (offset >= HCI_MAX_EXT_AD_LENGTH ||
+- length > HCI_MAX_EXT_AD_LENGTH ||
+- (offset + length) > HCI_MAX_EXT_AD_LENGTH)
++ if (offset >= HCI_MAX_AD_LENGTH ||
++ length > HCI_MAX_AD_LENGTH ||
++ (offset + length) > HCI_MAX_AD_LENGTH)
+ return MGMT_STATUS_INVALID_PARAMS;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
--- /dev/null
+From 3c6a743c6961cc2cab453b343bb157d6bbbf8120 Mon Sep 17 00:00:00 2001
+From: Wayne Lin <Wayne.Lin@amd.com>
+Date: Wed, 5 Nov 2025 10:36:31 +0800
+Subject: drm/amd/display: Enable mst when it's detected but yet to be initialized
+
+From: Wayne Lin <Wayne.Lin@amd.com>
+
+commit 3c6a743c6961cc2cab453b343bb157d6bbbf8120 upstream.
+
+[Why]
+drm_dp_mst_topology_queue_probe() is used under the assumption that
+MST is already initialized. If the system is connected to an SST sink
+first and then switched to an MST branch device during suspend, topology
+probing will fail because the wrong API is called while the MST manager
+is yet to be initialized.
+
+[How]
+At dm_resume(), once an MST branch device is detected as connected, check
+whether MST is already initialized. If not, call
+dm_helpers_dp_mst_start_top_mgr() instead to initialize MST.
+
+V2: Adjust the commit msg a bit
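+
+Resulting resume flow, roughly (a sketch of the intent, see the diff
+below):
+
+    dm_resume()
+      -> for each MST root connector:
+           mst_mgr.mst_primary set?      // checked under mst_mgr.lock
+             no  -> dm_helpers_dp_mst_start_top_mgr()  // init MST first
+             yes -> drm_dp_mst_topology_queue_probe()  // mgr already up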
+
+Fixes: bc068194f548 ("drm/amd/display: Don't write DP_MSTM_CTRL after LT")
+Cc: Fangzhi Zuo <jerry.zuo@amd.com>
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Tom Chung <chiahsuan.chung@amd.com>
+Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 62320fb8d91a0bddc44a228203cfa9bfbb5395bd)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2993,6 +2993,7 @@ static int dm_resume(void *handle)
+ /* Do mst topology probing after resuming cached state*/
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
++ bool init = false;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+@@ -3002,7 +3003,14 @@ static int dm_resume(void *handle)
+ aconnector->mst_root)
+ continue;
+
+- drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
++ scoped_guard(mutex, &aconnector->mst_mgr.lock) {
++ init = !aconnector->mst_mgr.mst_primary;
++ }
++ if (init)
++ dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
++ aconnector->dc_link, false);
++ else
++ drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+ }
+ drm_connector_list_iter_end(&iter);
+
--- /dev/null
+From 487df8b698345dd5a91346335f05170ed5f29d4e Mon Sep 17 00:00:00 2001
+From: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+Date: Tue, 4 Nov 2025 10:53:57 +0100
+Subject: drm/sched: Fix deadlock in drm_sched_entity_kill_jobs_cb
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+
+commit 487df8b698345dd5a91346335f05170ed5f29d4e upstream.
+
+The Mesa issue referenced below pointed out a possible deadlock:
+
+[ 1231.611031] Possible interrupt unsafe locking scenario:
+
+[ 1231.611033] CPU0 CPU1
+[ 1231.611034] ---- ----
+[ 1231.611035] lock(&xa->xa_lock#17);
+[ 1231.611038] local_irq_disable();
+[ 1231.611039] lock(&fence->lock);
+[ 1231.611041] lock(&xa->xa_lock#17);
+[ 1231.611044] <Interrupt>
+[ 1231.611045] lock(&fence->lock);
+[ 1231.611047]
+ *** DEADLOCK ***
+
+In this example, CPU0 would be any function accessing job->dependencies
+through the xa_* functions that don't disable interrupts (e.g.
+drm_sched_job_add_dependency(), drm_sched_entity_kill_jobs_cb()).
+
+CPU1 is executing drm_sched_entity_kill_jobs_cb() as a fence signalling
+callback, i.e. in interrupt context. It will deadlock when trying to
+grab the xa_lock, which is already held by CPU0.
+
+Replacing all xa_* usage with their xa_*_irq counterparts would fix
+this issue, but Christian pointed out another one: dma_fence_signal()
+takes fence.lock, and so does dma_fence_add_callback().
+
+ dma_fence_signal() // locks f1.lock
+ -> drm_sched_entity_kill_jobs_cb()
+ -> foreach dependencies
+ -> dma_fence_add_callback() // locks f2.lock
+
+This will deadlock if f1 and f2 share the same spinlock.
+
+To fix both issues, the code iterating on dependencies and re-arming them
+is moved out to drm_sched_entity_kill_jobs_work().
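+
+Sketch of the resulting flow, in the same call-chain notation as above:
+
+    dma_fence_signal()                  // irq context, holds f1.lock
+      -> drm_sched_entity_kill_jobs_cb()
+           -> dma_fence_put(f)
+           -> schedule_work(&job->work) // no xa_lock, no fence lock taken
+
+    drm_sched_entity_kill_jobs_work()   // process context, no lock held
+      -> xa_for_each(&job->dependencies, ...)
+           -> dma_fence_add_callback()  // safe to take f2.lock here
+      -> free_job()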
+
+Cc: stable@vger.kernel.org # v6.2+
+Fixes: 2fdb8a8f07c2 ("drm/scheduler: rework entity flush, kill and fini")
+Link: https://gitlab.freedesktop.org/mesa/mesa/-/issues/13908
+Reported-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
+Suggested-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+[phasta: commit message nits]
+Signed-off-by: Philipp Stanner <phasta@kernel.org>
+Link: https://patch.msgid.link/20251104095358.15092-1-pierre-eric.pelloux-prayer@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/scheduler/sched_entity.c | 34 +++++++++++++++++--------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -163,26 +163,15 @@ int drm_sched_entity_error(struct drm_sc
+ }
+ EXPORT_SYMBOL(drm_sched_entity_error);
+
++static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
++ struct dma_fence_cb *cb);
++
+ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
+ {
+ struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+-
+- drm_sched_fence_scheduled(job->s_fence, NULL);
+- drm_sched_fence_finished(job->s_fence, -ESRCH);
+- WARN_ON(job->s_fence->parent);
+- job->sched->ops->free_job(job);
+-}
+-
+-/* Signal the scheduler finished fence when the entity in question is killed. */
+-static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+- struct dma_fence_cb *cb)
+-{
+- struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+- finish_cb);
++ struct dma_fence *f;
+ unsigned long index;
+
+- dma_fence_put(f);
+-
+ /* Wait for all dependencies to avoid data corruptions */
+ xa_for_each(&job->dependencies, index, f) {
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+@@ -210,6 +199,21 @@ static void drm_sched_entity_kill_jobs_c
+ dma_fence_put(f);
+ }
+
++ drm_sched_fence_scheduled(job->s_fence, NULL);
++ drm_sched_fence_finished(job->s_fence, -ESRCH);
++ WARN_ON(job->s_fence->parent);
++ job->sched->ops->free_job(job);
++}
++
++/* Signal the scheduler finished fence when the entity in question is killed. */
++static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
++ struct dma_fence_cb *cb)
++{
++ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
++ finish_cb);
++
++ dma_fence_put(f);
++
+ INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+ schedule_work(&job->work);
+ }
--- /dev/null
+From 2b81082ad37cc3f28355fb73a6a69b91ff7dbf20 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Mon, 3 Nov 2025 12:11:24 -0700
+Subject: lib/crypto: curve25519-hacl64: Fix older clang KASAN workaround for GCC
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 2b81082ad37cc3f28355fb73a6a69b91ff7dbf20 upstream.
+
+Commit 2f13daee2a72 ("lib/crypto/curve25519-hacl64: Disable KASAN with
+clang-17 and older") inadvertently disabled KASAN in curve25519-hacl64.o
+for GCC unconditionally because clang-min-version will always evaluate
+to nothing for GCC. Add a check for CONFIG_CC_IS_CLANG to avoid applying
+the workaround for GCC, which is only needed for clang-17 and older.
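+
+Worked evaluation of the conditional, assuming clang-min-version expands
+to "y" when the compiler is clang >= 18 and to the empty string
+otherwise:
+
+    old:  ifeq ($(call clang-min-version, 180000),)
+          GCC       -> ifeq (,)  -> true  -> KASAN wrongly disabled
+          clang 17  -> ifeq (,)  -> true  -> KASAN disabled (intended)
+          clang 18+ -> ifeq (y,) -> false -> KASAN kept
+
+    new:  ifeq ($(CONFIG_CC_IS_CLANG)_$(call clang-min-version, 180000),y_)
+          GCC       -> "_"   != "y_" -> KASAN kept
+          clang 17  -> "y_"  == "y_" -> KASAN disabled (intended)
+          clang 18+ -> "y_y" != "y_" -> KASAN kept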
+
+Cc: stable@vger.kernel.org
+Fixes: 2f13daee2a72 ("lib/crypto/curve25519-hacl64: Disable KASAN with clang-17 and older")
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Acked-by: Ard Biesheuvel <ardb@kernel.org>
+Link: https://lore.kernel.org/r/20251103-curve25519-hacl64-fix-kasan-workaround-v2-1-ab581cbd8035@kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/crypto/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/crypto/Makefile
++++ b/lib/crypto/Makefile
+@@ -31,7 +31,7 @@ libcurve25519-generic-y := curve25519
+ libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o
+ libcurve25519-generic-y += curve25519-generic.o
+ # clang versions prior to 18 may blow out the stack with KASAN
+-ifeq ($(call clang-min-version, 180000),)
++ifeq ($(CONFIG_CC_IS_CLANG)_$(call clang-min-version, 180000),y_)
+ KASAN_SANITIZE_curve25519-hacl64.o := n
+ endif
+
--- /dev/null
+From 162f24cbb0f6ec596e7e9f3e91610d79dc805229 Mon Sep 17 00:00:00 2001
+From: Yuta Hayama <hayama@lineo.co.jp>
+Date: Wed, 15 Oct 2025 12:07:05 +0900
+Subject: rtc: rx8025: fix incorrect register reference
+
+From: Yuta Hayama <hayama@lineo.co.jp>
+
+commit 162f24cbb0f6ec596e7e9f3e91610d79dc805229 upstream.
+
+This code is intended to operate on the CTRL1 register, but ctrl[1]
+actually holds CTRL2; CTRL1 is in ctrl[0].
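+
+For context, assuming ctrl[] is filled by a block read that starts at
+the CTRL1 register:
+
+    ctrl[0]  ->  CTRL1  (holds RX8025_BIT_CTRL1_1224)
+    ctrl[1]  ->  CTRL2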
+
+Signed-off-by: Yuta Hayama <hayama@lineo.co.jp>
+Fixes: 71af91565052 ("rtc: rx8025: fix 12/24 hour mode detection on RX-8035")
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/eae5f479-5d28-4a37-859d-d54794e7628c@lineo.co.jp
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/rtc/rtc-rx8025.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/rtc/rtc-rx8025.c
++++ b/drivers/rtc/rtc-rx8025.c
+@@ -316,7 +316,7 @@ static int rx8025_init_client(struct i2c
+ return hour_reg;
+ rx8025->is_24 = (hour_reg & RX8035_BIT_HOUR_1224);
+ } else {
+- rx8025->is_24 = (ctrl[1] & RX8025_BIT_CTRL1_1224);
++ rx8025->is_24 = (ctrl[0] & RX8025_BIT_CTRL1_1224);
+ }
+ out:
+ return err;
--- /dev/null
+From bb44826c3bdbf1fa3957008a04908f45e5666463 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Fri, 24 Oct 2025 11:59:15 +0300
+Subject: scsi: ufs: ufs-pci: Fix S0ix/S3 for Intel controllers
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit bb44826c3bdbf1fa3957008a04908f45e5666463 upstream.
+
+Intel platforms with UFS can support Suspend-to-Idle (S0ix) and
+Suspend-to-RAM (S3). For S0ix the link state should be HIBERNATE. For
+S3, state is lost, so the link state must be OFF. Driver policy,
+expressed by spm_lvl, can be 3 (link HIBERNATE, device SLEEP) for S0ix
+but must be changed to 5 (link OFF, device POWEROFF) for S3.
+
+Fix support for S0ix/S3 by switching spm_lvl as needed. During suspend
+->prepare(), if the suspend target state is not Suspend-to-Idle, raise
+the spm_lvl to at least 5 so that resume remains possible from deep
+sleep states. During ->complete(), restore the spm_lvl to its original
+value, which is suitable for S0ix.
+
+This fix is first needed in Intel Alder Lake based controllers.
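+
+Roughly, the new PM hooks for the Intel variant behave like this (a
+sketch of the diff below):
+
+    ->prepare():   if target state != s2idle and spm_lvl < UFS_PM_LVL_5:
+                       save spm_lvl, force UFS_PM_LVL_5
+                   call ufshcd_suspend_prepare(); restore on error
+    ->complete():  call ufshcd_resume_complete()
+                   if a level was saved, restore it and clear the save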
+
+Fixes: 7dc9fb47bc9a ("scsi: ufs: ufs-pci: Add support for Intel ADL")
+Cc: stable@vger.kernel.org
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20251024085918.31825-2-adrian.hunter@intel.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ufs/host/ufshcd-pci.c | 67 ++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 65 insertions(+), 2 deletions(-)
+
+--- a/drivers/ufs/host/ufshcd-pci.c
++++ b/drivers/ufs/host/ufshcd-pci.c
+@@ -15,6 +15,7 @@
+ #include <linux/pci.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/pm_qos.h>
++#include <linux/suspend.h>
+ #include <linux/debugfs.h>
+ #include <linux/uuid.h>
+ #include <linux/acpi.h>
+@@ -34,6 +35,7 @@ struct intel_host {
+ u32 dsm_fns;
+ u32 active_ltr;
+ u32 idle_ltr;
++ int saved_spm_lvl;
+ struct dentry *debugfs_root;
+ struct gpio_desc *reset_gpio;
+ };
+@@ -375,6 +377,7 @@ static int ufs_intel_common_init(struct
+ host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
++ host->saved_spm_lvl = -1;
+ ufshcd_set_variant(hba, host);
+ intel_dsm_init(host, hba->dev);
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
+@@ -542,6 +545,66 @@ static int ufshcd_pci_restore(struct dev
+
+ return ufshcd_system_resume(dev);
+ }
++
++static int ufs_intel_suspend_prepare(struct device *dev)
++{
++ struct ufs_hba *hba = dev_get_drvdata(dev);
++ struct intel_host *host = ufshcd_get_variant(hba);
++ int err;
++
++ /*
++ * Only s2idle (S0ix) retains link state. Force power-off
++ * (UFS_PM_LVL_5) for any other case.
++ */
++ if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE && hba->spm_lvl < UFS_PM_LVL_5) {
++ host->saved_spm_lvl = hba->spm_lvl;
++ hba->spm_lvl = UFS_PM_LVL_5;
++ }
++
++ err = ufshcd_suspend_prepare(dev);
++
++ if (err < 0 && host->saved_spm_lvl != -1) {
++ hba->spm_lvl = host->saved_spm_lvl;
++ host->saved_spm_lvl = -1;
++ }
++
++ return err;
++}
++
++static void ufs_intel_resume_complete(struct device *dev)
++{
++ struct ufs_hba *hba = dev_get_drvdata(dev);
++ struct intel_host *host = ufshcd_get_variant(hba);
++
++ ufshcd_resume_complete(dev);
++
++ if (host->saved_spm_lvl != -1) {
++ hba->spm_lvl = host->saved_spm_lvl;
++ host->saved_spm_lvl = -1;
++ }
++}
++
++static int ufshcd_pci_suspend_prepare(struct device *dev)
++{
++ struct ufs_hba *hba = dev_get_drvdata(dev);
++
++ if (!strcmp(hba->vops->name, "intel-pci"))
++ return ufs_intel_suspend_prepare(dev);
++
++ return ufshcd_suspend_prepare(dev);
++}
++
++static void ufshcd_pci_resume_complete(struct device *dev)
++{
++ struct ufs_hba *hba = dev_get_drvdata(dev);
++
++ if (!strcmp(hba->vops->name, "intel-pci")) {
++ ufs_intel_resume_complete(dev);
++ return;
++ }
++
++ ufshcd_resume_complete(dev);
++}
+ #endif
+
+ /**
+@@ -624,8 +687,8 @@ static const struct dev_pm_ops ufshcd_pc
+ .thaw = ufshcd_system_resume,
+ .poweroff = ufshcd_system_suspend,
+ .restore = ufshcd_pci_restore,
+- .prepare = ufshcd_suspend_prepare,
+- .complete = ufshcd_resume_complete,
++ .prepare = ufshcd_pci_suspend_prepare,
++ .complete = ufshcd_pci_resume_complete,
+ #endif
+ };
+
net-bridge-fix-use-after-free-due-to-mst-port-state-.patch
net-bridge-fix-mst-static-key-usage.patch
tracing-fix-memory-leaks-in-create_field_var.patch
+drm-amd-display-enable-mst-when-it-s-detected-but-yet-to-be-initialized.patch
+drm-sched-fix-deadlock-in-drm_sched_entity_kill_jobs_cb.patch
+bluetooth-mgmt-fix-oob-access-in-parse_adv_monitor_pattern.patch
+rtc-rx8025-fix-incorrect-register-reference.patch
+x86-microcode-amd-add-more-known-models-to-entry-sign-checking.patch
+smb-client-validate-change-notify-buffer-before-copy.patch
+smb-client-fix-potential-uaf-in-smb2_close_cached_fid.patch
+virtio-net-fix-received-length-check-in-big-packets.patch
+lib-crypto-curve25519-hacl64-fix-older-clang-kasan-workaround-for-gcc.patch
+scsi-ufs-ufs-pci-fix-s0ix-s3-for-intel-controllers.patch
--- /dev/null
+From 734e99623c5b65bf2c03e35978a0b980ebc3c2f8 Mon Sep 17 00:00:00 2001
+From: Henrique Carvalho <henrique.carvalho@suse.com>
+Date: Mon, 3 Nov 2025 19:52:55 -0300
+Subject: smb: client: fix potential UAF in smb2_close_cached_fid()
+
+From: Henrique Carvalho <henrique.carvalho@suse.com>
+
+commit 734e99623c5b65bf2c03e35978a0b980ebc3c2f8 upstream.
+
+find_or_create_cached_dir() could grab a new reference after kref_put()
+had seen the refcount drop to zero but before cfid_list_lock is acquired
+in smb2_close_cached_fid(), leading to a use-after-free.
+
+Switch to kref_put_lock() so smb2_close_cached_fid() is called with
+cfid_list_lock held, closing that gap.
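+
+Sketch of the kref_put_lock() contract relied on here (a generic
+pattern with illustrative names, not the exact cifs code):
+
+    static void release_fn(struct kref *ref)  /* entered with lock held */
+    {
+            struct foo *obj = container_of(ref, struct foo, refcount);
+
+            list_del(&obj->entry);     /* still protected by the lock */
+            spin_unlock(&obj->lock);   /* release must drop the lock */
+            kfree(obj);
+    }
+
+    /* takes obj->lock only if the refcount is about to hit zero */
+    kref_put_lock(&obj->refcount, release_fn, &obj->lock);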
+
+Fixes: ebe98f1447bb ("cifs: enable caching of directories for which a lease is held")
+Cc: stable@vger.kernel.org
+Reported-by: Jay Shin <jaeshin@redhat.com>
+Reviewed-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+Signed-off-by: Henrique Carvalho <henrique.carvalho@suse.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -362,11 +362,11 @@ out:
+ * lease. Release one here, and the second below.
+ */
+ cfid->has_lease = false;
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
++ close_cached_dir(cfid);
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
++ close_cached_dir(cfid);
+ } else {
+ *ret_cfid = cfid;
+ atomic_inc(&tcon->num_remote_opens);
+@@ -406,12 +406,14 @@ int open_cached_dir_by_dentry(struct cif
+
+ static void
+ smb2_close_cached_fid(struct kref *ref)
++__releases(&cfid->cfids->cfid_list_lock)
+ {
+ struct cached_fid *cfid = container_of(ref, struct cached_fid,
+ refcount);
+ int rc;
+
+- spin_lock(&cfid->cfids->cfid_list_lock);
++ lockdep_assert_held(&cfid->cfids->cfid_list_lock);
++
+ if (cfid->on_list) {
+ list_del(&cfid->entry);
+ cfid->on_list = false;
+@@ -446,7 +448,7 @@ void drop_cached_dir_by_name(const unsig
+ spin_lock(&cfid->cfids->cfid_list_lock);
+ if (cfid->has_lease) {
+ cfid->has_lease = false;
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
++ close_cached_dir(cfid);
+ }
+ spin_unlock(&cfid->cfids->cfid_list_lock);
+ close_cached_dir(cfid);
+@@ -455,7 +457,7 @@ void drop_cached_dir_by_name(const unsig
+
+ void close_cached_dir(struct cached_fid *cfid)
+ {
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
++ kref_put_lock(&cfid->refcount, smb2_close_cached_fid, &cfid->cfids->cfid_list_lock);
+ }
+
+ /*
+@@ -566,7 +568,7 @@ cached_dir_offload_close(struct work_str
+
+ WARN_ON(cfid->on_list);
+
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
++ close_cached_dir(cfid);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
+ }
+
+@@ -743,7 +745,7 @@ static void cfids_laundromat_worker(stru
+ * Drop the ref-count from above, either the lease-ref (if there
+ * was one) or the extra one acquired.
+ */
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
++ close_cached_dir(cfid);
+ }
+ queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
--- /dev/null
+From 4012abe8a78fbb8869634130024266eaef7081fe Mon Sep 17 00:00:00 2001
+From: Joshua Rogers <linux@joshua.hu>
+Date: Fri, 7 Nov 2025 00:09:37 +0800
+Subject: smb: client: validate change notify buffer before copy
+
+From: Joshua Rogers <linux@joshua.hu>
+
+commit 4012abe8a78fbb8869634130024266eaef7081fe upstream.
+
+SMB2_change_notify() called smb2_validate_iov() but ignored the return
+code, then kmemdup()ed using the server-provided OutputBufferOffset/Length.
+
+Check the return of smb2_validate_iov() and bail out on error.
+
+Discovered with help from the ZeroPath security tooling.
+
+Signed-off-by: Joshua Rogers <linux@joshua.hu>
+Reviewed-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+Cc: stable@vger.kernel.org
+Fixes: e3e9463414f61 ("smb3: improve SMB3 change notification support")
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2pdu.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -4068,9 +4068,12 @@ replay_again:
+
+ smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
+
+- smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
+- le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov,
++ rc = smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
++ le32_to_cpu(smb_rsp->OutputBufferLength),
++ &rsp_iov,
+ sizeof(struct file_notify_information));
++ if (rc)
++ goto cnotify_exit;
+
+ *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset),
+ le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
--- /dev/null
+From 0c716703965ffc5ef4311b65cb5d84a703784717 Mon Sep 17 00:00:00 2001
+From: Bui Quang Minh <minhquangbui99@gmail.com>
+Date: Thu, 30 Oct 2025 21:44:38 +0700
+Subject: virtio-net: fix received length check in big packets
+
+From: Bui Quang Minh <minhquangbui99@gmail.com>
+
+commit 0c716703965ffc5ef4311b65cb5d84a703784717 upstream.
+
+Since commit 4959aebba8c0 ("virtio-net: use mtu size as buffer length
+for big packets"), when guest GSO is off, the allocated size for big
+packets is no longer MAX_SKB_FRAGS * PAGE_SIZE but depends on the
+negotiated MTU. The number of allocated frags for big packets is stored
+in vi->big_packets_num_skbfrags.
+
+Because the host-announced buffer length can be malicious (e.g. the host
+vhost_net driver's get_rx_bufs is modified to announce an incorrect
+length), a check is needed in the virtio_net receive path. Currently, the
+check is not adapted to the new allocation scheme, which can lead to a
+NULL page pointer dereference in the while loop below when a length
+larger than the allocated size is received.
+
+This commit adapts the received length check to the new allocation
+scheme.
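+
+Worked example, assuming big_packets_num_skbfrags is computed as
+DIV_ROUND_UP(mtu, PAGE_SIZE) when guest GSO is off (per 4959aebba8c0):
+
+    mtu = 9000, PAGE_SIZE = 4096
+      big_packets_num_skbfrags = DIV_ROUND_UP(9000, 4096) = 3
+      maximum accepted len     = (3 + 1) * 4096 = 16384
+    A host announcing a larger len is now dropped in receive_big()
+    instead of walking pages that were never allocated.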
+
+Fixes: 4959aebba8c0 ("virtio-net: use mtu size as buffer length for big packets")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
+Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Tested-by: Lei Yang <leiyang@redhat.com>
+Link: https://patch.msgid.link/20251030144438.7582-1-minhquangbui99@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/virtio_net.c | 25 ++++++++++++-------------
+ 1 file changed, 12 insertions(+), 13 deletions(-)
+
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -591,17 +591,6 @@ static struct sk_buff *page_to_skb(struc
+ goto ok;
+ }
+
+- /*
+- * Verify that we can indeed put this data into a skb.
+- * This is here to handle cases when the device erroneously
+- * tries to receive more than is possible. This is usually
+- * the case of a broken device.
+- */
+- if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
+- net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
+- dev_kfree_skb(skb);
+- return NULL;
+- }
+ BUG_ON(offset >= PAGE_SIZE);
+ while (len) {
+ unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
+@@ -1344,9 +1333,19 @@ static struct sk_buff *receive_big(struc
+ struct virtnet_rq_stats *stats)
+ {
+ struct page *page = buf;
+- struct sk_buff *skb =
+- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
++ struct sk_buff *skb;
++
++ /* Make sure that len does not exceed the size allocated in
++ * add_recvbuf_big.
++ */
++ if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) {
++ pr_debug("%s: rx error: len %u exceeds allocated size %lu\n",
++ dev->name, len,
++ (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE);
++ goto err;
++ }
+
++ skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
+ u64_stats_add(&stats->bytes, len - vi->hdr_len);
+ if (unlikely(!skb))
+ goto err;
--- /dev/null
+From d23550efc6800841b4d1639784afaebdea946ae0 Mon Sep 17 00:00:00 2001
+From: "Mario Limonciello (AMD)" <superm1@kernel.org>
+Date: Thu, 6 Nov 2025 12:28:54 -0600
+Subject: x86/microcode/AMD: Add more known models to entry sign checking
+
+From: Mario Limonciello (AMD) <superm1@kernel.org>
+
+commit d23550efc6800841b4d1639784afaebdea946ae0 upstream.
+
+Two Zen5 systems are missing from need_sha_check(). Add them.
+
+Fixes: 50cef76d5cb0 ("x86/microcode/AMD: Load only SHA256-checksummed patches")
+Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Link: https://patch.msgid.link/20251106182904.4143757-1-superm1@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/microcode/amd.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -210,10 +210,12 @@ static bool need_sha_check(u32 cur_rev)
+ case 0xaa001: return cur_rev <= 0xaa00116; break;
+ case 0xaa002: return cur_rev <= 0xaa00218; break;
+ case 0xb0021: return cur_rev <= 0xb002146; break;
++ case 0xb0081: return cur_rev <= 0xb008111; break;
+ case 0xb1010: return cur_rev <= 0xb101046; break;
+ case 0xb2040: return cur_rev <= 0xb204031; break;
+ case 0xb4040: return cur_rev <= 0xb404031; break;
+ case 0xb6000: return cur_rev <= 0xb600031; break;
++ case 0xb6080: return cur_rev <= 0xb608031; break;
+ case 0xb7000: return cur_rev <= 0xb700031; break;
+ default: break;
+ }