--- /dev/null
+From 7faf14a7b0366f153284db0ad3347c457ea70136 Mon Sep 17 00:00:00 2001
+From: Li Lingfeng <lilingfeng3@huawei.com>
+Date: Sun, 26 Jan 2025 17:47:22 +0800
+Subject: nfsd: clear acl_access/acl_default after releasing them
+
+From: Li Lingfeng <lilingfeng3@huawei.com>
+
+commit 7faf14a7b0366f153284db0ad3347c457ea70136 upstream.
+
+If getting acl_default fails, acl_access and acl_default will be released
+simultaneously. However, acl_access will still hold a stale pointer to
+the released posix_acl, which will trigger a refcount-underflow WARNING
+in nfs3svc_release_getacl like this:
+
+------------[ cut here ]------------
+refcount_t: underflow; use-after-free.
+WARNING: CPU: 26 PID: 3199 at lib/refcount.c:28
+refcount_warn_saturate+0xb5/0x170
+Modules linked in:
+CPU: 26 UID: 0 PID: 3199 Comm: nfsd Not tainted
+6.12.0-rc6-00079-g04ae226af01f-dirty #8
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
+1.16.1-2.fc37 04/01/2014
+RIP: 0010:refcount_warn_saturate+0xb5/0x170
+Code: cc cc 0f b6 1d b3 20 a5 03 80 fb 01 0f 87 65 48 d8 00 83 e3 01 75
+e4 48 c7 c7 c0 3b 9b 85 c6 05 97 20 a5 03 01 e8 fb 3e 30 ff <0f> 0b eb
+cd 0f b6 1d 8a3
+RSP: 0018:ffffc90008637cd8 EFLAGS: 00010282
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffffffff83904fde
+RDX: dffffc0000000000 RSI: 0000000000000008 RDI: ffff88871ed36380
+RBP: ffff888158beeb40 R08: 0000000000000001 R09: fffff520010c6f56
+R10: ffffc90008637ab7 R11: 0000000000000001 R12: 0000000000000001
+R13: ffff888140e77400 R14: ffff888140e77408 R15: ffffffff858b42c0
+FS: 0000000000000000(0000) GS:ffff88871ed00000(0000)
+knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000562384d32158 CR3: 000000055cc6a000 CR4: 00000000000006f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ ? refcount_warn_saturate+0xb5/0x170
+ ? __warn+0xa5/0x140
+ ? refcount_warn_saturate+0xb5/0x170
+ ? report_bug+0x1b1/0x1e0
+ ? handle_bug+0x53/0xa0
+ ? exc_invalid_op+0x17/0x40
+ ? asm_exc_invalid_op+0x1a/0x20
+ ? tick_nohz_tick_stopped+0x1e/0x40
+ ? refcount_warn_saturate+0xb5/0x170
+ ? refcount_warn_saturate+0xb5/0x170
+ nfs3svc_release_getacl+0xc9/0xe0
+ svc_process_common+0x5db/0xb60
+ ? __pfx_svc_process_common+0x10/0x10
+ ? __rcu_read_unlock+0x69/0xa0
+ ? __pfx_nfsd_dispatch+0x10/0x10
+ ? svc_xprt_received+0xa1/0x120
+ ? xdr_init_decode+0x11d/0x190
+ svc_process+0x2a7/0x330
+ svc_handle_xprt+0x69d/0x940
+ svc_recv+0x180/0x2d0
+ nfsd+0x168/0x200
+ ? __pfx_nfsd+0x10/0x10
+ kthread+0x1a2/0x1e0
+ ? kthread+0xf4/0x1e0
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0x34/0x60
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+Kernel panic - not syncing: kernel: panic_on_warn set ...
+
+Clear acl_access/acl_default after posix_acl_release() is called to
+prevent the UAF from being triggered (see the sketch below).
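+
+A minimal sketch of the fixed error path (the fail: label and field
+names are from the patch below; the surrounding function bodies are in
+the diff):
+
+	fail:
+		posix_acl_release(resp->acl_access);
+		posix_acl_release(resp->acl_default);
+		/*
+		 * Drop the now-dangling pointers: the later release in
+		 * nfs3svc_release_getacl then sees NULL, which
+		 * posix_acl_release() treats as a no-op, so the refcount
+		 * is not decremented a second time.
+		 */
+		resp->acl_access = NULL;
+		resp->acl_default = NULL;
+		goto out;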
+
+Fixes: a257cdd0e217 ("[PATCH] NFSD: Add server support for NFSv3 ACLs.")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20241107014705.2509463-1-lilingfeng@huaweicloud.com/
+Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
+Reviewed-by: Rick Macklem <rmacklem@uoguelph.ca>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfs2acl.c | 2 ++
+ fs/nfsd/nfs3acl.c | 2 ++
+ 2 files changed, 4 insertions(+)
+
+--- a/fs/nfsd/nfs2acl.c
++++ b/fs/nfsd/nfs2acl.c
+@@ -84,6 +84,8 @@ out:
+ fail:
+ posix_acl_release(resp->acl_access);
+ posix_acl_release(resp->acl_default);
++ resp->acl_access = NULL;
++ resp->acl_default = NULL;
+ goto out;
+ }
+
+--- a/fs/nfsd/nfs3acl.c
++++ b/fs/nfsd/nfs3acl.c
+@@ -76,6 +76,8 @@ out:
+ fail:
+ posix_acl_release(resp->acl_access);
+ posix_acl_release(resp->acl_default);
++ resp->acl_access = NULL;
++ resp->acl_default = NULL;
+ goto out;
+ }
+
--- /dev/null
+From 036ac2778f7b28885814c6fbc07e156ad1624d03 Mon Sep 17 00:00:00 2001
+From: Dai Ngo <dai.ngo@oracle.com>
+Date: Thu, 30 Jan 2025 11:01:27 -0800
+Subject: NFSD: fix hang in nfsd4_shutdown_callback
+
+From: Dai Ngo <dai.ngo@oracle.com>
+
+commit 036ac2778f7b28885814c6fbc07e156ad1624d03 upstream.
+
+If the nfs4_client is in courtesy state then there is no point in
+sending the callback. Doing so causes nfsd4_shutdown_callback to hang,
+since cl_cb_inflight never drops to 0. The hang lasts about 15 minutes,
+until TCP notifies NFSD that the connection was dropped.
+
+This patch modifies nfsd4_run_cb_work to skip the RPC call if the
+nfs4_client is in courtesy state (see the sketch below).
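+
+A minimal sketch of the resulting guard in nfsd4_run_cb_work (taken
+from the diff below):
+
+	clnt = clp->cl_cb_client;
+	if (!clnt || clp->cl_state == NFSD4_COURTESY) {
+		/*
+		 * Callback channel broken, client killed, or client in
+		 * courtesy state: destroy the callback rather than
+		 * issuing an RPC that would pin cl_cb_inflight.
+		 */
+		nfsd41_destroy_cb(cb);
+		return;
+	}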
+
+Signed-off-by: Dai Ngo <dai.ngo@oracle.com>
+Fixes: 66af25799940 ("NFSD: add courteous server support for thread with only delegation")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfs4callback.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -1486,8 +1486,11 @@ nfsd4_run_cb_work(struct work_struct *wo
+ nfsd4_process_cb_update(cb);
+
+ clnt = clp->cl_cb_client;
+- if (!clnt) {
+- /* Callback channel broken, or client killed; give up: */
++ if (!clnt || clp->cl_state == NFSD4_COURTESY) {
++ /*
++ * Callback channel broken, client killed or
++ * nfs4_client in courtesy state; give up.
++ */
+ nfsd41_destroy_cb(cb);
+ return;
+ }
--- /dev/null
+From b9382e29ca538b879645899ce45d652a304e2ed2 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Sat, 25 Jan 2025 20:13:18 -0500
+Subject: nfsd: validate the nfsd_serv pointer before calling svc_wake_up
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit b9382e29ca538b879645899ce45d652a304e2ed2 upstream.
+
+nfsd_file_dispose_list_delayed can be called from the filecache
+laundrette, which is shut down after the nfsd threads are shut down and
+the nfsd_serv pointer is cleared. If nn->nfsd_serv is NULL then there
+are no threads to wake.
+
+Ensure that the nn->nfsd_serv pointer is non-NULL before calling
+svc_wake_up in nfsd_file_dispose_list_delayed, as sketched below. This
+is safe since the svc_serv is not freed until after the filecache
+laundrette is cancelled.
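+
+A minimal sketch of the check (matching the diff below; the pointer is
+read once into a local so the NULL test and the wake-up use the same
+value):
+
+	struct svc_serv *serv = nn->nfsd_serv;
+
+	if (serv)
+		svc_wake_up(serv);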
+
+Reported-by: Salvatore Bonaccorso <carnil@debian.org>
+Closes: https://bugs.debian.org/1093734
+Fixes: ffb402596147 ("nfsd: Don't leave work of closing files to a work queue")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/filecache.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -445,11 +445,20 @@ nfsd_file_dispose_list_delayed(struct li
+ struct nfsd_file, nf_gc);
+ struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
+ struct nfsd_fcache_disposal *l = nn->fcache_disposal;
++ struct svc_serv *serv;
+
+ spin_lock(&l->lock);
+ list_move_tail(&nf->nf_gc, &l->freeme);
+ spin_unlock(&l->lock);
+- svc_wake_up(nn->nfsd_serv);
++
++ /*
++ * The filecache laundrette is shut down after the
++ * nn->nfsd_serv pointer is cleared, but before the
++ * svc_serv is freed.
++ */
++ serv = nn->nfsd_serv;
++ if (serv)
++ svc_wake_up(serv);
+ }
+ }
+
--- /dev/null
+nfsd-clear-acl_access-acl_default-after-releasing-them.patch
+nfsd-fix-hang-in-nfsd4_shutdown_callback.patch
+nfsd-validate-the-nfsd_serv-pointer-before-calling-svc_wake_up.patch
+x86-cpu-kvm-srso-fix-possible-missing-ibpb-on-vm-exit.patch
--- /dev/null
+From 318e8c339c9a0891c389298bb328ed0762a9935e Mon Sep 17 00:00:00 2001
+From: Patrick Bellasi <derkling@google.com>
+Date: Wed, 5 Feb 2025 14:04:41 +0000
+Subject: x86/cpu/kvm: SRSO: Fix possible missing IBPB on VM-Exit
+
+From: Patrick Bellasi <derkling@google.com>
+
+commit 318e8c339c9a0891c389298bb328ed0762a9935e upstream.
+
+In [1] the meaning of the synthetic IBPB flags has been redefined for a
+better separation of concerns:
+ - ENTRY_IBPB -- issue IBPB on entry only
+ - IBPB_ON_VMEXIT -- issue IBPB on VM-Exit only
+and the Retbleed mitigations have been updated to match the new
+semantics.
+
+Commit [2] was merged shortly before [1], and their interaction was not
+handled properly. This resulted in IBPB not being triggered on VM-Exit
+in all SRSO mitigation configs requesting an IBPB there.
+
+Specifically, an IBPB on VM-Exit is triggered only when
+X86_FEATURE_IBPB_ON_VMEXIT is set. However:
+
+ - X86_FEATURE_IBPB_ON_VMEXIT is not set for "spec_rstack_overflow=ibpb",
+   because before [1] having X86_FEATURE_ENTRY_IBPB was enough. Hence,
+   an IBPB is triggered on entry but the expected IBPB on VM-Exit is
+   not.
+
+ - X86_FEATURE_IBPB_ON_VMEXIT is also not set for
+   "spec_rstack_overflow=ibpb-vmexit" when X86_FEATURE_ENTRY_IBPB is
+   already set.
+
+ That's because before [1] this was effectively redundant. Hence, e.g.
+   a "retbleed=ibpb spec_rstack_overflow=ibpb-vmexit" config mistakenly
+   reports the machine still vulnerable to SRSO, despite an IBPB being
+   triggered both on entry and on VM-Exit, because of the selected
+   Retbleed mitigation config.
+
+ - UNTRAIN_RET_VM still won't actually do anything unless
+   CONFIG_MITIGATION_IBPB_ENTRY is set (see the sketch below).
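+
+As a sketch of the gap described above (a conceptual model under the
+assumptions stated here, not the actual kernel code): the VM-Exit IBPB
+in UNTRAIN_RET_VM is compiled in only under CONFIG_MITIGATION_IBPB_ENTRY
+and fires only when X86_FEATURE_IBPB_ON_VMEXIT is set, so it is skipped
+in both problem configs above:
+
+#ifdef CONFIG_MITIGATION_IBPB_ENTRY
+	/* hypothetical C rendering of the alternative-patched sequence;
+	 * issue_ibpb() is a made-up stand-in for the real IBPB write */
+	if (cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
+		issue_ibpb();
+#endif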
+
+For "spec_rstack_overflow=ibpb", enable IBPB on both entry and VM-Exit
+and clear X86_FEATURE_RSB_VMEXIT, which is made superfluous by
+X86_FEATURE_IBPB_ON_VMEXIT. This effectively makes this mitigation
+option similar to the one for 'retbleed=ibpb'; thus, re-order the code
+for the RETBLEED_MITIGATION_IBPB option to be less confusing, doing all
+the feature enabling before disabling the ones that are no longer
+needed.
+
+For "spec_rstack_overflow=ibpb-vmexit", guard this mitigation setting
+with CONFIG_MITIGATION_IBPB_ENTRY to ensure the UNTRAIN_RET_VM sequence
+is effectively compiled in. Drop the CONFIG_MITIGATION_SRSO guard
+instead, since none of the SRSO compile cruft is required in this
+configuration. Also, check only that the required microcode is present
+to effectively enable IBPB on VM-Exit.
+
+Finally, update the Kconfig description for CONFIG_MITIGATION_IBPB_ENTRY
+to also list the SRSO config settings enabled by this guard.
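+
+A minimal sketch of the resulting "spec_rstack_overflow=ibpb" selection
+logic (condensed from the bugs.c hunks below):
+
+	if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) && has_microcode) {
+		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+		srso_mitigation = SRSO_MITIGATION_IBPB;
+		/*
+		 * entry_ibpb() invalidates all predictions, including
+		 * the RSB, so the return thunk and RSB filling on
+		 * VM-Exit become superfluous.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_UNRET);
+		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+	}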
+
+Fixes: 864bcaa38ee4 ("x86/cpu/kvm: Provide UNTRAIN_RET_VM") [1]
+Fixes: d893832d0e1e ("x86/srso: Add IBPB on VMEXIT") [2]
+Reported-by: Yosry Ahmed <yosryahmed@google.com>
+Signed-off-by: Patrick Bellasi <derkling@google.com>
+Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig | 3 ++-
+ arch/x86/kernel/cpu/bugs.c | 21 ++++++++++++++-------
+ 2 files changed, 16 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2582,7 +2582,8 @@ config MITIGATION_IBPB_ENTRY
+ depends on CPU_SUP_AMD && X86_64
+ default y
+ help
+- Compile the kernel with support for the retbleed=ibpb mitigation.
++ Compile the kernel with support for the retbleed=ibpb and
++ spec_rstack_overflow={ibpb,ibpb-vmexit} mitigations.
+
+ config MITIGATION_IBRS_ENTRY
+ bool "Enable IBRS on kernel entry"
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1115,6 +1115,8 @@ do_cmd_auto:
+
+ case RETBLEED_MITIGATION_IBPB:
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
++ mitigate_smt = true;
+
+ /*
+ * IBPB on entry already obviates the need for
+@@ -1124,9 +1126,6 @@ do_cmd_auto:
+ setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+
+- setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+- mitigate_smt = true;
+-
+ /*
+ * There is no need for RSB filling: entry_ibpb() ensures
+ * all predictions, including the RSB, are invalidated,
+@@ -2643,6 +2642,7 @@ static void __init srso_select_mitigatio
+ if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
+ if (has_microcode) {
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ srso_mitigation = SRSO_MITIGATION_IBPB;
+
+ /*
+@@ -2652,6 +2652,13 @@ static void __init srso_select_mitigatio
+ */
+ setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
++
++ /*
++ * There is no need for RSB filling: entry_ibpb() ensures
++ * all predictions, including the RSB, are invalidated,
++ * regardless of IBPB implementation.
++ */
++ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ }
+ } else {
+ pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
+@@ -2659,8 +2666,8 @@ static void __init srso_select_mitigatio
+ break;
+
+ case SRSO_CMD_IBPB_ON_VMEXIT:
+- if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
+- if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
++ if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
++ if (has_microcode) {
+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+
+@@ -2672,8 +2679,8 @@ static void __init srso_select_mitigatio
+ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ }
+ } else {
+- pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
+- }
++ pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
++ }
+ break;
+ default:
+ break;