--- /dev/null
+From 53ff5cf89142b978b1a5ca8dc4d4425e6a09745f Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 4 Oct 2023 18:25:01 +0900
+Subject: ksmbd: fix race condition between session lookup and expire
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 53ff5cf89142b978b1a5ca8dc4d4425e6a09745f upstream.
+
+ Thread A + Thread B
+ ksmbd_session_lookup | smb2_sess_setup
+ sess = xa_load |
+ |
+ | xa_erase(&conn->sessions, sess->id);
+ |
+ | ksmbd_session_destroy(sess) --> kfree(sess)
+ |
+ // UAF! |
+ sess->last_active = jiffies |
+ +
+
+This patch adds an rwsem to fix the race condition between
+ksmbd_session_lookup and ksmbd_expire_session.
+
+Reported-by: luosili <rootlab@huawei.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/connection.c | 2 ++
+ fs/smb/server/connection.h | 1 +
+ fs/smb/server/mgmt/user_session.c | 10 +++++++---
+ 3 files changed, 10 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -84,6 +84,8 @@ struct ksmbd_conn *ksmbd_conn_alloc(void
+ spin_lock_init(&conn->llist_lock);
+ INIT_LIST_HEAD(&conn->lock_list);
+
++ init_rwsem(&conn->session_lock);
++
+ down_write(&conn_list_lock);
+ list_add(&conn->conns_list, &conn_list);
+ up_write(&conn_list_lock);
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -50,6 +50,7 @@ struct ksmbd_conn {
+ struct nls_table *local_nls;
+ struct unicode_map *um;
+ struct list_head conns_list;
++ struct rw_semaphore session_lock;
+ /* smb session 1 per user */
+ struct xarray sessions;
+ unsigned long last_active;
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -174,7 +174,7 @@ static void ksmbd_expire_session(struct
+ unsigned long id;
+ struct ksmbd_session *sess;
+
+- down_write(&sessions_table_lock);
++ down_write(&conn->session_lock);
+ xa_for_each(&conn->sessions, id, sess) {
+ if (sess->state != SMB2_SESSION_VALID ||
+ time_after(jiffies,
+@@ -185,7 +185,7 @@ static void ksmbd_expire_session(struct
+ continue;
+ }
+ }
+- up_write(&sessions_table_lock);
++ up_write(&conn->session_lock);
+ }
+
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+@@ -227,7 +227,9 @@ void ksmbd_sessions_deregister(struct ks
+ }
+ }
+ }
++ up_write(&sessions_table_lock);
+
++ down_write(&conn->session_lock);
+ xa_for_each(&conn->sessions, id, sess) {
+ unsigned long chann_id;
+ struct channel *chann;
+@@ -244,7 +246,7 @@ void ksmbd_sessions_deregister(struct ks
+ ksmbd_session_destroy(sess);
+ }
+ }
+- up_write(&sessions_table_lock);
++ up_write(&conn->session_lock);
+ }
+
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+@@ -252,9 +254,11 @@ struct ksmbd_session *ksmbd_session_look
+ {
+ struct ksmbd_session *sess;
+
++ down_read(&conn->session_lock);
+ sess = xa_load(&conn->sessions, id);
+ if (sess)
+ sess->last_active = jiffies;
++ up_read(&conn->session_lock);
+ return sess;
+ }
+
--- /dev/null
+From 75ac9a3dd65f7eab4d12b0a0f744234b5300a491 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 4 Oct 2023 18:31:03 +0900
+Subject: ksmbd: fix race condition from parallel smb2 lock requests
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 75ac9a3dd65f7eab4d12b0a0f744234b5300a491 upstream.
+
+There is a race condition issue between parallel smb2 lock requests.
+
+ Time
+ +
+Thread A | Thread B
+smb2_lock | smb2_lock
+ |
+ insert smb_lock to lock_list |
+ spin_unlock(&work->conn->llist_lock) |
+ |
+ | spin_lock(&conn->llist_lock);
+ | kfree(cmp_lock);
+ |
+ // UAF! |
+ list_add(&smb_lock->llist, &rollback_list) +
+
+This patch swaps the line for adding the smb lock to the rollback list and
+adding the lock list of connection to fix the race issue.
+
+Reported-by: luosili <rootlab@huawei.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -7029,10 +7029,6 @@ skip:
+
+ ksmbd_debug(SMB,
+ "would have to wait for getting lock\n");
+- spin_lock(&work->conn->llist_lock);
+- list_add_tail(&smb_lock->clist,
+- &work->conn->lock_list);
+- spin_unlock(&work->conn->llist_lock);
+ list_add(&smb_lock->llist, &rollback_list);
+
+ argv = kmalloc(sizeof(void *), GFP_KERNEL);
+@@ -7063,9 +7059,6 @@ skip:
+
+ if (work->state != KSMBD_WORK_ACTIVE) {
+ list_del(&smb_lock->llist);
+- spin_lock(&work->conn->llist_lock);
+- list_del(&smb_lock->clist);
+- spin_unlock(&work->conn->llist_lock);
+ locks_free_lock(flock);
+
+ if (work->state == KSMBD_WORK_CANCELLED) {
+@@ -7087,19 +7080,16 @@ skip:
+ }
+
+ list_del(&smb_lock->llist);
+- spin_lock(&work->conn->llist_lock);
+- list_del(&smb_lock->clist);
+- spin_unlock(&work->conn->llist_lock);
+ release_async_work(work);
+ goto retry;
+ } else if (!rc) {
++ list_add(&smb_lock->llist, &rollback_list);
+ spin_lock(&work->conn->llist_lock);
+ list_add_tail(&smb_lock->clist,
+ &work->conn->lock_list);
+ list_add_tail(&smb_lock->flist,
+ &fp->lock_list);
+ spin_unlock(&work->conn->llist_lock);
+- list_add(&smb_lock->llist, &rollback_list);
+ ksmbd_debug(SMB, "successful in taking lock\n");
+ } else {
+ goto out;
--- /dev/null
+From c69813471a1ec081a0b9bf0c6bd7e8afd818afce Mon Sep 17 00:00:00 2001
+From: luosili <rootlab@huawei.com>
+Date: Wed, 4 Oct 2023 18:29:36 +0900
+Subject: ksmbd: fix uaf in smb20_oplock_break_ack
+
+From: luosili <rootlab@huawei.com>
+
+commit c69813471a1ec081a0b9bf0c6bd7e8afd818afce upstream.
+
+Drop the reference to opinfo only after it has been used.
+
+Signed-off-by: luosili <rootlab@huawei.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -8036,10 +8036,10 @@ static void smb20_oplock_break_ack(struc
+ goto err_out;
+ }
+
+- opinfo_put(opinfo);
+- ksmbd_fd_put(work, fp);
+ opinfo->op_state = OPLOCK_STATE_NONE;
+ wake_up_interruptible_all(&opinfo->oplock_q);
++ opinfo_put(opinfo);
++ ksmbd_fd_put(work, fp);
+
+ rsp->StructureSize = cpu_to_le16(24);
+ rsp->OplockLevel = rsp_oplevel;
rdma-mlx5-fix-null-string-error.patch
alsa-hda-realtek-fix-spelling-mistake-powe-power.patch
alsa-hda-realtek-fixed-two-speaker-platform.patch
+x86-sev-change-npages-to-unsigned-long-in-snp_accept_memory.patch
+x86-sev-use-the-ghcb-protocol-when-available-for-snp-cpuid-requests.patch
+ksmbd-fix-race-condition-between-session-lookup-and-expire.patch
+ksmbd-fix-uaf-in-smb20_oplock_break_ack.patch
+ksmbd-fix-race-condition-from-parallel-smb2-lock-requests.patch
--- /dev/null
+From 62d5e970d022ef4bde18948dd67247c3194384c1 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 22 Jun 2023 08:45:05 -0500
+Subject: x86/sev: Change npages to unsigned long in snp_accept_memory()
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 62d5e970d022ef4bde18948dd67247c3194384c1 upstream.
+
+In snp_accept_memory(), the npages variables value is calculated from
+phys_addr_t variables but is an unsigned int. A very large range passed
+into snp_accept_memory() could lead to truncating npages to zero. This
+doesn't happen at the moment but let's be prepared.
+
+Fixes: 6c3211796326 ("x86/sev: Add SNP-specific unaccepted memory support")
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/6d511c25576494f682063c9fb6c705b526a3757e.1687441505.git.thomas.lendacky@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/sev.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index 2787826d9f60..d8c1e3be74c0 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -868,8 +868,7 @@ void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
+
+ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
+ {
+- unsigned long vaddr;
+- unsigned int npages;
++ unsigned long vaddr, npages;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return;
+--
+2.42.0
+
--- /dev/null
+From 6bc6f7d9d7ac3cdbe9e8b0495538b4a0cc11f032 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 28 Jul 2023 16:09:26 -0500
+Subject: x86/sev: Use the GHCB protocol when available for SNP CPUID requests
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 6bc6f7d9d7ac3cdbe9e8b0495538b4a0cc11f032 upstream.
+
+SNP retrieves the majority of CPUID information from the SNP CPUID page.
+But there are times when that information needs to be supplemented by the
+hypervisor, for example, obtaining the initial APIC ID of the vCPU from
+leaf 1.
+
+The current implementation uses the MSR protocol to retrieve the data from
+the hypervisor, even when a GHCB exists. The problem arises when an NMI
+arrives on return from the VMGEXIT. The NMI will be immediately serviced
+and may generate a #VC requiring communication with the hypervisor.
+
+Since a GHCB exists in this case, it will be used. As part of using the
+GHCB, the #VC handler will write the GHCB physical address into the GHCB
+MSR and the #VC will be handled.
+
+When the NMI completes, processing resumes at the site of the VMGEXIT
+which is expecting to read the GHCB MSR and find a CPUID MSR protocol
+response. Since the NMI handling overwrote the GHCB MSR response, the
+guest will see an invalid reply from the hypervisor and self-terminate.
+
+Fix this problem by using the GHCB when it is available. Any NMI
+received is properly handled because the GHCB contents are copied into
+a backup page and restored on NMI exit, thus preserving the active GHCB
+request or result.
+
+ [ bp: Touchups. ]
+
+Fixes: ee0bfa08a345 ("x86/compressed/64: Add support for SEV-SNP CPUID table in #VC handlers")
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/a5856fa1ebe3879de91a8f6298b6bbd901c61881.1690578565.git.thomas.lendacky@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/sev-shared.c | 69 ++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 55 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -256,7 +256,7 @@ static int __sev_cpuid_hv(u32 fn, int re
+ return 0;
+ }
+
+-static int sev_cpuid_hv(struct cpuid_leaf *leaf)
++static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
+ {
+ int ret;
+
+@@ -279,6 +279,45 @@ static int sev_cpuid_hv(struct cpuid_lea
+ return ret;
+ }
+
++static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
++{
++ u32 cr4 = native_read_cr4();
++ int ret;
++
++ ghcb_set_rax(ghcb, leaf->fn);
++ ghcb_set_rcx(ghcb, leaf->subfn);
++
++ if (cr4 & X86_CR4_OSXSAVE)
++ /* Safe to read xcr0 */
++ ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
++ else
++ /* xgetbv will cause #UD - use reset value for xcr0 */
++ ghcb_set_xcr0(ghcb, 1);
++
++ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
++ if (ret != ES_OK)
++ return ret;
++
++ if (!(ghcb_rax_is_valid(ghcb) &&
++ ghcb_rbx_is_valid(ghcb) &&
++ ghcb_rcx_is_valid(ghcb) &&
++ ghcb_rdx_is_valid(ghcb)))
++ return ES_VMM_ERROR;
++
++ leaf->eax = ghcb->save.rax;
++ leaf->ebx = ghcb->save.rbx;
++ leaf->ecx = ghcb->save.rcx;
++ leaf->edx = ghcb->save.rdx;
++
++ return ES_OK;
++}
++
++static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
++{
++ return ghcb ? __sev_cpuid_hv_ghcb(ghcb, ctxt, leaf)
++ : __sev_cpuid_hv_msr(leaf);
++}
++
+ /*
+ * This may be called early while still running on the initial identity
+ * mapping. Use RIP-relative addressing to obtain the correct address
+@@ -388,19 +427,20 @@ snp_cpuid_get_validated_func(struct cpui
+ return false;
+ }
+
+-static void snp_cpuid_hv(struct cpuid_leaf *leaf)
++static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+ {
+- if (sev_cpuid_hv(leaf))
++ if (sev_cpuid_hv(ghcb, ctxt, leaf))
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
+ }
+
+-static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
++static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
++ struct cpuid_leaf *leaf)
+ {
+ struct cpuid_leaf leaf_hv = *leaf;
+
+ switch (leaf->fn) {
+ case 0x1:
+- snp_cpuid_hv(&leaf_hv);
++ snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
+
+ /* initial APIC ID */
+ leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
+@@ -419,7 +459,7 @@ static int snp_cpuid_postprocess(struct
+ break;
+ case 0xB:
+ leaf_hv.subfn = 0;
+- snp_cpuid_hv(&leaf_hv);
++ snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
+
+ /* extended APIC ID */
+ leaf->edx = leaf_hv.edx;
+@@ -467,7 +507,7 @@ static int snp_cpuid_postprocess(struct
+ }
+ break;
+ case 0x8000001E:
+- snp_cpuid_hv(&leaf_hv);
++ snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
+
+ /* extended APIC ID */
+ leaf->eax = leaf_hv.eax;
+@@ -488,7 +528,7 @@ static int snp_cpuid_postprocess(struct
+ * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
+ * should be treated as fatal by caller.
+ */
+-static int snp_cpuid(struct cpuid_leaf *leaf)
++static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+ {
+ const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+
+@@ -522,7 +562,7 @@ static int snp_cpuid(struct cpuid_leaf *
+ return 0;
+ }
+
+- return snp_cpuid_postprocess(leaf);
++ return snp_cpuid_postprocess(ghcb, ctxt, leaf);
+ }
+
+ /*
+@@ -544,14 +584,14 @@ void __init do_vc_no_ghcb(struct pt_regs
+ leaf.fn = fn;
+ leaf.subfn = subfn;
+
+- ret = snp_cpuid(&leaf);
++ ret = snp_cpuid(NULL, NULL, &leaf);
+ if (!ret)
+ goto cpuid_done;
+
+ if (ret != -EOPNOTSUPP)
+ goto fail;
+
+- if (sev_cpuid_hv(&leaf))
++ if (__sev_cpuid_hv_msr(&leaf))
+ goto fail;
+
+ cpuid_done:
+@@ -848,14 +888,15 @@ static enum es_result vc_handle_ioio(str
+ return ret;
+ }
+
+-static int vc_handle_cpuid_snp(struct pt_regs *regs)
++static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+ {
++ struct pt_regs *regs = ctxt->regs;
+ struct cpuid_leaf leaf;
+ int ret;
+
+ leaf.fn = regs->ax;
+ leaf.subfn = regs->cx;
+- ret = snp_cpuid(&leaf);
++ ret = snp_cpuid(ghcb, ctxt, &leaf);
+ if (!ret) {
+ regs->ax = leaf.eax;
+ regs->bx = leaf.ebx;
+@@ -874,7 +915,7 @@ static enum es_result vc_handle_cpuid(st
+ enum es_result ret;
+ int snp_cpuid_ret;
+
+- snp_cpuid_ret = vc_handle_cpuid_snp(regs);
++ snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
+ if (!snp_cpuid_ret)
+ return ES_OK;
+ if (snp_cpuid_ret != -EOPNOTSUPP)