--- /dev/null
+From 048c796beb6eb4fa3a5a647ee1c81f5c6f0f6a2a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Maciej=20=C5=BBenczykowski?= <maze@google.com>
+Date: Mon, 7 Aug 2023 03:25:32 -0700
+Subject: ipv6: adjust ndisc_is_useropt() to also return true for PIO
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maciej Żenczykowski <maze@google.com>
+
+commit 048c796beb6eb4fa3a5a647ee1c81f5c6f0f6a2a upstream.
+
+The upcoming (and nearly finalized):
+ https://datatracker.ietf.org/doc/draft-collink-6man-pio-pflag/
+will update the IPv6 RA to include a new flag in the PIO field,
+which will serve as a hint to perform DHCPv6-PD.
+
+As we don't want DHCPv6 related logic inside the kernel, this piece of
+information needs to be exposed to userspace. The simplest option is to
+simply expose the entire PIO through the already existing mechanism.
+
+Even without this new flag, the already existing PIO R (router address)
+flag (from RFC 6275) cannot, AFAICT, be handled entirely in the kernel,
+and it provides useful information that should be exposed to userspace
+(the router's global address, for use by Mobile IPv6).
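+
+For illustration only (not part of this change): userspace already receives
+the options accepted by ndisc_is_useropt() as RTM_NEWNDUSEROPT messages on
+the RTNLGRP_ND_USEROPT rtnetlink group; with this patch the PIO simply shows
+up there as well. A minimal, unhardened sketch of such a listener:
+
+  #include <linux/rtnetlink.h>
+  #include <stdio.h>
+  #include <sys/socket.h>
+
+  int main(void)
+  {
+          struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
+          int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+          int grp = RTNLGRP_ND_USEROPT;
+          char buf[8192];
+
+          bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+          setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
+                     &grp, sizeof(grp));
+          for (;;) {
+                  ssize_t len = recv(fd, buf, sizeof(buf), 0);
+                  struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
+
+                  for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
+                          struct nduseroptmsg *ndm = NLMSG_DATA(nlh);
+
+                          if (nlh->nlmsg_type != RTM_NEWNDUSEROPT)
+                                  continue;
+                          /* raw ND option bytes (e.g. the PIO) follow ndm */
+                          printf("ifindex %d: %u option bytes\n",
+                                 ndm->nduseropt_ifindex,
+                                 ndm->nduseropt_opts_len);
+                  }
+          }
+  }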
+
+Also cc'ing stable@ for inclusion in LTS, as while technically this is
+not quite a bugfix, and instead more of a feature, it is absolutely
+trivial and the alternative is manually cherrypicking into all Android
+Common Kernel trees - and I know Greg will ask for it to be sent in via
+LTS instead...
+
+Cc: Jen Linkova <furry@google.com>
+Cc: Lorenzo Colitti <lorenzo@google.com>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: YOSHIFUJI Hideaki / 吉藤英明 <yoshfuji@linux-ipv6.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Maciej Żenczykowski <maze@google.com>
+Link: https://lore.kernel.org/r/20230807102533.1147559-1-maze@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ndisc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -197,7 +197,8 @@ static struct nd_opt_hdr *ndisc_next_opt
+ static inline int ndisc_is_useropt(const struct net_device *dev,
+ struct nd_opt_hdr *opt)
+ {
+- return opt->nd_opt_type == ND_OPT_RDNSS ||
++ return opt->nd_opt_type == ND_OPT_PREFIX_INFO ||
++ opt->nd_opt_type == ND_OPT_RDNSS ||
+ opt->nd_opt_type == ND_OPT_DNSSL ||
+ opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
+ opt->nd_opt_type == ND_OPT_PREF64 ||
--- /dev/null
+From 79ed288cef201f1f212dfb934bcaac75572fb8f6 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Sun, 6 Aug 2023 08:44:17 +0900
+Subject: ksmbd: fix wrong next length validation of ea buffer in smb2_set_ea()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 79ed288cef201f1f212dfb934bcaac75572fb8f6 upstream.
+
+There can be multiple smb2_ea_info buffers in a FILE_FULL_EA_INFORMATION
+request from the client. ksmbd finds the next smb2_ea_info using the
+->NextEntryOffset of the current one. ksmbd needs to validate the remaining
+buffer length before accessing the next EA, and it should check that length
+using buf_len, not the next variable: next is only the start offset of the
+current EA, obtained from the previous entry.
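+
+For illustration, the intended walk over the EA list looks roughly like
+this (a sketch, not the literal patch; ea is the current entry pointer and
+field names follow struct smb2_ea_info):
+
+  do {
+          /* the fixed-size header and the name/value it describes must
+           * both fit into the bytes that are actually left
+           */
+          if (buf_len < sizeof(*ea) ||
+              buf_len < sizeof(*ea) + ea->EaNameLength +
+                        le16_to_cpu(ea->EaValueLength))
+                  return -EINVAL;
+
+          /* ... apply this EA ... */
+
+          next = le32_to_cpu(ea->NextEntryOffset);
+          if (next == 0)
+                  break;
+          if (next > buf_len)
+                  return -EINVAL;
+          buf_len -= next;
+          ea = (struct smb2_ea_info *)((char *)ea + next);
+  } while (1);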
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-21598
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2340,9 +2340,16 @@ next:
+ break;
+ buf_len -= next;
+ eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
+- if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
++ if (buf_len < sizeof(struct smb2_ea_info)) {
++ rc = -EINVAL;
+ break;
++ }
+
++ if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
++ le16_to_cpu(eabuf->EaValueLength)) {
++ rc = -EINVAL;
++ break;
++ }
+ } while (next != 0);
+
+ kfree(attr_name);
--- /dev/null
+From 5aa4fda5aa9c2a5a7bac67b4a12b089ab81fee3c Mon Sep 17 00:00:00 2001
+From: Long Li <leo.lilong@huawei.com>
+Date: Sat, 29 Jul 2023 11:36:18 +0800
+Subject: ksmbd: validate command request size
+
+From: Long Li <leo.lilong@huawei.com>
+
+commit 5aa4fda5aa9c2a5a7bac67b4a12b089ab81fee3c upstream.
+
+In commit 2b9b8f3b68ed ("ksmbd: validate command payload size"), the request
+size is only checked for the SMB2_OPLOCK_BREAK_HE command and not for the
+other commands, which is not what was intended. Fix it by adding a check on
+the request size of the other commands as well.
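+
+Restated as a sketch (not the literal diff), the corrected logic rejects
+every StructureSize2 mismatch unless it is the documented SMB2.1
+lease-break exception:
+
+  if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
+          bool lease_break_ok =
+                  command == SMB2_OPLOCK_BREAK_HE &&
+                  (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 ||
+                   le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21);
+
+          if (!lease_break_ok)
+                  return 1;       /* malformed request */
+  }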
+
+Cc: stable@vger.kernel.org
+Fixes: 2b9b8f3b68ed ("ksmbd: validate command payload size")
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Long Li <leo.lilong@huawei.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2misc.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/fs/smb/server/smb2misc.c
++++ b/fs/smb/server/smb2misc.c
+@@ -380,13 +380,13 @@ int ksmbd_smb2_check_message(struct ksmb
+ }
+
+ if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
+- if (command == SMB2_OPLOCK_BREAK_HE &&
+- le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 &&
+- le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) {
++ if (!(command == SMB2_OPLOCK_BREAK_HE &&
++ (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 ||
++ le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) {
+ /* special case for SMB2.1 lease break message */
+ ksmbd_debug(SMB,
+- "Illegal request size %d for oplock break\n",
+- le16_to_cpu(pdu->StructureSize2));
++ "Illegal request size %u for command %d\n",
++ le16_to_cpu(pdu->StructureSize2), command);
+ return 1;
+ }
+ }
--- /dev/null
+From 7588dbcebcbf0193ab5b76987396d0254270b04a Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 4 Aug 2023 12:56:36 -0400
+Subject: KVM: SEV: only access GHCB fields once
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 7588dbcebcbf0193ab5b76987396d0254270b04a upstream.
+
+A KVM guest using SEV-ES or SEV-SNP with multiple vCPUs can trigger
+a double fetch race condition vulnerability and invoke the VMGEXIT
+handler recursively.
+
+sev_handle_vmgexit() maps the GHCB page using kvm_vcpu_map() and then
+fetches the exit code using ghcb_get_sw_exit_code(). Soon after,
+sev_es_validate_vmgexit() fetches the exit code again. Since the GHCB
+page is shared with the guest, the guest is able to quickly swap the
+values with another vCPU and hence bypass the validation. One vmexit code
+that can be rejected by sev_es_validate_vmgexit() is SVM_EXIT_VMGEXIT;
+if sev_handle_vmgexit() observes it in the second fetch, the call
+to svm_invoke_exit_handler() will invoke sev_handle_vmgexit() again
+recursively.
+
+To avoid the race, always fetch the GHCB data from the places where
+sev_es_sync_from_ghcb stores it.
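+
+The pattern being removed is a classic double fetch from guest-writable
+memory; schematically (valid(), handle() and reject() are placeholders,
+not real functions):
+
+  /* racy: the guest can change the GHCB between the two reads */
+  exit_code = ghcb_get_sw_exit_code(ghcb);      /* fetch #1, validated  */
+  if (!valid(exit_code))
+          return reject();
+  handle(ghcb_get_sw_exit_code(ghcb));          /* fetch #2, may differ */
+
+  /* safe: read once into private storage, use only the snapshot */
+  exit_code = kvm_ghcb_get_sw_exit_code(control);
+  if (!valid(exit_code))
+          return reject();
+  handle(exit_code);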
+
+Exploiting recursion in the Linux kernel has been proven feasible in the
+past, but the impact is mitigated by stack guard pages (CONFIG_VMAP_STACK).
+Still, if an attacker manages to call the handler multiple times, they can
+theoretically trigger a stack overflow and cause a denial of service, or
+potentially a guest-to-host escape in kernel configurations without stack
+guard pages.
+
+Note that winning the race reliably in every iteration is very tricky
+due to the very tight window of the fetches; depending on the compiler
+settings, they are often consecutive because of optimization and inlining.
+
+Tested by booting an SEV-ES RHEL9 guest.
+
+Fixes: CVE-2023-4155
+Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT")
+Cc: stable@vger.kernel.org
+Reported-by: Andy Nguyen <theflow@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/sev.c | 25 ++++++++++++++-----------
+ 1 file changed, 14 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2438,9 +2438,15 @@ static void sev_es_sync_from_ghcb(struct
+ memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+ }
+
++static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
++{
++ return (((u64)control->exit_code_hi) << 32) | control->exit_code;
++}
++
+ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ {
+- struct kvm_vcpu *vcpu;
++ struct vmcb_control_area *control = &svm->vmcb->control;
++ struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct ghcb *ghcb;
+ u64 exit_code;
+ u64 reason;
+@@ -2451,7 +2457,7 @@ static int sev_es_validate_vmgexit(struc
+ * Retrieve the exit code now even though it may not be marked valid
+ * as it could help with debugging.
+ */
+- exit_code = ghcb_get_sw_exit_code(ghcb);
++ exit_code = kvm_ghcb_get_sw_exit_code(control);
+
+ /* Only GHCB Usage code 0 is supported */
+ if (ghcb->ghcb_usage) {
+@@ -2466,7 +2472,7 @@ static int sev_es_validate_vmgexit(struc
+ !kvm_ghcb_sw_exit_info_2_is_valid(svm))
+ goto vmgexit_err;
+
+- switch (ghcb_get_sw_exit_code(ghcb)) {
++ switch (exit_code) {
+ case SVM_EXIT_READ_DR7:
+ break;
+ case SVM_EXIT_WRITE_DR7:
+@@ -2483,18 +2489,18 @@ static int sev_es_validate_vmgexit(struc
+ if (!kvm_ghcb_rax_is_valid(svm) ||
+ !kvm_ghcb_rcx_is_valid(svm))
+ goto vmgexit_err;
+- if (ghcb_get_rax(ghcb) == 0xd)
++ if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
+ if (!kvm_ghcb_xcr0_is_valid(svm))
+ goto vmgexit_err;
+ break;
+ case SVM_EXIT_INVD:
+ break;
+ case SVM_EXIT_IOIO:
+- if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
++ if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
+ if (!kvm_ghcb_sw_scratch_is_valid(svm))
+ goto vmgexit_err;
+ } else {
+- if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
++ if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
+ if (!kvm_ghcb_rax_is_valid(svm))
+ goto vmgexit_err;
+ }
+@@ -2502,7 +2508,7 @@ static int sev_es_validate_vmgexit(struc
+ case SVM_EXIT_MSR:
+ if (!kvm_ghcb_rcx_is_valid(svm))
+ goto vmgexit_err;
+- if (ghcb_get_sw_exit_info_1(ghcb)) {
++ if (control->exit_info_1) {
+ if (!kvm_ghcb_rax_is_valid(svm) ||
+ !kvm_ghcb_rdx_is_valid(svm))
+ goto vmgexit_err;
+@@ -2546,8 +2552,6 @@ static int sev_es_validate_vmgexit(struc
+ return 0;
+
+ vmgexit_err:
+- vcpu = &svm->vcpu;
+-
+ if (reason == GHCB_ERR_INVALID_USAGE) {
+ vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
+ ghcb->ghcb_usage);
+@@ -2845,8 +2849,6 @@ int sev_handle_vmgexit(struct kvm_vcpu *
+
+ trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
+
+- exit_code = ghcb_get_sw_exit_code(ghcb);
+-
+ sev_es_sync_from_ghcb(svm);
+ ret = sev_es_validate_vmgexit(svm);
+ if (ret)
+@@ -2855,6 +2857,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *
+ ghcb_set_sw_exit_info_1(ghcb, 0);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+
++ exit_code = kvm_ghcb_get_sw_exit_code(control);
+ switch (exit_code) {
+ case SVM_VMGEXIT_MMIO_READ:
+ ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
--- /dev/null
+From 4e15a0ddc3ff40e8ea84032213976ecf774d7f77 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 4 Aug 2023 12:42:45 -0400
+Subject: KVM: SEV: snapshot the GHCB before accessing it
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 4e15a0ddc3ff40e8ea84032213976ecf774d7f77 upstream.
+
+Validation of the GHCB is susceptible to time-of-check/time-of-use vulnerabilities.
+To avoid them, we would like to always snapshot the fields that are read in
+sev_es_validate_vmgexit(), and not use the GHCB anymore after it returns.
+
+This means:
+
+- invoking sev_es_sync_from_ghcb() before any GHCB access, including before
+ sev_es_validate_vmgexit()
+
+- snapshotting all fields, including the valid bitmap and the sw_scratch field,
+  which are currently not cached anywhere.
+
+The valid bitmap is the first thing to be copied out of the GHCB; then,
+further accesses will use the copy in svm->sev_es.
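+
+For readability, this is roughly what the new accessor macro (added to
+svm.h below) expands to for the rax field; validity is answered from the
+snapshot in svm->sev_es rather than from the guest-shared GHCB page:
+
+  static __always_inline bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm)
+  {
+          return test_bit(GHCB_BITMAP_IDX(rax),
+                          (unsigned long *)&svm->sev_es.valid_bitmap);
+  }
+
+  static __always_inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm,
+                                                       struct ghcb *ghcb)
+  {
+          return kvm_ghcb_rax_is_valid(svm) ? ghcb->save.rax : 0;
+  }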
+
+Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/sev.c | 69 ++++++++++++++++++++++++-------------------------
+ arch/x86/kvm/svm/svm.h | 26 ++++++++++++++++++
+ 2 files changed, 61 insertions(+), 34 deletions(-)
+
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2410,15 +2410,18 @@ static void sev_es_sync_from_ghcb(struct
+ */
+ memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
+
+- vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
+- vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
+- vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
+- vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
+- vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
++ BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
++ memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
+
+- svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
++ vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
++ vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
++ vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
++ vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
++ vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
+
+- if (ghcb_xcr0_is_valid(ghcb)) {
++ svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
++
++ if (kvm_ghcb_xcr0_is_valid(svm)) {
+ vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
+ kvm_update_cpuid_runtime(vcpu);
+ }
+@@ -2429,6 +2432,7 @@ static void sev_es_sync_from_ghcb(struct
+ control->exit_code_hi = upper_32_bits(exit_code);
+ control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
+ control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
++ svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
+
+ /* Clear the valid entries fields */
+ memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+@@ -2457,56 +2461,56 @@ static int sev_es_validate_vmgexit(struc
+
+ reason = GHCB_ERR_MISSING_INPUT;
+
+- if (!ghcb_sw_exit_code_is_valid(ghcb) ||
+- !ghcb_sw_exit_info_1_is_valid(ghcb) ||
+- !ghcb_sw_exit_info_2_is_valid(ghcb))
++ if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
++ !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
++ !kvm_ghcb_sw_exit_info_2_is_valid(svm))
+ goto vmgexit_err;
+
+ switch (ghcb_get_sw_exit_code(ghcb)) {
+ case SVM_EXIT_READ_DR7:
+ break;
+ case SVM_EXIT_WRITE_DR7:
+- if (!ghcb_rax_is_valid(ghcb))
++ if (!kvm_ghcb_rax_is_valid(svm))
+ goto vmgexit_err;
+ break;
+ case SVM_EXIT_RDTSC:
+ break;
+ case SVM_EXIT_RDPMC:
+- if (!ghcb_rcx_is_valid(ghcb))
++ if (!kvm_ghcb_rcx_is_valid(svm))
+ goto vmgexit_err;
+ break;
+ case SVM_EXIT_CPUID:
+- if (!ghcb_rax_is_valid(ghcb) ||
+- !ghcb_rcx_is_valid(ghcb))
++ if (!kvm_ghcb_rax_is_valid(svm) ||
++ !kvm_ghcb_rcx_is_valid(svm))
+ goto vmgexit_err;
+ if (ghcb_get_rax(ghcb) == 0xd)
+- if (!ghcb_xcr0_is_valid(ghcb))
++ if (!kvm_ghcb_xcr0_is_valid(svm))
+ goto vmgexit_err;
+ break;
+ case SVM_EXIT_INVD:
+ break;
+ case SVM_EXIT_IOIO:
+ if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
+- if (!ghcb_sw_scratch_is_valid(ghcb))
++ if (!kvm_ghcb_sw_scratch_is_valid(svm))
+ goto vmgexit_err;
+ } else {
+ if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
+- if (!ghcb_rax_is_valid(ghcb))
++ if (!kvm_ghcb_rax_is_valid(svm))
+ goto vmgexit_err;
+ }
+ break;
+ case SVM_EXIT_MSR:
+- if (!ghcb_rcx_is_valid(ghcb))
++ if (!kvm_ghcb_rcx_is_valid(svm))
+ goto vmgexit_err;
+ if (ghcb_get_sw_exit_info_1(ghcb)) {
+- if (!ghcb_rax_is_valid(ghcb) ||
+- !ghcb_rdx_is_valid(ghcb))
++ if (!kvm_ghcb_rax_is_valid(svm) ||
++ !kvm_ghcb_rdx_is_valid(svm))
+ goto vmgexit_err;
+ }
+ break;
+ case SVM_EXIT_VMMCALL:
+- if (!ghcb_rax_is_valid(ghcb) ||
+- !ghcb_cpl_is_valid(ghcb))
++ if (!kvm_ghcb_rax_is_valid(svm) ||
++ !kvm_ghcb_cpl_is_valid(svm))
+ goto vmgexit_err;
+ break;
+ case SVM_EXIT_RDTSCP:
+@@ -2514,19 +2518,19 @@ static int sev_es_validate_vmgexit(struc
+ case SVM_EXIT_WBINVD:
+ break;
+ case SVM_EXIT_MONITOR:
+- if (!ghcb_rax_is_valid(ghcb) ||
+- !ghcb_rcx_is_valid(ghcb) ||
+- !ghcb_rdx_is_valid(ghcb))
++ if (!kvm_ghcb_rax_is_valid(svm) ||
++ !kvm_ghcb_rcx_is_valid(svm) ||
++ !kvm_ghcb_rdx_is_valid(svm))
+ goto vmgexit_err;
+ break;
+ case SVM_EXIT_MWAIT:
+- if (!ghcb_rax_is_valid(ghcb) ||
+- !ghcb_rcx_is_valid(ghcb))
++ if (!kvm_ghcb_rax_is_valid(svm) ||
++ !kvm_ghcb_rcx_is_valid(svm))
+ goto vmgexit_err;
+ break;
+ case SVM_VMGEXIT_MMIO_READ:
+ case SVM_VMGEXIT_MMIO_WRITE:
+- if (!ghcb_sw_scratch_is_valid(ghcb))
++ if (!kvm_ghcb_sw_scratch_is_valid(svm))
+ goto vmgexit_err;
+ break;
+ case SVM_VMGEXIT_NMI_COMPLETE:
+@@ -2556,9 +2560,6 @@ vmgexit_err:
+ dump_ghcb(svm);
+ }
+
+- /* Clear the valid entries fields */
+- memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+-
+ ghcb_set_sw_exit_info_1(ghcb, 2);
+ ghcb_set_sw_exit_info_2(ghcb, reason);
+
+@@ -2579,7 +2580,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *
+ */
+ if (svm->sev_es.ghcb_sa_sync) {
+ kvm_write_guest(svm->vcpu.kvm,
+- ghcb_get_sw_scratch(svm->sev_es.ghcb),
++ svm->sev_es.sw_scratch,
+ svm->sev_es.ghcb_sa,
+ svm->sev_es.ghcb_sa_len);
+ svm->sev_es.ghcb_sa_sync = false;
+@@ -2630,7 +2631,7 @@ static int setup_vmgexit_scratch(struct
+ u64 scratch_gpa_beg, scratch_gpa_end;
+ void *scratch_va;
+
+- scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
++ scratch_gpa_beg = svm->sev_es.sw_scratch;
+ if (!scratch_gpa_beg) {
+ pr_err("vmgexit: scratch gpa not provided\n");
+ goto e_scratch;
+@@ -2846,11 +2847,11 @@ int sev_handle_vmgexit(struct kvm_vcpu *
+
+ exit_code = ghcb_get_sw_exit_code(ghcb);
+
++ sev_es_sync_from_ghcb(svm);
+ ret = sev_es_validate_vmgexit(svm);
+ if (ret)
+ return ret;
+
+- sev_es_sync_from_ghcb(svm);
+ ghcb_set_sw_exit_info_1(ghcb, 0);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -196,10 +196,12 @@ struct vcpu_sev_es_state {
+ /* SEV-ES support */
+ struct sev_es_save_area *vmsa;
+ struct ghcb *ghcb;
++ u8 valid_bitmap[16];
+ struct kvm_host_map ghcb_map;
+ bool received_first_sipi;
+
+ /* SEV-ES scratch area support */
++ u64 sw_scratch;
+ void *ghcb_sa;
+ u32 ghcb_sa_len;
+ bool ghcb_sa_sync;
+@@ -688,4 +690,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *
+ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+ void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+
++#define DEFINE_KVM_GHCB_ACCESSORS(field) \
++ static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
++ { \
++ return test_bit(GHCB_BITMAP_IDX(field), \
++ (unsigned long *)&svm->sev_es.valid_bitmap); \
++ } \
++ \
++ static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
++ { \
++ return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
++ } \
++
++DEFINE_KVM_GHCB_ACCESSORS(cpl)
++DEFINE_KVM_GHCB_ACCESSORS(rax)
++DEFINE_KVM_GHCB_ACCESSORS(rcx)
++DEFINE_KVM_GHCB_ACCESSORS(rdx)
++DEFINE_KVM_GHCB_ACCESSORS(rbx)
++DEFINE_KVM_GHCB_ACCESSORS(rsi)
++DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
++DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
++DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
++DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
++DEFINE_KVM_GHCB_ACCESSORS(xcr0)
++
+ #endif
--- /dev/null
+From d44263222134b5635932974c6177a5cba65a07e8 Mon Sep 17 00:00:00 2001
+From: Sergei Antonov <saproj@gmail.com>
+Date: Tue, 27 Jun 2023 15:05:49 +0300
+Subject: mmc: moxart: read scr register without changing byte order
+
+From: Sergei Antonov <saproj@gmail.com>
+
+commit d44263222134b5635932974c6177a5cba65a07e8 upstream.
+
+Conversion from big endian to native byte order is already done in the
+common function mmc_app_send_scr(), so converting again in
+moxart_transfer_pio() is redundant. The double conversion on a
+little-endian system returns an incorrect SCR value, which leads to errors:
+
+mmc0: unrecognised SCR structure version 8
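+
+A sketch of the two paths (fifo stands for host->base + REG_DATA_WINDOW):
+
+  /* old: ioread32be() already swaps the big-endian SCR word ... */
+  *sgp = ioread32be(fifo);
+  /* ... and mmc_app_send_scr() later applies be32_to_cpu() to the same
+   * word, so on a little-endian host the two swaps cancel out and the
+   * SCR is interpreted with the wrong byte order.
+   */
+
+  /* new: store the word untouched and let the core do the single swap */
+  *sgp = ioread32(fifo);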
+
+Fixes: 1b66e94e6b99 ("mmc: moxart: Add MOXA ART SD/MMC driver")
+Signed-off-by: Sergei Antonov <saproj@gmail.com>
+Cc: Jonas Jensen <jonas.jensen@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230627120549.2400325-1-saproj@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/moxart-mmc.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/drivers/mmc/host/moxart-mmc.c
++++ b/drivers/mmc/host/moxart-mmc.c
+@@ -338,13 +338,7 @@ static void moxart_transfer_pio(struct m
+ return;
+ }
+ for (len = 0; len < remain && len < host->fifo_width;) {
+- /* SCR data must be read in big endian. */
+- if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
+- *sgp = ioread32be(host->base +
+- REG_DATA_WINDOW);
+- else
+- *sgp = ioread32(host->base +
+- REG_DATA_WINDOW);
++ *sgp = ioread32(host->base + REG_DATA_WINDOW);
+ sgp++;
+ len += 4;
+ }
--- /dev/null
+From 38f7c44d6e760a8513557e27340d61b820c91b8f Mon Sep 17 00:00:00 2001
+From: Ido Schimmel <idosch@nvidia.com>
+Date: Tue, 8 Aug 2023 17:14:51 +0300
+Subject: selftests: forwarding: Set default IPv6 traceroute utility
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+commit 38f7c44d6e760a8513557e27340d61b820c91b8f upstream.
+
+The test uses the 'TROUTE6' environment variable to encode the name of
+the IPv6 traceroute utility. By default (without a configuration file),
+this variable is not set, resulting in failures:
+
+ # ./ip6_forward_instats_vrf.sh
+ TEST: ping6 [ OK ]
+ TEST: Ip6InTooBigErrors [ OK ]
+ TEST: Ip6InHdrErrors [FAIL]
+ TEST: Ip6InAddrErrors [ OK ]
+ TEST: Ip6InDiscards [ OK ]
+
+Fix this by setting a default utility name and by skipping the test if the
+utility is not present.
+
+Fixes: 0857d6f8c759 ("ipv6: When forwarding count rx stats on the orig netdev")
+Reported-by: Mirsad Todorovac <mirsad.todorovac@alu.unizg.hr>
+Closes: https://lore.kernel.org/netdev/adc5e40d-d040-a65e-eb26-edf47dac5b02@alu.unizg.hr/
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Tested-by: Mirsad Todorovac <mirsad.todorovac@alu.unizg.hr>
+Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://lore.kernel.org/r/20230808141503.4060661-6-idosch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh | 2 ++
+ tools/testing/selftests/net/forwarding/lib.sh | 1 +
+ 2 files changed, 3 insertions(+)
+
+--- a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
++++ b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
+@@ -14,6 +14,8 @@ ALL_TESTS="
+ NUM_NETIFS=4
+ source lib.sh
+
++require_command $TROUTE6
++
+ h1_create()
+ {
+ simple_if_init $h1 2001:1:1::2/64
+--- a/tools/testing/selftests/net/forwarding/lib.sh
++++ b/tools/testing/selftests/net/forwarding/lib.sh
+@@ -30,6 +30,7 @@ REQUIRE_MZ=${REQUIRE_MZ:=yes}
+ REQUIRE_MTOOLS=${REQUIRE_MTOOLS:=no}
+ STABLE_MAC_ADDRS=${STABLE_MAC_ADDRS:=no}
+ TCPDUMP_EXTRA_FLAGS=${TCPDUMP_EXTRA_FLAGS:=}
++TROUTE6=${TROUTE6:=traceroute6}
+
+ relative_path="${BASH_SOURCE%/*}"
+ if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
gcc-plugins-reorganize-gimple-includes-for-gcc-13.patch
revert-loongarch-cpu-switch-to-arch_cpu_finalize_init.patch
+tpm-disable-rng-for-all-amd-ftpms.patch
+tpm-add-a-helper-for-checking-hwrng-enabled.patch
+ksmbd-validate-command-request-size.patch
+ksmbd-fix-wrong-next-length-validation-of-ea-buffer-in-smb2_set_ea.patch
+kvm-sev-snapshot-the-ghcb-before-accessing-it.patch
+kvm-sev-only-access-ghcb-fields-once.patch
+wifi-nl80211-fix-integer-overflow-in-nl80211_parse_mbssid_elems.patch
+wifi-rtw89-fix-8852ae-disconnection-caused-by-rx-full-flags.patch
+selftests-forwarding-set-default-ipv6-traceroute-utility.patch
+wireguard-allowedips-expand-maximum-node-depth.patch
+mmc-moxart-read-scr-register-without-changing-byte-order.patch
+ipv6-adjust-ndisc_is_useropt-to-also-return-true-for-pio.patch
--- /dev/null
+From cacc6e22932f373a91d7be55a9b992dc77f4c59b Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Mon, 7 Aug 2023 23:12:29 -0500
+Subject: tpm: Add a helper for checking hwrng enabled
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit cacc6e22932f373a91d7be55a9b992dc77f4c59b upstream.
+
+The same checks are repeated in three places to decide whether to use
+hwrng. Consolidate these into a helper.
+
+This also fixes a case where one of them was missing a check in the
+cleanup path.
+
+Fixes: 554b841d4703 ("tpm: Disable RNG for all AMD fTPMs")
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm-chip.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -518,10 +518,20 @@ static int tpm_hwrng_read(struct hwrng *
+ return tpm_get_random(chip, data, max);
+ }
+
++static bool tpm_is_hwrng_enabled(struct tpm_chip *chip)
++{
++ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
++ return false;
++ if (tpm_is_firmware_upgrade(chip))
++ return false;
++ if (chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)
++ return false;
++ return true;
++}
++
+ static int tpm_add_hwrng(struct tpm_chip *chip)
+ {
+- if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
+- chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)
++ if (!tpm_is_hwrng_enabled(chip))
+ return 0;
+
+ snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+@@ -626,7 +636,7 @@ int tpm_chip_register(struct tpm_chip *c
+ return 0;
+
+ out_hwrng:
+- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
++ if (tpm_is_hwrng_enabled(chip))
+ hwrng_unregister(&chip->hwrng);
+ out_ppi:
+ tpm_bios_log_teardown(chip);
+@@ -651,8 +661,7 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
+ void tpm_chip_unregister(struct tpm_chip *chip)
+ {
+ tpm_del_legacy_sysfs(chip);
+- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) &&
+- !(chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED))
++ if (tpm_is_hwrng_enabled(chip))
+ hwrng_unregister(&chip->hwrng);
+ tpm_bios_log_teardown(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
--- /dev/null
+From 554b841d470338a3b1d6335b14ee1cd0c8f5d754 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed, 2 Aug 2023 07:25:33 -0500
+Subject: tpm: Disable RNG for all AMD fTPMs
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 554b841d470338a3b1d6335b14ee1cd0c8f5d754 upstream.
+
+The TPM RNG functionality is not necessary for entropy when the CPU
+already supports the RDRAND instruction. The TPM RNG functionality was
+previously disabled on a subset of AMD fTPM series, but reports continue
+to show problems on some systems, with stutter root-caused to the TPM RNG
+functionality.
+
+Expand the disabling of TPM RNG use to all AMD fTPMs, whether or not their
+firmware version claims to carry the fix. To accomplish this, move the
+detection into the TPM CRB registration path and add a flag indicating
+that the TPM should opt out of registering with hwrng.
+
+Cc: stable@vger.kernel.org # 6.1.y+
+Fixes: b006c439d58d ("hwrng: core - start hwrng kthread also for untrusted sources")
+Fixes: f1324bbc4011 ("tpm: disable hwrng for fTPM on some AMD designs")
+Reported-by: daniil.stas@posteo.net
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217719
+Reported-by: bitlord0xff@gmail.com
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217212
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm-chip.c | 68 +-------------------------------------------
+ drivers/char/tpm/tpm_crb.c | 30 +++++++++++++++++++
+ include/linux/tpm.h | 1
+ 3 files changed, 33 insertions(+), 66 deletions(-)
+
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -507,70 +507,6 @@ static int tpm_add_legacy_sysfs(struct t
+ return 0;
+ }
+
+-/*
+- * Some AMD fTPM versions may cause stutter
+- * https://www.amd.com/en/support/kb/faq/pa-410
+- *
+- * Fixes are available in two series of fTPM firmware:
+- * 6.x.y.z series: 6.0.18.6 +
+- * 3.x.y.z series: 3.57.y.5 +
+- */
+-#ifdef CONFIG_X86
+-static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
+-{
+- u32 val1, val2;
+- u64 version;
+- int ret;
+-
+- if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+- return false;
+-
+- ret = tpm_request_locality(chip);
+- if (ret)
+- return false;
+-
+- ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL);
+- if (ret)
+- goto release;
+- if (val1 != 0x414D4400U /* AMD */) {
+- ret = -ENODEV;
+- goto release;
+- }
+- ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL);
+- if (ret)
+- goto release;
+- ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL);
+-
+-release:
+- tpm_relinquish_locality(chip);
+-
+- if (ret)
+- return false;
+-
+- version = ((u64)val1 << 32) | val2;
+- if ((version >> 48) == 6) {
+- if (version >= 0x0006000000180006ULL)
+- return false;
+- } else if ((version >> 48) == 3) {
+- if (version >= 0x0003005700000005ULL)
+- return false;
+- } else {
+- return false;
+- }
+-
+- dev_warn(&chip->dev,
+- "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n",
+- version);
+-
+- return true;
+-}
+-#else
+-static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
+-{
+- return false;
+-}
+-#endif /* CONFIG_X86 */
+-
+ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+ struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+@@ -585,7 +521,7 @@ static int tpm_hwrng_read(struct hwrng *
+ static int tpm_add_hwrng(struct tpm_chip *chip)
+ {
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
+- tpm_amd_is_rng_defective(chip))
++ chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)
+ return 0;
+
+ snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+@@ -716,7 +652,7 @@ void tpm_chip_unregister(struct tpm_chip
+ {
+ tpm_del_legacy_sysfs(chip);
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) &&
+- !tpm_amd_is_rng_defective(chip))
++ !(chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED))
+ hwrng_unregister(&chip->hwrng);
+ tpm_bios_log_teardown(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -463,6 +463,28 @@ static bool crb_req_canceled(struct tpm_
+ return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+ }
+
++static int crb_check_flags(struct tpm_chip *chip)
++{
++ u32 val;
++ int ret;
++
++ ret = crb_request_locality(chip, 0);
++ if (ret)
++ return ret;
++
++ ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
++ if (ret)
++ goto release;
++
++ if (val == 0x414D4400U /* AMD */)
++ chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
++
++release:
++ crb_relinquish_locality(chip, 0);
++
++ return ret;
++}
++
+ static const struct tpm_class_ops tpm_crb = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = crb_status,
+@@ -800,6 +822,14 @@ static int crb_acpi_add(struct acpi_devi
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
++ rc = tpm_chip_bootstrap(chip);
++ if (rc)
++ goto out;
++
++ rc = crb_check_flags(chip);
++ if (rc)
++ goto out;
++
+ rc = tpm_chip_register(chip);
+
+ out:
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -282,6 +282,7 @@ enum tpm_chip_flags {
+ TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6),
+ TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7),
+ TPM_CHIP_FLAG_SUSPENDED = BIT(8),
++ TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9),
+ };
+
+ #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
--- /dev/null
+From 6311071a056272e1e761de8d0305e87cc566f734 Mon Sep 17 00:00:00 2001
+From: Keith Yeo <keithyjy@gmail.com>
+Date: Mon, 31 Jul 2023 11:47:20 +0800
+Subject: wifi: nl80211: fix integer overflow in nl80211_parse_mbssid_elems()
+
+From: Keith Yeo <keithyjy@gmail.com>
+
+commit 6311071a056272e1e761de8d0305e87cc566f734 upstream.
+
+nl80211_parse_mbssid_elems() uses a u8 variable num_elems to count the
+number of MBSSID elements in the nested netlink attribute attrs, which can
+lead to an integer overflow if a user of the nl80211 interface specifies
+256 or more elements in the corresponding attribute in userspace. The
+integer overflow can lead to a heap buffer overflow as num_elems determines
+the size of the trailing array in elems, and this array is thereafter
+written to for each element in attrs.
+
+Note that this vulnerability only affects devices with the
+wiphy->mbssid_max_interfaces member set for the wireless physical device
+struct in the device driver, and can only be triggered by a process with
+CAP_NET_ADMIN capabilities.
+
+Fix this by checking for a maximum of 255 elements in attrs.
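+
+The overflow itself is plain to see in isolation (illustration only):
+
+  u8 num_elems = 0;
+  int i;
+
+  for (i = 0; i < 256; i++)
+          num_elems++;    /* wraps around: num_elems == 0 afterwards */
+
+  /* struct_size(elems, elem, num_elems) then sizes the allocation for
+   * far fewer entries than the second walk over the attributes writes,
+   * corrupting the heap. Capping the count at 255 keeps the allocation
+   * and the writes in sync.
+   */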
+
+Cc: stable@vger.kernel.org
+Fixes: dc1e3cb8da8b ("nl80211: MBSSID and EMA support in AP mode")
+Signed-off-by: Keith Yeo <keithyjy@gmail.com>
+Link: https://lore.kernel.org/r/20230731034719.77206-1-keithyjy@gmail.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/wireless/nl80211.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -5378,8 +5378,11 @@ nl80211_parse_mbssid_elems(struct wiphy
+ if (!wiphy->mbssid_max_interfaces)
+ return ERR_PTR(-EINVAL);
+
+- nla_for_each_nested(nl_elems, attrs, rem_elems)
++ nla_for_each_nested(nl_elems, attrs, rem_elems) {
++ if (num_elems >= 255)
++ return ERR_PTR(-EINVAL);
+ num_elems++;
++ }
+
+ elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL);
+ if (!elems)
--- /dev/null
+From b74bb07cdab6859e1a3fc9fe7351052176322ddf Mon Sep 17 00:00:00 2001
+From: Ping-Ke Shih <pkshih@realtek.com>
+Date: Tue, 8 Aug 2023 08:54:26 +0800
+Subject: wifi: rtw89: fix 8852AE disconnection caused by RX full flags
+
+From: Ping-Ke Shih <pkshih@realtek.com>
+
+commit b74bb07cdab6859e1a3fc9fe7351052176322ddf upstream.
+
+RX full flags are raised if certain types of RX FIFO are full, and all
+following MPDUs of an A-MPDU are then dropped. In order to resume receiving
+MPDUs once the RX FIFO becomes available again, we cleared the register bits
+in commit a0d99ebb3ecd ("wifi: rtw89: initialize DMA of CMAC"). However,
+8852AE needs additional settings to support this. To quickly fix the
+disconnection problem, revert to the previous behavior.
+
+Fixes: a0d99ebb3ecd ("wifi: rtw89: initialize DMA of CMAC")
+Reported-by: Damian B <bronecki.damian@gmail.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217710
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
+Tested-by: Damian B <bronecki.damian@gmail.com>
+Link: https://lore.kernel.org/r/20230808005426.5327-1-pkshih@realtek.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/realtek/rtw89/mac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -2209,7 +2209,7 @@ static int cmac_dma_init(struct rtw89_de
+ u32 reg;
+ int ret;
+
+- if (chip_id != RTL8852A && chip_id != RTL8852B)
++ if (chip_id != RTL8852B)
+ return 0;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
--- /dev/null
+From 46622219aae2b67813fe31a7b8cb7da5baff5c8a Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Mon, 7 Aug 2023 15:21:27 +0200
+Subject: wireguard: allowedips: expand maximum node depth
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 46622219aae2b67813fe31a7b8cb7da5baff5c8a upstream.
+
+In the allowedips self-test, nodes are inserted into the tree, but the test
+generated an even number of nodes, while for checking the maximum node depth
+there is of course also the root node, which makes the total necessarily
+odd. With too few nodes added, it never triggered the maximum depth check
+like it should have. So, add 129 nodes instead of 128 nodes, and do so with
+a more straightforward scheme: start with all the bits set and shift over
+one each time. Then increase the maximum depth to 129, and choose a better
+name for that variable to make it clear that it represents depth as opposed
+to bits.
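+
+A short sketch of the new insertion pattern (simplified; insert() stands in
+for wg_allowedips_insert_v6() with the shared peer and mutex):
+
+  for (i = 0; i < 64; ++i) {
+          part = cpu_to_be64(~0ULL << i);
+
+          memset(&ip, 0xff, 8);               /* upper half all ones   */
+          memcpy((u8 *)&ip + 8, &part, 8);    /* i low bits cleared    */
+          insert(&ip);
+
+          memcpy(&ip, &part, 8);              /* i low bits cleared    */
+          memset((u8 *)&ip + 8, 0, 8);        /* lower half all zeros  */
+          insert(&ip);
+  }
+  memset(&ip, 0, 16);
+  insert(&ip);                                /* 129th node: all zeros */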
+
+Cc: stable@vger.kernel.org
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Link: https://lore.kernel.org/r/20230807132146.2191597-2-Jason@zx2c4.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/allowedips.c | 8 ++++----
+ drivers/net/wireguard/selftest/allowedips.c | 16 ++++++++++------
+ 2 files changed, 14 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/wireguard/allowedips.c
++++ b/drivers/net/wireguard/allowedips.c
+@@ -6,7 +6,7 @@
+ #include "allowedips.h"
+ #include "peer.h"
+
+-enum { MAX_ALLOWEDIPS_BITS = 128 };
++enum { MAX_ALLOWEDIPS_DEPTH = 129 };
+
+ static struct kmem_cache *node_cache;
+
+@@ -42,7 +42,7 @@ static void push_rcu(struct allowedips_n
+ struct allowedips_node __rcu *p, unsigned int *len)
+ {
+ if (rcu_access_pointer(p)) {
+- if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS))
++ if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH))
+ return;
+ stack[(*len)++] = rcu_dereference_raw(p);
+ }
+@@ -55,7 +55,7 @@ static void node_free_rcu(struct rcu_hea
+
+ static void root_free_rcu(struct rcu_head *rcu)
+ {
+- struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = {
++ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = {
+ container_of(rcu, struct allowedips_node, rcu) };
+ unsigned int len = 1;
+
+@@ -68,7 +68,7 @@ static void root_free_rcu(struct rcu_hea
+
+ static void root_remove_peer_lists(struct allowedips_node *root)
+ {
+- struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root };
++ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root };
+ unsigned int len = 1;
+
+ while (len > 0 && (node = stack[--len])) {
+--- a/drivers/net/wireguard/selftest/allowedips.c
++++ b/drivers/net/wireguard/selftest/allowedips.c
+@@ -593,16 +593,20 @@ bool __init wg_allowedips_selftest(void)
+ wg_allowedips_remove_by_peer(&t, a, &mutex);
+ test_negative(4, a, 192, 168, 0, 1);
+
+- /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node
++ /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node
+ * if something goes wrong.
+ */
+- for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) {
+- part = cpu_to_be64(~(1LLU << (i % 64)));
+- memset(&ip, 0xff, 16);
+- memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
++ for (i = 0; i < 64; ++i) {
++ part = cpu_to_be64(~0LLU << i);
++ memset(&ip, 0xff, 8);
++ memcpy((u8 *)&ip + 8, &part, 8);
++ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
++ memcpy(&ip, &part, 8);
++ memset((u8 *)&ip + 8, 0, 8);
+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ }
+-
++ memset(&ip, 0, 16);
++ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ wg_allowedips_free(&t, &mutex);
+
+ wg_allowedips_init(&t);