--- /dev/null
+From fbe5e5f030c22ae717ee422aaab0e00ea84fab5e Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+Date: Sat, 8 Nov 2025 00:45:20 +0000
+Subject: KVM: nSVM: Always recalculate LBR MSR intercepts in svm_update_lbrv()
+
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+
+commit fbe5e5f030c22ae717ee422aaab0e00ea84fab5e upstream.
+
+svm_update_lbrv() is called when MSR_IA32_DEBUGCTLMSR is updated, and on
+nested transitions where LBRV is used. It checks whether LBRV enablement
+needs to be changed in the current VMCB, and if it does, it also
+recalculates the intercepts for the LBR MSRs.
+
+However, there are cases where the intercepts need to be updated even
+when LBRV enablement doesn't change. Example scenario:
+- L1 has MSR_IA32_DEBUGCTLMSR cleared.
+- L1 runs L2 without LBR_CTL_ENABLE (no LBRV).
+- L2 sets DEBUGCTLMSR_LBR in MSR_IA32_DEBUGCTLMSR, svm_update_lbrv()
+  sets LBR_CTL_ENABLE in VMCB02 and disables the intercepts for the
+  LBR MSRs.
+- L2 exits to L1, svm_update_lbrv() is not called on this transition.
+- L1 clears MSR_IA32_DEBUGCTLMSR, svm_update_lbrv() finds that
+ LBR_CTL_ENABLE is already cleared in VMCB01 and does nothing.
+- Intercepts remain disabled, so L1's reads of the LBR MSRs return
+  the host's MSR values.
+
+Fix it by always recalculating intercepts in svm_update_lbrv().
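+
+For illustration, a minimal C sketch of the resulting control flow in
+svm_update_lbrv() (condensed from the diff below; the computation of
+the enable/disable booleans is elided):
+
+    void svm_update_lbrv(struct kvm_vcpu *vcpu)
+    {
+        /* ... compute enable_lbrv and current_enable_lbrv ... */
+
+        if (enable_lbrv && !current_enable_lbrv)
+            __svm_enable_lbrv(vcpu);
+        else if (!enable_lbrv && current_enable_lbrv)
+            __svm_disable_lbrv(vcpu);
+
+        /* Always recalculate, even when LBR_CTL didn't change. */
+        svm_recalc_lbr_msr_intercepts(vcpu);
+    }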
+
+Fixes: 1d5a1b5860ed ("KVM: x86: nSVM: correctly virtualize LBR msrs when L2 is running")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Link: https://patch.msgid.link/20251108004524.1600006-3-yosry.ahmed@linux.dev
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c | 29 +++++++++++++++++++----------
+ 1 file changed, 19 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1014,26 +1014,30 @@ static void svm_recalc_lbr_msr_intercept
+ !intercept, !intercept);
+ }
+
+-void svm_enable_lbrv(struct kvm_vcpu *vcpu)
++static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
+- svm_recalc_lbr_msr_intercepts(vcpu);
+
+ /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
+ if (is_guest_mode(vcpu))
+ svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
+ }
+
+-static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
++void svm_enable_lbrv(struct kvm_vcpu *vcpu)
++{
++ __svm_enable_lbrv(vcpu);
++ svm_recalc_lbr_msr_intercepts(vcpu);
++}
++
++static void __svm_disable_lbrv(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
+
+ svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
+- svm_recalc_lbr_msr_intercepts(vcpu);
+
+ /*
+ * Move the LBR msrs back to the vmcb01 to avoid copying them
+@@ -1062,13 +1066,18 @@ void svm_update_lbrv(struct kvm_vcpu *vc
+ (is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+ (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
+
+- if (enable_lbrv == current_enable_lbrv)
+- return;
++ if (enable_lbrv && !current_enable_lbrv)
++ __svm_enable_lbrv(vcpu);
++ else if (!enable_lbrv && current_enable_lbrv)
++ __svm_disable_lbrv(vcpu);
+
+- if (enable_lbrv)
+- svm_enable_lbrv(vcpu);
+- else
+- svm_disable_lbrv(vcpu);
++ /*
++ * During nested transitions, it is possible that the current VMCB has
++ * LBR_CTL set, but the previous LBR_CTL had it cleared (or vice versa).
++ * In this case, even though LBR_CTL does not need an update, intercepts
++ * do, so always recalculate the intercepts here.
++ */
++ svm_recalc_lbr_msr_intercepts(vcpu);
+ }
+
+ void disable_nmi_singlestep(struct vcpu_svm *svm)
--- /dev/null
+From 8a4821412cf2c1429fffa07c012dd150f2edf78c Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+Date: Sat, 8 Nov 2025 00:45:21 +0000
+Subject: KVM: nSVM: Fix and simplify LBR virtualization handling with nested
+
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+
+commit 8a4821412cf2c1429fffa07c012dd150f2edf78c upstream.
+
+The current scheme for handling LBRV when nested virtualization is in
+use is very complicated, especially when L1 does not enable LBRV (i.e.
+does not set LBR_CTL_ENABLE_MASK).
+
+To avoid copying LBRs between VMCB01 and VMCB02 on every nested
+transition, the current implementation switches between using VMCB01 or
+VMCB02 as the source of truth for the LBRs while L2 is running. If L2
+enables LBR, VMCB02 is used as the source of truth. When L2 disables
+LBR, the LBRs are copied to VMCB01 and VMCB01 is used as the source of
+truth. This introduces significant complexity and, in some cases,
+incorrect behavior.
+
+For example, on a nested #VMEXIT, the LBRs are only copied from VMCB02
+to VMCB01 if LBRV is enabled in VMCB01. This is because L2's writes to
+MSR_IA32_DEBUGCTLMSR to enable LBR are intercepted and propagated to
+VMCB01 instead of VMCB02. However, LBRV is only enabled in VMCB02 when
+L2 is running.
+
+This means that if L2 enables LBR and exits to L1, the LBRs will not be
+propagated from VMCB02 to VMCB01, because LBRV is disabled in VMCB01.
+
+There is no meaningful difference in CPUID rate in L2 when copying LBRs
+on every nested transition vs. the current approach, so do the simple
+and correct thing and always copy LBRs between VMCB01 and VMCB02 on
+nested transitions (when LBRV is disabled by L1). Drop the conditional
+copying of the LBRs in __svm_{enable/disable}_lbrv() as it is now
+unnecessary.
+
+Since VMCB02 becomes the only source of truth for LBRs when L2 is
+running, regardless of whether L1 enabled LBRV, drop svm_get_lbr_vmcb()
+and use svm->vmcb directly in its place.
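+
+A condensed sketch of the resulting copy logic on nested transitions
+(taken from the diffs below; "l1_lbrv" is shorthand for the
+guest_can_use(vcpu, X86_FEATURE_LBRV) && LBR_CTL_ENABLE_MASK check on
+svm->nested.ctl.virt_ext):
+
+    /* Nested VMRUN: VMCB02 becomes the source of truth for the LBRs. */
+    if (l1_lbrv)
+        svm_copy_lbrs(vmcb02, vmcb12);
+    else
+        svm_copy_lbrs(vmcb02, vmcb01);
+    svm_update_lbrv(&svm->vcpu);
+
+    /* Nested #VMEXIT: always copy the LBRs back out of VMCB02. */
+    if (l1_lbrv)
+        svm_copy_lbrs(vmcb12, vmcb02);
+    else
+        svm_copy_lbrs(vmcb01, vmcb02);
+    svm_update_lbrv(vcpu);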
+
+Fixes: 1d5a1b5860ed ("KVM: x86: nSVM: correctly virtualize LBR msrs when L2 is running")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Link: https://patch.msgid.link/20251108004524.1600006-4-yosry.ahmed@linux.dev
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 20 ++++++-------------
+ arch/x86/kvm/svm/svm.c | 47 +++++++++-------------------------------------
+ 2 files changed, 17 insertions(+), 50 deletions(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -602,11 +602,10 @@ static void nested_vmcb02_prepare_save(s
+ */
+ svm_copy_lbrs(vmcb02, vmcb12);
+ vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
+- svm_update_lbrv(&svm->vcpu);
+-
+- } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
++ } else {
+ svm_copy_lbrs(vmcb02, vmcb01);
+ }
++ svm_update_lbrv(&svm->vcpu);
+ }
+
+ static inline bool is_evtinj_soft(u32 evtinj)
+@@ -731,11 +730,7 @@ static void nested_vmcb02_prepare_contro
+ svm->soft_int_next_rip = vmcb12_rip;
+ }
+
+- vmcb02->control.virt_ext = vmcb01->control.virt_ext &
+- LBR_CTL_ENABLE_MASK;
+- if (guest_can_use(vcpu, X86_FEATURE_LBRV))
+- vmcb02->control.virt_ext |=
+- (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
++ /* LBR_CTL_ENABLE_MASK is controlled by svm_update_lbrv() */
+
+ if (!nested_vmcb_needs_vls_intercept(svm))
+ vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+@@ -1066,13 +1061,12 @@ int nested_svm_vmexit(struct vcpu_svm *s
+ kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+
+ if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+- (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
++ (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)))
+ svm_copy_lbrs(vmcb12, vmcb02);
+- svm_update_lbrv(vcpu);
+- } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
++ else
+ svm_copy_lbrs(vmcb01, vmcb02);
+- svm_update_lbrv(vcpu);
+- }
++
++ svm_update_lbrv(vcpu);
+
+ if (vnmi) {
+ if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1016,13 +1016,7 @@ static void svm_recalc_lbr_msr_intercept
+
+ static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
+ {
+- struct vcpu_svm *svm = to_svm(vcpu);
+-
+- svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
+-
+- /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
+- if (is_guest_mode(vcpu))
+- svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
++ to_svm(vcpu)->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
+ }
+
+ void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+@@ -1033,36 +1027,15 @@ void svm_enable_lbrv(struct kvm_vcpu *vc
+
+ static void __svm_disable_lbrv(struct kvm_vcpu *vcpu)
+ {
+- struct vcpu_svm *svm = to_svm(vcpu);
+-
+ KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
+-
+- svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
+-
+- /*
+- * Move the LBR msrs back to the vmcb01 to avoid copying them
+- * on nested guest entries.
+- */
+- if (is_guest_mode(vcpu))
+- svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
+-}
+-
+-static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
+-{
+- /*
+- * If LBR virtualization is disabled, the LBR MSRs are always kept in
+- * vmcb01. If LBR virtualization is enabled and L1 is running VMs of
+- * its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
+- */
+- return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
+- svm->vmcb01.ptr;
++ to_svm(vcpu)->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
+ }
+
+ void svm_update_lbrv(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+ bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
+- bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
++ bool enable_lbrv = (svm->vmcb->save.dbgctl & DEBUGCTLMSR_LBR) ||
+ (is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+ (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
+
+@@ -2991,19 +2964,19 @@ static int svm_get_msr(struct kvm_vcpu *
+ msr_info->data = svm->tsc_aux;
+ break;
+ case MSR_IA32_DEBUGCTLMSR:
+- msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
++ msr_info->data = svm->vmcb->save.dbgctl;
+ break;
+ case MSR_IA32_LASTBRANCHFROMIP:
+- msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
++ msr_info->data = svm->vmcb->save.br_from;
+ break;
+ case MSR_IA32_LASTBRANCHTOIP:
+- msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
++ msr_info->data = svm->vmcb->save.br_to;
+ break;
+ case MSR_IA32_LASTINTFROMIP:
+- msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
++ msr_info->data = svm->vmcb->save.last_excp_from;
+ break;
+ case MSR_IA32_LASTINTTOIP:
+- msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
++ msr_info->data = svm->vmcb->save.last_excp_to;
+ break;
+ case MSR_VM_HSAVE_PA:
+ msr_info->data = svm->nested.hsave_msr;
+@@ -3276,10 +3249,10 @@ static int svm_set_msr(struct kvm_vcpu *
+ if (data & DEBUGCTL_RESERVED_BITS)
+ return 1;
+
+- if (svm_get_lbr_vmcb(svm)->save.dbgctl == data)
++ if (svm->vmcb->save.dbgctl == data)
+ break;
+
+- svm_get_lbr_vmcb(svm)->save.dbgctl = data;
++ svm->vmcb->save.dbgctl = data;
+ vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
+ svm_update_lbrv(vcpu);
+ break;
--- /dev/null
+From 3fa05f96fc08dff5e846c2cc283a249c1bf029a1 Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+Date: Wed, 12 Nov 2025 01:30:17 +0000
+Subject: KVM: SVM: Fix redundant updates of LBR MSR intercepts
+
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+
+commit 3fa05f96fc08dff5e846c2cc283a249c1bf029a1 upstream.
+
+Don't update the LBR MSR intercept bitmaps if they're already up-to-date,
+as unconditionally updating the intercepts forces KVM to recalculate the
+MSR bitmaps for vmcb02 on every nested VMRUN. The redundant updates are
+functionally okay; however, they neuter an optimization in the Hyper-V
+nested virtualization enlightenments, which manifests as a selftest
+failure.
+
+In particular, Hyper-V lets L1 mark "nested enlightenments" as clean, i.e.
+tell KVM that no changes were made to the MSR bitmap since the last VMRUN.
+The hyperv_svm_test KVM selftest intentionally changes the MSR bitmap
+"without telling KVM about it" to verify that KVM honors the clean hint,
+and correctly fails because KVM notices the changed bitmap anyway:
+
+ ==== Test Assertion Failure ====
+ x86/hyperv_svm_test.c:120: vmcb->control.exit_code == 0x081
+ pid=193558 tid=193558 errno=4 - Interrupted system call
+ 1 0x0000000000411361: assert_on_unhandled_exception at processor.c:659
+ 2 0x0000000000406186: _vcpu_run at kvm_util.c:1699
+ 3 (inlined by) vcpu_run at kvm_util.c:1710
+ 4 0x0000000000401f2a: main at hyperv_svm_test.c:175
+ 5 0x000000000041d0d3: __libc_start_call_main at libc-start.o:?
+ 6 0x000000000041f27c: __libc_start_main_impl at ??:?
+ 7 0x00000000004021a0: _start at ??:?
+ vmcb->control.exit_code == SVM_EXIT_VMMCALL
+
+Do *not* fix this by skipping svm_hv_vmcb_dirty_nested_enlightenments()
+when svm_set_intercept_for_msr() performs a no-op change. Changes to
+the L0 MSR interception bitmap are only triggered by full CPUID updates
+and MSR filter updates, both of which should be rare. Changing
+svm_set_intercept_for_msr() risks hiding unintended pessimizations
+like this one, and is actually more complex than this change.
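+
+A condensed sketch of the guard added below; the new
+lbr_msrs_intercepted flag caches the interception state last written to
+the bitmap:
+
+    static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
+    {
+        struct vcpu_svm *svm = to_svm(vcpu);
+        bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
+
+        /* Skip the no-op update so the MSR bitmap isn't dirtied. */
+        if (intercept == svm->lbr_msrs_intercepted)
+            return;
+
+        /* ... set_msr_interception() calls for the LBR MSRs ... */
+
+        svm->lbr_msrs_intercepted = intercept;
+    }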
+
+Fixes: fbe5e5f030c2 ("KVM: nSVM: Always recalculate LBR MSR intercepts in svm_update_lbrv()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Link: https://patch.msgid.link/20251112013017.1836863-1-yosry.ahmed@linux.dev
+[Rewritten commit message based on mailing list discussion. - Paolo]
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Tested-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c | 6 ++++++
+ arch/x86/kvm/svm/svm.h | 1 +
+ 2 files changed, 7 insertions(+)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1000,6 +1000,9 @@ static void svm_recalc_lbr_msr_intercept
+ struct vcpu_svm *svm = to_svm(vcpu);
+ bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
+
++ if (intercept == svm->lbr_msrs_intercepted)
++ return;
++
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP,
+ !intercept, !intercept);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP,
+@@ -1012,6 +1015,8 @@ static void svm_recalc_lbr_msr_intercept
+ if (sev_es_guest(vcpu->kvm))
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR,
+ !intercept, !intercept);
++
++ svm->lbr_msrs_intercepted = intercept;
+ }
+
+ static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
+@@ -1450,6 +1455,7 @@ static int svm_vcpu_create(struct kvm_vc
+ }
+
+ svm->x2avic_msrs_intercepted = true;
++ svm->lbr_msrs_intercepted = true;
+
+ svm->vmcb01.ptr = page_address(vmcb01_page);
+ svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -324,6 +324,7 @@ struct vcpu_svm {
+ bool guest_state_loaded;
+
+ bool x2avic_msrs_intercepted;
++ bool lbr_msrs_intercepted;
+
+ /* Guest GIF value, used when vGIF is not enabled */
+ bool guest_gif;
--- /dev/null
+From stable+bounces-195431-greg=kroah.com@vger.kernel.org Fri Nov 21 00:41:03 2025
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+Date: Thu, 20 Nov 2025 23:39:32 +0000
+Subject: KVM: SVM: Introduce svm_recalc_lbr_msr_intercepts()
+To: stable@vger.kernel.org
+Cc: Yosry Ahmed <yosry.ahmed@linux.dev>
+Message-ID: <20251120233936.2407119-2-yosry.ahmed@linux.dev>
+
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+
+Introduce a helper that updates the intercepts for the LBR MSRs,
+similar to the one introduced upstream by commit 160f143cc131 ("KVM:
+SVM: Manually recalc all MSR intercepts on userspace MSR filter
+change"). The main difference is that this version uses
+set_msr_interception(), which has inverted polarity compared to
+svm_set_intercept_for_msr().
+
+This is intended to simplify incoming backports. No functional change
+intended.
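+
+To illustrate the inverted polarity: in this tree the last two
+arguments of set_msr_interception() mean "allow direct (read, write)
+access", so passing 1 clears the intercept, while upstream's
+svm_set_intercept_for_msr() takes the interception state directly. A
+condensed sketch of the helper from the diff below:
+
+    static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
+    {
+        struct vcpu_svm *svm = to_svm(vcpu);
+        /* Intercept the LBR MSRs unless LBR virtualization is enabled. */
+        bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
+
+        /* Inverted polarity: pass !intercept as the "allow" flags. */
+        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP,
+                             !intercept, !intercept);
+        /* ... likewise for the remaining LBR MSRs ... */
+    }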
+
+Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c | 32 +++++++++++++++++++++-----------
+ 1 file changed, 21 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -995,18 +995,31 @@ void svm_copy_lbrs(struct vmcb *to_vmcb,
+ vmcb_mark_dirty(to_vmcb, VMCB_LBR);
+ }
+
+-void svm_enable_lbrv(struct kvm_vcpu *vcpu)
++static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
++ bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
+
+- svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
++ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP,
++ !intercept, !intercept);
++ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP,
++ !intercept, !intercept);
++ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP,
++ !intercept, !intercept);
++ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP,
++ !intercept, !intercept);
+
+ if (sev_es_guest(vcpu->kvm))
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
++ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR,
++ !intercept, !intercept);
++}
++
++void svm_enable_lbrv(struct kvm_vcpu *vcpu)
++{
++ struct vcpu_svm *svm = to_svm(vcpu);
++
++ svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
++ svm_recalc_lbr_msr_intercepts(vcpu);
+
+ /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
+ if (is_guest_mode(vcpu))
+@@ -1020,10 +1033,7 @@ static void svm_disable_lbrv(struct kvm_
+ KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
+
+ svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
++ svm_recalc_lbr_msr_intercepts(vcpu);
+
+ /*
+ * Move the LBR msrs back to the vmcb01 to avoid copying them
usb-udc-add-trace-event-for-usb_gadget_set_state.patch
usb-gadget-udc-fix-use-after-free-in-usb_gadget_state_work.patch
mm-huge_memory-fix-null-pointer-deference-when-splitting-folio.patch
+kvm-svm-introduce-svm_recalc_lbr_msr_intercepts.patch
+kvm-nsvm-always-recalculate-lbr-msr-intercepts-in-svm_update_lbrv.patch
+kvm-nsvm-fix-and-simplify-lbr-virtualization-handling-with-nested.patch
+kvm-svm-fix-redundant-updates-of-lbr-msr-intercepts.patch