--- /dev/null
+From d83c36d822be44db4bad0c43bea99c8908f54117 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 7 Jun 2024 10:26:04 -0700
+Subject: KVM: nVMX: Add a helper to get highest pending from Posted Interrupt vector
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit d83c36d822be44db4bad0c43bea99c8908f54117 upstream.
+
+Add a helper to retrieve the highest pending vector given a Posted
+Interrupt descriptor. While the actual operation is straightforward, it's
+surprisingly easy to mess up, e.g. if one tries to reuse lapic.c's
+find_highest_vector(), which doesn't work with PID.PIR due to the APIC's
+IRR and ISR component registers being physically discontiguous (they're
+4-byte registers aligned at 16-byte intervals).
+
+To make PIR handling more consistent with respect to IRR and ISR handling,
+return -1 to indicate "no interrupt pending".
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240607172609.3205077-2-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 5 +++--
+ arch/x86/kvm/vmx/posted_intr.h | 10 ++++++++++
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -12,6 +12,7 @@
+ #include "mmu.h"
+ #include "nested.h"
+ #include "pmu.h"
++#include "posted_intr.h"
+ #include "sgx.h"
+ #include "trace.h"
+ #include "vmx.h"
+@@ -3899,8 +3900,8 @@ static int vmx_complete_nested_posted_in
+ if (!pi_test_and_clear_on(vmx->nested.pi_desc))
+ return 0;
+
+- max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
+- if (max_irr != 256) {
++ max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
++ if (max_irr > 0) {
+ vapic_page = vmx->nested.virtual_apic_map.hva;
+ if (!vapic_page)
+ goto mmio_needed;
+--- a/arch/x86/kvm/vmx/posted_intr.h
++++ b/arch/x86/kvm/vmx/posted_intr.h
+@@ -1,6 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #ifndef __KVM_X86_VMX_POSTED_INTR_H
+ #define __KVM_X86_VMX_POSTED_INTR_H
++
++#include <linux/find.h>
+ #include <asm/posted_intr.h>
+
+ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
+@@ -12,4 +14,12 @@ int vmx_pi_update_irte(struct kvm *kvm,
+ uint32_t guest_irq, bool set);
+ void vmx_pi_start_assignment(struct kvm *kvm);
+
++static inline int pi_find_highest_vector(struct pi_desc *pi_desc)
++{
++ int vec;
++
++ vec = find_last_bit((unsigned long *)pi_desc->pir, 256);
++ return vec < 256 ? vec : -1;
++}
++
+ #endif /* __KVM_X86_VMX_POSTED_INTR_H */
--- /dev/null
+From 27c4fa42b11af780d49ce704f7fa67b3c2544df4 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 7 Jun 2024 10:26:07 -0700
+Subject: KVM: nVMX: Check for pending posted interrupts when looking for nested events
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 27c4fa42b11af780d49ce704f7fa67b3c2544df4 upstream.
+
+Check for pending (and notified!) posted interrupts when checking if L2
+has a pending wake event, as fully posted/notified virtual interrupt is a
+valid wake event for HLT.
+
+Note that KVM must check vmx->nested.pi_pending to avoid prematurely
+waking L2, e.g. even if KVM sees a non-zero PID.PIR and PID.ON=1, the
+virtual interrupt won't actually be recognized until a notification IRQ is
+received by the vCPU or the vCPU does (nested) VM-Enter.
+
+Fixes: 26844fee6ade ("KVM: x86: never write to memory from kvm_vcpu_check_block()")
+Cc: stable@vger.kernel.org
+Cc: Maxim Levitsky <mlevitsk@redhat.com>
+Reported-by: Jim Mattson <jmattson@google.com>
+Closes: https://lore.kernel.org/all/20231207010302.2240506-1-jmattson@google.com
+Link: https://lore.kernel.org/r/20240607172609.3205077-5-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 36 ++++++++++++++++++++++++++++++++++--
+ 1 file changed, 34 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4034,8 +4034,40 @@ static bool nested_vmx_preemption_timer_
+
+ static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
+ {
+- return nested_vmx_preemption_timer_pending(vcpu) ||
+- to_vmx(vcpu)->nested.mtf_pending;
++ struct vcpu_vmx *vmx = to_vmx(vcpu);
++ void *vapic = vmx->nested.virtual_apic_map.hva;
++ int max_irr, vppr;
++
++ if (nested_vmx_preemption_timer_pending(vcpu) ||
++ vmx->nested.mtf_pending)
++ return true;
++
++ /*
++ * Virtual Interrupt Delivery doesn't require manual injection. Either
++ * the interrupt is already in GUEST_RVI and will be recognized by CPU
++ * at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move
++ * the interrupt from the PIR to RVI prior to entering the guest.
++ */
++ if (for_injection)
++ return false;
++
++ if (!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
++ __vmx_interrupt_blocked(vcpu))
++ return false;
++
++ if (!vapic)
++ return false;
++
++ vppr = *((u32 *)(vapic + APIC_PROCPRI));
++
++ if (vmx->nested.pi_pending && vmx->nested.pi_desc &&
++ pi_test_on(vmx->nested.pi_desc)) {
++ max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
++ if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0))
++ return true;
++ }
++
++ return false;
+ }
+
+ /*
--- /dev/null
+From 321ef62b0c5f6f57bb8500a2ca5986052675abbf Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 7 Jun 2024 10:26:08 -0700
+Subject: KVM: nVMX: Fold requested virtual interrupt check into has_nested_events()
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 321ef62b0c5f6f57bb8500a2ca5986052675abbf upstream.
+
+Check for a Requested Virtual Interrupt, i.e. a virtual interrupt that is
+pending delivery, in vmx_has_nested_events() and drop the one-off
+kvm_x86_ops.guest_apic_has_interrupt() hook.
+
+In addition to dropping a superfluous hook, this fixes a bug where KVM
+would incorrectly treat virtual interrupts _for L2_ as always enabled due
+to kvm_arch_interrupt_allowed(), by way of vmx_interrupt_blocked(),
+treating IRQs as enabled if L2 is active and vmcs12 is configured to exit
+on IRQs, i.e. KVM would treat a virtual interrupt for L2 as a valid wake
+event based on L1's IRQ blocking status.
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240607172609.3205077-6-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm-x86-ops.h | 1 -
+ arch/x86/include/asm/kvm_host.h | 1 -
+ arch/x86/kvm/vmx/main.c | 1 -
+ arch/x86/kvm/vmx/nested.c | 4 ++++
+ arch/x86/kvm/vmx/vmx.c | 20 --------------------
+ arch/x86/kvm/vmx/x86_ops.h | 1 -
+ arch/x86/kvm/x86.c | 10 +---------
+ 7 files changed, 5 insertions(+), 33 deletions(-)
+
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -85,7 +85,6 @@ KVM_X86_OP_OPTIONAL(update_cr8_intercept
+ KVM_X86_OP(refresh_apicv_exec_ctrl)
+ KVM_X86_OP_OPTIONAL(hwapic_irr_update)
+ KVM_X86_OP_OPTIONAL(hwapic_isr_update)
+-KVM_X86_OP_OPTIONAL_RET0(guest_apic_has_interrupt)
+ KVM_X86_OP_OPTIONAL(load_eoi_exitmap)
+ KVM_X86_OP_OPTIONAL(set_virtual_apic_mode)
+ KVM_X86_OP_OPTIONAL(set_apic_access_page_addr)
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1714,7 +1714,6 @@ struct kvm_x86_ops {
+ void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
+ void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
+ void (*hwapic_isr_update)(int isr);
+- bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
+ void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
+ void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
+--- a/arch/x86/kvm/vmx/main.c
++++ b/arch/x86/kvm/vmx/main.c
+@@ -97,7 +97,6 @@ struct kvm_x86_ops vt_x86_ops __initdata
+ .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
+ .hwapic_irr_update = vmx_hwapic_irr_update,
+ .hwapic_isr_update = vmx_hwapic_isr_update,
+- .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
+ .sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .deliver_interrupt = vmx_deliver_interrupt,
+ .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4060,6 +4060,10 @@ static bool vmx_has_nested_events(struct
+
+ vppr = *((u32 *)(vapic + APIC_PROCPRI));
+
++ max_irr = vmx_get_rvi();
++ if ((max_irr & 0xf0) > (vppr & 0xf0))
++ return true;
++
+ if (vmx->nested.pi_pending && vmx->nested.pi_desc &&
+ pi_test_on(vmx->nested.pi_desc)) {
+ max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -4108,26 +4108,6 @@ void pt_update_intercept_for_msr(struct
+ }
+ }
+
+-bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+-{
+- struct vcpu_vmx *vmx = to_vmx(vcpu);
+- void *vapic_page;
+- u32 vppr;
+- int rvi;
+-
+- if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
+- !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
+- WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
+- return false;
+-
+- rvi = vmx_get_rvi();
+-
+- vapic_page = vmx->nested.virtual_apic_map.hva;
+- vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
+-
+- return ((rvi & 0xf0) > (vppr & 0xf0));
+-}
+-
+ void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+--- a/arch/x86/kvm/vmx/x86_ops.h
++++ b/arch/x86/kvm/vmx/x86_ops.h
+@@ -49,7 +49,6 @@ void vmx_apicv_pre_state_restore(struct
+ bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
+ void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
+ void vmx_hwapic_isr_update(int max_isr);
+-bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu);
+ int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
+ void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -13100,12 +13100,6 @@ void kvm_arch_commit_memory_region(struc
+ kvm_arch_free_memslot(kvm, old);
+ }
+
+-static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+-{
+- return (is_guest_mode(vcpu) &&
+- static_call(kvm_x86_guest_apic_has_interrupt)(vcpu));
+-}
+-
+ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+ {
+ if (!list_empty_careful(&vcpu->async_pf.done))
+@@ -13136,9 +13130,7 @@ static inline bool kvm_vcpu_has_events(s
+ if (kvm_test_request(KVM_REQ_PMI, vcpu))
+ return true;
+
+- if (kvm_arch_interrupt_allowed(vcpu) &&
+- (kvm_cpu_has_interrupt(vcpu) ||
+- kvm_guest_apic_has_interrupt(vcpu)))
++ if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
+ return true;
+
+ if (kvm_hv_has_stimer_pending(vcpu))
--- /dev/null
+From 32f55e475ce2c4b8b124d335fcfaf1152ba977a1 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 7 Jun 2024 10:26:05 -0700
+Subject: KVM: nVMX: Request immediate exit iff pending nested event needs injection
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 32f55e475ce2c4b8b124d335fcfaf1152ba977a1 upstream.
+
+When requesting an immediate exit from L2 in order to inject a pending
+event, do so only if the pending event actually requires manual injection,
+i.e. if and only if KVM actually needs to regain control in order to
+deliver the event.
+
+Avoiding the "immediate exit" isn't simply an optimization, it's necessary
+to make forward progress, as the "already expired" VMX preemption timer
+trick that KVM uses to force a VM-Exit has higher priority than events
+that aren't directly injected.
+
+At present time, this is a glorified nop as all events processed by
+vmx_has_nested_events() require injection, but that will not hold true in
+the future, e.g. if there's a pending virtual interrupt in vmcs02.RVI.
+I.e. if KVM is trying to deliver a virtual interrupt to L2, the expired
+VMX preemption timer will trigger VM-Exit before the virtual interrupt is
+delivered, and KVM will effectively hang the vCPU in an endless loop of
+forced immediate VM-Exits (because the pending virtual interrupt never
+goes away).
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240607172609.3205077-3-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kvm/vmx/nested.c | 2 +-
+ arch/x86/kvm/x86.c | 4 ++--
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1819,7 +1819,7 @@ struct kvm_x86_nested_ops {
+ bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
+ u32 error_code);
+ int (*check_events)(struct kvm_vcpu *vcpu);
+- bool (*has_events)(struct kvm_vcpu *vcpu);
++ bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
+ void (*triple_fault)(struct kvm_vcpu *vcpu);
+ int (*get_state)(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4032,7 +4032,7 @@ static bool nested_vmx_preemption_timer_
+ to_vmx(vcpu)->nested.preemption_timer_expired;
+ }
+
+-static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
++static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
+ {
+ return nested_vmx_preemption_timer_pending(vcpu) ||
+ to_vmx(vcpu)->nested.mtf_pending;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10516,7 +10516,7 @@ static int kvm_check_and_inject_events(s
+
+ if (is_guest_mode(vcpu) &&
+ kvm_x86_ops.nested_ops->has_events &&
+- kvm_x86_ops.nested_ops->has_events(vcpu))
++ kvm_x86_ops.nested_ops->has_events(vcpu, true))
+ *req_immediate_exit = true;
+
+ /*
+@@ -13146,7 +13146,7 @@ static inline bool kvm_vcpu_has_events(s
+
+ if (is_guest_mode(vcpu) &&
+ kvm_x86_ops.nested_ops->has_events &&
+- kvm_x86_ops.nested_ops->has_events(vcpu))
++ kvm_x86_ops.nested_ops->has_events(vcpu, false))
+ return true;
+
+ if (kvm_xen_has_pending_events(vcpu))
--- /dev/null
+From 55dfb8bed6fe8bda390cc71cca878d11a9407099 Mon Sep 17 00:00:00 2001
+From: Gautam Menghani <gautam@linux.ibm.com>
+Date: Wed, 5 Jun 2024 17:09:09 +0530
+Subject: KVM: PPC: Book3S HV nestedv2: Add DPDES support in helper library for Guest state buffer
+
+From: Gautam Menghani <gautam@linux.ibm.com>
+
+commit 55dfb8bed6fe8bda390cc71cca878d11a9407099 upstream.
+
+Add support for using DPDES in the library for using guest state
+buffers. DPDES support is needed for enabling usage of doorbells in a L2
+KVM on PAPR guest.
+
+Fixes: 6ccbbc33f06a ("KVM: PPC: Add helper library for Guest State Buffers")
+Cc: stable@vger.kernel.org # v6.7+
+Signed-off-by: Gautam Menghani <gautam@linux.ibm.com>
+Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240605113913.83715-2-gautam@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arch/powerpc/kvm-nested.rst | 4 +++-
+ arch/powerpc/include/asm/guest-state-buffer.h | 3 ++-
+ arch/powerpc/include/asm/kvm_book3s.h | 1 +
+ arch/powerpc/kvm/book3s_hv_nestedv2.c | 7 +++++++
+ arch/powerpc/kvm/test-guest-state-buffer.c | 2 +-
+ 5 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/Documentation/arch/powerpc/kvm-nested.rst
++++ b/Documentation/arch/powerpc/kvm-nested.rst
+@@ -546,7 +546,9 @@ table information.
+ +--------+-------+----+--------+----------------------------------+
+ | 0x1052 | 0x08 | RW | T | CTRL |
+ +--------+-------+----+--------+----------------------------------+
+-| 0x1053-| | | | Reserved |
++| 0x1053 | 0x08 | RW | T | DPDES |
+++--------+-------+----+--------+----------------------------------+
++| 0x1054-| | | | Reserved |
+ | 0x1FFF | | | | |
+ +--------+-------+----+--------+----------------------------------+
+ | 0x2000 | 0x04 | RW | T | CR |
+--- a/arch/powerpc/include/asm/guest-state-buffer.h
++++ b/arch/powerpc/include/asm/guest-state-buffer.h
+@@ -81,6 +81,7 @@
+ #define KVMPPC_GSID_HASHKEYR 0x1050
+ #define KVMPPC_GSID_HASHPKEYR 0x1051
+ #define KVMPPC_GSID_CTRL 0x1052
++#define KVMPPC_GSID_DPDES 0x1053
+
+ #define KVMPPC_GSID_CR 0x2000
+ #define KVMPPC_GSID_PIDR 0x2001
+@@ -110,7 +111,7 @@
+ #define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1)
+
+ #define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0)
+-#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL
++#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_DPDES
+ #define KVMPPC_GSE_DW_REGS_COUNT \
+ (KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1)
+
+--- a/arch/powerpc/include/asm/kvm_book3s.h
++++ b/arch/powerpc/include/asm/kvm_book3s.h
+@@ -594,6 +594,7 @@ static inline u##size kvmppc_get_##reg(s
+
+
+ KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
++KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES)
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
+--- a/arch/powerpc/kvm/book3s_hv_nestedv2.c
++++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
+@@ -311,6 +311,10 @@ static int gs_msg_ops_vcpu_fill_info(str
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->vtb);
+ break;
++ case KVMPPC_GSID_DPDES:
++ rc = kvmppc_gse_put_u64(gsb, iden,
++ vcpu->arch.vcore->dpdes);
++ break;
+ case KVMPPC_GSID_LPCR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->lpcr);
+@@ -543,6 +547,9 @@ static int gs_msg_ops_vcpu_refresh_info(
+ case KVMPPC_GSID_VTB:
+ vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
+ break;
++ case KVMPPC_GSID_DPDES:
++ vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse);
++ break;
+ case KVMPPC_GSID_LPCR:
+ vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
+ break;
+--- a/arch/powerpc/kvm/test-guest-state-buffer.c
++++ b/arch/powerpc/kvm/test-guest-state-buffer.c
+@@ -151,7 +151,7 @@ static void test_gs_bitmap(struct kunit
+ i++;
+ }
+
+- for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) {
++ for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSE_DW_REGS_END; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
--- /dev/null
+From 54ec2bd9e0173b75daf84675d07c56584f96564b Mon Sep 17 00:00:00 2001
+From: Gautam Menghani <gautam@linux.ibm.com>
+Date: Wed, 5 Jun 2024 17:09:10 +0530
+Subject: KVM: PPC: Book3S HV nestedv2: Fix doorbell emulation
+
+From: Gautam Menghani <gautam@linux.ibm.com>
+
+commit 54ec2bd9e0173b75daf84675d07c56584f96564b upstream.
+
+Doorbell emulation is broken for KVM on PAPR guests as support for DPDES
+was not added in the initial patch series. Due to this, a KVM on PAPR
+guest with SMT > 1 cannot be booted with the XICS interrupt controller
+as doorbells are setup in the initial probe path when using XICS
+(pSeries_smp_probe()).
+
+Command to replicate the above bug:
+
+qemu-system-ppc64 \
+ -drive file=rhel.qcow2,format=qcow2 \
+ -m 20G \
+ -smp 8,cores=1,threads=8 \
+ -cpu host \
+ -nographic \
+ -machine pseries,ic-mode=xics -accel kvm
+
+Add doorbell state handling support in the host KVM code to fix doorbell
+emulation.
+
+Fixes: 19d31c5f1157 ("KVM: PPC: Add support for nestedv2 guests")
+Cc: stable@vger.kernel.org # v6.7+
+Signed-off-by: Gautam Menghani <gautam@linux.ibm.com>
+Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240605113913.83715-3-gautam@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/book3s_hv.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -4116,6 +4116,11 @@ static int kvmhv_vcpu_entry_nestedv2(str
+ int trap;
+ long rc;
+
++ if (vcpu->arch.doorbell_request) {
++ vcpu->arch.doorbell_request = 0;
++ kvmppc_set_dpdes(vcpu, 1);
++ }
++
+ io = &vcpu->arch.nestedv2_io;
+
+ msr = mfmsr();
--- /dev/null
+From 322a569c4b4188a0da2812f9e952780ce09b74ba Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 7 Jun 2024 10:26:06 -0700
+Subject: KVM: VMX: Split out the non-virtualization part of vmx_interrupt_blocked()
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 322a569c4b4188a0da2812f9e952780ce09b74ba upstream.
+
+Move the non-VMX chunk of the "interrupt blocked" checks to a separate
+helper so that KVM can reuse the code to detect if interrupts are blocked
+for L2, e.g. to determine if a virtual interrupt _for L2_ is a valid wake
+event. If L1 disables HLT-exiting for L2, nested APICv is enabled, and L2
+HLTs, then L2 virtual interrupts are valid wake events, but if and only if
+interrupts are unblocked for L2.
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240607172609.3205077-4-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 11 ++++++++---
+ arch/x86/kvm/vmx/vmx.h | 1 +
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -5052,14 +5052,19 @@ int vmx_nmi_allowed(struct kvm_vcpu *vcp
+ return !vmx_nmi_blocked(vcpu);
+ }
+
++bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
++{
++ return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
++ (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
++ (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
++}
++
+ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
+ {
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ return false;
+
+- return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
+- (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+- (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
++ return __vmx_interrupt_blocked(vcpu);
+ }
+
+ int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -406,6 +406,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu
+ bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
+ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
+ bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
++bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
+ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
+ bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
+ void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
--- /dev/null
+From a813f168336ec4ef725b836e598cd9dc14f76dd7 Mon Sep 17 00:00:00 2001
+From: Wentong Wu <wentong.wu@intel.com>
+Date: Fri, 7 Jun 2024 21:25:45 +0800
+Subject: media: ivsc: csi: don't count privacy on as error
+
+From: Wentong Wu <wentong.wu@intel.com>
+
+commit a813f168336ec4ef725b836e598cd9dc14f76dd7 upstream.
+
+Prior to the ongoing command privacy is on, it would return -1 to
+indicate the current privacy status, and the ongoing command would
+be well executed by firmware as well, so this is not error. This
+patch changes its behavior to notify privacy on directly by V4L2
+privacy control instead of reporting error.
+
+Fixes: 29006e196a56 ("media: pci: intel: ivsc: Add CSI submodule")
+Cc: stable@vger.kernel.org # for 6.6 and later
+Reported-by: Hao Yao <hao.yao@intel.com>
+Signed-off-by: Wentong Wu <wentong.wu@intel.com>
+Tested-by: Jason Chen <jason.z.chen@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/pci/intel/ivsc/mei_csi.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/pci/intel/ivsc/mei_csi.c
++++ b/drivers/media/pci/intel/ivsc/mei_csi.c
+@@ -192,7 +192,11 @@ static int mei_csi_send(struct mei_csi *
+
+ /* command response status */
+ ret = csi->cmd_response.status;
+- if (ret) {
++ if (ret == -1) {
++ /* notify privacy on instead of reporting error */
++ ret = 0;
++ v4l2_ctrl_s_ctrl(csi->privacy_ctrl, 1);
++ } else if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
--- /dev/null
+From 19cb6058620620e68f1a9aed99393be5c3629db4 Mon Sep 17 00:00:00 2001
+From: Jason Chen <Jason-ch.Chen@mediatek.com>
+Date: Wed, 3 Jul 2024 11:44:09 +0800
+Subject: remoteproc: mediatek: Increase MT8188/MT8195 SCP core0 DRAM size
+
+From: Jason Chen <Jason-ch.Chen@mediatek.com>
+
+commit 19cb6058620620e68f1a9aed99393be5c3629db4 upstream.
+
+The current DRAM size is insufficient for the HEVC feature, which
+requires more memory for proper functionality. This change ensures the
+feature has the necessary resources.
+
+Signed-off-by: Jason Chen <Jason-ch.Chen@mediatek.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240703034409.698-1-Jason-ch.Chen@mediatek.com
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/remoteproc/mtk_scp.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -1388,7 +1388,7 @@ static const struct mtk_scp_sizes_data d
+ };
+
+ static const struct mtk_scp_sizes_data mt8188_scp_sizes = {
+- .max_dram_size = 0x500000,
++ .max_dram_size = 0x800000,
+ .ipi_share_buffer_size = 600,
+ };
+
+@@ -1397,6 +1397,11 @@ static const struct mtk_scp_sizes_data m
+ .ipi_share_buffer_size = 600,
+ };
+
++static const struct mtk_scp_sizes_data mt8195_scp_sizes = {
++ .max_dram_size = 0x800000,
++ .ipi_share_buffer_size = 288,
++};
++
+ static const struct mtk_scp_of_data mt8183_of_data = {
+ .scp_clk_get = mt8183_scp_clk_get,
+ .scp_before_load = mt8183_scp_before_load,
+@@ -1474,7 +1479,7 @@ static const struct mtk_scp_of_data mt81
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+- .scp_sizes = &default_scp_sizes,
++ .scp_sizes = &mt8195_scp_sizes,
+ };
+
+ static const struct mtk_scp_of_data mt8195_of_data_c1 = {
jbd2-precompute-number-of-transaction-descriptor-blocks.patch
jbd2-avoid-infinite-transaction-commit-loop.patch
media-uvcvideo-fix-integer-overflow-calculating-timestamp.patch
+media-ivsc-csi-don-t-count-privacy-on-as-error.patch
+remoteproc-mediatek-increase-mt8188-mt8195-scp-core0-dram-size.patch
+kvm-ppc-book3s-hv-nestedv2-fix-doorbell-emulation.patch
+kvm-ppc-book3s-hv-nestedv2-add-dpdes-support-in-helper-library-for-guest-state-buffer.patch
+kvm-vmx-split-out-the-non-virtualization-part-of-vmx_interrupt_blocked.patch
+kvm-nvmx-add-a-helper-to-get-highest-pending-from-posted-interrupt-vector.patch
+kvm-nvmx-request-immediate-exit-iff-pending-nested-event-needs-injection.patch
+kvm-nvmx-check-for-pending-posted-interrupts-when-looking-for-nested-events.patch
+kvm-nvmx-fold-requested-virtual-interrupt-check-into-has_nested_events.patch