--- /dev/null
+From 3a25dfa67fe40f3a2690af2c562e0947a78bd6a0 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 20 Oct 2021 06:22:59 -0400
+Subject: KVM: nVMX: promptly process interrupts delivered while in guest mode
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 3a25dfa67fe40f3a2690af2c562e0947a78bd6a0 upstream.
+
+Since commit c300ab9f08df ("KVM: x86: Replace late check_nested_events() hack with
+more precise fix") there is no longer the certainty that check_nested_events()
+tries to inject an external interrupt vmexit to L1 on every call to vcpu_enter_guest.
+Therefore, even in that case we need to set KVM_REQ_EVENT. This ensures
+that inject_pending_event() is called, and from there kvm_check_nested_events().
+
+Fixes: c300ab9f08df ("KVM: x86: Replace late check_nested_events() hack with more precise fix")
+Cc: stable@vger.kernel.org
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 17 ++++++-----------
+ 1 file changed, 6 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6288,18 +6288,13 @@ static int vmx_sync_pir_to_irr(struct kv
+
+ /*
+ * If we are running L2 and L1 has a new pending interrupt
+- * which can be injected, we should re-evaluate
+- * what should be done with this new L1 interrupt.
+- * If L1 intercepts external-interrupts, we should
+- * exit from L2 to L1. Otherwise, interrupt should be
+- * delivered directly to L2.
++ * which can be injected, this may cause a vmexit or it may
++ * be injected into L2. Either way, this interrupt will be
++ * processed via KVM_REQ_EVENT, not RVI, because we do not use
++ * virtual interrupt delivery to inject L1 interrupts into L2.
+ */
+- if (is_guest_mode(vcpu) && max_irr_updated) {
+- if (nested_exit_on_intr(vcpu))
+- kvm_vcpu_exiting_guest_mode(vcpu);
+- else
+- kvm_make_request(KVM_REQ_EVENT, vcpu);
+- }
++ if (is_guest_mode(vcpu) && max_irr_updated)
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ } else {
+ max_irr = kvm_lapic_find_highest_irr(vcpu);
+ }
--- /dev/null
+From 9b4416c5095c20e110c82ae602c254099b83b72f Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Fri, 15 Oct 2021 23:01:48 +1100
+Subject: KVM: PPC: Book3S HV: Fix stack handling in idle_kvm_start_guest()
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 9b4416c5095c20e110c82ae602c254099b83b72f upstream.
+
+In commit 10d91611f426 ("powerpc/64s: Reimplement book3s idle code in
+C") kvm_start_guest() became idle_kvm_start_guest(). The old code
+allocated a stack frame on the emergency stack, but didn't use the
+frame to store anything, and also didn't store anything in its caller's
+frame.
+
+idle_kvm_start_guest(), on the other hand, is written more like a normal C
+function: it creates a frame on entry, and also stores CR/LR into its
+caller's frame (per the ABI). The problem is that there is no caller
+frame on the emergency stack.
+
+The emergency stack for a given CPU is allocated with:
+
+ paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
+
+So emergency_sp actually points to the first address above the emergency
+stack allocation for a given CPU; we must not store above it without
+first decrementing it to create a frame. This is different to the
+regular kernel stack, paca->kstack, which is initialised to point at an
+initial frame that is ready to use.
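+
+As a hedged standalone sketch of that difference (the names and sizes below
+are stand-ins, not the kernel's definitions): a pointer to one-past-the-end
+of an allocation must be decremented before anything is stored through it.
+
+	#include <stdlib.h>
+
+	#define THREAD_SIZE	(4 * 4096)	/* stand-in for the real THREAD_SIZE  */
+	#define FRAME_SIZE	32		/* stand-in for a minimal stack frame */
+
+	int main(void)
+	{
+		char *base = malloc(THREAD_SIZE);	 /* ~ alloc_stack()               */
+		char *emergency_sp = base + THREAD_SIZE; /* first address past the stack  */
+		char *frame = emergency_sp - FRAME_SIZE; /* decrement first...            */
+
+		frame[0] = 0;	/* ...then it is safe to store inside the allocation */
+		free(base);
+		return 0;
+	}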
+
+idle_kvm_start_guest() stores the backchain, CR and LR, all of which
+write outside the allocation for the emergency stack. It then creates a
+stack frame and saves the non-volatile registers. Unfortunately the
+frame it creates is not large enough to fit the non-volatiles, and so
+the saving of the non-volatile registers also writes outside the
+emergency stack allocation.
+
+The end result is that we corrupt whatever is at 0-24 bytes, and 112-248
+bytes above the emergency stack allocation.
+
+In practice this has gone unnoticed because the memory immediately above
+the emergency stack happens to be used for other stack allocations,
+either another CPU's mc_emergency_sp or an IRQ stack. See the order of
+calls to irqstack_early_init() and emergency_stack_init().
+
+The low addresses of another stack are the top of that stack, and so are
+only used if that stack is under extreme pressure, which essentially
+never happens in practice - and if it did there's a high likelihood we'd
+crash due to that stack overflowing.
+
+Still, we shouldn't be corrupting someone else's stack, and it is purely
+luck that we aren't corrupting something else.
+
+To fix it we save CR/LR into the caller's frame using the existing r1 on
+entry; we then create a SWITCH_FRAME_SIZE frame (which has space for
+pt_regs) on the emergency stack with the backchain pointing to the
+existing stack, and then finally we switch to the new frame on the
+emergency stack.
+
+Fixes: 10d91611f426 ("powerpc/64s: Reimplement book3s idle code in C")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20211015133929.832061-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -255,13 +255,15 @@ kvm_novcpu_exit:
+ * r3 contains the SRR1 wakeup value, SRR1 is trashed.
+ */
+ _GLOBAL(idle_kvm_start_guest)
+- ld r4,PACAEMERGSP(r13)
+ mfcr r5
+ mflr r0
+- std r1,0(r4)
+- std r5,8(r4)
+- std r0,16(r4)
+- subi r1,r4,STACK_FRAME_OVERHEAD
++ std r5, 8(r1) // Save CR in caller's frame
++ std r0, 16(r1) // Save LR in caller's frame
++ // Create frame on emergency stack
++ ld r4, PACAEMERGSP(r13)
++ stdu r1, -SWITCH_FRAME_SIZE(r4)
++ // Switch to new frame on emergency stack
++ mr r1, r4
+ SAVE_NVGPRS(r1)
+
+ /*
+@@ -395,10 +397,9 @@ kvm_no_guest:
+ /* set up r3 for return */
+ mfspr r3,SPRN_SRR1
+ REST_NVGPRS(r1)
+- addi r1, r1, STACK_FRAME_OVERHEAD
+- ld r0, 16(r1)
+- ld r5, 8(r1)
+- ld r1, 0(r1)
++ ld r1, 0(r1) // Switch back to caller stack
++ ld r0, 16(r1) // Reload LR
++ ld r5, 8(r1) // Reload CR
+ mtlr r0
+ mtcr r5
+ blr
--- /dev/null
+From cdeb5d7d890e14f3b70e8087e745c4a6a7d9f337 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Fri, 15 Oct 2021 23:02:08 +1100
+Subject: KVM: PPC: Book3S HV: Make idle_kvm_start_guest() return 0 if it went to guest
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit cdeb5d7d890e14f3b70e8087e745c4a6a7d9f337 upstream.
+
+We call idle_kvm_start_guest() from power7_offline() if the thread has
+been requested to enter KVM. We pass it the SRR1 value that was returned
+from power7_idle_insn(), which tells us what sort of wakeup we're
+processing.
+
+Depending on the SRR1 value we pass in, the KVM code might enter the
+guest, or it might return to us to do some host action if the wakeup
+requires it.
+
+If idle_kvm_start_guest() is able to handle the wakeup and enter the
+guest, it is supposed to indicate that by returning a zero SRR1 value
+to us.
+
+That was the behaviour prior to commit 10d91611f426 ("powerpc/64s:
+Reimplement book3s idle code in C"), however in that commit the
+handling of SRR1 was reworked, and the zeroing behaviour was lost.
+
+Returning from idle_kvm_start_guest() without zeroing the SRR1 value can
+confuse the host offline code, causing the guest to crash and other
+weirdness.
+
+Fixes: 10d91611f426 ("powerpc/64s: Reimplement book3s idle code in C")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20211015133929.832061-2-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -264,6 +264,7 @@ _GLOBAL(idle_kvm_start_guest)
+ stdu r1, -SWITCH_FRAME_SIZE(r4)
+ // Switch to new frame on emergency stack
+ mr r1, r4
++ std r3, 32(r1) // Save SRR1 wakeup value
+ SAVE_NVGPRS(r1)
+
+ /*
+@@ -315,6 +316,10 @@ kvm_unsplit_wakeup:
+
+ kvm_secondary_got_guest:
+
++ // About to go to guest, clear saved SRR1
++ li r0, 0
++ std r0, 32(r1)
++
+ /* Set HSTATE_DSCR(r13) to something sensible */
+ ld r6, PACA_DSCR_DEFAULT(r13)
+ std r6, HSTATE_DSCR(r13)
+@@ -394,8 +399,8 @@ kvm_no_guest:
+ mfspr r4, SPRN_LPCR
+ rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
+ mtspr SPRN_LPCR, r4
+- /* set up r3 for return */
+- mfspr r3,SPRN_SRR1
++ // Return SRR1 wakeup value, or 0 if we went into the guest
++ ld r3, 32(r1)
+ REST_NVGPRS(r1)
+ ld r1, 0(r1) // Switch back to caller stack
+ ld r0, 16(r1) // Reload LR
--- /dev/null
+From ea724ea420aac58b41bc822d1aed6940b136b78d Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 12 Oct 2021 10:51:55 -0400
+Subject: KVM: SEV-ES: clean up kvm_sev_es_ins/outs
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit ea724ea420aac58b41bc822d1aed6940b136b78d upstream.
+
+A few very small cleanups to the functions, smushed together because
+the patch is already very small like this:
+
+- inline emulator_pio_in_emulated and emulator_pio_out_emulated,
+ since we already have the vCPU
+
+- remove the data argument and pull setting vcpu->arch.sev_pio_data into
+ the caller
+
+- remove unnecessary clearing of vcpu->arch.pio.count when
+ emulation is done by the kernel (and therefore vcpu->arch.pio.count
+ is already clear on exit from emulator_pio_in and emulator_pio_out).
+
+No functional change intended.
+
+Cc: stable@vger.kernel.org
+Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 31 +++++++++++++++----------------
+ 1 file changed, 15 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -12330,34 +12330,32 @@ static int complete_sev_es_emulated_ins(
+ }
+
+ static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+- unsigned int port, void *data, unsigned int count)
++ unsigned int port, unsigned int count)
+ {
+- int ret;
++ int ret = emulator_pio_out(vcpu, size, port,
++ vcpu->arch.sev_pio_data, count);
+
+- ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
+- data, count);
+- if (ret)
++ if (ret) {
++ /* Emulation done by the kernel. */
+ return ret;
++ }
+
+ vcpu->arch.pio.count = 0;
+-
+ return 0;
+ }
+
+ static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+- unsigned int port, void *data, unsigned int count)
++ unsigned int port, unsigned int count)
+ {
+- int ret;
++ int ret = emulator_pio_in(vcpu, size, port,
++ vcpu->arch.sev_pio_data, count);
+
+- ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
+- data, count);
+ if (ret) {
+- vcpu->arch.pio.count = 0;
+- } else {
+- vcpu->arch.sev_pio_data = data;
+- vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
++ /* Emulation done by the kernel. */
++ return ret;
+ }
+
++ vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+ return 0;
+ }
+
+@@ -12365,8 +12363,9 @@ int kvm_sev_es_string_io(struct kvm_vcpu
+ unsigned int port, void *data, unsigned int count,
+ int in)
+ {
+- return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
+- : kvm_sev_es_outs(vcpu, size, port, data, count);
++ vcpu->arch.sev_pio_data = data;
++ return in ? kvm_sev_es_ins(vcpu, size, port, count)
++ : kvm_sev_es_outs(vcpu, size, port, count);
+ }
+ EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+
--- /dev/null
+From 019057bd73d1751fdfec41e43148baf3303d98f9 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 12 Oct 2021 11:07:59 -0400
+Subject: KVM: SEV-ES: fix length of string I/O
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 019057bd73d1751fdfec41e43148baf3303d98f9 upstream.
+
+The size of the data in the scratch buffer is not divided by the size of
+each port I/O operation, so vcpu->arch.pio.count ends up being larger
+than it should be by a factor of size.
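+
+As a hedged worked example (the numbers are made up): a 16-byte scratch
+area used for 4-byte port I/O describes 4 operations, so the count passed
+down must be the byte length divided by the per-operation size:
+
+	unsigned int size = 4;			/* bytes per I/O operation   */
+	unsigned int ghcb_sa_len = 16;		/* bytes in the scratch area */
+	unsigned int count = ghcb_sa_len / size;	/* 4 operations, not 16 */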
+
+Cc: stable@vger.kernel.org
+Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
+Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/sev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2591,7 +2591,7 @@ int sev_es_string_io(struct vcpu_svm *sv
+ return -EINVAL;
+
+ return kvm_sev_es_string_io(&svm->vcpu, size, port,
+- svm->ghcb_sa, svm->ghcb_sa_len, in);
++ svm->ghcb_sa, svm->ghcb_sa_len / size, in);
+ }
+
+ void sev_es_init_vmcb(struct vcpu_svm *svm)
--- /dev/null
+From 95e16b4792b0429f1933872f743410f00e590c55 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 12 Oct 2021 11:33:03 -0400
+Subject: KVM: SEV-ES: go over the sev_pio_data buffer in multiple passes if needed
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 95e16b4792b0429f1933872f743410f00e590c55 upstream.
+
+The PIO scratch buffer can be larger than a single page, and therefore
+it is not possible to copy it in a single step to vcpu->arch.pio_data.
+Bound each call to emulator_pio_in/out to a single page; keep
+track of how many I/O operations are left in vcpu->arch.sev_pio_count,
+so that the operation can be restarted in the complete_userspace_io
+callback.
+
+For OUT, this means that the previous kvm_sev_es_outs implementation
+becomes an iteration of the loop, and we can consume the sev_pio_data
+buffer before leaving to userspace.
+
+For IN, instead, consuming the buffer and decreasing sev_pio_count
+is always done in the complete_userspace_io callback, because that
+is when the memcpy is done into sev_pio_data.
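+
+A minimal standalone sketch of the per-page chunking arithmetic described
+above, using made-up sizes (this is not the kernel code itself):
+
+	#include <stdio.h>
+
+	#define PAGE_SIZE 4096u
+
+	int main(void)
+	{
+		unsigned int size = 2;		/* bytes per port I/O operation */
+		unsigned int remaining = 5000;	/* total operations requested   */
+
+		while (remaining) {
+			unsigned int count = PAGE_SIZE / size;	/* at most one page per pass */
+
+			if (count > remaining)
+				count = remaining;
+			printf("pass: %u ops, %u bytes\n", count, count * size);
+			remaining -= count;	/* ~ sev_pio_count -= count */
+		}
+		return 0;
+	}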
+
+Cc: stable@vger.kernel.org
+Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
+Reported-by: Felix Wilhelm <fwilhelm@google.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h | 1
+ arch/x86/kvm/x86.c | 72 +++++++++++++++++++++++++++++++---------
+ 2 files changed, 57 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -696,6 +696,7 @@ struct kvm_vcpu_arch {
+ struct kvm_pio_request pio;
+ void *pio_data;
+ void *sev_pio_data;
++ unsigned sev_pio_count;
+
+ u8 event_exit_inst_len;
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -12321,38 +12321,77 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu
+ EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
+
+ static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+- unsigned int port, unsigned int count)
++ unsigned int port);
++
++static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
++{
++ int size = vcpu->arch.pio.size;
++ int port = vcpu->arch.pio.port;
++
++ vcpu->arch.pio.count = 0;
++ if (vcpu->arch.sev_pio_count)
++ return kvm_sev_es_outs(vcpu, size, port);
++ return 1;
++}
++
++static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
++ unsigned int port)
+ {
+- int ret = emulator_pio_out(vcpu, size, port,
+- vcpu->arch.sev_pio_data, count);
++ for (;;) {
++ unsigned int count =
++ min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
++ int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
++
++ /* memcpy done already by emulator_pio_out. */
++ vcpu->arch.sev_pio_count -= count;
++ vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
++ if (!ret)
++ break;
+
+- if (ret) {
+ /* Emulation done by the kernel. */
+- return ret;
++ if (!vcpu->arch.sev_pio_count)
++ return 1;
+ }
+
+- vcpu->arch.pio.count = 0;
++ vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
+ return 0;
+ }
+
++static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
++ unsigned int port);
++
++static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
++{
++ unsigned count = vcpu->arch.pio.count;
++ complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
++ vcpu->arch.sev_pio_count -= count;
++ vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
++}
++
+ static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+ {
+- memcpy(vcpu->arch.sev_pio_data, vcpu->arch.pio_data,
+- vcpu->arch.pio.count * vcpu->arch.pio.size);
+- vcpu->arch.pio.count = 0;
++ int size = vcpu->arch.pio.size;
++ int port = vcpu->arch.pio.port;
+
++ advance_sev_es_emulated_ins(vcpu);
++ if (vcpu->arch.sev_pio_count)
++ return kvm_sev_es_ins(vcpu, size, port);
+ return 1;
+ }
+
+ static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+- unsigned int port, unsigned int count)
++ unsigned int port)
+ {
+- int ret = emulator_pio_in(vcpu, size, port,
+- vcpu->arch.sev_pio_data, count);
++ for (;;) {
++ unsigned int count =
++ min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
++ if (!__emulator_pio_in(vcpu, size, port, count))
++ break;
+
+- if (ret) {
+ /* Emulation done by the kernel. */
+- return ret;
++ advance_sev_es_emulated_ins(vcpu);
++ if (!vcpu->arch.sev_pio_count)
++ return 1;
+ }
+
+ vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+@@ -12364,8 +12403,9 @@ int kvm_sev_es_string_io(struct kvm_vcpu
+ int in)
+ {
+ vcpu->arch.sev_pio_data = data;
+- return in ? kvm_sev_es_ins(vcpu, size, port, count)
+- : kvm_sev_es_outs(vcpu, size, port, count);
++ vcpu->arch.sev_pio_count = count;
++ return in ? kvm_sev_es_ins(vcpu, size, port)
++ : kvm_sev_es_outs(vcpu, size, port);
+ }
+ EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+
--- /dev/null
+From 4fa4b38dae6fc6a3695695add8c18fa8b6a05a1a Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 12 Oct 2021 11:25:45 -0400
+Subject: KVM: SEV-ES: keep INS functions together
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 4fa4b38dae6fc6a3695695add8c18fa8b6a05a1a upstream.
+
+Make the diff a little nicer when we actually get to fixing
+the bug. No functional change intended.
+
+Cc: stable@vger.kernel.org
+Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -12320,15 +12320,6 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu
+ }
+ EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
+
+-static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+-{
+- memcpy(vcpu->arch.sev_pio_data, vcpu->arch.pio_data,
+- vcpu->arch.pio.count * vcpu->arch.pio.size);
+- vcpu->arch.pio.count = 0;
+-
+- return 1;
+-}
+-
+ static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+ unsigned int port, unsigned int count)
+ {
+@@ -12344,6 +12335,15 @@ static int kvm_sev_es_outs(struct kvm_vc
+ return 0;
+ }
+
++static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
++{
++ memcpy(vcpu->arch.sev_pio_data, vcpu->arch.pio_data,
++ vcpu->arch.pio.count * vcpu->arch.pio.size);
++ vcpu->arch.pio.count = 0;
++
++ return 1;
++}
++
+ static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+ unsigned int port, unsigned int count)
+ {
--- /dev/null
+From 9f1ee7b169afbd10c3ad254220d1b37beb5798aa Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 18 Oct 2021 06:49:18 -0400
+Subject: KVM: SEV-ES: reduce ghcb_sa_len to 32 bits
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 9f1ee7b169afbd10c3ad254220d1b37beb5798aa upstream.
+
+The size of the GHCB scratch area is limited to 16 KiB (GHCB_SCRATCH_AREA_LIMIT),
+so there is no need for it to be a u64. This fixes a build error on 32-bit
+systems:
+
+i686-linux-gnu-ld: arch/x86/kvm/svm/sev.o: in function `sev_es_string_io:
+sev.c:(.text+0x110f): undefined reference to `__udivdi3'
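+
+A hedged illustration of why the narrower type avoids the error (the helper
+functions below are hypothetical; only the kernel's u64/u32 typedefs are
+assumed): a 64-by-32 division with a non-constant divisor on 32-bit x86 is
+compiled into a call to libgcc's __udivdi3, which the kernel does not link,
+whereas a 32-bit division compiles to a native divide:
+
+	unsigned int ops_wide(u64 len, unsigned int size)
+	{
+		return len / size;	/* emits a call to __udivdi3 on i386     */
+	}
+
+	unsigned int ops_narrow(u32 len, unsigned int size)
+	{
+		return len / size;	/* plain 32-bit divide, no libgcc helper */
+	}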
+
+Cc: stable@vger.kernel.org
+Fixes: 019057bd73d1 ("KVM: SEV-ES: fix length of string I/O")
+Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -191,7 +191,7 @@ struct vcpu_svm {
+
+ /* SEV-ES scratch area support */
+ void *ghcb_sa;
+- u64 ghcb_sa_len;
++ u32 ghcb_sa_len;
+ bool ghcb_sa_sync;
+ bool ghcb_sa_free;
+
--- /dev/null
+From b5998402e3de429b5e5f9bdea08ddf77c5fd661e Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 12 Oct 2021 10:22:34 -0400
+Subject: KVM: SEV-ES: rename guest_ins_data to sev_pio_data
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit b5998402e3de429b5e5f9bdea08ddf77c5fd661e upstream.
+
+We will be using this field for OUTS emulation as well, in case the
+data that is pushed via OUTS spans more than one page. In that case,
+there will be a need to save the data pointer across exits to userspace.
+
+So, change the name to something that refers to any kind of PIO.
+Also spell out what it is used for, namely SEV-ES.
+
+No functional change intended.
+
+Cc: stable@vger.kernel.org
+Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kvm/x86.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -695,7 +695,7 @@ struct kvm_vcpu_arch {
+
+ struct kvm_pio_request pio;
+ void *pio_data;
+- void *guest_ins_data;
++ void *sev_pio_data;
+
+ u8 event_exit_inst_len;
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -12322,7 +12322,7 @@ EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
+
+ static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+ {
+- memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
++ memcpy(vcpu->arch.sev_pio_data, vcpu->arch.pio_data,
+ vcpu->arch.pio.count * vcpu->arch.pio.size);
+ vcpu->arch.pio.count = 0;
+
+@@ -12354,7 +12354,7 @@ static int kvm_sev_es_ins(struct kvm_vcp
+ if (ret) {
+ vcpu->arch.pio.count = 0;
+ } else {
+- vcpu->arch.guest_ins_data = data;
++ vcpu->arch.sev_pio_data = data;
+ vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+ }
+
--- /dev/null
+From c8c340a9b4149fe5caa433f3b62463a1c8e07a46 Mon Sep 17 00:00:00 2001
+From: Masahiro Kozuka <masa.koz@kozuka.jp>
+Date: Tue, 14 Sep 2021 14:09:51 -0700
+Subject: KVM: SEV: Flush cache on non-coherent systems before RECEIVE_UPDATE_DATA
+
+From: Masahiro Kozuka <masa.koz@kozuka.jp>
+
+commit c8c340a9b4149fe5caa433f3b62463a1c8e07a46 upstream.
+
+Flush the destination page before invoking RECEIVE_UPDATE_DATA, as the
+PSP encrypts the data with the guest's key when writing to guest memory.
+If the target memory was not previously encrypted, the cache may contain
+dirty, unencrypted data that will persist on non-coherent systems.
+
+Fixes: 15fb7de1a7f5 ("KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_DATA command")
+Cc: stable@vger.kernel.org
+Cc: Peter Gonda <pgonda@google.com>
+Cc: Marc Orr <marcorr@google.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Masahiro Kozuka <masa.koz@kozuka.jp>
+[sean: converted bug report to changelog]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-Id: <20210914210951.2994260-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/sev.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1480,6 +1480,13 @@ static int sev_receive_update_data(struc
+ goto e_free_trans;
+ }
+
++ /*
++ * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
++ * encrypts the written data with the guest's key, and the cache may
++ * contain dirty, unencrypted data.
++ */
++ sev_clflush_pages(guest_page, n);
++
+ /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
+ data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
+ data.guest_address |= sev_me_mask;
--- /dev/null
+From de7cd3f6761f49bef044ec49493d88737a70f1a6 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 20 Oct 2021 06:27:36 -0400
+Subject: KVM: x86: check for interrupts before deciding whether to exit the fast path
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit de7cd3f6761f49bef044ec49493d88737a70f1a6 upstream.
+
+The kvm_x86_sync_pir_to_irr callback can sometimes set KVM_REQ_EVENT.
+If that happens exactly at the time that an exit is handled as
+EXIT_FASTPATH_REENTER_GUEST, vcpu_enter_guest will go incorrectly
+through the loop that calls kvm_x86_run, instead of processing
+the request promptly.
+
+Fixes: 379a3c8ee444 ("KVM: VMX: Optimize posted-interrupt delivery for timer fastpath")
+Cc: stable@vger.kernel.org
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9642,14 +9642,14 @@ static int vcpu_enter_guest(struct kvm_v
+ if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+ break;
+
+- if (unlikely(kvm_vcpu_exit_request(vcpu))) {
++ if (vcpu->arch.apicv_active)
++ static_call(kvm_x86_sync_pir_to_irr)(vcpu);
++
++ if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+ exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+ break;
+ }
+-
+- if (vcpu->arch.apicv_active)
+- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+- }
++ }
+
+ /*
+ * Do this here before restoring debug registers on the host. And
--- /dev/null
+From 0d33b1baeb6ca7165d5ed4fdd1a8f969985e35b9 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 13 Oct 2021 12:29:42 -0400
+Subject: KVM: x86: leave vcpu->arch.pio.count alone in emulator_pio_in_out
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 0d33b1baeb6ca7165d5ed4fdd1a8f969985e35b9 upstream.
+
+Currently emulator_pio_in clears vcpu->arch.pio.count twice if
+emulator_pio_in_out performs kernel PIO. Move the clear into
+emulator_pio_out where it is actually necessary.
+
+No functional change intended.
+
+Cc: stable@vger.kernel.org
+Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6915,10 +6915,8 @@ static int emulator_pio_in_out(struct kv
+ vcpu->arch.pio.count = count;
+ vcpu->arch.pio.size = size;
+
+- if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+- vcpu->arch.pio.count = 0;
++ if (!kernel_pio(vcpu, vcpu->arch.pio_data))
+ return 1;
+- }
+
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+ vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
+@@ -6964,9 +6962,16 @@ static int emulator_pio_out(struct kvm_v
+ unsigned short port, const void *val,
+ unsigned int count)
+ {
++ int ret;
++
+ memcpy(vcpu->arch.pio_data, val, size * count);
+ trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
+- return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
++ ret = emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
++ if (ret)
++ vcpu->arch.pio.count = 0;
++
++ return ret;
++
+ }
+
+ static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
--- /dev/null
+From 6b5efc930bbc8c97e4a1fe2ccb9a6f286365a56d Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 12 Oct 2021 12:35:20 -0400
+Subject: KVM: x86: remove unnecessary arguments from complete_emulator_pio_in
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 6b5efc930bbc8c97e4a1fe2ccb9a6f286365a56d upstream.
+
+complete_emulator_pio_in can expect that vcpu->arch.pio has been filled in,
+and therefore does not need the size and count arguments. This makes things
+nicer when the function is called directly from a complete_userspace_io
+callback.
+
+No functional change intended.
+
+Cc: stable@vger.kernel.org
+Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6936,11 +6936,12 @@ static int __emulator_pio_in(struct kvm_
+ return emulator_pio_in_out(vcpu, size, port, count, true);
+ }
+
+-static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+- unsigned short port, void *val)
++static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
+ {
+- memcpy(val, vcpu->arch.pio_data, size * vcpu->arch.pio.count);
+- trace_kvm_pio(KVM_PIO_IN, port, size, vcpu->arch.pio.count, vcpu->arch.pio_data);
++ int size = vcpu->arch.pio.size;
++ unsigned count = vcpu->arch.pio.count;
++ memcpy(val, vcpu->arch.pio_data, size * count);
++ trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
+ vcpu->arch.pio.count = 0;
+ }
+
+@@ -6958,7 +6959,7 @@ static int emulator_pio_in(struct kvm_vc
+ }
+
+ WARN_ON(count != vcpu->arch.pio.count);
+- complete_emulator_pio_in(vcpu, size, port, val);
++ complete_emulator_pio_in(vcpu, val);
+ return 1;
+ }
+
--- /dev/null
+From 3b27de27183911d461afedf50c6fa30c59740c07 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 13 Oct 2021 12:32:02 -0400
+Subject: KVM: x86: split the two parts of emulator_pio_in
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 3b27de27183911d461afedf50c6fa30c59740c07 upstream.
+
+emulator_pio_in handles both the case where the data is pending in
+vcpu->arch.pio.count, and the case where I/O has to be done via either
+an in-kernel device or a userspace exit. For SEV-ES we would like
+to split these, to identify clearly the moment at which the
+sev_pio_data is consumed. To this end, create two different
+functions: __emulator_pio_in fills in vcpu->arch.pio.count, while
+complete_emulator_pio_in clears it and releases vcpu->arch.pio_data.
+
+Because this patch has to be backported, things are left a bit messy.
+kernel_pio() operates on vcpu->arch.pio, which leads to emulator_pio_in()
+having two calls to complete_emulator_pio_in(). It will be fixed
+in the next release.
+
+While at it, remove the unused void* val argument of emulator_pio_in_out.
+The function currently hardcodes vcpu->arch.pio_data as the
+source/destination buffer, which sucks but will be fixed after the more
+severe SEV-ES buffer overflow.
+
+No functional change intended.
+
+Cc: stable@vger.kernel.org
+Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 45 ++++++++++++++++++++++++++++-----------------
+ 1 file changed, 28 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6907,7 +6907,7 @@ static int kernel_pio(struct kvm_vcpu *v
+ }
+
+ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
+- unsigned short port, void *val,
++ unsigned short port,
+ unsigned int count, bool in)
+ {
+ vcpu->arch.pio.port = port;
+@@ -6928,26 +6928,38 @@ static int emulator_pio_in_out(struct kv
+ return 0;
+ }
+
+-static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+- unsigned short port, void *val, unsigned int count)
++static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
++ unsigned short port, unsigned int count)
+ {
+- int ret;
++ WARN_ON(vcpu->arch.pio.count);
++ memset(vcpu->arch.pio_data, 0, size * count);
++ return emulator_pio_in_out(vcpu, size, port, count, true);
++}
+
+- if (vcpu->arch.pio.count)
+- goto data_avail;
++static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, int size,
++ unsigned short port, void *val)
++{
++ memcpy(val, vcpu->arch.pio_data, size * vcpu->arch.pio.count);
++ trace_kvm_pio(KVM_PIO_IN, port, size, vcpu->arch.pio.count, vcpu->arch.pio_data);
++ vcpu->arch.pio.count = 0;
++}
+
+- memset(vcpu->arch.pio_data, 0, size * count);
++static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
++ unsigned short port, void *val, unsigned int count)
++{
++ if (vcpu->arch.pio.count) {
++ /* Complete previous iteration. */
++ } else {
++ int r = __emulator_pio_in(vcpu, size, port, count);
++ if (!r)
++ return r;
+
+- ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
+- if (ret) {
+-data_avail:
+- memcpy(val, vcpu->arch.pio_data, size * count);
+- trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
+- vcpu->arch.pio.count = 0;
+- return 1;
++ /* Results already available, fall through. */
+ }
+
+- return 0;
++ WARN_ON(count != vcpu->arch.pio.count);
++ complete_emulator_pio_in(vcpu, size, port, val);
++ return 1;
+ }
+
+ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+@@ -6966,12 +6978,11 @@ static int emulator_pio_out(struct kvm_v
+
+ memcpy(vcpu->arch.pio_data, val, size * count);
+ trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
+- ret = emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
++ ret = emulator_pio_in_out(vcpu, size, port, count, false);
+ if (ret)
+ vcpu->arch.pio.count = 0;
+
+ return ret;
+-
+ }
+
+ static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
--- /dev/null
+From 3ddd60268c24bcac9d744404cc277e9dc52fe6b6 Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Mon, 18 Oct 2021 15:16:06 -0700
+Subject: mm, slub: fix incorrect memcg slab count for bulk free
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 3ddd60268c24bcac9d744404cc277e9dc52fe6b6 upstream.
+
+kmem_cache_free_bulk() will call memcg_slab_free_hook() for all objects
+when doing bulk free. So we shouldn't call memcg_slab_free_hook() again
+for bulk free to avoid an incorrect memcg slab count.
+
+Link: https://lkml.kernel.org/r/20210916123920.48704-6-linmiaohe@huawei.com
+Fixes: d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Bharata B Rao <bharata@linux.ibm.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Faiyaz Mohammed <faiyazm@codeaurora.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3173,7 +3173,9 @@ static __always_inline void do_slab_free
+ struct kmem_cache_cpu *c;
+ unsigned long tid;
+
+- memcg_slab_free_hook(s, &head, 1);
++ /* memcg_slab_free_hook() is already called for bulk free. */
++ if (!tail)
++ memcg_slab_free_hook(s, &head, 1);
+ redo:
+ /*
+ * Determine the currently cpus per cpu slab.
--- /dev/null
+From 899447f669da76cc3605665e1a95ee877bc464cc Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Mon, 18 Oct 2021 15:15:55 -0700
+Subject: mm, slub: fix mismatch between reconstructed freelist depth and cnt
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 899447f669da76cc3605665e1a95ee877bc464cc upstream.
+
+If an object's reuse is delayed, it will be excluded from the reconstructed
+freelist. But we forgot to adjust the cnt accordingly. So there will
+be a mismatch between the reconstructed freelist depth and cnt. This will
+lead to free_debug_processing() complaining about the freelist count or an
+incorrect slub inuse count.
+
+Link: https://lkml.kernel.org/r/20210916123920.48704-3-linmiaohe@huawei.com
+Fixes: c3895391df38 ("kasan, slub: fix handling of kasan_slab_free hook")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Bharata B Rao <bharata@linux.ibm.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Faiyaz Mohammed <faiyazm@codeaurora.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1629,7 +1629,8 @@ static __always_inline bool slab_free_ho
+ }
+
+ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
+- void **head, void **tail)
++ void **head, void **tail,
++ int *cnt)
+ {
+
+ void *object;
+@@ -1656,6 +1657,12 @@ static inline bool slab_free_freelist_ho
+ *head = object;
+ if (!*tail)
+ *tail = object;
++ } else {
++ /*
++ * Adjust the reconstructed freelist depth
++ * accordingly if object's reuse is delayed.
++ */
++ --(*cnt);
+ }
+ } while (object != old_tail);
+
+@@ -3210,7 +3217,7 @@ static __always_inline void slab_free(st
+ * With KASAN enabled slab_free_freelist_hook modifies the freelist
+ * to remove objects, whose reuse must be delayed.
+ */
+- if (slab_free_freelist_hook(s, &head, &tail))
++ if (slab_free_freelist_hook(s, &head, &tail, &cnt))
+ do_slab_free(s, page, head, tail, cnt, addr);
+ }
+
--- /dev/null
+From 9037c57681d25e4dcc442d940d6dbe24dd31f461 Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Mon, 18 Oct 2021 15:15:59 -0700
+Subject: mm, slub: fix potential memoryleak in kmem_cache_open()
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 9037c57681d25e4dcc442d940d6dbe24dd31f461 upstream.
+
+In the error path, the random_seq of the slub cache might be leaked. Fix this
+by using __kmem_cache_release() to release all the relevant resources.
+
+Link: https://lkml.kernel.org/r/20210916123920.48704-4-linmiaohe@huawei.com
+Fixes: 210e7a43fa90 ("mm: SLUB freelist randomization")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Bharata B Rao <bharata@linux.ibm.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Faiyaz Mohammed <faiyazm@codeaurora.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3935,8 +3935,8 @@ static int kmem_cache_open(struct kmem_c
+ if (alloc_kmem_cache_cpus(s))
+ return 0;
+
+- free_kmem_cache_nodes(s);
+ error:
++ __kmem_cache_release(s);
+ return -EINVAL;
+ }
+
--- /dev/null
+From 67823a544414def2a36c212abadb55b23bcda00c Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Mon, 18 Oct 2021 15:16:02 -0700
+Subject: mm, slub: fix potential use-after-free in slab_debugfs_fops
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 67823a544414def2a36c212abadb55b23bcda00c upstream.
+
+When sysfs_slab_add() fails, we shouldn't call debugfs_slab_add() for s
+because s will be freed soon, and slab_debugfs_fops will use s later,
+leading to a use-after-free.
+
+Link: https://lkml.kernel.org/r/20210916123920.48704-5-linmiaohe@huawei.com
+Fixes: 64dd68497be7 ("mm: slub: move sysfs slab alloc/free interfaces to debugfs")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Bharata B Rao <bharata@linux.ibm.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Faiyaz Mohammed <faiyazm@codeaurora.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4604,13 +4604,15 @@ int __kmem_cache_create(struct kmem_cach
+ return 0;
+
+ err = sysfs_slab_add(s);
+- if (err)
++ if (err) {
+ __kmem_cache_release(s);
++ return err;
++ }
+
+ if (s->flags & SLAB_STORE_USER)
+ debugfs_slab_add(s);
+
+- return err;
++ return 0;
+ }
+
+ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
--- /dev/null
+From 496c5fe25c377ddb7815c4ce8ecfb676f051e9b6 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Wed, 20 Oct 2021 20:48:26 +1100
+Subject: powerpc/idle: Don't corrupt back chain when going idle
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 496c5fe25c377ddb7815c4ce8ecfb676f051e9b6 upstream.
+
+In isa206_idle_insn_mayloss() we store various registers into the stack
+red zone, which is allowed.
+
+However inside the IDLE_STATE_ENTER_SEQ_NORET macro we save r2 again,
+to 0(r1), which corrupts the stack back chain.
+
+We used to do the same in isa206_idle_insn_mayloss() itself, but we
+fixed that in 73287caa9210 ("powerpc64/idle: Fix SP offsets when saving
+GPRs"), however we missed that the macro also corrupts the back chain.
+
+Corrupting the back chain is bad for debuggability but doesn't
+necessarily cause a bug.
+
+However we recently changed the stack handling in some KVM code, and it
+now relies on the stack back chain being valid when it returns. The
+corruption causes that code to return with r1 pointing somewhere in
+kernel data; at some point LR is restored from the stack and we branch
+to NULL or somewhere else invalid.
+
+This only affects Power8 hosts running KVM guests with dynamic_mt_modes
+enabled (which it is by default).
+
+The fixes tag below points to the commit that changed the KVM stack
+handling, exposing this bug. The actual corruption of the back chain has
+always existed since 948cf67c4726 ("powerpc: Add NAP mode support on
+Power7 in HV mode").
+
+Fixes: 9b4416c5095c ("KVM: PPC: Book3S HV: Fix stack handling in idle_kvm_start_guest()")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20211020094826.3222052-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/idle_book3s.S | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/kernel/idle_book3s.S
++++ b/arch/powerpc/kernel/idle_book3s.S
+@@ -126,14 +126,16 @@ _GLOBAL(idle_return_gpr_loss)
+ /*
+ * This is the sequence required to execute idle instructions, as
+ * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
+- *
+- * The 0(r1) slot is used to save r2 in isa206, so use that here.
++ * We have to store a GPR somewhere, ptesync, then reload it, and create
++ * a false dependency on the result of the load. It doesn't matter which
++ * GPR we store, or where we store it. We have already stored r2 to the
++ * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
+ */
+ #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
+ /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
+- std r2,0(r1); \
++ std r2,-8(r1); \
+ ptesync; \
+- ld r2,0(r1); \
++ ld r2,-8(r1); \
+ 236: cmpd cr0,r2,r2; \
+ bne 236b; \
+ IDLE_INST; \
ucounts-pair-inc_rlimit_ucounts-with-dec_rlimit_ucoutns-in-commit_creds.patch
ucounts-proper-error-handling-in-set_cred_ucounts.patch
ucounts-fix-signal-ucount-refcounting.patch
+kvm-ppc-book3s-hv-fix-stack-handling-in-idle_kvm_start_guest.patch
+kvm-ppc-book3s-hv-make-idle_kvm_start_guest-return-0-if-it-went-to-guest.patch
+powerpc-idle-don-t-corrupt-back-chain-when-going-idle.patch
+mm-slub-fix-mismatch-between-reconstructed-freelist-depth-and-cnt.patch
+mm-slub-fix-potential-memoryleak-in-kmem_cache_open.patch
+mm-slub-fix-potential-use-after-free-in-slab_debugfs_fops.patch
+mm-slub-fix-incorrect-memcg-slab-count-for-bulk-free.patch
+kvm-nvmx-promptly-process-interrupts-delivered-while-in-guest-mode.patch
+kvm-sev-flush-cache-on-non-coherent-systems-before-receive_update_data.patch
+kvm-sev-es-rename-guest_ins_data-to-sev_pio_data.patch
+kvm-sev-es-clean-up-kvm_sev_es_ins-outs.patch
+kvm-sev-es-keep-ins-functions-together.patch
+kvm-sev-es-fix-length-of-string-i-o.patch
+kvm-sev-es-go-over-the-sev_pio_data-buffer-in-multiple-passes-if-needed.patch
+kvm-sev-es-reduce-ghcb_sa_len-to-32-bits.patch
+kvm-x86-leave-vcpu-arch.pio.count-alone-in-emulator_pio_in_out.patch
+kvm-x86-check-for-interrupts-before-deciding-whether-to-exit-the-fast-path.patch
+kvm-x86-split-the-two-parts-of-emulator_pio_in.patch
+kvm-x86-remove-unnecessary-arguments-from-complete_emulator_pio_in.patch