--- /dev/null
+From 55680890ea78be0df5e1384989f1be835043c084 Mon Sep 17 00:00:00 2001
+From: Christian Borntraeger <borntraeger@de.ibm.com>
+Date: Fri, 31 Jan 2020 05:02:00 -0500
+Subject: KVM: s390: do not clobber registers during guest reset/store status
+
+From: Christian Borntraeger <borntraeger@de.ibm.com>
+
+commit 55680890ea78be0df5e1384989f1be835043c084 upstream.
+
+The initial CPU reset clobbers the userspace fpc, and the store status
+ioctl clobbers the guest acrs and fprs. As these calls are only done via
+ioctl (and not via vcpu_run), no CPU context is loaded, so we can (and
+must) act directly on the sync regs, not on the thread context.
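+
+As a minimal sketch of the idea (not the literal diff below): in the
+ioctl path the fix must target the sync regs that the next KVM_RUN will
+load, rather than the host thread's lazily managed FPU context:
+
+    /* ioctl path: no vCPU context is loaded */
+    vcpu->run->s.regs.fpc = 0;    /* right: guest fpc reset via sync regs */
+    current->thread.fpu.fpc = 0;  /* wrong here: clobbers userspace's fpc */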
+
+Cc: stable@kernel.org
+Fixes: e1788bb995be ("KVM: s390: handle floating point registers in the run ioctl not in vcpu_put/load")
+Fixes: 31d8b8d41a7e ("KVM: s390: handle access registers in the run ioctl not in vcpu_put/load")
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
+Link: https://lore.kernel.org/r/20200131100205.74720-2-frankja@linux.ibm.com
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/kvm-s390.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -2564,9 +2564,7 @@ static void kvm_s390_vcpu_initial_reset(
+ vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
+ CR14_UNUSED_33 |
+ CR14_EXTERNAL_DAMAGE_SUBMASK;
+- /* make sure the new fpc will be lazily loaded */
+- save_fpu_regs();
+- current->thread.fpu.fpc = 0;
++ vcpu->run->s.regs.fpc = 0;
+ vcpu->arch.sie_block->gbea = 1;
+ vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+@@ -3994,7 +3992,7 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+ switch (ioctl) {
+ case KVM_S390_STORE_STATUS:
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+- r = kvm_s390_vcpu_store_status(vcpu, arg);
++ r = kvm_s390_store_status_unloaded(vcpu, arg);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ break;
+ case KVM_S390_SET_INITIAL_PSW: {
--- /dev/null
+From b11306b53b2540c6ba068c4deddb6a17d9f8d95b Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 10 Dec 2019 14:44:13 -0800
+Subject: KVM: x86: Don't let userspace set host-reserved cr4 bits
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit b11306b53b2540c6ba068c4deddb6a17d9f8d95b upstream.
+
+Calculate the host-reserved cr4 bits at runtime based on the system's
+capabilities (using logic similar to __do_cpuid_func()), and use the
+dynamically generated mask for the reserved bit check in kvm_set_cr4()
+instead of the static CR4_RESERVED_BITS define. This prevents
+userspace from "enabling" features in cr4 that are not supported by the
+system, e.g. by ignoring KVM_GET_SUPPORTED_CPUID and specifying a bogus
+CPUID for the vCPU.
+
+Allowing userspace to set unsupported bits in cr4 can lead to a variety
+of undesirable behavior, e.g. failed VM-Enter, and in general increases
+KVM's attack surface. A crafty userspace can even abuse CR4.LA57 to
+induce an unchecked #GP on a WRMSR.
+
+On a platform without LA57 support:
+
+ KVM_SET_CPUID2 // CPUID_7_0_ECX.LA57 = 1
+ KVM_SET_SREGS // CR4.LA57 = 1
+ KVM_SET_MSRS // KERNEL_GS_BASE = 0x0004000000000000
+ KVM_RUN
+
+leads to a #GP when writing KERNEL_GS_BASE into hardware:
+
+ unchecked MSR access error: WRMSR to 0xc0000102 (tried to write 0x0004000000000000)
+ at rIP: 0xffffffffa00f239a (vmx_prepare_switch_to_guest+0x10a/0x1d0 [kvm_intel])
+ Call Trace:
+ kvm_arch_vcpu_ioctl_run+0x671/0x1c70 [kvm]
+ kvm_vcpu_ioctl+0x36b/0x5d0 [kvm]
+ do_vfs_ioctl+0xa1/0x620
+ ksys_ioctl+0x66/0x70
+ __x64_sys_ioctl+0x16/0x20
+ do_syscall_64+0x4c/0x170
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x7fc08133bf47
+
+Note, the above sequence fails VM-Enter due to invalid guest state.
+Userspace can allow VM-Enter to succeed (after the WRMSR #GP) by adding
+a KVM_SET_SREGS w/ CR4.LA57=0 after KVM_SET_MSRS, in which case KVM will
+technically leak the host's KERNEL_GS_BASE into the guest. But, as
+KERNEL_GS_BASE is a userspace-defined value/address, the leak is largely
+benign as a malicious userspace would simply be exposing its own data to
+the guest, and attacking a benevolent userspace would require multiple
+bugs in the userspace VMM.
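+
+A hedged userspace-side sketch of the new behavior (vcpu_fd and the
+surrounding VM setup are assumed, error handling omitted):
+
+    struct kvm_sregs sregs;
+
+    ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
+    sregs.cr4 |= (1ULL << 12);            /* CR4.LA57, unsupported on host */
+    /* kvm_valid_cr4() now finds the bit in cr4_reserved_bits, so the
+     * ioctl fails with -EINVAL instead of accepting the bogus cr4. */
+    ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);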
+
+Cc: stable@vger.kernel.org
+Cc: Jun Nakajima <jun.nakajima@intel.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 35 ++++++++++++++++++++++++++++++++++-
+ 1 file changed, 34 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -92,6 +92,8 @@ u64 __read_mostly efer_reserved_bits = ~
+ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
+ #endif
+
++static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
++
+ #define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
+ #define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
+
+@@ -793,9 +795,38 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u
+ }
+ EXPORT_SYMBOL_GPL(kvm_set_xcr);
+
++static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
++{
++ u64 reserved_bits = CR4_RESERVED_BITS;
++
++ if (!cpu_has(c, X86_FEATURE_XSAVE))
++ reserved_bits |= X86_CR4_OSXSAVE;
++
++ if (!cpu_has(c, X86_FEATURE_SMEP))
++ reserved_bits |= X86_CR4_SMEP;
++
++ if (!cpu_has(c, X86_FEATURE_SMAP))
++ reserved_bits |= X86_CR4_SMAP;
++
++ if (!cpu_has(c, X86_FEATURE_FSGSBASE))
++ reserved_bits |= X86_CR4_FSGSBASE;
++
++ if (!cpu_has(c, X86_FEATURE_PKU))
++ reserved_bits |= X86_CR4_PKE;
++
++ if (!cpu_has(c, X86_FEATURE_LA57) &&
++ !(cpuid_ecx(0x7) & bit(X86_FEATURE_LA57)))
++ reserved_bits |= X86_CR4_LA57;
++
++ if (!cpu_has(c, X86_FEATURE_UMIP) && !kvm_x86_ops->umip_emulated())
++ reserved_bits |= X86_CR4_UMIP;
++
++ return reserved_bits;
++}
++
+ static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ {
+- if (cr4 & CR4_RESERVED_BITS)
++ if (cr4 & cr4_reserved_bits)
+ return -EINVAL;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
+@@ -8864,6 +8895,8 @@ int kvm_arch_hardware_setup(void)
+ if (r != 0)
+ return r;
+
++ cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data);
++
+ if (kvm_has_tsc_control) {
+ /*
+ * Make sure the user can only configure tsc_khz values that
--- /dev/null
+From 16be9ddea268ad841457a59109963fff8c9de38d Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 18 Dec 2019 13:54:48 -0800
+Subject: KVM: x86: Free wbinvd_dirty_mask if vCPU creation fails
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 16be9ddea268ad841457a59109963fff8c9de38d upstream.
+
+Free the vCPU's wbinvd_dirty_mask if vCPU creation fails after
+kvm_arch_vcpu_init(), e.g. when installing the vCPU's file descriptor.
+Do the freeing by calling kvm_arch_vcpu_free() instead of open coding
+the freeing. This adds a likely superfluous, but ultimately harmless,
+call to kvmclock_reset(), which only clears vcpu->arch.pv_time_enabled.
+Using kvm_arch_vcpu_free() allows for additional cleanup in the future.
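+
+As a sketch of what the changelog describes (the exact body of
+kvm_arch_vcpu_free() may differ by tree), the destroy path now ends up
+doing:
+
+    kvmclock_reset(vcpu);                           /* superfluous, harmless */
+    free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); /* the mask that leaked */
+    kvm_x86_ops->vcpu_free(vcpu);                   /* previously open coded */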
+
+Fixes: f5f48ee15c2ee ("KVM: VMX: Execute WBINVD to keep data consistency with assigned devices")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8702,7 +8702,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vc
+ kvm_mmu_unload(vcpu);
+ vcpu_put(vcpu);
+
+- kvm_x86_ops->vcpu_free(vcpu);
++ kvm_arch_vcpu_free(vcpu);
+ }
+
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 kvm-x86-fix-potential-put_fpu-w-o-load_fpu-on-mpx-platform.patch
 kvm-ppc-book3s-hv-uninit-vcpu-if-vcore-creation-fails.patch
 kvm-ppc-book3s-pr-free-shared-page-if-mmu-initialization-fails.patch
+x86-kvm-be-careful-not-to-clear-kvm_vcpu_flush_tlb-bit.patch
+kvm-x86-don-t-let-userspace-set-host-reserved-cr4-bits.patch
+kvm-x86-free-wbinvd_dirty_mask-if-vcpu-creation-fails.patch
+kvm-s390-do-not-clobber-registers-during-guest-reset-store-status.patch
--- /dev/null
+From 8c6de56a42e0c657955e12b882a81ef07d1d073e Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Wed, 30 Oct 2019 19:01:31 +0000
+Subject: x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
+
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+
+commit 8c6de56a42e0c657955e12b882a81ef07d1d073e upstream.
+
+kvm_steal_time_set_preempted() may accidentally clear the KVM_VCPU_FLUSH_TLB
+bit if it is called more than once while the vCPU is preempted.
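+
+A hedged reconstruction of the lost update (the guest requests a
+deferred remote TLB flush by setting KVM_VCPU_FLUSH_TLB in the same
+steal-time field via cmpxchg):
+
+    kvm_steal_time_set_preempted(vcpu);  /* writes KVM_VCPU_PREEMPTED    */
+    /* guest: cmpxchg sets KVM_VCPU_FLUSH_TLB to defer a remote flush    */
+    kvm_steal_time_set_preempted(vcpu);  /* second call rewrites the
+                                            field and drops FLUSH_TLB    */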
+
+This is part of CVE-2019-3016.
+
+(This bug was also independently discovered by Jim Mattson
+<jmattson@google.com>)
+
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3244,6 +3244,9 @@ static void kvm_steal_time_set_preempted
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
++ if (vcpu->arch.st.steal.preempted)
++ return;
++
+ vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+
+ kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,