--- /dev/null
+From 33b5c38852b29736f3b472dd095c9a18ec22746f Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Jun 2017 19:08:35 +0100
+Subject: arm: KVM: Allow unaligned accesses at HYP
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 33b5c38852b29736f3b472dd095c9a18ec22746f upstream.
+
+We currently have the HSCTLR.A bit set, trapping unaligned accesses
+at HYP, but we're not really prepared to deal with it.
+
+Since the rest of the kernel is pretty happy about that, let's follow
+its example and set HSCTLR.A to zero. Modern CPUs don't really care.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/init.S | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/kvm/init.S
++++ b/arch/arm/kvm/init.S
+@@ -110,7 +110,6 @@ __do_hyp_init:
+ @ - Write permission implies XN: disabled
+ @ - Instruction cache: enabled
+ @ - Data/Unified cache: enabled
+- @ - Memory alignment checks: enabled
+ @ - MMU: enabled (this code must be run from an identity mapping)
+ mrc p15, 4, r0, c1, c0, 0 @ HSCR
+ ldr r2, =HSCTLR_MASK
+@@ -118,8 +117,8 @@ __do_hyp_init:
+ mrc p15, 0, r1, c1, c0, 0 @ SCTLR
+ ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
+ and r1, r1, r2
+- ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
+- THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
++ ARM( ldr r2, =(HSCTLR_M) )
++ THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
+ orr r1, r1, r2
+ orr r0, r0, r1
+ isb
--- /dev/null
+From bbaf0e2b1c1b4f88abd6ef49576f0efb1734eae5 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 26 Apr 2017 16:56:26 +0200
+Subject: kvm: async_pf: fix rcu_irq_enter() with irqs enabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit bbaf0e2b1c1b4f88abd6ef49576f0efb1734eae5 upstream.
+
+native_safe_halt enables interrupts, and you just shouldn't
+call rcu_irq_enter() with interrupts enabled. Reorder the
+call with the following local_irq_disable() to respect the
+invariant.
+
+Reported-by: Ross Zwisler <ross.zwisler@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Tested-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kvm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
+ */
+ rcu_irq_exit();
+ native_safe_halt();
+- rcu_irq_enter();
+ local_irq_disable();
++ rcu_irq_enter();
+ }
+ }
+ if (!n.halted)
--- /dev/null
+From a3641631d14571242eec0d30c9faa786cbf52d44 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Thu, 8 Jun 2017 01:22:07 -0700
+Subject: KVM: cpuid: Fix read/write out-of-bounds vulnerability in cpuid emulation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+
+commit a3641631d14571242eec0d30c9faa786cbf52d44 upstream.
+
+If "i" is the last element in the vcpu->arch.cpuid_entries[] array, the
+vulnerability can potentially be exploited: this causes an out-of-bounds
+read and write. Luckily, the effect is small:
+
+ /* when no next entry is found, the current entry[i] is reselected */
+ for (j = i + 1; ; j = (j + 1) % nent) {
+ struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+ if (ej->function == e->function) {
+
+It reads ej->maxphyaddr, which is user controlled. However...
+
+ ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+After cpuid_entries there is
+
+ int maxphyaddr;
+ struct x86_emulate_ctxt emulate_ctxt; /* 16-byte aligned */
+
+So we have:
+
+- cpuid_entries at offset 1B50 (6992)
+- maxphyaddr at offset 27D0 (6992 + 3200 = 10192)
+- padding at 27D4...27DF
+- emulate_ctxt at 27E0
+
+And it writes in the padding. Pfew, writing the ops field of emulate_ctxt
+would have been much worse.
+
+This patch fixes it by modding the index to avoid the out-of-bounds
+access. In the worst case, i == j and ej->function == e->function,
+so the loop can bail out.
+
+Reported-by: Moguofang <moguofang@huawei.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Guofang Mo <moguofang@huawei.com>
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -691,18 +691,20 @@ out:
+ static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
+ {
+ struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
+- int j, nent = vcpu->arch.cpuid_nent;
++ struct kvm_cpuid_entry2 *ej;
++ int j = i;
++ int nent = vcpu->arch.cpuid_nent;
+
+ e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
+ /* when no next entry is found, the current entry[i] is reselected */
+- for (j = i + 1; ; j = (j + 1) % nent) {
+- struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+- if (ej->function == e->function) {
+- ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+- return j;
+- }
+- }
+- return 0; /* silence gcc, even though control never reaches here */
++ do {
++ j = (j + 1) % nent;
++ ej = &vcpu->arch.cpuid_entries[j];
++ } while (ej->function != e->function);
++
++ ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
++
++ return j;
+ }
+
+ /* find an entry with matching function, matching index (if needed), and that
keys-fix-freeing-uninitialized-memory-in-key_update.patch
crypto-gcm-wait-for-crypto-op-not-signal-safe.patch
nfsd4-fix-null-dereference-on-replay.patch
+kvm-async_pf-fix-rcu_irq_enter-with-irqs-enabled.patch
+kvm-cpuid-fix-read-write-out-of-bounds-vulnerability-in-cpuid-emulation.patch
+arm-kvm-allow-unaligned-accesses-at-hyp.patch