--- /dev/null
+From c3058d5da2222629bc2223c488a4512b59bb4baf Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@linaro.org>
+Date: Fri, 10 Oct 2014 12:14:29 +0200
+Subject: arm/arm64: KVM: Ensure memslots are within KVM_PHYS_SIZE
+
+From: Christoffer Dall <christoffer.dall@linaro.org>
+
+commit c3058d5da2222629bc2223c488a4512b59bb4baf upstream.
+
+[Since we don't backport commit 8eef912 (arm/arm64: KVM: map MMIO regions
+at creation time) for linux-3.14.y, the context of this patch is
+different, while the change itself is the same.]
+
+When creating or moving a memslot, make sure the IPA space is within the
+addressable range of the guest. Otherwise, user space can create too
+large a memslot and KVM would try to access potentially unallocated page
+table entries when inserting entries in the Stage-2 page tables.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kvm/mmu.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -926,6 +926,9 @@ int kvm_handle_guest_abort(struct kvm_vc
+
+ memslot = gfn_to_memslot(vcpu->kvm, gfn);
+
++ /* Userspace should not be able to register out-of-bounds IPAs */
++ VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
++
+ ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
+ if (ret == 0)
+ ret = 1;
+@@ -1150,6 +1153,14 @@ int kvm_arch_prepare_memory_region(struc
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
+ {
++ /*
++ * Prevent userspace from creating a memory region outside of the IPA
++	 * space addressable by the KVM guest.
++ */
++ if (memslot->base_gfn + memslot->npages >=
++ (KVM_PHYS_SIZE >> PAGE_SHIFT))
++ return -EFAULT;
++
+ return 0;
+ }
+
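[For reference, the arithmetic behind the check above as a minimal
userspace sketch; it assumes the 40-bit IPA, 4K-page configuration this
tree uses, and the helper name is illustrative, not from the patch:

	#include <stdbool.h>
	#include <stdint.h>

	#define KVM_PHYS_SHIFT	40
	#define KVM_PHYS_SIZE	(1ULL << KVM_PHYS_SHIFT)
	#define PAGE_SHIFT	12

	/* Mirrors the new check: a slot is acceptable only if its last
	 * frame lies strictly below the guest IPA ceiling. */
	static bool memslot_in_range(uint64_t base_gfn, uint64_t npages)
	{
		return base_gfn + npages < (KVM_PHYS_SIZE >> PAGE_SHIFT);
	}

With these values KVM_PHYS_SIZE >> PAGE_SHIFT is 1ULL << 28, i.e. the
1TB guest physical space holds 268435456 frames, and any slot reaching
frame 0x10000000 or beyond is rejected with -EFAULT.]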
--- /dev/null
+From 37b544087ef3f65ca68465ba39291a07195dac26 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 17 Sep 2014 14:56:17 -0700
+Subject: arm/arm64: KVM: fix potential NULL dereference in
+ user_mem_abort()
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 37b544087ef3f65ca68465ba39291a07195dac26 upstream.
+
+Handle the potential NULL return value of find_vma_intersection()
+before dereferencing it.
+
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -778,6 +778,12 @@ static int user_mem_abort(struct kvm_vcp
+ /* Let's check if we will get back a huge page backed by hugetlbfs */
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma_intersection(current->mm, hva, hva + 1);
++ if (unlikely(!vma)) {
++ kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
++ up_read(&current->mm->mmap_sem);
++ return -EFAULT;
++ }
++
+ if (is_vm_hugetlb_page(vma)) {
+ hugetlb = true;
+ gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
--- /dev/null
+From dbff124e29fa24aff9705b354b5f4648cd96e0bb Mon Sep 17 00:00:00 2001
+From: Joel Schopp <joel.schopp@amd.com>
+Date: Wed, 9 Jul 2014 11:17:04 -0500
+Subject: arm/arm64: KVM: Fix VTTBR_BADDR_MASK and pgd alloc
+
+From: Joel Schopp <joel.schopp@amd.com>
+
+commit dbff124e29fa24aff9705b354b5f4648cd96e0bb upstream.
+
+The current aarch64 calculation for VTTBR_BADDR_MASK masks only 39 bits
+and not all the bits in the PA range. This is clearly a bug that
+manifests itself on systems that allocate memory in the higher address
+space range.
+
+ [ Modified from Joel's original patch to be based on PHYS_MASK_SHIFT
+ instead of a hard-coded value and to move the alignment check of the
+ allocation to mmu.c. Also added a comment explaining why we hardcode
+ the IPA range and changed the stage-2 pgd allocation to be based on
+ the 40 bit IPA range instead of the maximum possible 48 bit PA range.
+ - Christoffer ]
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Joel Schopp <joel.schopp@amd.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/arm.c | 4 ++--
+ arch/arm64/include/asm/kvm_arm.h | 13 ++++++++++++-
+ arch/arm64/include/asm/kvm_mmu.h | 5 ++---
+ 3 files changed, 16 insertions(+), 6 deletions(-)
+
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -427,9 +427,9 @@ static void update_vttbr(struct kvm *kvm
+
+ /* update vttbr to be used with the new vmid */
+ pgd_phys = virt_to_phys(kvm->arch.pgd);
++ BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
+ vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+- kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
+- kvm->arch.vttbr |= vmid;
++ kvm->arch.vttbr = pgd_phys | vmid;
+
+ spin_unlock(&kvm_vmid_lock);
+ }
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -122,6 +122,17 @@
+ #define VTCR_EL2_T0SZ_MASK 0x3f
+ #define VTCR_EL2_T0SZ_40B 24
+
++/*
++ * We configure the Stage-2 page tables to always restrict the IPA space to be
++ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
++ * not known to exist and will break with this configuration.
++ *
++ * Note that when using 4K pages, we concatenate two first level page tables
++ * together.
++ *
++ * The magic numbers used for VTTBR_X in this patch can be found in Tables
++ * D4-23 and D4-25 in ARM DDI 0487A.b.
++ */
+ #ifdef CONFIG_ARM64_64K_PAGES
+ /*
+ * Stage2 translation configuration:
+@@ -151,7 +162,7 @@
+ #endif
+
+ #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
++#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+ #define VTTBR_VMID_SHIFT (48LLU)
+ #define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
+
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -59,10 +59,9 @@
+ #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
+
+ /*
+- * Align KVM with the kernel's view of physical memory. Should be
+- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
++ * We currently only support a 40bit IPA.
+ */
+-#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
++#define KVM_PHYS_SHIFT (40)
+ #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
+ #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
+
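[To see the bug being fixed, the mask arithmetic for the 4K-page case,
where VTTBR_X works out to 13 in this tree, as a standalone,
illustrative-only program:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t old_mask = ((1ULL << (40 - 13)) - 1) << 12; /* bits [38:12] */
		uint64_t new_mask = ((1ULL << (48 - 13)) - 1) << 12; /* bits [46:12] */
		uint64_t pgd_phys = 0x8000000000ULL;                 /* bit 39 set */

		/* The old mask silently drops bit 39: prints 0 */
		printf("old: %#llx\n", (unsigned long long)(pgd_phys & old_mask));
		/* The new mask preserves it: prints 0x8000000000 */
		printf("new: %#llx\n", (unsigned long long)(pgd_phys & new_mask));
		return 0;
	}

This is also why the masking in update_vttbr() became a BUG_ON: with
the widened mask, any bits still falling outside it indicate a broken
pgd allocation rather than something to silently strip.]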
--- /dev/null
+From 6b50f54064a02b77a7b990032b80234fee59bcd6 Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@linaro.org>
+Date: Thu, 6 Nov 2014 11:47:39 +0000
+Subject: arm/arm64: KVM: vgic: Fix error code in kvm_vgic_create()
+
+From: Christoffer Dall <christoffer.dall@linaro.org>
+
+commit 6b50f54064a02b77a7b990032b80234fee59bcd6 upstream.
+
+If we detect that another vCPU is running, we just exit and return 0 as
+if we successfully created the VGIC, but the VGIC wouldn't actually be
+created.
+
+This shouldn't break in-kernel behavior because the kernel will not
+observe the failed attempt to create the VGIC, but userspace could be
+rightfully confused.
+
+Cc: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1611,7 +1611,7 @@ out:
+
+ int kvm_vgic_create(struct kvm *kvm)
+ {
+- int i, vcpu_lock_idx = -1, ret = 0;
++ int i, vcpu_lock_idx = -1, ret;
+ struct kvm_vcpu *vcpu;
+
+ mutex_lock(&kvm->lock);
+@@ -1626,6 +1626,7 @@ int kvm_vgic_create(struct kvm *kvm)
+ * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
+ * that no other VCPUs are run while we create the vgic.
+ */
++ ret = -EBUSY;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!mutex_trylock(&vcpu->mutex))
+ goto out_unlock;
+@@ -1633,11 +1634,10 @@ int kvm_vgic_create(struct kvm *kvm)
+ }
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+- if (vcpu->arch.has_run_once) {
+- ret = -EBUSY;
++ if (vcpu->arch.has_run_once)
+ goto out_unlock;
+- }
+ }
++ ret = 0;
+
+ spin_lock_init(&kvm->arch.vgic.lock);
+ kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
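[The shape of the fix, reduced to a compilable sketch; the stub names
are hypothetical and stand in for the vcpu locking done above:

	#include <errno.h>
	#include <stdbool.h>

	static bool lock_all_vcpus(void)   { return true; }
	static void unlock_all_vcpus(void) { }
	static bool any_vcpu_has_run(void) { return false; }

	static int create_vgic(void)
	{
		int ret = -EBUSY;	/* set once, covers every exit below */

		if (!lock_all_vcpus())
			return ret;
		if (any_vcpu_has_run())
			goto out_unlock;

		ret = 0;		/* nothing past this point can fail */
		/* ... the actual vgic setup would go here ... */
	out_unlock:
		unlock_all_vcpus();
		return ret;
	}

The point is that 0 is written exactly once, after every check has
passed, so no early exit can return success by accident.]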
--- /dev/null
+From 37a34ac1d4775aafbc73b9db53c7daebbbc67e6a Mon Sep 17 00:00:00 2001
+From: Vladimir Murzin <vladimir.murzin@arm.com>
+Date: Mon, 22 Sep 2014 15:52:48 +0100
+Subject: arm: kvm: fix CPU hotplug
+
+From: Vladimir Murzin <vladimir.murzin@arm.com>
+
+commit 37a34ac1d4775aafbc73b9db53c7daebbbc67e6a upstream.
+
+On some platforms with no power management capabilities, the hotplug
+implementation is allowed to return from a smp_ops.cpu_die() call as a
+function return. Upon a CPU onlining event, the KVM CPU notifier tries
+to reinstall the hyp stub, which fails on platforms where no reset took
+place following a hotplug event, with the message:
+
+CPU1: smp_ops.cpu_die() returned, trying to resuscitate
+CPU1: Booted secondary processor
+Kernel panic - not syncing: unexpected prefetch abort in Hyp mode at: 0x80409540
+unexpected data abort in Hyp mode at: 0x80401fe8
+unexpected HVC/SVC trap in Hyp mode at: 0x805c6170
+
+since KVM code is trying to reinstall the stub on a system where it is
+already configured.
+
+To prevent this issue, this patch adds a check in the KVM hotplug
+notifier that detects whether the HYP stub really needs re-installing
+when a CPU is onlined, and skips the installation call if the stub is
+already in place, which means that the CPU has not been reset.
+
+Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/arm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -825,7 +825,8 @@ static int hyp_init_cpu_notify(struct no
+ switch (action) {
+ case CPU_STARTING:
+ case CPU_STARTING_FROZEN:
+- cpu_init_hyp_mode(NULL);
++ if (__hyp_get_vectors() == hyp_default_vectors)
++ cpu_init_hyp_mode(NULL);
+ break;
+ }
+
--- /dev/null
+From 3d08c629244257473450a8ba17cb8184b91e68f8 Mon Sep 17 00:00:00 2001
+From: Steve Capper <steve.capper@linaro.org>
+Date: Tue, 14 Oct 2014 15:02:15 +0100
+Subject: arm: kvm: STRICT_MM_TYPECHECKS fix for user_mem_abort
+
+From: Steve Capper <steve.capper@linaro.org>
+
+commit 3d08c629244257473450a8ba17cb8184b91e68f8 upstream.
+
+Commit:
+b886576 ARM: KVM: user_mem_abort: support stage 2 MMIO page mapping
+
+introduced some code in user_mem_abort that failed to compile if
+STRICT_MM_TYPECHECKS was enabled.
+
+This patch fixes up the failing comparison.
+
+Signed-off-by: Steve Capper <steve.capper@linaro.org>
+Reviewed-by: Kim Phillips <kim.phillips@linaro.org>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -850,7 +850,7 @@ static int user_mem_abort(struct kvm_vcp
+ }
+ coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+ ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
+- mem_type == PAGE_S2_DEVICE);
++ pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
+ }
+
+
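[Why the original comparison breaks: with STRICT_MM_TYPECHECKS the
protection type becomes a single-member struct, and C defines no == for
structs. A simplified sketch of the two variants, with uint64_t standing
in for the kernel's pteval_t:

	#include <stdint.h>

	#ifdef STRICT_MM_TYPECHECKS
	typedef struct { uint64_t pgprot; } pgprot_t;	/* no ==, by design */
	#define pgprot_val(x)	((x).pgprot)
	#else
	typedef uint64_t pgprot_t;			/* plain integer, == works */
	#define pgprot_val(x)	(x)
	#endif

	/* mem_type == PAGE_S2_DEVICE only compiles in the integer case;
	 * pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE) compiles in
	 * both, which is what the one-line fix above switches to. */
]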
--- /dev/null
+From 286fb1cc32b11c18da3573a8c8c37a4f9da16e30 Mon Sep 17 00:00:00 2001
+From: Geoff Levand <geoff@infradead.org>
+Date: Fri, 31 Oct 2014 23:06:47 +0000
+Subject: arm64/kvm: Fix assembler compatibility of macros
+
+From: Geoff Levand <geoff@infradead.org>
+
+commit 286fb1cc32b11c18da3573a8c8c37a4f9da16e30 upstream.
+
+Some of the macros defined in kvm_arm.h are useful in assembly files, but are
+not compatible with the assembler. Change any C language integer constant
+definitions using appended U, UL, or ULL to the UL() preprocessor macro. Also,
+add a preprocessor include of the asm/memory.h file which defines the UL()
+macro.
+
+Fixes build errors like these when using kvm_arm.h in assembly
+source files:
+
+ Error: unexpected characters following instruction at operand 3 -- `and x0,x1,#((1U<<25)-1)'
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Geoff Levand <geoff@infradead.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/kvm_arm.h | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -18,6 +18,7 @@
+ #ifndef __ARM64_KVM_ARM_H__
+ #define __ARM64_KVM_ARM_H__
+
++#include <asm/memory.h>
+ #include <asm/types.h>
+
+ /* Hyp Configuration Register (HCR) bits */
+@@ -162,9 +163,9 @@
+ #endif
+
+ #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+-#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+-#define VTTBR_VMID_SHIFT (48LLU)
+-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
++#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
++#define VTTBR_VMID_SHIFT (UL(48))
++#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
+
+ /* Hyp System Trap Register */
+ #define HSTR_EL2_TTEE (1 << 16)
+@@ -187,13 +188,13 @@
+
+ /* Exception Syndrome Register (ESR) bits */
+ #define ESR_EL2_EC_SHIFT (26)
+-#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
+-#define ESR_EL2_IL (1U << 25)
++#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
++#define ESR_EL2_IL (UL(1) << 25)
+ #define ESR_EL2_ISS (ESR_EL2_IL - 1)
+ #define ESR_EL2_ISV_SHIFT (24)
+-#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
++#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
+ #define ESR_EL2_SAS_SHIFT (22)
+-#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
++#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
+ #define ESR_EL2_SSE (1 << 21)
+ #define ESR_EL2_SRT_SHIFT (16)
+ #define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
+@@ -207,16 +208,16 @@
+ #define ESR_EL2_FSC_TYPE (0x3c)
+
+ #define ESR_EL2_CV_SHIFT (24)
+-#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
++#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
+ #define ESR_EL2_COND_SHIFT (20)
+-#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
++#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
+
+
+ #define FSC_FAULT (0x04)
+ #define FSC_PERM (0x0c)
+
+ /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+-#define HPFAR_MASK (~0xFUL)
++#define HPFAR_MASK (~UL(0xf))
+
+ #define ESR_EL2_EC_UNKNOWN (0x00)
+ #define ESR_EL2_EC_WFI (0x01)
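[The trick behind UL() is that the integer-constant suffix is pasted on
in C but dropped when the header is pulled into assembly, where GNU as
rejects it. Paraphrased from include/uapi/linux/const.h and the arm64
asm/memory.h wrapper:

	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* assembler: bare constant */
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)	/* C: paste the suffix on */
	#endif
	#define UL(x)		_AC(x, UL)

	#define ESR_EL2_IL	(UL(1) << 25)	/* usable from .c and .S */

After this change the failing operand expands to ((1 << 25) - 1) in
assembly sources instead of ((1U << 25) - 1), which assembles cleanly.]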
--- /dev/null
+From 7cbb87d67e38cfc55680290a706fd7517f10050d Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Tue, 28 Oct 2014 19:36:45 +0000
+Subject: arm64: KVM: fix unmapping with 48-bit VAs
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 7cbb87d67e38cfc55680290a706fd7517f10050d upstream.
+
+Currently if using a 48-bit VA, tearing down the hyp page tables (which
+can happen in the absence of a GICH or GICV resource) results in the
+rather nasty splat below, evidently because we access a table that
+doesn't actually exist.
+
+Commit 38f791a4e499792e (arm64: KVM: Implement 48 VA support for KVM EL2
+and Stage-2) added a pgd_none check to __create_hyp_mappings to account
+for the additional level of tables, but didn't add a corresponding check
+to unmap_range, and this seems to be the source of the problem.
+
+This patch adds the missing pgd_none check, ensuring we don't try to
+access tables that don't exist.
+
+Original splat below:
+
+kvm [1]: Using HYP init bounce page @83fe94a000
+kvm [1]: Cannot obtain GICH resource
+Unable to handle kernel paging request at virtual address ffff7f7fff000000
+pgd = ffff800000770000
+[ffff7f7fff000000] *pgd=0000000000000000
+Internal error: Oops: 96000004 [#1] PREEMPT SMP
+Modules linked in:
+CPU: 1 PID: 1 Comm: swapper/0 Not tainted 3.18.0-rc2+ #89
+task: ffff8003eb500000 ti: ffff8003eb45c000 task.ti: ffff8003eb45c000
+PC is at unmap_range+0x120/0x580
+LR is at free_hyp_pgds+0xac/0xe4
+pc : [<ffff80000009b768>] lr : [<ffff80000009cad8>] pstate: 80000045
+sp : ffff8003eb45fbf0
+x29: ffff8003eb45fbf0 x28: ffff800000736000
+x27: ffff800000735000 x26: ffff7f7fff000000
+x25: 0000000040000000 x24: ffff8000006f5000
+x23: 0000000000000000 x22: 0000007fffffffff
+x21: 0000800000000000 x20: 0000008000000000
+x19: 0000000000000000 x18: ffff800000648000
+x17: ffff800000537228 x16: 0000000000000000
+x15: 000000000000001f x14: 0000000000000000
+x13: 0000000000000001 x12: 0000000000000020
+x11: 0000000000000062 x10: 0000000000000006
+x9 : 0000000000000000 x8 : 0000000000000063
+x7 : 0000000000000018 x6 : 00000003ff000000
+x5 : ffff800000744188 x4 : 0000000000000001
+x3 : 0000000040000000 x2 : ffff800000000000
+x1 : 0000007fffffffff x0 : 000000003fffffff
+
+Process swapper/0 (pid: 1, stack limit = 0xffff8003eb45c058)
+Stack: (0xffff8003eb45fbf0 to 0xffff8003eb460000)
+fbe0: eb45fcb0 ffff8003 0009cad8 ffff8000
+fc00: 00000000 00000080 00736140 ffff8000 00736000 ffff8000 00000000 00007c80
+fc20: 00000000 00000080 006f5000 ffff8000 00000000 00000080 00743000 ffff8000
+fc40: 00735000 ffff8000 006d3030 ffff8000 006fe7b8 ffff8000 00000000 00000080
+fc60: ffffffff 0000007f fdac1000 ffff8003 fd94b000 ffff8003 fda47000 ffff8003
+fc80: 00502b40 ffff8000 ff000000 ffff7f7f fdec6000 00008003 fdac1630 ffff8003
+fca0: eb45fcb0 ffff8003 ffffffff 0000007f eb45fd00 ffff8003 0009b378 ffff8000
+fcc0: ffffffea 00000000 006fe000 ffff8000 00736728 ffff8000 00736120 ffff8000
+fce0: 00000040 00000000 00743000 ffff8000 006fe7b8 ffff8000 0050cd48 00000000
+fd00: eb45fd60 ffff8003 00096070 ffff8000 006f06e0 ffff8000 006f06e0 ffff8000
+fd20: fd948b40 ffff8003 0009a320 ffff8000 00000000 00000000 00000000 00000000
+fd40: 00000ae0 00000000 006aa25c ffff8000 eb45fd60 ffff8003 0017ca44 00000002
+fd60: eb45fdc0 ffff8003 0009a33c ffff8000 006f06e0 ffff8000 006f06e0 ffff8000
+fd80: fd948b40 ffff8003 0009a320 ffff8000 00000000 00000000 00735000 ffff8000
+fda0: 006d3090 ffff8000 006aa25c ffff8000 00735000 ffff8000 006d3030 ffff8000
+fdc0: eb45fdd0 ffff8003 000814c0 ffff8000 eb45fe50 ffff8003 006aaac4 ffff8000
+fde0: 006ddd90 ffff8000 00000006 00000000 006d3000 ffff8000 00000095 00000000
+fe00: 006a1e90 ffff8000 00735000 ffff8000 006d3000 ffff8000 006aa25c ffff8000
+fe20: 00735000 ffff8000 006d3030 ffff8000 eb45fe50 ffff8003 006fac68 ffff8000
+fe40: 00000006 00000006 fe293ee6 ffff8003 eb45feb0 ffff8003 004f8ee8 ffff8000
+fe60: 004f8ed4 ffff8000 00735000 ffff8000 00000000 00000000 00000000 00000000
+fe80: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+fea0: 00000000 00000000 00000000 00000000 00000000 00000000 000843d0 ffff8000
+fec0: 004f8ed4 ffff8000 00000000 00000000 00000000 00000000 00000000 00000000
+fee0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ff00: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ff20: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ff40: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ff60: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ff80: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ffa0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+ffc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000005 00000000
+ffe0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+Call trace:
+[<ffff80000009b768>] unmap_range+0x120/0x580
+[<ffff80000009cad4>] free_hyp_pgds+0xa8/0xe4
+[<ffff80000009b374>] kvm_arch_init+0x268/0x44c
+[<ffff80000009606c>] kvm_init+0x24/0x260
+[<ffff80000009a338>] arm_init+0x18/0x24
+[<ffff8000000814bc>] do_one_initcall+0x88/0x1a0
+[<ffff8000006aaac0>] kernel_init_freeable+0x148/0x1e8
+[<ffff8000004f8ee4>] kernel_init+0x10/0xd4
+Code: 8b000263 92628479 d1000720 eb01001f (f9400340)
+---[ end trace 3bc230562e926fa4 ]---
+Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Jungseok Lee <jungseoklee85@gmail.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm,
+ pgd = pgdp + pgd_index(addr);
+ do {
+ next = kvm_pgd_addr_end(addr, end);
+- unmap_puds(kvm, pgd, addr, next);
++ if (!pgd_none(*pgd))
++ unmap_puds(kvm, pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+ }
+
arm-arm64-kvm-fix-use-of-wnr-bit-in-kvm_is_write_fault.patch
kvm-arm-vgic-plug-irq-injection-race.patch
arm-arm64-kvm-fix-set_clear_sgi_pend_reg-offset.patch
+arm-arm64-kvm-fix-vttbr_baddr_mask-and-pgd-alloc.patch
+arm-kvm-fix-cpu-hotplug.patch
+arm-arm64-kvm-fix-potential-null-dereference-in.patch
+arm-arm64-kvm-ensure-memslots-are-within-kvm_phys_size.patch
+arm-kvm-strict_mm_typechecks-fix-for-user_mem_abort.patch
+arm64-kvm-fix-unmapping-with-48-bit-vas.patch
+arm-arm64-kvm-vgic-fix-error-code-in-kvm_vgic_create.patch
+arm64-kvm-fix-assembler-compatibility-of-macros.patch