--- /dev/null
+++ b/queue-3.14/arm-arm64-kvm-keep-elrsr-aisr-in-sync-with-software-model.patch
+From ae705930fca6322600690df9dc1c7d0516145a93 Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@linaro.org>
+Date: Fri, 13 Mar 2015 17:02:56 +0000
+Subject: arm/arm64: KVM: Keep elrsr/aisr in sync with software model
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christoffer Dall <christoffer.dall@linaro.org>
+
+commit ae705930fca6322600690df9dc1c7d0516145a93 upstream.
+
+[Note: the upstream version of this patch depends on the full GICv3
+support series, which is out of scope for the stable kernel, so this
+backport differs substantially from the upstream commit.]
+
+There is an interesting bug in the vgic code, which manifests itself
+when the KVM run loop has a signal pending or needs a vmid generation
+rollover after having disabled interrupts but before actually switching
+to the guest.
+
+In this case, we flush the vgic as usual, but we sync back the vgic
+state and exit to userspace before entering the guest. The consequence
+is that we will be syncing the list registers back to the software model
+using the GICH_ELRSR and GICH_EISR from the last execution of the guest,
+potentially overwriting a list register containing an interrupt.
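+
+A minimal sketch of the affected path, condensed from
+kvm_arch_vcpu_ioctl_run() (names assumed from that loop, ordering
+approximate):
+
+	kvm_vgic_flush_hwstate(vcpu);	/* software model -> list regs */
+	local_irq_disable();
+	if (signal_pending(current) || need_new_vmid_gen(vcpu->kvm)) {
+		local_irq_enable();
+		/* guest never ran: ELRSR/EISR still describe the
+		 * previous run, yet we sync them back anyway */
+		kvm_vgic_sync_hwstate(vcpu);
+		continue;
+	}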
+
+This showed up during migration testing, where we would capture a state
+in which the VM had masked the arch timer but no interrupts were
+pending, resulting in a hung test.
+
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Reported-by: Alex Bennee <alex.bennee@linaro.org>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/arm/vgic.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1042,6 +1042,7 @@ static bool vgic_queue_irq(struct kvm_vc
+ lr, irq, vgic_cpu->vgic_lr[lr]);
+ BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
+ vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
++ __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+ return true;
+ }
+
+@@ -1055,6 +1056,7 @@ static bool vgic_queue_irq(struct kvm_vc
+ vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
+ vgic_cpu->vgic_irq_lr_map[irq] = lr;
+ set_bit(lr, vgic_cpu->lr_used);
++ __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+
+ if (!vgic_irq_is_edge(vcpu, irq))
+ vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+@@ -1209,6 +1211,14 @@ static bool vgic_process_maintenance(str
+ if (vgic_cpu->vgic_misr & GICH_MISR_U)
+ vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+
++ /*
++ * In the next iterations of the vcpu loop, if we sync the vgic state
++ * after flushing it, but before entering the guest (this happens for
++ * pending signals and vmid rollovers), then make sure we don't pick
++ * up any old maintenance interrupts here.
++ */
++ memset(vgic_cpu->vgic_eisr, 0, sizeof(vgic_cpu->vgic_eisr[0]) * 2);
++
+ return level_pending;
+ }
+
--- /dev/null
+++ b/queue-3.14/arm-arm64-kvm-require-in-kernel-vgic-for-the-arch-timers.patch
+From 05971120fca43e0357789a14b3386bb56eef2201 Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@linaro.org>
+Date: Fri, 12 Dec 2014 21:19:23 +0100
+Subject: arm/arm64: KVM: Require in-kernel vgic for the arch timers
+
+From: Christoffer Dall <christoffer.dall@linaro.org>
+
+commit 05971120fca43e0357789a14b3386bb56eef2201 upstream.
+
+[Note: this patch differs slightly from the original because the names
+of vgic_initialized and kvm_vgic_init differ in this stable tree.]
+
+It is currently possible to run a VM with architected timers support
+without creating an in-kernel VGIC, which will result in interrupts from
+the virtual timer going nowhere.
+
+To address this issue, move the architected timers initialization to the
+time when we run a VCPU for the first time, and then only initialize
+(and enable) the architected timers if we have a properly created and
+initialized in-kernel VGIC.
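+
+Condensed, the gate added in the arch/arm/kvm/arm.c hunk below amounts
+to:
+
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+		kvm_timer_enable(kvm);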
+
+When injecting interrupts from the virtual timer to the vgic, the
+current setup should ensure that this never calls an on-demand init of
+the VGIC, which is the only call path that could return an error from
+kvm_vgic_inject_irq(). Capture the return value and raise a warning if
+there is an error there.
+
+We also change kvm_timer_init() from returning an int to returning
+void, since the function always succeeds.
+
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kvm/arm.c | 13 +++++++++++--
+ include/kvm/arm_arch_timer.h | 10 ++++------
+ virt/kvm/arm/arch_timer.c | 30 ++++++++++++++++++++++--------
+ 3 files changed, 37 insertions(+), 16 deletions(-)
+
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -441,6 +441,7 @@ static void update_vttbr(struct kvm *kvm
+
+ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ {
++ struct kvm *kvm = vcpu->kvm;
+ int ret;
+
+ if (likely(vcpu->arch.has_run_once))
+@@ -452,12 +453,20 @@ static int kvm_vcpu_first_run_init(struc
+ * Initialize the VGIC before running a vcpu the first time on
+ * this VM.
+ */
+- if (unlikely(!vgic_initialized(vcpu->kvm))) {
+- ret = kvm_vgic_init(vcpu->kvm);
++ if (unlikely(!vgic_initialized(kvm))) {
++ ret = kvm_vgic_init(kvm);
+ if (ret)
+ return ret;
+ }
+
++ /*
++ * Enable the arch timers only if we have an in-kernel VGIC
++ * and it has been properly initialized, since we cannot handle
++ * interrupts from the virtual timer with a userspace gic.
++ */
++ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
++ kvm_timer_enable(kvm);
++
+ return 0;
+ }
+
+--- a/include/kvm/arm_arch_timer.h
++++ b/include/kvm/arm_arch_timer.h
+@@ -60,7 +60,8 @@ struct arch_timer_cpu {
+
+ #ifdef CONFIG_KVM_ARM_TIMER
+ int kvm_timer_hyp_init(void);
+-int kvm_timer_init(struct kvm *kvm);
++void kvm_timer_enable(struct kvm *kvm);
++void kvm_timer_init(struct kvm *kvm);
+ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ const struct kvm_irq_level *irq);
+ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+@@ -73,11 +74,8 @@ static inline int kvm_timer_hyp_init(voi
+ return 0;
+ };
+
+-static inline int kvm_timer_init(struct kvm *kvm)
+-{
+- return 0;
+-}
+-
++static inline void kvm_timer_enable(struct kvm *kvm) {}
++static inline void kvm_timer_init(struct kvm *kvm) {}
+ static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+ const struct kvm_irq_level *irq) {}
+ static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_tim
+
+ static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
+ {
++ int ret;
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
+- kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+- timer->irq->irq,
+- timer->irq->level);
++ ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
++ timer->irq->irq,
++ timer->irq->level);
++ WARN_ON(ret);
+ }
+
+ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
+@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm
+ timer_disarm(timer);
+ }
+
+-int kvm_timer_init(struct kvm *kvm)
++void kvm_timer_enable(struct kvm *kvm)
+ {
+- if (timecounter && wqueue) {
+- kvm->arch.timer.cntvoff = kvm_phys_timer_read();
++ if (kvm->arch.timer.enabled)
++ return;
++
++ /*
++ * There is a potential race here between VCPUs starting for the first
++ * time, which may be enabling the timer multiple times. That doesn't
++ * hurt though, because we're just setting a variable to the same
++ * value it already had. The important thing is that all
++ * VCPUs have the enabled variable set, before entering the guest, if
++ * the arch timers are enabled.
++ */
++ if (timecounter && wqueue)
+ kvm->arch.timer.enabled = 1;
+- }
++}
+
+- return 0;
++void kvm_timer_init(struct kvm *kvm)
++{
++ kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+ }
--- /dev/null
+++ b/queue-3.14/arm64-kvm-do-not-use-pgd_index-to-index-stage-2-pgd.patch
+From 04b8dc85bf4a64517e3cf20e409eeaa503b15cc1 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 10 Mar 2015 19:07:00 +0000
+Subject: arm64: KVM: Do not use pgd_index to index stage-2 pgd
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 04b8dc85bf4a64517e3cf20e409eeaa503b15cc1 upstream.
+
+[Since commit c647355 (KVM: arm: Add initial dirty page locking
+support) is not backported to linux-3.14.y, there is no stage2_wp_range
+in arch/arm/kvm/mmu.c, so the stage2_wp_range change from the upstream
+patch is dropped here.]
+
+The kernel's pgd_index macro is designed to index a normal, page-sized
+array. KVM is a bit different, as we can use concatenated pages to have
+a bigger address space (for example a 40bit IPA with 4kB pages gives us
+an 8kB PGD).
+
+In the above case, the use of pgd_index will always return an index
+inside the first 4kB, which makes a guest that has memory above
+0x8000000000 rather unhappy, as it spins forever in a page fault,
+whilst the host happily corrupts the lower pgd.
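+
+Worked example (assuming 4kB pages with PGDIR_SHIFT == 30 and
+PTRS_PER_PGD == 512, as on arm64 with a 39bit VA): a 40bit IPA gives
+PTRS_PER_S2_PGD == 1 << (40 - 30) == 1024 entries, i.e. the 8kB PGD
+mentioned above, while pgd_index() masks with PTRS_PER_PGD - 1 == 511:
+
+	pgd_index(0x8000000000)		/* (512 & 511)  == 0, wraps    */
+	kvm_pgd_index(0x8000000000)	/* (512 & 1023) == 512, correct */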
+
+The obvious fix is to get our own kvm_pgd_index that does the right
+thing(tm).
+
+Tested on X-Gene with a hacked kvmtool that put memory at a stupidly
+high address.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/kvm_mmu.h | 3 ++-
+ arch/arm/kvm/mmu.c | 6 +++---
+ arch/arm64/include/asm/kvm_mmu.h | 2 ++
+ 3 files changed, 7 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -117,13 +117,14 @@ static inline void kvm_set_s2pmd_writabl
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+ })
+
++#define kvm_pgd_index(addr) pgd_index(addr)
++
+ static inline bool kvm_page_empty(void *ptr)
+ {
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+ }
+
+-
+ #define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+ #define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+ #define kvm_pud_table_empty(pudp) (0)
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -194,7 +194,7 @@ static void unmap_range(struct kvm *kvm,
+ phys_addr_t addr = start, end = start + size;
+ phys_addr_t next;
+
+- pgd = pgdp + pgd_index(addr);
++ pgd = pgdp + kvm_pgd_index(addr);
+ do {
+ next = kvm_pgd_addr_end(addr, end);
+ if (!pgd_none(*pgd))
+@@ -264,7 +264,7 @@ static void stage2_flush_memslot(struct
+ phys_addr_t next;
+ pgd_t *pgd;
+
+- pgd = kvm->arch.pgd + pgd_index(addr);
++ pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+ do {
+ next = kvm_pgd_addr_end(addr, end);
+ stage2_flush_puds(kvm, pgd, addr, next);
+@@ -649,7 +649,7 @@ static pmd_t *stage2_get_pmd(struct kvm
+ pud_t *pud;
+ pmd_t *pmd;
+
+- pgd = kvm->arch.pgd + pgd_index(addr);
++ pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud)) {
+ if (!cache)
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -69,6 +69,8 @@
+ #define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
+ #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
++#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
++
+ int create_hyp_mappings(void *from, void *to);
+ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+ void free_boot_hyp_pgd(void);
--- /dev/null
+++ b/queue-3.14/arm64-kvm-fix-hcr-setting-for-32bit-guests.patch
+From 801f6772cecea6cfc7da61aa197716ab64db5f9e Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Sun, 11 Jan 2015 14:10:11 +0100
+Subject: arm64: KVM: Fix HCR setting for 32bit guests
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 801f6772cecea6cfc7da61aa197716ab64db5f9e upstream.
+
+Commit b856a59141b1 (arm/arm64: KVM: Reset the HCR on each vcpu
+when resetting the vcpu) moved the init of the HCR register to
+happen later in the init of a vcpu, but left out the fixup
+done in kvm_reset_vcpu when preparing for a 32bit guest.
+
+As a result, the 32bit guest is run as a 64bit guest, but the rest of
+the kernel still manages it as a 32bit one. Fun follows.
+
+Moving the fixup to vcpu_reset_hcr solves the problem for good.
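+
+(For reference: HCR_EL2.RW, bit 31, selects the register width of EL1;
+set means AArch64, clear means AArch32. HCR_GUEST_FLAGS includes
+HCR_RW, so a 32bit guest needs it cleared again, which vcpu_reset_hcr()
+now does in the hunk below.)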
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/kvm_emulate.h | 2 ++
+ arch/arm64/kvm/reset.c | 1 -
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vc
+ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
++ if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
++ vcpu->arch.hcr_el2 &= ~HCR_RW;
+ }
+
+ static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu
+ if (!cpu_has_32bit_el1())
+ return -EINVAL;
+ cpu_reset = &default_regs_reset32;
+- vcpu->arch.hcr_el2 &= ~HCR_RW;
+ } else {
+ cpu_reset = &default_regs_reset;
+ }
--- /dev/null
+++ b/queue-3.14/arm64-kvm-fix-tlb-invalidation-by-ipa-vmid.patch
+From 55e858b75808347378e5117c3c2339f46cc03575 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Sun, 11 Jan 2015 14:10:10 +0100
+Subject: arm64: KVM: Fix TLB invalidation by IPA/VMID
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 55e858b75808347378e5117c3c2339f46cc03575 upstream.
+
+It took about two years for someone to notice that the IPA passed
+to TLBI IPAS2E1IS must be shifted by 12 bits. Clearly our reviewing
+is not as good as it should be...
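+
+(For reference: the Xt operand of TLBI IPAS2E1IS carries IPA[47:12] in
+its low bits, i.e. the IPA in 4kB units rather than a byte address,
+hence the "lsr x1, x1, #12" added below.)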
+
+Paper bag time for me.
+
+Reported-by: Mario Smarduch <m.smarduch@samsung.com>
+Tested-by: Mario Smarduch <m.smarduch@samsung.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/hyp.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -629,6 +629,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
+ * Instead, we invalidate Stage-2 for this IPA, and the
+ * whole of Stage-1. Weep...
+ */
++ lsr x1, x1, #12
+ tlbi ipas2e1is, x1
+ /*
+ * We have to ensure completion of the invalidation at Stage-2,
--- a/queue-3.14/series
+++ b/queue-3.14/series
fs-fix-s_nosec-handling.patch
vfs-remove-incorrect-debugging-warn-in-prepend_path.patch
vfs-ignore-unlocked-mounts-in-fs_fully_visible.patch
+arm-arm64-kvm-require-in-kernel-vgic-for-the-arch-timers.patch
+arm64-kvm-fix-tlb-invalidation-by-ipa-vmid.patch
+arm64-kvm-fix-hcr-setting-for-32bit-guests.patch
+arm64-kvm-do-not-use-pgd_index-to-index-stage-2-pgd.patch
+arm-arm64-kvm-keep-elrsr-aisr-in-sync-with-software-model.patch