3.14-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 15 May 2015 23:09:18 +0000 (16:09 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 15 May 2015 23:09:18 +0000 (16:09 -0700)
added patches:
arm-kvm-add-world-switch-for-amair-0-1.patch
arm-kvm-fix-handling-of-trapped-64bit-coprocessor-accesses.patch
arm-kvm-fix-ordering-of-64bit-coprocessor-accesses.patch
arm-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch
arm-kvm-introduce-kvm_p-d_addr_end.patch
arm-kvm-introduce-per-vcpu-hyp-configuration-register.patch
arm-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch
arm64-kvm-allows-discrimination-of-aarch32-sysreg-access.patch
arm64-kvm-flush-vm-pages-before-letting-the-guest-enable-caches.patch
arm64-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch
arm64-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch
arm64-kvm-use-inner-shareable-barriers-for-inner-shareable-maintenance.patch
deal-with-deadlock-in-d_walk.patch
kvm-arm-arm64-vgic-fix-gicd_icfgr-register-accesses.patch
kvm-arm-vgic-fix-the-overlap-check-action-about-setting-the-gicd-gicc-base-address.patch
kvm-arm64-vgic-fix-hyp-panic-with-64k-pages-on-juno-platform.patch

17 files changed:
queue-3.14/arm-kvm-add-world-switch-for-amair-0-1.patch [new file with mode: 0644]
queue-3.14/arm-kvm-fix-handling-of-trapped-64bit-coprocessor-accesses.patch [new file with mode: 0644]
queue-3.14/arm-kvm-fix-ordering-of-64bit-coprocessor-accesses.patch [new file with mode: 0644]
queue-3.14/arm-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch [new file with mode: 0644]
queue-3.14/arm-kvm-introduce-kvm_p-d_addr_end.patch [new file with mode: 0644]
queue-3.14/arm-kvm-introduce-per-vcpu-hyp-configuration-register.patch [new file with mode: 0644]
queue-3.14/arm-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch [new file with mode: 0644]
queue-3.14/arm64-kvm-allows-discrimination-of-aarch32-sysreg-access.patch [new file with mode: 0644]
queue-3.14/arm64-kvm-flush-vm-pages-before-letting-the-guest-enable-caches.patch [new file with mode: 0644]
queue-3.14/arm64-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch [new file with mode: 0644]
queue-3.14/arm64-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch [new file with mode: 0644]
queue-3.14/arm64-kvm-use-inner-shareable-barriers-for-inner-shareable-maintenance.patch [new file with mode: 0644]
queue-3.14/deal-with-deadlock-in-d_walk.patch [new file with mode: 0644]
queue-3.14/kvm-arm-arm64-vgic-fix-gicd_icfgr-register-accesses.patch [new file with mode: 0644]
queue-3.14/kvm-arm-vgic-fix-the-overlap-check-action-about-setting-the-gicd-gicc-base-address.patch [new file with mode: 0644]
queue-3.14/kvm-arm64-vgic-fix-hyp-panic-with-64k-pages-on-juno-platform.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/arm-kvm-add-world-switch-for-amair-0-1.patch b/queue-3.14/arm-kvm-add-world-switch-for-amair-0-1.patch
new file mode 100644 (file)
index 0000000..857f649
--- /dev/null
@@ -0,0 +1,96 @@
+From af20814ee927ed888288d98917a766b4179c4fe0 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Wed, 22 Jan 2014 10:20:09 +0000
+Subject: ARM: KVM: add world-switch for AMAIR{0,1}
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit af20814ee927ed888288d98917a766b4179c4fe0 upstream.
+
+HCR.TVM traps (among other things) accesses to AMAIR0 and AMAIR1.
+In order to minimise the amount of surprise a guest could generate by
+trying to access these registers with caches off, add them to the
+list of registers we switch/handle.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_asm.h |    4 +++-
+ arch/arm/kvm/coproc.c          |    6 ++++++
+ arch/arm/kvm/interrupts_head.S |   12 ++++++++++--
+ 3 files changed, 19 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_asm.h
++++ b/arch/arm/include/asm/kvm_asm.h
+@@ -48,7 +48,9 @@
+ #define c13_TID_URO   26      /* Thread ID, User R/O */
+ #define c13_TID_PRIV  27      /* Thread ID, Privileged */
+ #define c14_CNTKCTL   28      /* Timer Control Register (PL1) */
+-#define NR_CP15_REGS  29      /* Number of regs (incl. invalid) */
++#define c10_AMAIR0    29      /* Auxilary Memory Attribute Indirection Reg0 */
++#define c10_AMAIR1    30      /* Auxilary Memory Attribute Indirection Reg1 */
++#define NR_CP15_REGS  31      /* Number of regs (incl. invalid) */
+ #define ARM_EXCEPTION_RESET     0
+ #define ARM_EXCEPTION_UNDEFINED   1
+--- a/arch/arm/kvm/coproc.c
++++ b/arch/arm/kvm/coproc.c
+@@ -328,6 +328,12 @@ static const struct coproc_reg cp15_regs
+       { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
+                       NULL, reset_unknown, c10_NMRR},
++      /* AMAIR0/AMAIR1: swapped by interrupt.S. */
++      { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
++                      access_vm_reg, reset_unknown, c10_AMAIR0},
++      { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
++                      access_vm_reg, reset_unknown, c10_AMAIR1},
++
+       /* VBAR: swapped by interrupt.S. */
+       { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
+                       NULL, reset_val, c12_VBAR, 0x00000000 },
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -303,13 +303,17 @@ vcpu     .req    r0              @ vcpu pointer always in r
+       mrc     p15, 0, r2, c14, c1, 0  @ CNTKCTL
+       mrrc    p15, 0, r4, r5, c7      @ PAR
++      mrc     p15, 0, r6, c10, c3, 0  @ AMAIR0
++      mrc     p15, 0, r7, c10, c3, 1  @ AMAIR1
+       .if \store_to_vcpu == 0
+-      push    {r2,r4-r5}
++      push    {r2,r4-r7}
+       .else
+       str     r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+       add     r12, vcpu, #CP15_OFFSET(c7_PAR)
+       strd    r4, r5, [r12]
++      str     r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
++      str     r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
+       .endif
+ .endm
+@@ -322,15 +326,19 @@ vcpu     .req    r0              @ vcpu pointer always in r
+  */
+ .macro write_cp15_state read_from_vcpu
+       .if \read_from_vcpu == 0
+-      pop     {r2,r4-r5}
++      pop     {r2,r4-r7}
+       .else
+       ldr     r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+       add     r12, vcpu, #CP15_OFFSET(c7_PAR)
+       ldrd    r4, r5, [r12]
++      ldr     r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
++      ldr     r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
+       .endif
+       mcr     p15, 0, r2, c14, c1, 0  @ CNTKCTL
+       mcrr    p15, 0, r4, r5, c7      @ PAR
++      mcr     p15, 0, r6, c10, c3, 0  @ AMAIR0
++      mcr     p15, 0, r7, c10, c3, 1  @ AMAIR1
+       .if \read_from_vcpu == 0
+       pop     {r2-r12}
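
Aside: for readers tracing the cp15 plumbing, here is the new save path expressed as a minimal C sketch. The struct and the reader callbacks are hypothetical stand-ins; the real work is done by the mrc instructions in interrupts_head.S above.

    #include <stdint.h>

    /* Index values mirror the kvm_asm.h constants added above. */
    enum { C10_AMAIR0 = 29, C10_AMAIR1 = 30, NR_CP15_REGS = 31 };

    struct vcpu_model { uint32_t cp15[NR_CP15_REGS]; };

    /* What the store_to_vcpu branch of the save macro now does for
     * AMAIR{0,1}; read_amair0/1() stand in for "mrc p15, 0, rN, c10, c3, x". */
    static void save_amair(struct vcpu_model *v,
                           uint32_t (*read_amair0)(void),
                           uint32_t (*read_amair1)(void))
    {
            v->cp15[C10_AMAIR0] = read_amair0();
            v->cp15[C10_AMAIR1] = read_amair1();
    }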
diff --git a/queue-3.14/arm-kvm-fix-handling-of-trapped-64bit-coprocessor-accesses.patch b/queue-3.14/arm-kvm-fix-handling-of-trapped-64bit-coprocessor-accesses.patch
new file mode 100644 (file)
index 0000000..ca71d54
--- /dev/null
@@ -0,0 +1,66 @@
+From 46c214dd595381c880794413facadfa07fba5c95 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 21 Jan 2014 18:56:26 +0000
+Subject: ARM: KVM: fix handling of trapped 64bit coprocessor accesses
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 46c214dd595381c880794413facadfa07fba5c95 upstream.
+
+Commit 240e99cbd00a (ARM: KVM: Fix 64-bit coprocessor handling)
+changed the way we match the 64bit coprocessor access from
+user space, but didn't update the trap handler for the same
+set of registers.
+
+The effect is that a trapped 64bit access is never matched, leading
+to a fault being injected into the guest. This went unnoticed as we
+didn't really trap any 64bit register so far.
+
+Placing the CRm field of the access into the CRn field of the matching
+structure fixes the problem. Also update the debug feature to emit the
+expected string in case of a failing match.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/coproc.c |    4 ++--
+ arch/arm/kvm/coproc.h |    4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/kvm/coproc.c
++++ b/arch/arm/kvm/coproc.c
+@@ -443,7 +443,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *
+ {
+       struct coproc_params params;
+-      params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
++      params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+       params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+       params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
+       params.is_64bit = true;
+@@ -451,7 +451,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *
+       params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
+       params.Op2 = 0;
+       params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+-      params.CRn = 0;
++      params.CRm = 0;
+       return emulate_cp15(vcpu, &params);
+ }
+--- a/arch/arm/kvm/coproc.h
++++ b/arch/arm/kvm/coproc.h
+@@ -58,8 +58,8 @@ static inline void print_cp_instr(const
+ {
+       /* Look, we even formatted it for you to paste into the table! */
+       if (p->is_64bit) {
+-              kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
+-                            p->CRm, p->Op1, p->is_write ? "write" : "read");
++              kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
++                            p->CRn, p->Op1, p->is_write ? "write" : "read");
+       } else {
+               kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
+                             " func_%s },\n",
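
To see why the swap matters, here is the fixed 64bit trap decode as a self-contained C sketch (struct name hypothetical, bit positions taken from the hunk above): the CRm field of the trapped MCRR/MRRC instruction deliberately lands in CRn, because that is the field the 64bit table entries are keyed on.

    #include <stdbool.h>
    #include <stdint.h>

    struct params_model {
            unsigned CRn, CRm, Op1, Op2, Rt1, Rt2;
            bool is_write, is_64bit;
    };

    static void decode_cp15_64(uint32_t hsr, struct params_model *p)
    {
            p->CRn      = (hsr >> 1) & 0xf;  /* instruction's CRm, matched as CRn */
            p->Rt1      = (hsr >> 5) & 0xf;
            p->Rt2      = (hsr >> 10) & 0xf;
            p->Op1      = (hsr >> 16) & 0xf;
            p->Op2      = 0;
            p->CRm      = 0;
            p->is_write = !(hsr & 1);
            p->is_64bit = true;
    }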
diff --git a/queue-3.14/arm-kvm-fix-ordering-of-64bit-coprocessor-accesses.patch b/queue-3.14/arm-kvm-fix-ordering-of-64bit-coprocessor-accesses.patch
new file mode 100644 (file)
index 0000000..7cc7176
--- /dev/null
@@ -0,0 +1,47 @@
+From 547f781378a22b65c2ab468f235c23001b5924da Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 21 Jan 2014 18:56:26 +0000
+Subject: ARM: KVM: fix ordering of 64bit coprocessor accesses
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 547f781378a22b65c2ab468f235c23001b5924da upstream.
+
+Commit 240e99cbd00a (ARM: KVM: Fix 64-bit coprocessor handling)
+added an ordering dependency for the 64bit registers.
+
+The order described is: CRn, CRm, Op1, Op2, 64bit-first.
+
+Unfortunately, the implementation is: CRn, 64bit-first, CRm...
+
+Move the 64bit test to be last in order to match the documentation.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/coproc.h |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/kvm/coproc.h
++++ b/arch/arm/kvm/coproc.h
+@@ -135,13 +135,13 @@ static inline int cmp_reg(const struct c
+               return -1;
+       if (i1->CRn != i2->CRn)
+               return i1->CRn - i2->CRn;
+-      if (i1->is_64 != i2->is_64)
+-              return i2->is_64 - i1->is_64;
+       if (i1->CRm != i2->CRm)
+               return i1->CRm - i2->CRm;
+       if (i1->Op1 != i2->Op1)
+               return i1->Op1 - i2->Op1;
+-      return i1->Op2 - i2->Op2;
++      if (i1->Op2 != i2->Op2)
++              return i1->Op2 - i2->Op2;
++      return i2->is_64 - i1->is_64;
+ }
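
A standalone model of the corrected comparison, handy for checking the documented order (CRn, CRm, Op1, Op2, 64bit-first) against a sorted table; field names follow the coproc_reg entries, everything else is illustrative.

    struct reg_key { int CRn, CRm, Op1, Op2, is_64; };

    static int cmp_reg_model(const struct reg_key *a, const struct reg_key *b)
    {
            if (a->CRn != b->CRn) return a->CRn - b->CRn;
            if (a->CRm != b->CRm) return a->CRm - b->CRm;
            if (a->Op1 != b->Op1) return a->Op1 - b->Op1;
            if (a->Op2 != b->Op2) return a->Op2 - b->Op2;
            return b->is_64 - a->is_64;  /* 64bit entries sort first */
    }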
diff --git a/queue-3.14/arm-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch b/queue-3.14/arm-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch
new file mode 100644 (file)
index 0000000..a4480fc
--- /dev/null
@@ -0,0 +1,59 @@
+From 159793001d7d85af17855630c94f0a176848e16b Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 14 Jan 2014 19:13:10 +0000
+Subject: ARM: KVM: force cache clean on page fault when caches are off
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 159793001d7d85af17855630c94f0a176848e16b upstream.
+
+In order for a guest with caches disabled to observe data written
+to a given page, we need to make sure that page is
+committed to memory, and not just hanging in the cache (as guest
+accesses are completely bypassing the cache until it decides to
+enable it).
+
+For this purpose, hook into the coherent_cache_guest_page
+function and flush the region if the guest SCTLR
+register doesn't show the MMU and caches as being enabled.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_mmu.h |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -129,9 +129,19 @@ static inline void kvm_set_s2pmd_writabl
+ struct kvm;
++#define kvm_flush_dcache_to_poc(a,l)  __cpuc_flush_dcache_area((a), (l))
++
++static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
++{
++      return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
++}
++
+ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+                                            unsigned long size)
+ {
++      if (!vcpu_has_cache_enabled(vcpu))
++              kvm_flush_dcache_to_poc((void *)hva, size);
++
+       /*
+        * If we are going to insert an instruction page and the icache is
+        * either VIPT or PIPT, there is a potential problem where the host
+@@ -152,7 +162,6 @@ static inline void coherent_cache_guest_
+       }
+ }
+-#define kvm_flush_dcache_to_poc(a,l)  __cpuc_flush_dcache_area((a), (l))
+ #define kvm_virt_to_phys(x)           virt_to_idmap((unsigned long)(x))
+ void stage2_flush_vm(struct kvm *kvm);
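
The 0b101 mask in vcpu_has_cache_enabled() is SCTLR.M (bit 0, MMU enable) together with SCTLR.C (bit 2, data cache enable); spelled out as a sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define SCTLR_M (1u << 0)  /* MMU enable */
    #define SCTLR_C (1u << 2)  /* Data cache enable */

    /* Equivalent to the (sctlr & 0b101) == 0b101 test above: the guest
     * only counts as cached once both the MMU and the D-cache are on. */
    static bool cache_enabled(uint32_t sctlr)
    {
            return (sctlr & (SCTLR_M | SCTLR_C)) == (SCTLR_M | SCTLR_C);
    }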
diff --git a/queue-3.14/arm-kvm-introduce-kvm_p-d_addr_end.patch b/queue-3.14/arm-kvm-introduce-kvm_p-d_addr_end.patch
new file mode 100644 (file)
index 0000000..491c647
--- /dev/null
@@ -0,0 +1,104 @@
+From a3c8bd31af260a17d626514f636849ee1cd1f63e Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 18 Feb 2014 14:29:03 +0000
+Subject: ARM: KVM: introduce kvm_p*d_addr_end
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit a3c8bd31af260a17d626514f636849ee1cd1f63e upstream.
+
+The use of p*d_addr_end with stage-2 translation is slightly dodgy,
+as the IPA is 40bits, while all the p*d_addr_end helpers are
+taking an unsigned long (arm64 is fine with that as unsigned long
+is 64bit).
+
+The fix is to introduce 64bit clean versions of the same helpers,
+and use them in the stage-2 page table code.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_mmu.h   |   13 +++++++++++++
+ arch/arm/kvm/mmu.c               |   10 +++++-----
+ arch/arm64/include/asm/kvm_mmu.h |    4 ++++
+ 3 files changed, 22 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -114,6 +114,19 @@ static inline void kvm_set_s2pmd_writabl
+       pmd_val(*pmd) |= L_PMD_S2_RDWR;
+ }
++/* Open coded p*d_addr_end that can deal with 64bit addresses */
++#define kvm_pgd_addr_end(addr, end)                                   \
++({    u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;            \
++      (__boundary - 1 < (end) - 1)? __boundary: (end);                \
++})
++
++#define kvm_pud_addr_end(addr,end)            (end)
++
++#define kvm_pmd_addr_end(addr, end)                                   \
++({    u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;                \
++      (__boundary - 1 < (end) - 1)? __boundary: (end);                \
++})
++
+ struct kvm;
+ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -147,7 +147,7 @@ static void unmap_range(struct kvm *kvm,
+               pgd = pgdp + pgd_index(addr);
+               pud = pud_offset(pgd, addr);
+               if (pud_none(*pud)) {
+-                      addr = pud_addr_end(addr, end);
++                      addr = kvm_pud_addr_end(addr, end);
+                       continue;
+               }
+@@ -157,13 +157,13 @@ static void unmap_range(struct kvm *kvm,
+                        * move on.
+                        */
+                       clear_pud_entry(kvm, pud, addr);
+-                      addr = pud_addr_end(addr, end);
++                      addr = kvm_pud_addr_end(addr, end);
+                       continue;
+               }
+               pmd = pmd_offset(pud, addr);
+               if (pmd_none(*pmd)) {
+-                      addr = pmd_addr_end(addr, end);
++                      addr = kvm_pmd_addr_end(addr, end);
+                       continue;
+               }
+@@ -178,10 +178,10 @@ static void unmap_range(struct kvm *kvm,
+                */
+               if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
+                       clear_pmd_entry(kvm, pmd, addr);
+-                      next = pmd_addr_end(addr, end);
++                      next = kvm_pmd_addr_end(addr, end);
+                       if (page_empty(pmd) && !page_empty(pud)) {
+                               clear_pud_entry(kvm, pud, addr);
+-                              next = pud_addr_end(addr, end);
++                              next = kvm_pud_addr_end(addr, end);
+                       }
+               }
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -121,6 +121,10 @@ static inline void kvm_set_s2pmd_writabl
+       pmd_val(*pmd) |= PMD_S2_RDWR;
+ }
++#define kvm_pgd_addr_end(addr, end)   pgd_addr_end(addr, end)
++#define kvm_pud_addr_end(addr, end)   pud_addr_end(addr, end)
++#define kvm_pmd_addr_end(addr, end)   pmd_addr_end(addr, end)
++
+ struct kvm;
+ #define kvm_flush_dcache_to_poc(a,l)  __flush_dcache_area((a), (l))
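
A function form of the new macros, for illustration only; level_size must be a power of two (as PGDIR_SIZE and PMD_SIZE are). The "- 1" in the comparison keeps the clamp correct even when the computed boundary wraps to 0 at the very top of the 64bit space.

    #include <stdint.h>

    static uint64_t kvm_addr_end_model(uint64_t addr, uint64_t end,
                                       uint64_t level_size)
    {
            /* (addr + size) & MASK, with MASK == ~(size - 1) */
            uint64_t boundary = (addr + level_size) & ~(level_size - 1);
            return (boundary - 1 < end - 1) ? boundary : end;
    }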
diff --git a/queue-3.14/arm-kvm-introduce-per-vcpu-hyp-configuration-register.patch b/queue-3.14/arm-kvm-introduce-per-vcpu-hyp-configuration-register.patch
new file mode 100644 (file)
index 0000000..ea9a12f
--- /dev/null
@@ -0,0 +1,110 @@
+From ac30a11e8e92a03dbe236b285c5cbae0bf563141 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Wed, 22 Jan 2014 09:43:38 +0000
+Subject: ARM: KVM: introduce per-vcpu HYP Configuration Register
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit ac30a11e8e92a03dbe236b285c5cbae0bf563141 upstream.
+
+So far, KVM/ARM used a fixed HCR configuration per guest, except for
+the VI/VF/VA bits to control the interrupt in absence of VGIC.
+
+With the upcoming need to dynamically reconfigure trapping, it becomes
+necessary to allow the HCR to be changed on a per-vcpu basis.
+
+The fix here is to mimic what KVM/arm64 already does: a per vcpu HCR
+field, initialized at setup time.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_arm.h  |    1 -
+ arch/arm/include/asm/kvm_host.h |    9 ++++++---
+ arch/arm/kernel/asm-offsets.c   |    1 +
+ arch/arm/kvm/guest.c            |    1 +
+ arch/arm/kvm/interrupts_head.S  |    9 +++------
+ 5 files changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_arm.h
++++ b/arch/arm/include/asm/kvm_arm.h
+@@ -69,7 +69,6 @@
+ #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+                       HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+                       HCR_TWE | HCR_SWIO | HCR_TIDCP)
+-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+ /* System Control Register (SCTLR) bits */
+ #define SCTLR_TE      (1 << 30)
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -101,6 +101,12 @@ struct kvm_vcpu_arch {
+       /* The CPU type we expose to the VM */
+       u32 midr;
++      /* HYP trapping configuration */
++      u32 hcr;
++
++      /* Interrupt related fields */
++      u32 irq_lines;          /* IRQ and FIQ levels */
++
+       /* Exception Information */
+       struct kvm_vcpu_fault_info fault;
+@@ -128,9 +134,6 @@ struct kvm_vcpu_arch {
+       /* IO related fields */
+       struct kvm_decode mmio_decode;
+-      /* Interrupt related fields */
+-      u32 irq_lines;          /* IRQ and FIQ levels */
+-
+       /* Cache some mmu pages needed inside spinlock regions */
+       struct kvm_mmu_memory_cache mmu_page_cache;
+--- a/arch/arm/kernel/asm-offsets.c
++++ b/arch/arm/kernel/asm-offsets.c
+@@ -174,6 +174,7 @@ int main(void)
+   DEFINE(VCPU_FIQ_REGS,               offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
+   DEFINE(VCPU_PC,             offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
+   DEFINE(VCPU_CPSR,           offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
++  DEFINE(VCPU_HCR,            offsetof(struct kvm_vcpu, arch.hcr));
+   DEFINE(VCPU_IRQ_LINES,      offsetof(struct kvm_vcpu, arch.irq_lines));
+   DEFINE(VCPU_HSR,            offsetof(struct kvm_vcpu, arch.fault.hsr));
+   DEFINE(VCPU_HxFAR,          offsetof(struct kvm_vcpu, arch.fault.hxfar));
+--- a/arch/arm/kvm/guest.c
++++ b/arch/arm/kvm/guest.c
+@@ -38,6 +38,7 @@ struct kvm_stats_debugfs_item debugfs_en
+ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+ {
++      vcpu->arch.hcr = HCR_GUEST_MASK;
+       return 0;
+ }
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -597,17 +597,14 @@ vcpu     .req    r0              @ vcpu pointer always in r
+ /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
+ .macro configure_hyp_role operation
+-      mrc     p15, 4, r2, c1, c1, 0   @ HCR
+-      bic     r2, r2, #HCR_VIRT_EXCP_MASK
+-      ldr     r3, =HCR_GUEST_MASK
+       .if \operation == vmentry
+-      orr     r2, r2, r3
++      ldr     r2, [vcpu, #VCPU_HCR]
+       ldr     r3, [vcpu, #VCPU_IRQ_LINES]
+       orr     r2, r2, r3
+       .else
+-      bic     r2, r2, r3
++      mov     r2, #0
+       .endif
+-      mcr     p15, 4, r2, c1, c1, 0
++      mcr     p15, 4, r2, c1, c1, 0   @ HCR
+ .endm
+ .macro load_vcpu
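
In C terms, the vmentry half of configure_hyp_role now collapses to a load-and-or, and the vmexit half to writing 0; a hedged sketch using the field names from asm-offsets above:

    #include <stdint.h>

    struct vcpu_hcr_model { uint32_t hcr; uint32_t irq_lines; };

    /* kvm_arch_vcpu_setup(): per-vcpu HCR starts as the fixed guest mask. */
    static void vcpu_setup(struct vcpu_hcr_model *v, uint32_t hcr_guest_mask)
    {
            v->hcr = hcr_guest_mask;
    }

    /* vmentry: HCR = per-vcpu trapping config | pending VI/VF/VA lines. */
    static uint32_t hcr_on_entry(const struct vcpu_hcr_model *v)
    {
            return v->hcr | v->irq_lines;
    }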
diff --git a/queue-3.14/arm-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch b/queue-3.14/arm-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch
new file mode 100644 (file)
index 0000000..3f5d9af
--- /dev/null
@@ -0,0 +1,208 @@
+From 8034699a42d68043b495c7e0cfafccd920707ec8 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 14 Jan 2014 18:00:55 +0000
+Subject: ARM: KVM: trap VM system registers until MMU and caches are ON
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 8034699a42d68043b495c7e0cfafccd920707ec8 upstream.
+
+In order to be able to detect the point where the guest enables
+its MMU and caches, trap all the VM related system registers.
+
+Once we see the guest enabling both the MMU and the caches, we
+can go back to a saner mode of operation, which is to leave these
+registers in complete control of the guest.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_arm.h |    3 +
+ arch/arm/kvm/coproc.c          |   74 ++++++++++++++++++++++++++++++++---------
+ arch/arm/kvm/coproc.h          |    4 ++
+ arch/arm/kvm/coproc_a15.c      |    2 -
+ arch/arm/kvm/coproc_a7.c       |    2 -
+ 5 files changed, 66 insertions(+), 19 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_arm.h
++++ b/arch/arm/include/asm/kvm_arm.h
+@@ -55,6 +55,7 @@
+  * The bits we set in HCR:
+  * TAC:               Trap ACTLR
+  * TSC:               Trap SMC
++ * TVM:               Trap VM ops (until MMU and caches are on)
+  * TSW:               Trap cache operations by set/way
+  * TWI:               Trap WFI
+  * TWE:               Trap WFE
+@@ -68,7 +69,7 @@
+  */
+ #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+                       HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+-                      HCR_TWE | HCR_SWIO | HCR_TIDCP)
++                      HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
+ /* System Control Register (SCTLR) bits */
+ #define SCTLR_TE      (1 << 30)
+--- a/arch/arm/kvm/coproc.c
++++ b/arch/arm/kvm/coproc.c
+@@ -23,6 +23,7 @@
+ #include <asm/kvm_host.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
++#include <asm/kvm_mmu.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <trace/events/kvm.h>
+@@ -205,6 +206,44 @@ done:
+ }
+ /*
++ * Generic accessor for VM registers. Only called as long as HCR_TVM
++ * is set.
++ */
++static bool access_vm_reg(struct kvm_vcpu *vcpu,
++                        const struct coproc_params *p,
++                        const struct coproc_reg *r)
++{
++      BUG_ON(!p->is_write);
++
++      vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
++      if (p->is_64bit)
++              vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
++
++      return true;
++}
++
++/*
++ * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
++ * guest enables the MMU, we stop trapping the VM sys_regs and leave
++ * it in complete control of the caches.
++ *
++ * Used by the cpu-specific code.
++ */
++bool access_sctlr(struct kvm_vcpu *vcpu,
++                const struct coproc_params *p,
++                const struct coproc_reg *r)
++{
++      access_vm_reg(vcpu, p, r);
++
++      if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
++              vcpu->arch.hcr &= ~HCR_TVM;
++              stage2_flush_vm(vcpu->kvm);
++      }
++
++      return true;
++}
++
++/*
+  * We could trap ID_DFR0 and tell the guest we don't support performance
+  * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
+  * NAKed, so it will read the PMCR anyway.
+@@ -261,33 +300,36 @@ static const struct coproc_reg cp15_regs
+       { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
+                       NULL, reset_val, c1_CPACR, 0x00000000 },
+-      /* TTBR0/TTBR1: swapped by interrupt.S. */
+-      { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+-      { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+-
+-      /* TTBCR: swapped by interrupt.S. */
++      /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
++      { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
++      { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
++                      access_vm_reg, reset_unknown, c2_TTBR0 },
++      { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
++                      access_vm_reg, reset_unknown, c2_TTBR1 },
+       { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
+-                      NULL, reset_val, c2_TTBCR, 0x00000000 },
++                      access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
++      { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },
++
+       /* DACR: swapped by interrupt.S. */
+       { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
+-                      NULL, reset_unknown, c3_DACR },
++                      access_vm_reg, reset_unknown, c3_DACR },
+       /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
+       { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
+-                      NULL, reset_unknown, c5_DFSR },
++                      access_vm_reg, reset_unknown, c5_DFSR },
+       { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
+-                      NULL, reset_unknown, c5_IFSR },
++                      access_vm_reg, reset_unknown, c5_IFSR },
+       { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
+-                      NULL, reset_unknown, c5_ADFSR },
++                      access_vm_reg, reset_unknown, c5_ADFSR },
+       { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
+-                      NULL, reset_unknown, c5_AIFSR },
++                      access_vm_reg, reset_unknown, c5_AIFSR },
+       /* DFAR/IFAR: swapped by interrupt.S. */
+       { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
+-                      NULL, reset_unknown, c6_DFAR },
++                      access_vm_reg, reset_unknown, c6_DFAR },
+       { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
+-                      NULL, reset_unknown, c6_IFAR },
++                      access_vm_reg, reset_unknown, c6_IFAR },
+       /* PAR swapped by interrupt.S */
+       { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+@@ -324,9 +366,9 @@ static const struct coproc_reg cp15_regs
+       /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
+       { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
+-                      NULL, reset_unknown, c10_PRRR},
++                      access_vm_reg, reset_unknown, c10_PRRR},
+       { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
+-                      NULL, reset_unknown, c10_NMRR},
++                      access_vm_reg, reset_unknown, c10_NMRR},
+       /* AMAIR0/AMAIR1: swapped by interrupt.S. */
+       { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
+@@ -340,7 +382,7 @@ static const struct coproc_reg cp15_regs
+       /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
+       { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
+-                      NULL, reset_val, c13_CID, 0x00000000 },
++                      access_vm_reg, reset_val, c13_CID, 0x00000000 },
+       { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
+                       NULL, reset_unknown, c13_TID_URW },
+       { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
+--- a/arch/arm/kvm/coproc.h
++++ b/arch/arm/kvm/coproc.h
+@@ -153,4 +153,8 @@ static inline int cmp_reg(const struct c
+ #define is64          .is_64 = true
+ #define is32          .is_64 = false
++bool access_sctlr(struct kvm_vcpu *vcpu,
++                const struct coproc_params *p,
++                const struct coproc_reg *r);
++
+ #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
+--- a/arch/arm/kvm/coproc_a15.c
++++ b/arch/arm/kvm/coproc_a15.c
+@@ -34,7 +34,7 @@
+ static const struct coproc_reg a15_regs[] = {
+       /* SCTLR: swapped by interrupt.S. */
+       { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+-                      NULL, reset_val, c1_SCTLR, 0x00C50078 },
++                      access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+ };
+ static struct kvm_coproc_target_table a15_target_table = {
+--- a/arch/arm/kvm/coproc_a7.c
++++ b/arch/arm/kvm/coproc_a7.c
+@@ -37,7 +37,7 @@
+ static const struct coproc_reg a7_regs[] = {
+       /* SCTLR: swapped by interrupt.S. */
+       { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+-                      NULL, reset_val, c1_SCTLR, 0x00C50878 },
++                      access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+ };
+ static struct kvm_coproc_target_table a7_target_table = {
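
Putting this patch's pieces together, the trap lifecycle reads as follows in sketch form. cache_enabled() is the M+C SCTLR test from the earlier force-cache-clean patch; the TVM bit position is quoted from the ARM ARM and should be treated as an assumption here, as should every _model name.

    #include <stdbool.h>
    #include <stdint.h>

    #define HCR_TVM_MODEL (1u << 26)  /* assumed TVM bit position */

    struct trap_model { uint32_t hcr; uint32_t sctlr; };

    bool cache_enabled(uint32_t sctlr);               /* M+C test, earlier patch */
    void stage2_flush_vm_model(struct trap_model *v); /* stand-in for stage2_flush_vm() */

    /* What access_sctlr() above does: emulate the write, then drop the
     * VM-register trap once the guest runs with MMU and caches on. */
    static void on_sctlr_write(struct trap_model *v, uint32_t val)
    {
            v->sctlr = val;
            if (cache_enabled(v->sctlr)) {
                    v->hcr &= ~HCR_TVM_MODEL;
                    stage2_flush_vm_model(v);
            }
    }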
diff --git a/queue-3.14/arm64-kvm-allows-discrimination-of-aarch32-sysreg-access.patch b/queue-3.14/arm64-kvm-allows-discrimination-of-aarch32-sysreg-access.patch
new file mode 100644 (file)
index 0000000..8b7210d
--- /dev/null
@@ -0,0 +1,72 @@
+From 2072d29c46b73e39b3c6c56c6027af77086f45fd Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 21 Jan 2014 10:55:17 +0000
+Subject: arm64: KVM: allows discrimination of AArch32 sysreg access
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 2072d29c46b73e39b3c6c56c6027af77086f45fd upstream.
+
+The current handling of AArch32 trapping is slightly less than
+perfect, as it is not possible (from a handler point of view)
+to distinguish it from an AArch64 access, nor to tell a 32bit
+from a 64bit access either.
+
+Fix this by introducing two additional flags:
+- is_aarch32: true if the access was made in AArch32 mode
+- is_32bit: true if is_aarch32 == true and a MCR/MRC instruction
+  was used to perform the access (as opposed to MCRR/MRRC).
+
+This allows a handler to cover all the possible conditions in which
+a system register gets trapped.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/sys_regs.c |    6 ++++++
+ arch/arm64/kvm/sys_regs.h |    2 ++
+ 2 files changed, 8 insertions(+)
+
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -437,6 +437,8 @@ int kvm_handle_cp15_64(struct kvm_vcpu *
+       u32 hsr = kvm_vcpu_get_hsr(vcpu);
+       int Rt2 = (hsr >> 10) & 0xf;
++      params.is_aarch32 = true;
++      params.is_32bit = false;
+       params.CRm = (hsr >> 1) & 0xf;
+       params.Rt = (hsr >> 5) & 0xf;
+       params.is_write = ((hsr & 1) == 0);
+@@ -480,6 +482,8 @@ int kvm_handle_cp15_32(struct kvm_vcpu *
+       struct sys_reg_params params;
+       u32 hsr = kvm_vcpu_get_hsr(vcpu);
++      params.is_aarch32 = true;
++      params.is_32bit = true;
+       params.CRm = (hsr >> 1) & 0xf;
+       params.Rt  = (hsr >> 5) & 0xf;
+       params.is_write = ((hsr & 1) == 0);
+@@ -549,6 +553,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *
+       struct sys_reg_params params;
+       unsigned long esr = kvm_vcpu_get_hsr(vcpu);
++      params.is_aarch32 = false;
++      params.is_32bit = false;
+       params.Op0 = (esr >> 20) & 3;
+       params.Op1 = (esr >> 14) & 0x7;
+       params.CRn = (esr >> 10) & 0xf;
+--- a/arch/arm64/kvm/sys_regs.h
++++ b/arch/arm64/kvm/sys_regs.h
+@@ -30,6 +30,8 @@ struct sys_reg_params {
+       u8      Op2;
+       u8      Rt;
+       bool    is_write;
++      bool    is_aarch32;
++      bool    is_32bit;       /* Only valid if is_aarch32 is true */
+ };
+ struct sys_reg_desc {
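
The flag combinations set by the three handlers above, collected in one place (an orientation sketch, not kernel code):

    #include <stdbool.h>

    enum access_kind { SYSREG_A64, CP15_MCR, CP15_MCRR };

    /* kvm_handle_sys_reg: is_aarch32=false, is_32bit=false (MSR/MRS)
     * kvm_handle_cp15_32: is_aarch32=true,  is_32bit=true  (MCR/MRC)
     * kvm_handle_cp15_64: is_aarch32=true,  is_32bit=false (MCRR/MRRC) */
    static enum access_kind classify(bool is_aarch32, bool is_32bit)
    {
            if (!is_aarch32)
                    return SYSREG_A64;
            return is_32bit ? CP15_MCR : CP15_MCRR;
    }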
diff --git a/queue-3.14/arm64-kvm-flush-vm-pages-before-letting-the-guest-enable-caches.patch b/queue-3.14/arm64-kvm-flush-vm-pages-before-letting-the-guest-enable-caches.patch
new file mode 100644 (file)
index 0000000..ed114f3
--- /dev/null
@@ -0,0 +1,179 @@
+From 9d218a1fcf4c6b759d442ef702842fae92e1ea61 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Wed, 15 Jan 2014 12:50:23 +0000
+Subject: arm64: KVM: flush VM pages before letting the guest enable caches
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 9d218a1fcf4c6b759d442ef702842fae92e1ea61 upstream.
+
+When the guest runs with caches disabled (like in an early boot
+sequence, for example), all the writes are directly going to RAM,
+bypassing the caches altogether.
+
+Once the MMU and caches are enabled, whatever sits in the cache
+becomes suddenly visible, which isn't what the guest expects.
+
+A way to avoid this potential disaster is to invalidate the cache
+when the MMU is being turned on. For this, we hook into the SCTLR_EL1
+trapping code, and scan the stage-2 page tables, invalidating the
+pages/sections that have already been mapped in.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_mmu.h   |    2 
+ arch/arm/kvm/mmu.c               |   93 +++++++++++++++++++++++++++++++++++++++
+ arch/arm64/include/asm/kvm_mmu.h |    2 
+ arch/arm64/kvm/sys_regs.c        |    5 +-
+ 4 files changed, 101 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -155,6 +155,8 @@ static inline void coherent_cache_guest_
+ #define kvm_flush_dcache_to_poc(a,l)  __cpuc_flush_dcache_area((a), (l))
+ #define kvm_virt_to_phys(x)           virt_to_idmap((unsigned long)(x))
++void stage2_flush_vm(struct kvm *kvm);
++
+ #endif        /* !__ASSEMBLY__ */
+ #endif /* __ARM_KVM_MMU_H__ */
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -189,6 +189,99 @@ static void unmap_range(struct kvm *kvm,
+       }
+ }
++static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
++                            phys_addr_t addr, phys_addr_t end)
++{
++      pte_t *pte;
++
++      pte = pte_offset_kernel(pmd, addr);
++      do {
++              if (!pte_none(*pte)) {
++                      hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
++                      kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
++              }
++      } while (pte++, addr += PAGE_SIZE, addr != end);
++}
++
++static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
++                            phys_addr_t addr, phys_addr_t end)
++{
++      pmd_t *pmd;
++      phys_addr_t next;
++
++      pmd = pmd_offset(pud, addr);
++      do {
++              next = kvm_pmd_addr_end(addr, end);
++              if (!pmd_none(*pmd)) {
++                      if (kvm_pmd_huge(*pmd)) {
++                              hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
++                              kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
++                      } else {
++                              stage2_flush_ptes(kvm, pmd, addr, next);
++                      }
++              }
++      } while (pmd++, addr = next, addr != end);
++}
++
++static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
++                            phys_addr_t addr, phys_addr_t end)
++{
++      pud_t *pud;
++      phys_addr_t next;
++
++      pud = pud_offset(pgd, addr);
++      do {
++              next = kvm_pud_addr_end(addr, end);
++              if (!pud_none(*pud)) {
++                      if (pud_huge(*pud)) {
++                              hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
++                              kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
++                      } else {
++                              stage2_flush_pmds(kvm, pud, addr, next);
++                      }
++              }
++      } while (pud++, addr = next, addr != end);
++}
++
++static void stage2_flush_memslot(struct kvm *kvm,
++                               struct kvm_memory_slot *memslot)
++{
++      phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
++      phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
++      phys_addr_t next;
++      pgd_t *pgd;
++
++      pgd = kvm->arch.pgd + pgd_index(addr);
++      do {
++              next = kvm_pgd_addr_end(addr, end);
++              stage2_flush_puds(kvm, pgd, addr, next);
++      } while (pgd++, addr = next, addr != end);
++}
++
++/**
++ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
++ * @kvm: The struct kvm pointer
++ *
++ * Go through the stage 2 page tables and invalidate any cache lines
++ * backing memory already mapped to the VM.
++ */
++void stage2_flush_vm(struct kvm *kvm)
++{
++      struct kvm_memslots *slots;
++      struct kvm_memory_slot *memslot;
++      int idx;
++
++      idx = srcu_read_lock(&kvm->srcu);
++      spin_lock(&kvm->mmu_lock);
++
++      slots = kvm_memslots(kvm);
++      kvm_for_each_memslot(memslot, slots)
++              stage2_flush_memslot(kvm, memslot);
++
++      spin_unlock(&kvm->mmu_lock);
++      srcu_read_unlock(&kvm->srcu, idx);
++}
++
+ /**
+  * free_boot_hyp_pgd - free HYP boot page tables
+  *
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -150,5 +150,7 @@ static inline void coherent_cache_guest_
+ #define kvm_virt_to_phys(x)           __virt_to_phys((unsigned long)(x))
++void stage2_flush_vm(struct kvm *kvm);
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ARM64_KVM_MMU_H__ */
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -27,6 +27,7 @@
+ #include <asm/kvm_host.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
++#include <asm/kvm_mmu.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <trace/events/kvm.h>
+@@ -154,8 +155,10 @@ static bool access_sctlr(struct kvm_vcpu
+ {
+       access_vm_reg(vcpu, p, r);
+-      if (vcpu_has_cache_enabled(vcpu))       /* MMU+Caches enabled? */
++      if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
+               vcpu->arch.hcr_el2 &= ~HCR_TVM;
++              stage2_flush_vm(vcpu->kvm);
++      }
+       return true;
+ }
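
The same walk idiom repeats at every level of stage2_flush_vm(); distilled into one generic helper (a sketch, with the level-specific "flush huge entry or recurse" decision folded into a visit callback):

    #include <stdint.h>

    typedef uint64_t pa_t;

    static void walk_level(pa_t addr, pa_t end,
                           pa_t (*level_end)(pa_t addr, pa_t end),
                           void (*visit)(pa_t addr, pa_t next))
    {
            pa_t next;
            do {
                    next = level_end(addr, end);  /* kvm_p*d_addr_end() */
                    visit(addr, next);            /* flush or descend */
                    addr = next;
            } while (addr != end);
    }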
diff --git a/queue-3.14/arm64-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch b/queue-3.14/arm64-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch
new file mode 100644 (file)
index 0000000..83f3b0a
--- /dev/null
@@ -0,0 +1,105 @@
+From 2d58b733c87689d3d5144e4ac94ea861cc729145 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 14 Jan 2014 19:13:10 +0000
+Subject: arm64: KVM: force cache clean on page fault when caches are off
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 2d58b733c87689d3d5144e4ac94ea861cc729145 upstream.
+
+In order for the guest with caches off to observe data written
+to a given page, we need to make sure that page is
+committed to memory, and not just hanging in the cache (as
+guest accesses are completely bypassing the cache until it
+decides to enable it).
+
+For this purpose, hook into the coherent_icache_guest_page
+function and flush the region if the guest SCTLR_EL1
+register doesn't show the MMU and caches as being enabled.
+The function also gets renamed to coherent_cache_guest_page.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_mmu.h   |    4 ++--
+ arch/arm/kvm/mmu.c               |    4 ++--
+ arch/arm64/include/asm/kvm_mmu.h |   16 ++++++++++++----
+ 3 files changed, 16 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -116,8 +116,8 @@ static inline void kvm_set_s2pmd_writabl
+ struct kvm;
+-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+-                                            unsigned long size)
++static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
++                                           unsigned long size)
+ {
+       /*
+        * If we are going to insert an instruction page and the icache is
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -717,7 +717,7 @@ static int user_mem_abort(struct kvm_vcp
+                       kvm_set_s2pmd_writable(&new_pmd);
+                       kvm_set_pfn_dirty(pfn);
+               }
+-              coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
++              coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
+               ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+       } else {
+               pte_t new_pte = pfn_pte(pfn, PAGE_S2);
+@@ -725,7 +725,7 @@ static int user_mem_abort(struct kvm_vcp
+                       kvm_set_s2pte_writable(&new_pte);
+                       kvm_set_pfn_dirty(pfn);
+               }
+-              coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
++              coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+               ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
+       }
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -106,7 +106,6 @@ static inline bool kvm_is_write_fault(un
+       return true;
+ }
+-static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
+ static inline void kvm_clean_pgd(pgd_t *pgd) {}
+ static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
+ static inline void kvm_clean_pte(pte_t *pte) {}
+@@ -124,9 +123,19 @@ static inline void kvm_set_s2pmd_writabl
+ struct kvm;
+-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+-                                            unsigned long size)
++#define kvm_flush_dcache_to_poc(a,l)  __flush_dcache_area((a), (l))
++
++static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+ {
++      return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
++}
++
++static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
++                                           unsigned long size)
++{
++      if (!vcpu_has_cache_enabled(vcpu))
++              kvm_flush_dcache_to_poc((void *)hva, size);
++
+       if (!icache_is_aliasing()) {            /* PIPT */
+               flush_icache_range(hva, hva + size);
+       } else if (!icache_is_aivivt()) {       /* non ASID-tagged VIVT */
+@@ -135,7 +144,6 @@ static inline void coherent_icache_guest
+       }
+ }
+-#define kvm_flush_dcache_to_poc(a,l)  __flush_dcache_area((a), (l))
+ #define kvm_virt_to_phys(x)           __virt_to_phys((unsigned long)(x))
+ #endif /* __ASSEMBLY__ */
diff --git a/queue-3.14/arm64-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch b/queue-3.14/arm64-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch
new file mode 100644 (file)
index 0000000..822dab2
--- /dev/null
@@ -0,0 +1,219 @@
+From 4d44923b17bff283c002ed961373848284aaff1b Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 14 Jan 2014 18:00:55 +0000
+Subject: arm64: KVM: trap VM system registers until MMU and caches are ON
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 4d44923b17bff283c002ed961373848284aaff1b upstream.
+
+In order to be able to detect the point where the guest enables
+its MMU and caches, trap all the VM related system registers.
+
+Once we see the guest enabling both the MMU and the caches, we
+can go back to a saner mode of operation, which is to leave these
+registers in complete control of the guest.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/kvm_arm.h |    3 -
+ arch/arm64/include/asm/kvm_asm.h |    3 -
+ arch/arm64/kvm/sys_regs.c        |   90 +++++++++++++++++++++++++++++++++------
+ 3 files changed, 82 insertions(+), 14 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -62,6 +62,7 @@
+  * RW:                64bit by default, can be overriden for 32bit VMs
+  * TAC:               Trap ACTLR
+  * TSC:               Trap SMC
++ * TVM:               Trap VM ops (until M+C set in SCTLR_EL1)
+  * TSW:               Trap cache operations by set/way
+  * TWE:               Trap WFE
+  * TWI:               Trap WFI
+@@ -74,7 +75,7 @@
+  * SWIO:      Turn set/way invalidates into set/way clean+invalidate
+  */
+ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
+-                       HCR_BSU_IS | HCR_FB | HCR_TAC | \
++                       HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
+                        HCR_AMO | HCR_IMO | HCR_FMO | \
+                        HCR_SWIO | HCR_TIDCP | HCR_RW)
+ #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -79,7 +79,8 @@
+ #define c13_TID_URW   (TPIDR_EL0 * 2) /* Thread ID, User R/W */
+ #define c13_TID_URO   (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+ #define c13_TID_PRIV  (TPIDR_EL1 * 2) /* Thread ID, Privileged */
+-#define c10_AMAIR     (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
++#define c10_AMAIR0    (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
++#define c10_AMAIR1    (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+ #define c14_CNTKCTL   (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+ #define NR_CP15_REGS  (NR_SYS_REGS * 2)
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -121,6 +121,46 @@ done:
+ }
+ /*
++ * Generic accessor for VM registers. Only called as long as HCR_TVM
++ * is set.
++ */
++static bool access_vm_reg(struct kvm_vcpu *vcpu,
++                        const struct sys_reg_params *p,
++                        const struct sys_reg_desc *r)
++{
++      unsigned long val;
++
++      BUG_ON(!p->is_write);
++
++      val = *vcpu_reg(vcpu, p->Rt);
++      if (!p->is_aarch32) {
++              vcpu_sys_reg(vcpu, r->reg) = val;
++      } else {
++              vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
++              if (!p->is_32bit)
++                      vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
++      }
++      return true;
++}
++
++/*
++ * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
++ * guest enables the MMU, we stop trapping the VM sys_regs and leave
++ * it in complete control of the caches.
++ */
++static bool access_sctlr(struct kvm_vcpu *vcpu,
++                       const struct sys_reg_params *p,
++                       const struct sys_reg_desc *r)
++{
++      access_vm_reg(vcpu, p, r);
++
++      if (vcpu_has_cache_enabled(vcpu))       /* MMU+Caches enabled? */
++              vcpu->arch.hcr_el2 &= ~HCR_TVM;
++
++      return true;
++}
++
++/*
+  * We could trap ID_DFR0 and tell the guest we don't support performance
+  * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
+  * NAKed, so it will read the PMCR anyway.
+@@ -185,32 +225,32 @@ static const struct sys_reg_desc sys_reg
+         NULL, reset_mpidr, MPIDR_EL1 },
+       /* SCTLR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
+-        NULL, reset_val, SCTLR_EL1, 0x00C50078 },
++        access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+       /* CPACR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
+         NULL, reset_val, CPACR_EL1, 0 },
+       /* TTBR0_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
+-        NULL, reset_unknown, TTBR0_EL1 },
++        access_vm_reg, reset_unknown, TTBR0_EL1 },
+       /* TTBR1_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
+-        NULL, reset_unknown, TTBR1_EL1 },
++        access_vm_reg, reset_unknown, TTBR1_EL1 },
+       /* TCR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
+-        NULL, reset_val, TCR_EL1, 0 },
++        access_vm_reg, reset_val, TCR_EL1, 0 },
+       /* AFSR0_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
+-        NULL, reset_unknown, AFSR0_EL1 },
++        access_vm_reg, reset_unknown, AFSR0_EL1 },
+       /* AFSR1_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
+-        NULL, reset_unknown, AFSR1_EL1 },
++        access_vm_reg, reset_unknown, AFSR1_EL1 },
+       /* ESR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
+-        NULL, reset_unknown, ESR_EL1 },
++        access_vm_reg, reset_unknown, ESR_EL1 },
+       /* FAR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
+-        NULL, reset_unknown, FAR_EL1 },
++        access_vm_reg, reset_unknown, FAR_EL1 },
+       /* PAR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+         NULL, reset_unknown, PAR_EL1 },
+@@ -224,17 +264,17 @@ static const struct sys_reg_desc sys_reg
+       /* MAIR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
+-        NULL, reset_unknown, MAIR_EL1 },
++        access_vm_reg, reset_unknown, MAIR_EL1 },
+       /* AMAIR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
+-        NULL, reset_amair_el1, AMAIR_EL1 },
++        access_vm_reg, reset_amair_el1, AMAIR_EL1 },
+       /* VBAR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
+         NULL, reset_val, VBAR_EL1, 0 },
+       /* CONTEXTIDR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
+-        NULL, reset_val, CONTEXTIDR_EL1, 0 },
++        access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
+       /* TPIDR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
+         NULL, reset_unknown, TPIDR_EL1 },
+@@ -305,14 +345,32 @@ static const struct sys_reg_desc sys_reg
+         NULL, reset_val, FPEXC32_EL2, 0x70 },
+ };
+-/* Trapped cp15 registers */
++/*
++ * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
++ * depending on the way they are accessed (as a 32bit or a 64bit
++ * register).
++ */
+ static const struct sys_reg_desc cp15_regs[] = {
++      { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
++      { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
++      { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
++      { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
++      { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
++      { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
++      { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
++      { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
++      { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
++      { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
++      { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
++      { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
++
+       /*
+        * DC{C,I,CI}SW operations:
+        */
+       { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
+       { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
+       { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
++
+       { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
+@@ -326,6 +384,14 @@ static const struct sys_reg_desc cp15_re
+       { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
+       { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
+       { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
++
++      { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
++      { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
++      { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
++      { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
++      { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
++
++      { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
+ };
+ /* Target specific emulation tables */
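
The "double encoding" of TTBR0/TTBR1 pairs with the AArch32 store path of access_vm_reg() above. As a standalone sketch: a 64bit MCRR write fills two consecutive 32bit cp15 slots, a 32bit MCR write only the first.

    #include <stdbool.h>
    #include <stdint.h>

    static void store_cp15(uint32_t *cp15, int reg, uint64_t val, bool is_32bit)
    {
            cp15[reg] = (uint32_t)(val & 0xffffffffUL);
            if (!is_32bit)
                    cp15[reg + 1] = (uint32_t)(val >> 32);
    }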
diff --git a/queue-3.14/arm64-kvm-use-inner-shareable-barriers-for-inner-shareable-maintenance.patch b/queue-3.14/arm64-kvm-use-inner-shareable-barriers-for-inner-shareable-maintenance.patch
new file mode 100644 (file)
index 0000000..211c776
--- /dev/null
@@ -0,0 +1,55 @@
+From ee9e101c11478680d579bd20bb38a4d3e2514fe3 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 2 May 2014 16:24:14 +0100
+Subject: arm64: kvm: use inner-shareable barriers for inner-shareable maintenance
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit ee9e101c11478680d579bd20bb38a4d3e2514fe3 upstream.
+
+In order to ensure completion of inner-shareable maintenance instructions
+(cache and TLB) on AArch64, we can use the -ish suffix to the dsb
+instruction.
+
+This patch relaxes our dsb sy instructions to dsb ish where possible.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/hyp.S |   12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -630,9 +630,15 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
+        * whole of Stage-1. Weep...
+        */
+       tlbi    ipas2e1is, x1
+-      dsb     sy
++      /*
++       * We have to ensure completion of the invalidation at Stage-2,
++       * since a table walk on another CPU could refill a TLB with a
++       * complete (S1 + S2) walk based on the old Stage-2 mapping if
++       * the Stage-1 invalidation happened first.
++       */
++      dsb     ish
+       tlbi    vmalle1is
+-      dsb     sy
++      dsb     ish
+       isb
+       msr     vttbr_el2, xzr
+@@ -643,7 +649,7 @@ ENTRY(__kvm_flush_vm_context)
+       dsb     ishst
+       tlbi    alle1is
+       ic      ialluis
+-      dsb     sy
++      dsb     ish
+       ret
+ ENDPROC(__kvm_flush_vm_context)
diff --git a/queue-3.14/deal-with-deadlock-in-d_walk.patch b/queue-3.14/deal-with-deadlock-in-d_walk.patch
new file mode 100644 (file)
index 0000000..6f5f7cf
--- /dev/null
@@ -0,0 +1,91 @@
+From ca5358ef75fc69fee5322a38a340f5739d997c10 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sun, 26 Oct 2014 19:31:10 -0400
+Subject: deal with deadlock in d_walk()
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit ca5358ef75fc69fee5322a38a340f5739d997c10 upstream.
+
+... by not hitting rename_retry for reasons other than rename having
+happened.  In other words, do _not_ restart when finding that
+between unlocking the child and locking the parent the former got
+into __dentry_kill().  Skip the killed siblings instead...
+
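+To illustrate the approach (a simplified standalone sketch, not the
+dcache code; the node layout here is hypothetical), the ascend path
+walks the sibling list and steps over entries flagged as killed
+instead of restarting the whole traversal:
+
+        #include <stddef.h>
+
+        struct node {
+                struct node *next;      /* circular sibling list */
+                int killed;             /* set by the killer, under lock */
+        };
+
+        /* return the next live sibling, or NULL to ascend further */
+        static struct node *next_live(struct node *n, struct node *head)
+        {
+                while (n != head && n->killed)
+                        n = n->next;    /* skip killed siblings */
+                return (n == head) ? NULL : n;
+        }
+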
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+[hujianyang: Backported to 3.14 refer to the work of Ben Hutchings in 3.2:
+ - Adjust context to make __dentry_kill() apply to d_kill()]
+Signed-off-by: hujianyang <hujianyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/dcache.c |   31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -435,7 +435,7 @@ static struct dentry *d_kill(struct dent
+       __releases(parent->d_lock)
+       __releases(dentry->d_inode->i_lock)
+ {
+-      list_del(&dentry->d_child);
++      __list_del_entry(&dentry->d_child);
+       /*
+        * Inform d_walk() that we are no longer attached to the
+        * dentry tree
+@@ -1123,33 +1123,31 @@ resume:
+       /*
+        * All done at this level ... ascend and resume the search.
+        */
++      rcu_read_lock();
++ascend:
+       if (this_parent != parent) {
+               struct dentry *child = this_parent;
+               this_parent = child->d_parent;
+-              rcu_read_lock();
+               spin_unlock(&child->d_lock);
+               spin_lock(&this_parent->d_lock);
+-              /*
+-               * might go back up the wrong parent if we have had a rename
+-               * or deletion
+-               */
+-              if (this_parent != child->d_parent ||
+-                       (child->d_flags & DCACHE_DENTRY_KILLED) ||
+-                       need_seqretry(&rename_lock, seq)) {
+-                      spin_unlock(&this_parent->d_lock);
+-                      rcu_read_unlock();
++              /* might go back up the wrong parent if we have had a rename. */
++              if (need_seqretry(&rename_lock, seq))
+                       goto rename_retry;
++              next = child->d_child.next;
++              while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
++                      if (next == &this_parent->d_subdirs)
++                              goto ascend;
++                      child = list_entry(next, struct dentry, d_child);
++                      next = next->next;
+               }
+               rcu_read_unlock();
+-              next = child->d_child.next;
+               goto resume;
+       }
+-      if (need_seqretry(&rename_lock, seq)) {
+-              spin_unlock(&this_parent->d_lock);
++      if (need_seqretry(&rename_lock, seq))
+               goto rename_retry;
+-      }
++      rcu_read_unlock();
+       if (finish)
+               finish(data);
+@@ -1159,6 +1157,9 @@ out_unlock:
+       return;
+ rename_retry:
++      spin_unlock(&this_parent->d_lock);
++      rcu_read_unlock();
++      BUG_ON(seq & 1);
+       if (!retry)
+               return;
+       seq = 1;
diff --git a/queue-3.14/kvm-arm-arm64-vgic-fix-gicd_icfgr-register-accesses.patch b/queue-3.14/kvm-arm-arm64-vgic-fix-gicd_icfgr-register-accesses.patch
new file mode 100644 (file)
index 0000000..0ab955b
--- /dev/null
@@ -0,0 +1,58 @@
+From f2ae85b2ab3776b9e4e42e5b6fa090f40d396794 Mon Sep 17 00:00:00 2001
+From: Andre Przywara <andre.przywara@arm.com>
+Date: Fri, 11 Apr 2014 00:07:18 +0200
+Subject: KVM: arm/arm64: vgic: fix GICD_ICFGR register accesses
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+commit f2ae85b2ab3776b9e4e42e5b6fa090f40d396794 upstream.
+
+Since KVM internally represents the ICFGR registers by stuffing two
+of them into one word, the offset for accessing the internal
+representation and the one for the MMIO-based access are different.
+So keep the original offset around, but adjust the internal array
+offset by one bit.
+
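+As a standalone illustration of the offset arithmetic (a hedged
+sketch, not the kernel code): each 32-bit GICD_ICFGRn covers 16
+interrupts at 2 bits each, but KVM folds two adjacent ICFGR registers
+into one internal word, so the MMIO byte offset is halved for the
+internal lookup while bit 2 of the unshifted offset still selects the
+upper or lower half-word:
+
+        #include <stdint.h>
+
+        static uint32_t cfg_read(const uint32_t *internal, unsigned int off)
+        {
+                /* off is the MMIO byte offset into the ICFGR range */
+                const uint32_t *reg = internal + (off >> 1) / sizeof(*reg);
+
+                return (off & 4) ? (*reg >> 16) : (*reg & 0xffff);
+        }
+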
+Reported-by: Haibin Wang <wanghaibin.wang@huawei.com>
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct k
+       u32 val;
+       u32 *reg;
+-      offset >>= 1;
+       reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+-                                vcpu->vcpu_id, offset);
++                                vcpu->vcpu_id, offset >> 1);
+-      if (offset & 2)
++      if (offset & 4)
+               val = *reg >> 16;
+       else
+               val = *reg & 0xffff;
+@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct k
+       vgic_reg_access(mmio, &val, offset,
+                       ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+       if (mmio->is_write) {
+-              if (offset < 4) {
++              if (offset < 8) {
+                       *reg = ~0U; /* Force PPIs/SGIs to 1 */
+                       return false;
+               }
+               val = vgic_cfg_compress(val);
+-              if (offset & 2) {
++              if (offset & 4) {
+                       *reg &= 0xffff;
+                       *reg |= val << 16;
+               } else {
diff --git a/queue-3.14/kvm-arm-vgic-fix-the-overlap-check-action-about-setting-the-gicd-gicc-base-address.patch b/queue-3.14/kvm-arm-vgic-fix-the-overlap-check-action-about-setting-the-gicd-gicc-base-address.patch
new file mode 100644 (file)
index 0000000..1f4121c
--- /dev/null
@@ -0,0 +1,45 @@
+From 30c2117085bc4e05d091cee6eba79f069b41a9cd Mon Sep 17 00:00:00 2001
+From: Haibin Wang <wanghaibin.wang@huawei.com>
+Date: Tue, 29 Apr 2014 14:49:17 +0800
+Subject: KVM: ARM: vgic: Fix the overlap check action about setting the GICD & GICC base address.
+
+From: Haibin Wang <wanghaibin.wang@huawei.com>
+
+commit 30c2117085bc4e05d091cee6eba79f069b41a9cd upstream.
+
+Currently the check below in vgic_ioaddr_overlap() always succeeds,
+because the vgic dist base and vgic cpu base are still kept UNDEF
+after initialization, so the following code always returns early:
+
+       if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
+                return 0;
+
+So the corresponding base address must be set before invoking
+vgic_ioaddr_overlap().
+
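+The change below follows a commit-then-roll-back pattern.  A minimal
+standalone sketch of that pattern (names are illustrative, not the
+kernel API):
+
+        #include <stdint.h>
+
+        #define ADDR_UNDEF ((uint64_t)-1)
+
+        static int assign_base(uint64_t *ioaddr, uint64_t addr,
+                               int (*overlap_check)(void))
+        {
+                int ret;
+
+                *ioaddr = addr;         /* commit first, so the
+                                         * overlap check sees it */
+                ret = overlap_check();
+                if (ret)
+                        *ioaddr = ADDR_UNDEF;   /* roll back on overlap */
+                return ret;
+        }
+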
+Signed-off-by: Haibin Wang <wanghaibin.wang@huawei.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm
+       if (addr + size < addr)
+               return -EINVAL;
++      *ioaddr = addr;
+       ret = vgic_ioaddr_overlap(kvm);
+       if (ret)
+-              return ret;
+-      *ioaddr = addr;
++              *ioaddr = VGIC_ADDR_UNDEF;
++
+       return ret;
+ }
diff --git a/queue-3.14/kvm-arm64-vgic-fix-hyp-panic-with-64k-pages-on-juno-platform.patch b/queue-3.14/kvm-arm64-vgic-fix-hyp-panic-with-64k-pages-on-juno-platform.patch
new file mode 100644 (file)
index 0000000..6d352c2
--- /dev/null
@@ -0,0 +1,89 @@
+From 63afbe7a0ac184ef8485dac4914e87b211b5bfaa Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 25 Jul 2014 16:29:12 +0100
+Subject: kvm: arm64: vgic: fix hyp panic with 64k pages on juno platform
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 63afbe7a0ac184ef8485dac4914e87b211b5bfaa upstream.
+
+If the physical address of GICV isn't page-aligned, then we end up
+creating a stage-2 mapping of the page containing it, which causes us to
+map neighbouring memory locations directly into the guest.
+
+As an example, consider a platform with GICV at physical 0x2c02f000
+running a 64k-page host kernel. If qemu maps this into the guest at
+0x80010000, then guest physical addresses 0x80010000 - 0x8001efff will
+map host physical region 0x2c020000 - 0x2c02efff. Accesses to these
+physical regions may cause UNPREDICTABLE behaviour, for example, on the
+Juno platform this will cause an SError exception to EL3, which brings
+down the entire physical CPU resulting in RCU stalls / HYP panics / host
+crashing / wasted weeks of debugging.
+
+SBSA recommends that systems alias the 4k GICV across the bounding 64k
+region, in which case GICV physical could be described as 0x2c020000 in
+the above scenario.
+
+This patch fixes the problem by failing the vgic probe if the physical
+base address or the size of GICV aren't page-aligned. Note that this
+generated a warning in dmesg about freeing enabled IRQs, so I had to
+move the IRQ enabling later in the probe.
+
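+A minimal sketch of the alignment test being introduced (illustrative
+only; PAGE_SZ stands for the host's 64k page size, not the kernel's
+PAGE_SIZE macro):
+
+        #include <stdint.h>
+
+        #define PAGE_SZ 0x10000ULL      /* 64k host pages */
+
+        /* both base and size must be page multiples, otherwise the
+         * stage-2 mapping exposes neighbouring MMIO to the guest */
+        static int gicv_region_ok(uint64_t base, uint64_t size)
+        {
+                return (base % PAGE_SZ == 0) && (size % PAGE_SZ == 0);
+        }
+
+        /* gicv_region_ok(0x2c02f000, 0x1000) fails on 64k pages, while
+         * the SBSA-style 64k alias at 0x2c020000 passes */
+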
+Cc: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Gleb Natapov <gleb@kernel.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Joel Schopp <joel.schopp@amd.com>
+Cc: Don Dutile <ddutile@redhat.com>
+Acked-by: Peter Maydell <peter.maydell@linaro.org>
+Acked-by: Joel Schopp <joel.schopp@amd.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Shannon Zhao <shannon.zhao@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic.c |   24 ++++++++++++++++++++----
+ 1 file changed, 20 insertions(+), 4 deletions(-)
+
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1526,17 +1526,33 @@ int kvm_vgic_hyp_init(void)
+               goto out_unmap;
+       }
+-      kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+-               vctrl_res.start, vgic_maint_irq);
+-      on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+-
+       if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
+               kvm_err("Cannot obtain VCPU resource\n");
+               ret = -ENXIO;
+               goto out_unmap;
+       }
++
++      if (!PAGE_ALIGNED(vcpu_res.start)) {
++              kvm_err("GICV physical address 0x%llx not page aligned\n",
++                      (unsigned long long)vcpu_res.start);
++              ret = -ENXIO;
++              goto out_unmap;
++      }
++
++      if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
++              kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
++                      (unsigned long long)resource_size(&vcpu_res),
++                      PAGE_SIZE);
++              ret = -ENXIO;
++              goto out_unmap;
++      }
++
+       vgic_vcpu_base = vcpu_res.start;
++      kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
++               vctrl_res.start, vgic_maint_irq);
++      on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
++
+       goto out;
+ out_unmap:
diff --git a/queue-3.14/series b/queue-3.14/series
index 4bb17ee7ea4a09824b6bb2fe31bede230d806932..dae847bd26b0ada63386607a99e0f46b8cebb773 100644 (file)
--- a/queue-3.14/series
@@ -33,3 +33,19 @@ acpica-tables-change-acpi_find_root_pointer-to-use-acpi_physical_address.patch
 acpica-utilities-cleanup-to-enforce-acpi_physaddr_to_ptr-acpi_ptr_to_physaddr.patch
 acpica-utilities-cleanup-to-convert-physical-address-printing-formats.patch
 acpica-utilities-cleanup-to-remove-useless-acpi_printf-format_xxx-helpers.patch
+deal-with-deadlock-in-d_walk.patch
+arm64-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch
+arm64-kvm-allows-discrimination-of-aarch32-sysreg-access.patch
+arm64-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch
+arm-kvm-introduce-kvm_p-d_addr_end.patch
+arm64-kvm-flush-vm-pages-before-letting-the-guest-enable-caches.patch
+arm-kvm-force-cache-clean-on-page-fault-when-caches-are-off.patch
+arm-kvm-fix-handling-of-trapped-64bit-coprocessor-accesses.patch
+arm-kvm-fix-ordering-of-64bit-coprocessor-accesses.patch
+arm-kvm-introduce-per-vcpu-hyp-configuration-register.patch
+arm-kvm-add-world-switch-for-amair-0-1.patch
+arm-kvm-trap-vm-system-registers-until-mmu-and-caches-are-on.patch
+kvm-arm-arm64-vgic-fix-gicd_icfgr-register-accesses.patch
+kvm-arm-vgic-fix-the-overlap-check-action-about-setting-the-gicd-gicc-base-address.patch
+arm64-kvm-use-inner-shareable-barriers-for-inner-shareable-maintenance.patch
+kvm-arm64-vgic-fix-hyp-panic-with-64k-pages-on-juno-platform.patch