From 04b8dc85bf4a64517e3cf20e409eeaa503b15cc1 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Tue, 10 Mar 2015 19:07:00 +0000
Subject: arm64: KVM: Do not use pgd_index to index stage-2 pgd

From: Marc Zyngier <marc.zyngier@arm.com>

commit 04b8dc85bf4a64517e3cf20e409eeaa503b15cc1 upstream.

The kernel's pgd_index macro is designed to index a normal, page-sized
array. KVM is a bit different, as we can use concatenated pages to have
a bigger address space (for example, a 40-bit IPA with 4kB pages gives
us an 8kB PGD).

In the above case, the use of pgd_index will always return an index
inside the first 4kB, which makes a guest that has memory above
0x8000000000 rather unhappy, as it spins forever in a page fault,
whilst the host happily corrupts the lower pgd.

The obvious fix is to get our own kvm_pgd_index that does the right
thing(tm).
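
As a rough standalone illustration (a minimal sketch, assuming the
3.19 arm64 4kB-page, 3-level layout where PGDIR_SHIFT is 30 and
PTRS_PER_PGD is 512, and a 40-bit IPA so the stage-2 PGD has 1024
entries spread over two concatenated pages):

    /* Illustrative values only; not taken verbatim from the headers. */
    #include <stdio.h>

    #define PGDIR_SHIFT        30
    #define PTRS_PER_PGD       512     /* one 4kB page of pgd_t */
    #define PTRS_PER_S2_PGD    1024    /* 8kB: two concatenated pages */

    #define pgd_index(addr)     (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
    #define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))

    int main(void)
    {
            unsigned long long ipa = 0x8000000000ULL;   /* 512GB, bit 39 set */

            /* wraps back into the first 4kB of the PGD: prints 0 */
            printf("pgd_index:     %llu\n", pgd_index(ipa));
            /* lands in the second, concatenated page: prints 512 */
            printf("kvm_pgd_index: %llu\n", kvm_pgd_index(ipa));
            return 0;
    }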

Tested on X-Gene with a hacked kvmtool that put memory at a stupidly
high address.

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/arm/include/asm/kvm_mmu.h   |    3 ++-
 arch/arm/kvm/mmu.c               |    6 +++---
 arch/arm64/include/asm/kvm_mmu.h |    2 ++
 3 files changed, 7 insertions(+), 4 deletions(-)

--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -128,13 +128,14 @@ static inline void kvm_set_s2pmd_writabl
 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
 })
 
+#define kvm_pgd_index(addr)		pgd_index(addr)
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
 	return page_count(ptr_page) == 1;
 }
 
-
 #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
 #define kvm_pud_table_empty(kvm, pudp) (0)
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -251,7 +251,7 @@ static void unmap_range(struct kvm *kvm,
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
-	pgd = pgdp + pgd_index(addr);
+	pgd = pgdp + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		if (!pgd_none(*pgd))
@@ -316,7 +316,7 @@ static void stage2_flush_memslot(struct
 	phys_addr_t next;
 	pgd_t *pgd;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		stage2_flush_puds(kvm, pgd, addr, next);
@@ -791,7 +791,7 @@ static pud_t *stage2_get_pud(struct kvm
 	pgd_t *pgd;
 	pud_t *pud;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	if (WARN_ON(pgd_none(*pgd))) {
 		if (!cache)
 			return NULL;
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -137,6 +137,8 @@ static inline void kvm_set_s2pmd_writabl
 #define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
 #define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
+#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+
 /*
  * If we are concatenating first level stage-2 page tables, we would have less
  * than or equal to 16 pointers in the fake PGD, because that's what the