From 629149fae478f0ac6bf705a535708b192e9c6b59 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Date: Wed, 13 Aug 2014 12:31:59 +0530
Subject: powerpc/thp: Invalidate old 64K based hash page mapping before insert of 4k pte

From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

commit 629149fae478f0ac6bf705a535708b192e9c6b59 upstream.

If we change the base page size of a segment, either via sub_page_protect
or via remap_4k_pfn, we do a demote_segment, which doesn't flush the hash
table entries. Instead, we do a lazy hash page table flush for all mapped
pages in the demoted segment; this happens when we handle a hash page
fault for these pages.

We use the _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether a
pte is backed by a 4K hash pte. If we find _PAGE_COMBO not set on the pte,
that implies we could still have older 64K hash pte entries in the hash
page table, and we need to invalidate those entries.

Handle this correctly for 16M pages.
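
For illustration only (not part of the upstream commit): the fix boils down
to a single extra test on the old pmd value in the 4K hash fault path. A
minimal user-space sketch of that test follows; the flag values here are
placeholders, not the real powerpc pte definitions.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder flag values for illustration only; the real powerpc
 * definitions differ and live in the architecture's pte headers. */
#define _PAGE_HASHPTE	0x1UL	/* pte has (or had) a hash page table entry */
#define _PAGE_COMBO	0x2UL	/* that entry was inserted with 4K base page size */

/* Mirrors the check this patch adds to __hash_page_thp(): a pte that was
 * hashed (_PAGE_HASHPTE set) but not as a combo/4K mapping (_PAGE_COMBO
 * clear) may still have a stale 64K-based hash entry to invalidate. */
static bool need_old_64k_invalidate(unsigned long old_pmd)
{
	return (old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO);
}

int main(void)
{
	printf("%d\n", need_old_64k_invalidate(_PAGE_HASHPTE));               /* 1: stale 64K entry possible */
	printf("%d\n", need_old_64k_invalidate(_PAGE_HASHPTE | _PAGE_COMBO)); /* 0: already 4K backed */
	return 0;
}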

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/powerpc/mm/hugepage-hash64.c | 79 +++++++++++++++++++++++++++++++++-----
 1 file changed, 70 insertions(+), 9 deletions(-)

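For illustration only (not part of the upstream commit): the new
invalidate_old_hpte() helper in the diff below walks one candidate hash
entry per old 64K subpage of the 16M huge page. A standalone sketch of that
bookkeeping, with made-up constants standing in for the kernel's
mmu_psize_defs data and THP configuration:

#include <stdio.h>

/* Illustrative constants only; in the kernel the shift comes from
 * mmu_psize_defs[psize].shift and HPAGE_PMD_SIZE from the THP config. */
#define HPAGE_PMD_SHIFT	24			/* 16M transparent huge page */
#define HPAGE_PMD_SIZE	(1UL << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x11234567UL;	/* some address inside the huge page */
	unsigned long shift = 16;		/* old base page size: 64K */
	unsigned long s_addr = addr & HPAGE_PMD_MASK;
	unsigned long max_hpte_count = HPAGE_PMD_SIZE >> shift;
	unsigned long i;

	/* The helper visits every potential old entry; here 16M >> 16 = 256. */
	printf("huge page start %#lx, %lu candidate 64K hash entries\n",
	       s_addr, max_hpte_count);
	for (i = 0; i < max_hpte_count; i++) {
		unsigned long sub_addr = s_addr + (i << shift);
		(void)sub_addr;	/* real code derives vpn/hash/slot from this and invalidates */
	}
	return 0;
}
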
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -18,6 +18,57 @@
 #include <linux/mm.h>
 #include <asm/machdep.h>
 
+static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
+				pmd_t *pmdp, unsigned int psize, int ssize)
+{
+	int i, max_hpte_count, valid;
+	unsigned long s_addr;
+	unsigned char *hpte_slot_array;
+	unsigned long hidx, shift, vpn, hash, slot;
+
+	s_addr = addr & HPAGE_PMD_MASK;
+	hpte_slot_array = get_hpte_slot_array(pmdp);
+	/*
+	 * IF we try to do a HUGE PTE update after a withdraw is done.
+	 * we will find the below NULL. This happens when we do
+	 * split_huge_page_pmd
+	 */
+	if (!hpte_slot_array)
+		return;
+
+	if (ppc_md.hugepage_invalidate)
+		return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+						  psize, ssize);
+	/*
+	 * No bluk hpte removal support, invalidate each entry
+	 */
+	shift = mmu_psize_defs[psize].shift;
+	max_hpte_count = HPAGE_PMD_SIZE >> shift;
+	for (i = 0; i < max_hpte_count; i++) {
+		/*
+		 * 8 bits per each hpte entries
+		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
+		 */
+		valid = hpte_valid(hpte_slot_array, i);
+		if (!valid)
+			continue;
+		hidx = hpte_hash_index(hpte_slot_array, i);
+
+		/* get the vpn */
+		addr = s_addr + (i * (1ul << shift));
+		vpn = hpt_vpn(addr, vsid, ssize);
+		hash = hpt_hash(vpn, shift, ssize);
+		if (hidx & _PTEIDX_SECONDARY)
+			hash = ~hash;
+
+		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+		slot += hidx & _PTEIDX_GROUP_IX;
+		ppc_md.hpte_invalidate(slot, vpn, psize,
+				       MMU_PAGE_16M, ssize, 0);
+	}
+}
+
+
 int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		    pmd_t *pmdp, unsigned long trap, int local, int ssize,
 		    unsigned int psize)
@@ -85,6 +136,15 @@ int __hash_page_thp(unsigned long ea, un
 	vpn = hpt_vpn(ea, vsid, ssize);
 	hash = hpt_hash(vpn, shift, ssize);
 	hpte_slot_array = get_hpte_slot_array(pmdp);
+	if (psize == MMU_PAGE_4K) {
+		/*
+		 * invalidate the old hpte entry if we have that mapped via 64K
+		 * base page size. This is because demote_segment won't flush
+		 * hash page table entries.
+		 */
+		if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
+			invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
+	}
 
 	valid = hpte_valid(hpte_slot_array, index);
 	if (valid) {
@@ -107,11 +167,8 @@ int __hash_page_thp(unsigned long ea, un
 			 * safely update this here.
 			 */
 			valid = 0;
-			new_pmd &= ~_PAGE_HPTEFLAGS;
 			hpte_slot_array[index] = 0;
-		} else
-			/* clear the busy bits and set the hash pte bits */
-			new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+		}
 	}
 
 	if (!valid) {
@@ -119,11 +176,7 @@ int __hash_page_thp(unsigned long ea, un
 
 		/* insert new entry */
 		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
-repeat:
-		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
-
-		/* clear the busy bits and set the hash pte bits */
-		new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+		new_pmd |= _PAGE_HASHPTE;
 
 		/* Add in WIMG bits */
 		rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
@@ -132,6 +185,8 @@ repeat:
 		 * enable the memory coherence always
 		 */
 		rflags |= HPTE_R_M;
+repeat:
+		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
 
 		/* Insert into the hash table, primary slot */
 		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -172,6 +227,12 @@ repeat:
 		mark_hpte_slot_valid(hpte_slot_array, index, slot);
 	}
 	/*
+	 * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
+	 * base page size 4k.
+	 */
+	if (psize == MMU_PAGE_4K)
+		new_pmd |= _PAGE_COMBO;
+	/*
 	 * The hpte valid is stored in the pgtable whose address is in the
 	 * second half of the PMD. Order this against clearing of the busy bit in
 	 * huge pmd.