git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - releases/3.16.3/powerpc-thp-handle-combo-pages-in-invalidate.patch
4.9-stable patches
[thirdparty/kernel/stable-queue.git] / releases / 3.16.3 / powerpc-thp-handle-combo-pages-in-invalidate.patch
1 From fc0479557572375100ef16c71170b29a98e0d69a Mon Sep 17 00:00:00 2001
2 From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
3 Date: Wed, 13 Aug 2014 12:32:00 +0530
4 Subject: powerpc/thp: Handle combo pages in invalidate
5
6 From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
7
8 commit fc0479557572375100ef16c71170b29a98e0d69a upstream.
9
10 If we changed base page size of the segment, either via sub_page_protect
11 or via remap_4k_pfn, we do a demote_segment which doesn't flush the hash
12 table entries. We do a lazy hash page table flush for all mapped pages
13 in the demoted segment. This happens when we handle hash page fault for
14 these pages.
15
16 We use _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether a
17 pte is backed by 4K hash pte. If we find _PAGE_COMBO not set on the pte,
18 that implies that we could possibly have older 64K hash pte entries in
19 the hash page table and we need to invalidate those entries.
20
21 Use _PAGE_COMBO to determine the page size with which we should
22 invalidate the hash table entries on unmap.
23
24 Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
25 Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
26 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
27
28 ---
29 arch/powerpc/include/asm/pgtable-ppc64.h | 2 +-
30 arch/powerpc/mm/pgtable_64.c | 14 +++++++++++---
31 arch/powerpc/mm/tlb_hash64.c | 2 +-
32 3 files changed, 13 insertions(+), 5 deletions(-)
33
34 --- a/arch/powerpc/include/asm/pgtable-ppc64.h
35 +++ b/arch/powerpc/include/asm/pgtable-ppc64.h
36 @@ -413,7 +413,7 @@ static inline char *get_hpte_slot_array(
37 }
38
39 extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
40 - pmd_t *pmdp);
41 + pmd_t *pmdp, unsigned long old_pmd);
42 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
43 extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
44 extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
45 --- a/arch/powerpc/mm/pgtable_64.c
46 +++ b/arch/powerpc/mm/pgtable_64.c
47 @@ -538,7 +538,7 @@ unsigned long pmd_hugepage_update(struct
48 *pmdp = __pmd((old & ~clr) | set);
49 #endif
50 if (old & _PAGE_HASHPTE)
51 - hpte_do_hugepage_flush(mm, addr, pmdp);
52 + hpte_do_hugepage_flush(mm, addr, pmdp, old);
53 return old;
54 }
55
56 @@ -645,7 +645,7 @@ void pmdp_splitting_flush(struct vm_area
57 if (!(old & _PAGE_SPLITTING)) {
58 /* We need to flush the hpte */
59 if (old & _PAGE_HASHPTE)
60 - hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
61 + hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
62 }
63 /*
64 * This ensures that generic code that rely on IRQ disabling
65 @@ -723,7 +723,7 @@ void pmdp_invalidate(struct vm_area_stru
66 * neesd to be flushed.
67 */
68 void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
69 - pmd_t *pmdp)
70 + pmd_t *pmdp, unsigned long old_pmd)
71 {
72 int ssize, i;
73 unsigned long s_addr;
74 @@ -746,7 +746,15 @@ void hpte_do_hugepage_flush(struct mm_st
75 return;
76
77 /* get the base page size,vsid and segment size */
78 +#ifdef CONFIG_DEBUG_VM
79 psize = get_slice_psize(mm, s_addr);
80 + BUG_ON(psize == MMU_PAGE_16M);
81 +#endif
82 + if (old_pmd & _PAGE_COMBO)
83 + psize = MMU_PAGE_4K;
84 + else
85 + psize = MMU_PAGE_64K;
86 +
87 if (!is_kernel_addr(s_addr)) {
88 ssize = user_segment_size(s_addr);
89 vsid = get_vsid(mm->context.id, s_addr, ssize);
90 --- a/arch/powerpc/mm/tlb_hash64.c
91 +++ b/arch/powerpc/mm/tlb_hash64.c
92 @@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_
93 if (!(pte & _PAGE_HASHPTE))
94 continue;
95 if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
96 - hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
97 + hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
98 else
99 hpte_need_flush(mm, start, ptep, pte, 0);
100 }