From 890b015b620ca3c2d14c798ce5303cf830d31afd Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Tue, 19 Feb 2013 13:20:08 -0800
Subject: sparc64: Handle hugepage TSB being NULL.


From: "David S. Miller" <davem@davemloft.net>

[ Upstream commit bcd896bae0166b4443503482a26ecf84d9ba60ab ]

Accommodate the possibility that the TSB might be NULL at
the point that update_mmu_cache() is invoked. This is
necessary because we will sometimes need to defer the TSB
allocation to the first fault that happens in the 'mm'.

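For illustration only (not part of the patch itself): a minimal,
self-contained user-space sketch of the same guard pattern, where a
lazily allocated table stands in for the per-'mm' TSB. All names and
values here (mm_like, table_insert, and so on) are hypothetical:

  /* Sketch: the table, like the TSB, may still be NULL because its
   * allocation is deferred to the first fault taken in the mm. */
  #include <stdio.h>
  #include <stdlib.h>

  struct mm_like {
          unsigned long *table;           /* stand-in for the TSB */
          unsigned long nentries;         /* power of two, as for a TSB */
  };

  /* Analogue of __update_mmu_tsb_insert(): quietly do nothing when
   * the table has not been allocated yet; a later fault allocates it
   * and subsequent updates land normally. */
  static void table_insert(struct mm_like *mm, unsigned long idx,
                           unsigned long val)
  {
          if (!mm->table)                 /* the guard this patch adds */
                  return;
          mm->table[idx & (mm->nentries - 1UL)] = val;
  }

  int main(void)
  {
          struct mm_like mm = { .table = NULL, .nentries = 16 };

          table_insert(&mm, 5, 0xdead);   /* safe no-op before allocation */

          mm.table = calloc(mm.nentries, sizeof(*mm.table)); /* "first fault" */
          table_insert(&mm, 5, 0xbeef);
          printf("%lx\n", mm.table[5]);   /* prints beef */
          free(mm.table);
          return 0;
  }
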
Separate out the hugepage PTE test into its own function
so that the logic is clearer.

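Again for illustration only (not part of the patch): a user-space
sketch of the mask-and-compare test that the new is_hugetlb_pte()
helper performs. SZALL_MASK and SZHUGE_BITS below are hypothetical
stand-ins for the real _PAGE_SZALL_4U/4V and _PAGE_SZHUGE_4U/4V
encodings from arch/sparc/include/asm/pgtable_64.h:

  #include <stdbool.h>
  #include <stdio.h>

  #define SZALL_MASK      0x7UL   /* hypothetical page-size bit field */
  #define SZHUGE_BITS     0x5UL   /* hypothetical hugepage encoding */

  /* Analogue of is_hugetlb_pte(): isolate the size field of the PTE
   * value and compare it against the hugepage encoding. */
  static inline bool is_huge(unsigned long pte)
  {
          return (pte & SZALL_MASK) == SZHUGE_BITS;
  }

  int main(void)
  {
          printf("%d %d\n", is_huge(0x1005UL), is_huge(0x1001UL)); /* 1 0 */
          return 0;
  }
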
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/sparc/mm/init_64.c |   38 ++++++++++++++++++++++----------------
 1 file changed, 22 insertions(+), 16 deletions(-)

--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(stru
 	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
 	unsigned long tag;
 
+	if (unlikely(!tsb))
+		return;
+
 	tsb += ((address >> tsb_hash_shift) &
 		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, tte);
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	if ((tlb_type == hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+	    (tlb_type != hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+		return true;
+	return false;
+}
+#endif
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	unsigned long tsb_index, tsb_hash_shift, flags;
 	struct mm_struct *mm;
+	unsigned long flags;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_str
 
 	mm = vma->vm_mm;
 
-	tsb_index = MM_TSB_BASE;
-	tsb_hash_shift = PAGE_SHIFT;
-
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
-		if ((tlb_type == hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-		    (tlb_type != hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
-			tsb_index = MM_TSB_HUGE;
-			tsb_hash_shift = HPAGE_SHIFT;
-		}
-	}
+	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					address, pte_val(pte));
+	else
 #endif
-
-	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
-				address, pte_val(pte));
+	__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+				address, pte_val(pte));
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }