Source: git.ipfire.org — thirdparty/kernel/stable-queue.git (Linux 3.8.8)
Path:   releases/3.8.8/x86-32-fix-possible-incomplete-tlb-invalidate-with-pae-pagetables.patch
From 1de14c3c5cbc9bb17e9dcc648cda51c0c85d54b9 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave@sr71.net>
Date: Fri, 12 Apr 2013 16:23:54 -0700
Subject: x86-32: Fix possible incomplete TLB invalidate with PAE pagetables

From: Dave Hansen <dave@sr71.net>

commit 1de14c3c5cbc9bb17e9dcc648cda51c0c85d54b9 upstream.

This patch attempts to fix:

	https://bugzilla.kernel.org/show_bug.cgi?id=56461

The symptom is a crash and messages like this:

	chrome: Corrupted page table at address 34a03000
	*pdpt = 0000000000000000 *pde = 0000000000000000
	Bad pagetable: 000f [#1] PREEMPT SMP

Ingo guesses this got introduced by commit 611ae8e3f520 ("x86/tlb:
enable tlb flush range support for x86") since that code started to free
unused pagetables.

On x86-32 PAE kernels, that new code has the potential to free an entire
PMD page and will clear one of the four page-directory-pointer-table
(aka pgd_t entries).

The hardware aggressively "caches" these top-level entries and invlpg
does not actually affect the CPU's copy.  If we clear one we *HAVE* to
do a full TLB flush, otherwise we might continue using a freed pmd page.
(note, we do this properly on the population side in pud_populate()).

This patch tracks whenever we clear one of these entries in the 'struct
mmu_gather', and ensures that we follow up with a full tlb flush.

BTW, I disassembled and checked that:

	if (tlb->fullmm == 0)
and
	if (!tlb->fullmm && !tlb->need_flush_all)

generate essentially the same code, so there should be zero impact there
to the !PAE case.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Artem S Tashkinov <t.artem@mailcity.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/tlb.h |    2 +-
 arch/x86/mm/pgtable.c      |    7 +++++++
 include/asm-generic/tlb.h  |    7 ++++++-
 mm/memory.c                |    1 +
 4 files changed, 15 insertions(+), 2 deletions(-)

--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -7,7 +7,7 @@
 
 #define tlb_flush(tlb)							\
 {									\
-	if (tlb->fullmm == 0)						\
+	if (!tlb->fullmm && !tlb->need_flush_all) 			\
 		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
 	else								\
 		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
+	/*
+	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
+	 * entries need a full cr3 reload to flush.
+	 */
+#ifdef CONFIG_X86_PAE
+	tlb->need_flush_all = 1;
+#endif
 	tlb_remove_page(tlb, virt_to_page(pmd));
 }
 
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -99,7 +99,12 @@ struct mmu_gather {
 	unsigned int		need_flush : 1,	/* Did free PTEs */
 				fast_mode  : 1; /* No batching   */
 
-	unsigned int		fullmm;
+	/* we are in the middle of an operation to clear
+	 * a full mm and can make some optimizations */
+	unsigned int		fullmm : 1,
+	/* we have performed an operation which
+	 * requires a complete flush of the tlb */
+				need_flush_all : 1;
 
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -212,6 +212,7 @@ void tlb_gather_mmu(struct mmu_gather *t
 	tlb->mm = mm;
 
 	tlb->fullmm = fullmm;
+	tlb->need_flush_all = 0;
 	tlb->start	= -1UL;
 	tlb->end	= 0;
 	tlb->need_flush = 0;