From 3ea277194daaeaa84ce75180ec7c7a2075027a68 Mon Sep 17 00:00:00 2001
From: Mel Gorman <mgorman@suse.de>
Date: Wed, 2 Aug 2017 13:31:52 -0700
Subject: mm, mprotect: flush TLB if potentially racing with a parallel reclaim leaving stale TLB entries

From: Mel Gorman <mgorman@suse.de>

commit 3ea277194daaeaa84ce75180ec7c7a2075027a68 upstream.

Stable note for 4.4: The upstream patch also covers madvise(MADV_FREE), but
4.4 does not have support for that feature. The changelog is left
as-is but the hunk related to madvise is omitted from the backport.

Nadav Amit identified a theoretical race between page reclaim and
mprotect due to TLB flushes being batched outside of the PTL being held.

He described the race as follows:

        CPU0                            CPU1
        ----                            ----
                                        user accesses memory using RW PTE
                                        [PTE now cached in TLB]
        try_to_unmap_one()
        ==> ptep_get_and_clear()
        ==> set_tlb_ubc_flush_pending()
                                        mprotect(addr, PROT_READ)
                                        ==> change_pte_range()
                                        ==> [ PTE non-present - no flush ]

                                        user writes using cached RW PTE
        ...

        try_to_unmap_flush()

The same type of race exists for reads when protecting for PROT_NONE and
also exists for operations that can leave an old TLB entry behind such
as munmap, mremap and madvise.

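The userspace-visible guarantee at stake is simple: once mprotect() has
downgraded a mapping to PROT_READ, a later write must fault rather than
complete through a stale RW TLB entry. A minimal C sketch of that
expectation follows; it is illustrative only and will not reproduce the
race by itself, since that also requires reclaim to be batching TLB
flushes on another CPU at the right moment.

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = sysconf(_SC_PAGESIZE);
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			exit(1);

		/* Populate a writable PTE (and a TLB entry) for the page. */
		p[0] = 1;

		/* Revoke write permission. */
		if (mprotect(p, len, PROT_READ))
			exit(1);

		/*
		 * This write must raise SIGSEGV. Under the race described
		 * above, a stale RW TLB entry could let it complete silently.
		 */
		p[0] = 2;
		fprintf(stderr, "write after PROT_READ unexpectedly succeeded\n");
		return 1;
	}
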
For some operations like mprotect, it's not necessarily a data integrity
issue but it is a correctness issue as there is a window where an
mprotect that limits access still allows access. For munmap, it's
potentially a data integrity issue, although hitting the race requires
that a munmap, mmap and return to userspace all complete in the window
between reclaim dropping the PTL and flushing the TLB. However, it's
theoretically possible, so handle this issue by flushing the mm if
reclaim is potentially currently batching TLB flushes.

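In outline, the mechanism the diff below adds can be modelled in plain
userspace C. The model_ names here are hypothetical stand-ins for the
kernel code and only show the ordering of the two sides; the real
implementation is the mm/rmap.c hunk further down.

	#include <stdbool.h>
	#include <stdio.h>

	struct mm_model {
		bool tlb_flush_batched;	/* mirrors mm->tlb_flush_batched */
	};

	/* Reclaim side: runs under the PTL when a TLB flush is deferred. */
	static void model_set_tlb_ubc_flush_pending(struct mm_model *mm)
	{
		mm->tlb_flush_batched = true;
	}

	/* mprotect/munmap/mremap side: runs under the PTL before touching PTEs. */
	static void model_flush_tlb_batched_pending(struct mm_model *mm)
	{
		if (mm->tlb_flush_batched) {
			printf("flush_tlb_mm() would run here\n");
			mm->tlb_flush_batched = false;
		}
	}

	int main(void)
	{
		struct mm_model mm = { .tlb_flush_batched = false };

		model_set_tlb_ubc_flush_pending(&mm);	/* reclaim defers a flush */
		model_flush_tlb_batched_pending(&mm);	/* later mprotect flushes it */
		return 0;
	}
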
Other instances where a flush is required for a present pte should be ok
as either the page lock is held preventing parallel reclaim or a page
reference count is elevated preventing a parallel free leading to
corruption. In the case of page_mkclean there isn't an obvious path
that userspace could take advantage of without using the operations that
are guarded by this patch. Other users such as gup, when racing with
reclaim, look just at PTEs. Huge page variants should be ok as they
don't race with reclaim. mincore only looks at PTEs. userfault should
also be ok as, if a parallel reclaim takes place, it will either fault
the page back in or read some of the data before the flush occurs,
triggering a fault.

Note that a variant of this patch was acked by Andy Lutomirski but this
was for the x86 parts on top of his PCID work which didn't make the 4.13
merge window as expected. His ack is dropped from this version and
there will be a follow-on patch on top of PCID that will include his
ack.

[akpm@linux-foundation.org: tweak comments]
[akpm@linux-foundation.org: fix spello]
Link: http://lkml.kernel.org/r/20170717155523.emckq2esjro6hf3z@suse.de
Reported-by: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/linux/mm_types.h |    4 ++++
 mm/internal.h            |    5 ++++-
 mm/memory.c              |    1 +
 mm/mprotect.c            |    1 +
 mm/mremap.c              |    1 +
 mm/rmap.c                |   36 ++++++++++++++++++++++++++++++++++++
 6 files changed, 47 insertions(+), 1 deletion(-)

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -504,6 +504,10 @@ struct mm_struct {
 	 */
 	bool tlb_flush_pending;
 #endif
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	/* See flush_tlb_batched_pending() */
+	bool tlb_flush_batched;
+#endif
 	struct uprobes_state uprobes_state;
 #ifdef CONFIG_X86_INTEL_MPX
 	/* address of the bounds directory */
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -453,6 +453,7 @@ struct tlbflush_unmap_batch;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 void try_to_unmap_flush(void);
 void try_to_unmap_flush_dirty(void);
+void flush_tlb_batched_pending(struct mm_struct *mm);
 #else
 static inline void try_to_unmap_flush(void)
 {
@@ -460,6 +461,8 @@ static inline void try_to_unmap_flush(vo
 static inline void try_to_unmap_flush_dirty(void)
 {
 }
-
+static inline void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+}
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 #endif /* __MM_INTERNAL_H */
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1127,6 +1127,7 @@ again:
 	init_rss_vec(rss);
 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	pte = start_pte;
+	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = *pte;
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -72,6 +72,7 @@ static unsigned long change_pte_range(st
 	if (!pte)
 		return 0;
 
+	flush_tlb_batched_pending(vma->vm_mm);
 	arch_enter_lazy_mmu_mode();
 	do {
 		oldpte = *pte;
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -135,6 +135,7 @@ static void move_ptes(struct vm_area_str
 	new_ptl = pte_lockptr(mm, new_pmd);
 	if (new_ptl != old_ptl)
 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+	flush_tlb_batched_pending(vma->vm_mm);
 	arch_enter_lazy_mmu_mode();
 
 	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -649,6 +649,13 @@ static void set_tlb_ubc_flush_pending(st
 	tlb_ubc->flush_required = true;
 
 	/*
+	 * Ensure compiler does not re-order the setting of tlb_flush_batched
+	 * before the PTE is cleared.
+	 */
+	barrier();
+	mm->tlb_flush_batched = true;
+
+	/*
 	 * If the PTE was dirty then it's best to assume it's writable. The
 	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
 	 * before the page is queued for IO.
@@ -675,6 +682,35 @@ static bool should_defer_flush(struct mm
 
 	return should_defer;
 }
+
+/*
+ * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
+ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
+ * operation such as mprotect or munmap to race between reclaim unmapping
+ * the page and flushing the page. If this race occurs, it potentially allows
+ * access to data via a stale TLB entry. Tracking all mm's that have TLB
+ * batching in flight would be expensive during reclaim so instead track
+ * whether TLB batching occurred in the past and if so then do a flush here
+ * if required. This will cost one additional flush per reclaim cycle paid
+ * by the first operation at risk such as mprotect and munmap.
+ *
+ * This must be called under the PTL so that an access to tlb_flush_batched
+ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
+ * via the PTL.
+ */
+void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+	if (mm->tlb_flush_batched) {
+		flush_tlb_mm(mm);
+
+		/*
+		 * Do not allow the compiler to re-order the clearing of
+		 * tlb_flush_batched before the tlb is flushed.
+		 */
+		barrier();
+		mm->tlb_flush_batched = false;
+	}
+}
 #else
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
 		struct page *page, bool writable)