From 1299ef1d8870d2d9f09a5aadf2f8b2c887c2d033 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Wed, 31 Jan 2018 08:03:10 -0800
Subject: x86/mm: Rename flush_tlb_single() and flush_tlb_one() to __flush_tlb_one_[user|kernel]()

From: Andy Lutomirski <luto@kernel.org>

commit 1299ef1d8870d2d9f09a5aadf2f8b2c887c2d033 upstream.

flush_tlb_single() and flush_tlb_one() sound almost identical, but
they really mean "flush one user translation" and "flush one kernel
translation". Rename them to flush_tlb_one_user() and
flush_tlb_one_kernel() to make the semantics more obvious.

[ I was looking at some PTI-related code, and the flush-one-address code
  is unnecessarily hard to understand because the names of the helpers are
  uninformative. This came up during PTI review, but no one got around to
  doing it. ]

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Linux-MM <linux-mm@kvack.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/3303b02e3c3d049dc5235d5651e0ae6d29a34354.1517414378.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/paravirt.h       |    4 ++--
 arch/x86/include/asm/paravirt_types.h |    2 +-
 arch/x86/include/asm/pgtable_32.h     |    2 +-
 arch/x86/include/asm/tlbflush.h       |   27 ++++++++++++++++++++-------
 arch/x86/kernel/acpi/apei.c           |    2 +-
 arch/x86/kernel/paravirt.c            |    6 +++---
 arch/x86/mm/init_64.c                 |    2 +-
 arch/x86/mm/ioremap.c                 |    2 +-
 arch/x86/mm/kmmio.c                   |    2 +-
 arch/x86/mm/pgtable_32.c              |    2 +-
 arch/x86/mm/tlb.c                     |    6 +++---
 arch/x86/platform/uv/tlb_uv.c         |    2 +-
 arch/x86/xen/mmu_pv.c                 |    6 +++---
 include/trace/events/xen.h            |    2 +-
 14 files changed, 40 insertions(+), 27 deletions(-)

--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -297,9 +297,9 @@ static inline void __flush_tlb_global(vo
 {
 	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
 }
-static inline void __flush_tlb_single(unsigned long addr)
+static inline void __flush_tlb_one_user(unsigned long addr)
 {
-	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
+	PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
 }
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -217,7 +217,7 @@ struct pv_mmu_ops {
 	/* TLB operations */
 	void (*flush_tlb_user)(void);
 	void (*flush_tlb_kernel)(void);
-	void (*flush_tlb_single)(unsigned long addr);
+	void (*flush_tlb_one_user)(unsigned long addr);
 	void (*flush_tlb_others)(const struct cpumask *cpus,
				 const struct flush_tlb_info *info);
 
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -61,7 +61,7 @@ void paging_init(void);
 #define kpte_clear_flush(ptep, vaddr)		\
 do {						\
 	pte_clear(&init_mm, (vaddr), (ptep));	\
-	__flush_tlb_one((vaddr));		\
+	__flush_tlb_one_kernel((vaddr));	\
 } while (0)
 
 #endif /* !__ASSEMBLY__ */
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -140,7 +140,7 @@ static inline unsigned long build_cr3_no
 #else
 #define __flush_tlb() __native_flush_tlb()
 #define __flush_tlb_global() __native_flush_tlb_global()
-#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
 #endif
 
 static inline bool tlb_defer_switch_to_init_mm(void)
@@ -397,7 +397,7 @@ static inline void __native_flush_tlb_gl
 /*
  * flush one page in the user mapping
  */
-static inline void __native_flush_tlb_single(unsigned long addr)
+static inline void __native_flush_tlb_one_user(unsigned long addr)
 {
 	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 
@@ -434,18 +434,31 @@ static inline void __flush_tlb_all(void)
 /*
  * flush one page in the kernel mapping
  */
-static inline void __flush_tlb_one(unsigned long addr)
+static inline void __flush_tlb_one_kernel(unsigned long addr)
 {
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
-	__flush_tlb_single(addr);
+
+	/*
+	 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
+	 * paravirt equivalent. Even with PCID, this is sufficient: we only
+	 * use PCID if we also use global PTEs for the kernel mapping, and
+	 * INVLPG flushes global translations across all address spaces.
+	 *
+	 * If PTI is on, then the kernel is mapped with non-global PTEs, and
+	 * __flush_tlb_one_user() will flush the given address for the current
+	 * kernel address space and for its usermode counterpart, but it does
+	 * not flush it for other address spaces.
+	 */
+	__flush_tlb_one_user(addr);
 
 	if (!static_cpu_has(X86_FEATURE_PTI))
 		return;
 
 	/*
-	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
-	 * but since kernel space is replicated across all, we must also
-	 * invalidate all others.
+	 * See above. We need to propagate the flush to all other address
+	 * spaces. In principle, we only need to propagate it to kernelmode
+	 * address spaces, but the extra bookkeeping we would need is not
+	 * worth it.
	 */
 	invalidate_other_asid();
 }
--- a/arch/x86/kernel/acpi/apei.c
+++ b/arch/x86/kernel/acpi/apei.c
@@ -55,5 +55,5 @@ void arch_apei_report_mem_error(int sev,
 
 void arch_apei_flush_tlb_one(unsigned long addr)
 {
-	__flush_tlb_one(addr);
+	__flush_tlb_one_kernel(addr);
 }
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -190,9 +190,9 @@ static void native_flush_tlb_global(void
 	__native_flush_tlb_global();
 }
 
-static void native_flush_tlb_single(unsigned long addr)
+static void native_flush_tlb_one_user(unsigned long addr)
 {
-	__native_flush_tlb_single(addr);
+	__native_flush_tlb_one_user(addr);
 }
 
 struct static_key paravirt_steal_enabled;
@@ -391,7 +391,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_
 
 	.flush_tlb_user = native_flush_tlb,
 	.flush_tlb_kernel = native_flush_tlb_global,
-	.flush_tlb_single = native_flush_tlb_single,
+	.flush_tlb_one_user = native_flush_tlb_one_user,
 	.flush_tlb_others = native_flush_tlb_others,
 
 	.pgd_alloc = __paravirt_pgd_alloc,
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud,
 	 * It's enough to flush this one mapping.
 	 * (PGE mappings get flushed as well)
 	 */
-	__flush_tlb_one(vaddr);
+	__flush_tlb_one_kernel(vaddr);
 }
 
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -749,5 +749,5 @@ void __init __early_set_fixmap(enum fixe
 		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
 	else
 		pte_clear(&init_mm, addr, pte);
-	__flush_tlb_one(addr);
+	__flush_tlb_one_kernel(addr);
 }
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -168,7 +168,7 @@ static int clear_page_presence(struct km
 		return -1;
 	}
 
-	__flush_tlb_one(f->addr);
+	__flush_tlb_one_kernel(f->addr);
 	return 0;
 }
 
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr,
 	 * It's enough to flush this one mapping.
 	 * (PGE mappings get flushed as well)
 	 */
-	__flush_tlb_one(vaddr);
+	__flush_tlb_one_kernel(vaddr);
 }
 
 unsigned long __FIXADDR_TOP = 0xfffff000;
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -492,7 +492,7 @@ static void flush_tlb_func_common(const
 	 * flush that changes context.tlb_gen from 2 to 3. If they get
 	 * processed on this CPU in reverse order, we'll see
 	 * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
-	 * If we were to use __flush_tlb_single() and set local_tlb_gen to
+	 * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
 	 * 3, we'd be break the invariant: we'd update local_tlb_gen above
 	 * 1 without the full flush that's needed for tlb_gen 2.
 	 *
@@ -513,7 +513,7 @@ static void flush_tlb_func_common(const
 
 		addr = f->start;
 		while (addr < f->end) {
-			__flush_tlb_single(addr);
+			__flush_tlb_one_user(addr);
 			addr += PAGE_SIZE;
 		}
 		if (local)
@@ -660,7 +660,7 @@ static void do_kernel_range_flush(void *
 
 	/* flush range by one by one 'invlpg' */
 	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
-		__flush_tlb_one(addr);
+		__flush_tlb_one_kernel(addr);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -299,7 +299,7 @@ static void bau_process_message(struct m
 		local_flush_tlb();
 		stat->d_alltlb++;
 	} else {
-		__flush_tlb_single(msg->address);
+		__flush_tlb_one_user(msg->address);
 		stat->d_onetlb++;
 	}
 	stat->d_requestee++;
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void)
 	preempt_enable();
 }
 
-static void xen_flush_tlb_single(unsigned long addr)
+static void xen_flush_tlb_one_user(unsigned long addr)
 {
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_single(addr);
+	trace_xen_mmu_flush_tlb_one_user(addr);
 
 	preempt_disable();
 
@@ -2360,7 +2360,7 @@ static const struct pv_mmu_ops xen_mmu_o
 
 	.flush_tlb_user = xen_flush_tlb,
 	.flush_tlb_kernel = xen_flush_tlb,
-	.flush_tlb_single = xen_flush_tlb_single,
+	.flush_tlb_one_user = xen_flush_tlb_one_user,
 	.flush_tlb_others = xen_flush_tlb_others,
 
 	.pgd_alloc = xen_pgd_alloc,
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -365,7 +365,7 @@ TRACE_EVENT(xen_mmu_flush_tlb,
	    TP_printk("%s", "")
	);
 
-TRACE_EVENT(xen_mmu_flush_tlb_single,
+TRACE_EVENT(xen_mmu_flush_tlb_one_user,
	    TP_PROTO(unsigned long addr),
	    TP_ARGS(addr),
	    TP_STRUCT__entry(