#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
                /*
                 * This gets called in the idle path where RCU
                 * functions differently.  Tracing normally
                 * uses RCU, so we have to call the tracepoint
                 * specially here.
                 */
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);

#endif /* CONFIG_SMP */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();

        if (likely(prev != next)) {
                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
                         * If our current stack is in vmalloc space and isn't
                         * mapped in the new pgd, we'll double-fault.  Forcibly
                         * map it.
                         */
                        unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

                        pgd_t *pgd = next->pgd + stack_pgd_index;

                        if (unlikely(pgd_none(*pgd)))
                                set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
                }

#ifdef CONFIG_SMP
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);
#endif

                cpumask_set_cpu(cpu, mm_cpumask(next));

                /*
                 * Re-load page tables.
                 *
                 * This logic has an ordering constraint:
                 *
                 *  CPU 0: Write to a PTE for 'next'
                 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
                 *  CPU 1: set bit 1 in next's mm_cpumask
                 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
                 *
                 * We need to prevent an outcome in which CPU 1 observes
                 * the new PTE value and CPU 0 observes bit 1 clear in
                 * mm_cpumask.  (If that occurs, then the IPI will never
                 * be sent, and CPU 0's TLB will contain a stale entry.)
                 *
                 * The bad outcome can occur if either CPU's load is
                 * reordered before that CPU's store, so both CPUs must
                 * execute full barriers to prevent this from happening.
                 *
                 * Thus, switch_mm needs a full barrier between the
                 * store to mm_cpumask and any operation that could load
                 * from next->pgd.  TLB fills are special and can happen
                 * due to instruction fetches or for no reason at all,
                 * and neither LOCK nor MFENCE orders them.
                 * Fortunately, load_cr3() is serializing and gives the
                 * ordering guarantee we need.
                 */
                load_cr3(next->pgd);

                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

                /* Stop flush ipis for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /* Load per-mm CR4 state */
                load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
                /*
                 * Load the LDT, if the LDT is different.
                 *
                 * It's possible that prev->context.ldt doesn't match
                 * the LDT register.  This can happen if leave_mm(prev)
                 * was called and then modify_ldt changed
                 * prev->context.ldt but suppressed an IPI to this CPU.
                 * In this case, prev->context.ldt != NULL, because we
                 * never set context.ldt to NULL while the mm still
                 * exists.  That means that next->context.ldt !=
                 * prev->context.ldt, because mms never share an LDT.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_mm_ldt(next);
#endif
        }
#ifdef CONFIG_SMP
        else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * On established mms, the mm_cpumask is only changed
                         * from irq context, from ptep_clear_flush() while in
                         * lazy tlb mode, and here.  Irqs are blocked during
                         * schedule, protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));

                        /*
                         * We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery.  We must reload CR3
                         * to make sure we don't keep using freed page tables.
                         *
                         * As above, load_cr3() is serializing and orders TLB
                         * fills with respect to the mm_cpumask write.
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_mm_cr4(next);
                        load_mm_ldt(next);
                }
        }
#endif
}

#ifdef CONFIG_SMP

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *      Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *      if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm.  This is not synchronized with
 *      the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *      mm, so in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */
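
/*
 * In this version of the file, the historical names above map onto the
 * current code as follows: 'cpu_vm_mask' is reached through mm_cpumask(mm),
 * and the flush IPI lands in flush_tlb_func_remote(), which forwards to
 * flush_tlb_func_common() below.
 */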

static void flush_tlb_func_common(const struct flush_tlb_info *f,
                                  bool local, enum tlb_flush_reason reason)
{
        if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
                leave_mm(smp_processor_id());
                return;
        }

        if (f->end == TLB_FLUSH_ALL) {
                local_flush_tlb();
                if (local)
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                trace_tlb_flush(reason, TLB_FLUSH_ALL);
        } else {
                unsigned long addr;
                unsigned long nr_pages =
                        (f->end - f->start) / PAGE_SIZE;
                addr = f->start;
                while (addr < f->end) {
                        __flush_tlb_single(addr);
                        addr += PAGE_SIZE;
                }
                if (local)
                        count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
                trace_tlb_flush(reason, nr_pages);
        }
}
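
/*
 * For illustration, assuming 4 KiB pages: a request with f->start == 0x400000
 * and f->end == 0x403000 covers three pages, so the loop above issues three
 * __flush_tlb_single() (INVLPG) calls, at 0x400000, 0x401000 and 0x402000.
 * A request with f->end == TLB_FLUSH_ALL takes the local_flush_tlb() path
 * instead, rewriting CR3 and dropping all non-global user mappings at once.
 */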

static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
        const struct flush_tlb_info *f = info;

        flush_tlb_func_common(f, true, reason);
}

static void flush_tlb_func_remote(void *info)
{
        const struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (info->end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
                                (info->end - info->start) >> PAGE_SHIFT);

        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, info);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func_remote,
                                               (void *)info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func_remote,
                               (void *)info, 1);
}
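
/*
 * Callers normally go through flush_tlb_others() rather than calling this
 * function directly; on paravirtualized kernels that hook may be routed to
 * a hypervisor-assisted flush instead of this native IPI-based one.
 */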

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

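/*
 * For illustration, assuming 4 KiB pages: a 128 KiB munmap() spans 32 pages,
 * which is at or below the ceiling of 33, so flush_tlb_mm_range() below
 * flushes it page by page; a 1 MiB range spans 256 pages and falls back to
 * a full TLB flush instead.
 */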
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end, unsigned long vmflag)
{
        int cpu;

        struct flush_tlb_info info = {
                .mm = mm,
        };

        cpu = get_cpu();

        /* Synchronize with switch_mm. */
        smp_mb();

        /* Should we flush just the requested range? */
        if ((end != TLB_FLUSH_ALL) &&
            !(vmflag & VM_HUGETLB) &&
            ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
                info.start = start;
                info.end = end;
        } else {
                info.start = 0UL;
                info.end = TLB_FLUSH_ALL;
        }

        if (mm == current->active_mm)
                flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
        if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), &info);
        put_cpu();
}
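
/*
 * The generic mm code normally reaches flush_tlb_mm_range() through wrappers
 * such as flush_tlb_range() in asm/tlbflush.h, roughly as
 *
 *      flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags);
 *
 * so the VM_HUGETLB check above sees the flags of the mapping being changed.
 */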

static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{

        /* Balance as in the user space task flush above; a bit conservative */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;
                info.start = start;
                info.end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}
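
/*
 * Typical callers of flush_tlb_kernel_range() are kernel-address users such
 * as the vmalloc/vfree teardown paths, so the same single-page-versus-full
 * heuristic used for user ranges is applied here, just without an mm to
 * consult.
 */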

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
        struct flush_tlb_info info = {
                .mm = NULL,
                .start = 0UL,
                .end = TLB_FLUSH_ALL,
        };

        int cpu = get_cpu();

        if (cpumask_test_cpu(cpu, &batch->cpumask)) {
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
                trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
        }

        if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
                flush_tlb_others(&batch->cpumask, &info);
        cpumask_clear(&batch->cpumask);

        put_cpu();
}
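
/*
 * This is the arch side of the batched unmap TLB flush used by memory
 * reclaim: the rmap code accumulates the CPUs that may hold stale entries
 * into batch->cpumask while unmapping pages and then calls
 * arch_tlbbatch_flush() once to shoot them all down together.
 */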

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                 const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
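
/*
 * With debugfs mounted in the usual location, the ceiling can be read and
 * tuned at run time through
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling; see
 * Documentation/x86/tlb.txt for the reasoning behind the default of 33.
 */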

#endif /* CONFIG_SMP */