#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * TLB flushing, formerly SMP-only
 *		c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

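/*
 * leave_mm() drops this CPU out of a lazily-active mm: it switches over to
 * init_mm so that remote flush IPIs for the old mm no longer need to target
 * this CPU.  Callers must be in lazy TLB mode (or already be running
 * init_mm); anything else is a bug, checked below.
 */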
void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

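/*
 * switch_mm_irqs_off() does the real work.  Interrupts must be disabled so
 * that a flush IPI cannot arrive in the middle of the loaded_mm and
 * mm_cpumask updates below and observe an inconsistent per-CPU state.
 */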
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, there is no guarantee that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);

	if (real_prev == next) {
		/*
		 * There's nothing to do: we always keep the per-mm control
		 * regs in sync with cpu_tlbstate.loaded_mm.  Just
		 * sanity-check mm_cpumask.
		 */
		if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));
		return;
	}

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		/*
		 * If our current stack is in vmalloc space and isn't
		 * mapped in the new pgd, we'll double-fault.  Forcibly
		 * map it.
		 */
		unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

		pgd_t *pgd = next->pgd + stack_pgd_index;

		if (unlikely(pgd_none(*pgd)))
			set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
	}

	this_cpu_write(cpu_tlbstate.loaded_mm, next);

	WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	/*
	 * Re-load page tables.
	 *
	 * This logic has an ordering constraint:
	 *
	 *  CPU 0: Write to a PTE for 'next'
	 *  CPU 0: load bit 1 in mm_cpumask.  If nonzero, send IPI.
	 *  CPU 1: set bit 1 in next's mm_cpumask
	 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
	 *
	 * We need to prevent an outcome in which CPU 1 observes
	 * the new PTE value and CPU 0 observes bit 1 clear in
	 * mm_cpumask.  (If that occurs, then the IPI will never
	 * be sent, and CPU 0's TLB will contain a stale entry.)
	 *
	 * The bad outcome can occur if either CPU's load is
	 * reordered before that CPU's store, so both CPUs must
	 * execute full barriers to prevent this from happening.
	 *
	 * Thus, switch_mm needs a full barrier between the
	 * store to mm_cpumask and any operation that could load
	 * from next->pgd.  TLB fills are special and can happen
	 * due to instruction fetches or for no reason at all,
	 * and neither LOCK nor MFENCE orders them.
	 * Fortunately, load_cr3() is serializing and gives the
	 * ordering guarantee we need.
	 */
	load_cr3(next->pgd);

	/*
	 * This gets called via leave_mm() in the idle path where RCU
	 * functions differently.  Tracing normally uses RCU, so we have
	 * to call the tracepoint specially here.
	 */
	trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

	/* Stop flush ipis for the previous mm */
	WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
		     real_prev != &init_mm);
	cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

	/* Load per-mm CR4 state */
	load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT, if the LDT is different.
	 *
	 * It's possible that prev->context.ldt doesn't match the LDT
	 * register.  This can happen if leave_mm(prev) was called and
	 * then modify_ldt changed prev->context.ldt but suppressed an
	 * IPI to this CPU.  In this case, prev->context.ldt != NULL,
	 * because we never set context.ldt to NULL while the mm still
	 * exists.  That means that next->context.ldt != prev->context.ldt,
	 * because mms never share an LDT.
	 */
	if (unlikely(real_prev->context.ldt != next->context.ldt))
		load_mm_ldt(next);
#endif
}

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func_common() won't call
 *	leave_mm if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm.  This is not synchronized with
 *	the other cpus, but flush_tlb_func_remote() ignores flush ipis for
 *	the wrong mm, and in the worst case we perform a superfluous tlb
 *	flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
		leave_mm(smp_processor_id());
		return;
	}

	if (f->end == TLB_FLUSH_ALL) {
		local_flush_tlb();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		trace_tlb_flush(reason, TLB_FLUSH_ALL);
	} else {
		unsigned long addr;
		unsigned long nr_pages = (f->end - f->start) / PAGE_SIZE;

		addr = f->start;
		while (addr < f->end) {
			__flush_tlb_single(addr);
			addr += PAGE_SIZE;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
		trace_tlb_flush(reason, nr_pages);
	}
}

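/*
 * flush_tlb_func_local() runs on the CPU that initiated the flush;
 * flush_tlb_func_remote() is the callback that the SMP function-call
 * IPI invokes on the other CPUs (see native_flush_tlb_others() below).
 */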
static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
	const struct flush_tlb_info *f = info;

	flush_tlb_func_common(f, true, reason);
}

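/*
 * A remote CPU may have switched mms between the moment the IPI was sent
 * and the moment it is handled, so a flush targeting a specific mm is
 * ignored unless that mm is still loaded: this is the "flush ipis for the
 * wrong mm" case from the big comment above.
 */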
static void flush_tlb_func_remote(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

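/*
 * native_flush_tlb_others() is reached via the flush_tlb_others() hook,
 * which lets a paravirtualized kernel substitute a hypervisor-assisted
 * flush for this IPI-based implementation.
 */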
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func_remote,
					       (void *)info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func_remote,
			       (void *)info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

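/*
 * Worked example: with the default ceiling of 33 pages, a 128 KiB flush
 * (32 4 KiB pages) stays under the ceiling and is done one 'invlpg' at a
 * time (roughly 32 * 100 ns), while a 256 KiB flush (64 pages) exceeds it
 * and is turned into a full TLB flush instead.
 *
 * A typical (illustrative) caller is the flush_tlb_range() wrapper used
 * after page-table updates for a VMA, which expands to roughly
 *
 *	flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags);
 */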
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned long vmflag)
{
	int cpu;

	struct flush_tlb_info info = {
		.mm = mm,
	};

	cpu = get_cpu();

	/* Synchronize with switch_mm. */
	smp_mb();

	/* Should we flush just the requested range? */
	if ((end != TLB_FLUSH_ALL) &&
	    !(vmflag & VM_HUGETLB) &&
	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
		info.start = start;
		info.end = end;
	} else {
		info.start = 0UL;
		info.end = TLB_FLUSH_ALL;
	}

	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm))
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);
	put_cpu();
}

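/*
 * flush_tlb_all() flushes everything on every CPU, including global
 * (kernel) entries, via __flush_tlb_all().  A CPU found in lazy TLB mode
 * takes the opportunity to leave_mm() while it is at it.
 */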
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Use the same per-page flush ceiling as for user-space flushes,
	 * a bit conservatively. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.start = start;
		info.end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

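/*
 * arch_tlbbatch_flush() finishes a batched unmap: the page reclaim code
 * (see try_to_unmap_flush() in mm/rmap.c) accumulates the CPUs that may
 * hold stale entries in batch->cpumask and flushes them all in one go here.
 */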
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info info = {
		.mm = NULL,
		.start = 0UL,
		.end = TLB_FLUSH_ALL,
	};

	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask))
		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &info);
	cpumask_clear(&batch->cpumask);

	put_cpu();
}

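/*
 * Everything below implements a debugfs knob for tuning
 * tlb_single_page_flush_ceiling at run time.
 */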
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

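/*
 * The knob is exposed as /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 * (arch_debugfs_dir is the "x86" debugfs directory).  Illustrative usage
 * from a root shell:
 *
 *	echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */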
static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);