#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>
#include <linux/rmap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* Limit batching if we have delayed rmaps pending */
	if (tlb->delayed_rmap && tlb->active != &tlb->local)
		return false;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch,
				 struct vm_area_struct *vma)
{
	for (int i = 0; i < batch->nr; i++) {
		struct encoded_page *enc = batch->encoded_pages[i];

		if (encoded_page_flags(enc)) {
			struct page *page = encoded_page_ptr(enc);
			page_remove_rmap(page, vma, false);
		}
	}
}

/**
 * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB
 * @tlb: the current mmu_gather
 * @vma: the vma whose pages were gathered with their rmap removal delayed
 *
 * Note that because of how tlb_next_batch() above works, we will
 * never start multiple new batches with pending delayed rmaps, so
 * we only need to walk through the current active batch and the
 * original local one.
 */
void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->delayed_rmap)
		return;

	tlb_flush_rmap_batch(&tlb->local, vma);
	if (tlb->active != &tlb->local)
		tlb_flush_rmap_batch(tlb->active, vma);
	tlb->delayed_rmap = 0;
}
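
/*
 * Illustrative sketch of the expected calling pattern; the caller shown is
 * an assumption about a typical zap path, not code from this file. The point
 * is the ordering: flush the TLB first, then run the delayed rmap removals,
 * all before the PTE lock is dropped:
 *
 *	tlb_flush_mmu_tlbonly(tlb);
 *	tlb_flush_rmaps(tlb, vma);
 *	pte_unmap_unlock(start_pte, ptl);
 */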

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		struct encoded_page **pages = batch->encoded_pages;

		do {
			/*
			 * limit free batch count when PAGE_SIZE > 4K
			 */
			unsigned int nr = min(512U, batch->nr);

			free_pages_and_swap_cache(pages, nr);
			pages += nr;
			batch->nr -= nr;

			cond_resched();
		} while (batch->nr);
	}
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	batch->encoded_pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page));

	return false;
}
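
/*
 * Caller-side contract, shown as a rough sketch rather than code from this
 * file: a true return value means the gather could not grow another batch,
 * so the caller must flush before gathering more pages. The wrapper in
 * <asm-generic/tlb.h> does roughly:
 *
 *	if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
 *		tlb_flush_mmu(tlb);
 */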

#endif /* MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means, this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
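
/*
 * Rough sketch of the walker side that the scheme above synchronizes
 * against; gup_fast()-style walkers disable IRQs around the lockless walk,
 * so this is illustrative rather than an exact quote of that code:
 *
 *	local_irq_save(flags);		// holds off the IPI / RCU-sched grace period
 *	... walk page tables, take page references ...
 *	local_irq_restore(flags);	// only now may the directories be freed
 */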

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

static void tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
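
/*
 * Note on the allocation-failure path above: when the batch page cannot be
 * allocated, tlb_remove_table() falls back to tlb_remove_table_one(), which
 * first synchronizes with concurrent lockless walkers via
 * tlb_remove_table_sync_one() and then frees the single table directly.
 * This is the "guarantee progress" fallback described in the semi RCU
 * comment earlier in this file.
 */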

static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}
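
/*
 * Descriptive note on the ordering above: this is the central mmu_gather
 * invariant. The TLB is invalidated for the gathered range before any of
 * the gathered pages or page-table pages are returned to the allocator.
 */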

static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			     bool fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;
#endif
	tlb->delayed_rmap = 0;

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, false);
}
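
/*
 * Illustrative lifecycle sketch; the unmap helpers named here are
 * assumptions about a typical caller, not part of this file:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	unmap_vmas(&tlb, ...);		// gathers pages via __tlb_remove_page_size()
 *	free_pgtables(&tlb, ...);	// may queue tables via tlb_remove_table()
 *	tlb_finish_mmu(&tlb);		// flush the TLB, then free everything gathered
 */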

/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, true);
}

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
	/*
	 * If parallel threads are doing PTE changes on the same range under a
	 * non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and be left with stale TLB entries. So flush the TLB forcefully
	 * if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we
	 * may be left with stale TLB entries on architectures, e.g. aarch64,
	 * that can limit the flush to a given page-table level.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by
		 * avoiding multiple CPUs spamming TLBI messages at the
		 * same time.
		 *
		 * On x86 non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}