// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 * See Documentation/mm/vmemmap_dedup.rst
 */
#define pr_fmt(fmt)	"HugeTLB: " fmt

#include <linux/pgtable.h>
#include <linux/moduleparam.h>
#include <linux/bootmem_info.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"
/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of walked pte.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or is mapped from.
 */
struct vmemmap_remap_walk {
	void			(*remap_pte)(pte_t *pte, unsigned long addr,
					     struct vmemmap_remap_walk *walk);
	unsigned long		nr_walked;
	struct page		*reuse_page;
	unsigned long		reuse_addr;
	struct list_head	*vmemmap_pages;
};
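
/*
 * For orientation (an illustrative example, not enforced by this file):
 * with 4 KiB base pages and a 64-byte struct page, a 2 MiB HugeTLB page is
 * described by 512 struct pages, i.e. 8 vmemmap pages.  Because the tail
 * struct pages hold (nearly) identical contents, HVO keeps the first vmemmap
 * page, remaps the remaining 7 vmemmap pages read-only to it, and frees them.
 * See Documentation/mm/vmemmap_dedup.rst for the full picture.
 */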
static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *page = pmd_page(*pmd);
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(page + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_leaf(*pmd))) {
		/*
		 * Higher order allocations from buddy allocator must be able to
		 * be treated as independent small pages (as they can be freed
		 * individually).
		 */
		if (!PageReserved(page))
			split_page(page, get_order(PMD_SIZE));

		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}
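
/*
 * Note on the split above: on architectures that populate the vmemmap with
 * PMD-sized leaf entries (typically 2 MiB), individual 4 KiB vmemmap pages
 * can only be remapped or freed once such a leaf is replaced by a page table
 * with PTRS_PER_PTE PTE entries pointing at the very same pages.  That is
 * what __split_vmemmap_huge_pmd() does under init_mm's page_table_lock, with
 * split_page() making a buddy-allocated 2 MiB chunk individually freeable.
 */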
static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	int leaf;

	spin_lock(&init_mm.page_table_lock);
	leaf = pmd_leaf(*pmd);
	spin_unlock(&init_mm.page_table_lock);

	if (!leaf)
		return 0;

	return __split_vmemmap_huge_pmd(pmd, start);
}
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in table walk before we start
	 * remapping (which is calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}
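
/*
 * Note: the callers walk [@reuse, @end) with @reuse as the very first
 * address, so the first PTE visited above maps the reuse page.  If the
 * caller did not pre-populate @walk->reuse_page (see vmemmap_remap_free()),
 * that first entry is only recorded and skipped; otherwise it is handed to
 * @walk->remap_pte() like any other entry, which is why vmemmap_remap_pte()
 * special-cases @walk->reuse_addr.
 */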
static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		int ret;

		ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
		if (ret)
			return ret;

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}
static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}
static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}
static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	flush_tlb_kernel_range(start, end);

	return 0;
}
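
/*
 * Note: vmemmap_remap_range() descends init_mm's kernel page tables
 * (pgd -> p4d -> pud -> pmd -> pte) and only rewrites leaf PTEs, splitting
 * PMD leaves on the way down; it never frees intermediate page tables.
 * flush_tlb_kernel_range() is issued once over [@start, @end) after the walk
 * (PMD splits additionally flush their own PMD-sized range).  The callers
 * below hold mmap_read_lock(&init_mm) around the walk.
 */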
/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator, so free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}
/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru)
		free_vmemmap_page(page);
}
static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	struct page *page = pte_page(*pte);
	pte_t entry;

	/* Remapping the head page requires r/w */
	if (unlikely(addr == walk->reuse_addr)) {
		pgprot = PAGE_KERNEL;
		list_del(&walk->reuse_page->lru);

		/*
		 * Makes sure that preceding stores to the page contents from
		 * vmemmap_remap_free() become visible before the set_pte_at()
		 * write.
		 */
		smp_wmb();
	}

	entry = mk_pte(walk->reuse_page, pgprot);
	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}
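
/*
 * Illustrative before/after for one optimized HugeTLB page (a layout sketch
 * only; the number of vmemmap pages depends on the HugeTLB size and on
 * sizeof(struct page)):
 *
 *   before:  reuse_addr               -> old head vmemmap page
 *            reuse_addr + 1*PAGE_SIZE -> tail vmemmap page 1
 *            reuse_addr + 2*PAGE_SIZE -> tail vmemmap page 2 ...
 *   after:   reuse_addr               -> walk->reuse_page  (PAGE_KERNEL)
 *            reuse_addr + 1*PAGE_SIZE -> walk->reuse_page  (PAGE_KERNEL_RO)
 *            reuse_addr + 2*PAGE_SIZE -> walk->reuse_page  (PAGE_KERNEL_RO) ...
 *
 * Every page handed to vmemmap_remap_pte() ends up on @walk->vmemmap_pages
 * so that vmemmap_remap_free() can release it afterwards.
 */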
/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid value will
 * be checked in free_tail_pages_check(). To avoid the "corrupted mapping in
 * tail page" message, we need to reset at least 3 struct page structs (one
 * head struct page and two tail struct pages).
 */
#define NR_RESET_STRUCT_PAGE	3
static inline void reset_struct_pages(struct page *start)
{
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
	memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}
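
/*
 * Worked example (assuming a 64-byte struct page and 4 KiB PAGE_SIZE, which
 * the BUILD_BUG_ON above does not require but which is the common case):
 * one vmemmap page holds 4096 / 64 = 64 struct pages, so start + 3 still
 * points inside the same head vmemmap page that copy_page() in
 * vmemmap_restore_pte() has just filled.  The BUILD_BUG_ON only demands the
 * weaker 2 * NR_RESET_STRUCT_PAGE <= PAGE_SIZE / sizeof(struct page), i.e.
 * that the source of the memcpy() lies in that page and cannot overlap the
 * destination.
 */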
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	/*
	 * Makes sure that preceding stores to the page contents become visible
	 * before the set_pte_at() write.
	 */
	smp_wmb();
	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}
/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_free(unsigned long start, unsigned long end,
			      unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};
	int nid = page_to_nid((struct page *)start);
	gfp_t gfp_mask = GFP_KERNEL | __GFP_THISNODE | __GFP_NORETRY |
			 __GFP_NOWARN;

	/*
	 * Allocate a new head vmemmap page to avoid breaking a contiguous
	 * block of struct page memory when freeing it back to the page
	 * allocator in free_vmemmap_page_list(). This will allow the likely
	 * contiguous struct page backing memory to be kept contiguous,
	 * allowing for more allocations of hugepages. Fall back to the
	 * currently mapped head page should the allocation fail.
	 */
	walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
	if (walk.reuse_page) {
		copy_page(page_to_virt(walk.reuse_page),
			  (void *)walk.reuse_addr);
		list_add(&walk.reuse_page->lru, &vmemmap_pages);
	}

	/*
	 * To make the remapping routine most efficient for the huge pages,
	 * the vmemmap page table walking routine has the following rules
	 * (see more details in vmemmap_pte_range()):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   should be continuous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed. These are pages
		 * which were removed from the vmemmap. They will be restored
		 * in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}
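
/*
 * Note on the failure path above: if vmemmap_remap_range() fails after some
 * PTEs were already redirected, the second walk with vmemmap_restore_pte()
 * re-populates the already-walked part of the range from the pages still
 * queued on vmemmap_pages, and the non-zero return value propagates so that
 * hugetlb_vmemmap_optimize() does not mark the HugeTLB page as optimized.
 */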
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}
/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end)
 *			 to the pages which are from the @vmemmap_pages list
 *			 respectively.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			       unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}
DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
/**
 * hugetlb_vmemmap_restore - restore previously optimized (by
 *			     hugetlb_vmemmap_optimize()) vmemmap pages which
 *			     will be reallocated and remapped.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be restored.
 *
 * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
 * negative error code otherwise.
 */
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!HPageVmemmapOptimized(head))
		return 0;

	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_start,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse,
				  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
	if (!ret) {
		ClearHPageVmemmapOptimized(head);
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	}

	return ret;
}
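
/*
 * Address arithmetic used above (and by hugetlb_vmemmap_optimize() below),
 * with illustrative numbers assuming x86-64, 4 KiB base pages, a 64-byte
 * struct page, and HUGETLB_VMEMMAP_RESERVE_SIZE being a single page:
 *
 *   vmemmap_end   = vmemmap_start + hugetlb_vmemmap_size(h)
 *   vmemmap_reuse = vmemmap_start                  (first vmemmap page, kept)
 *   vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE  (skip the reuse page)
 *
 * For a 2 MiB HugeTLB page, hugetlb_vmemmap_size() is 512 * 64 bytes = 32 KiB
 * under these assumptions, so [vmemmap_start, vmemmap_end) spans the 7
 * vmemmap pages that the optimization freed and that must be reallocated
 * here.
 */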
/* Return true if a HugeTLB page's vmemmap should and can be optimized. */
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
	if (!READ_ONCE(vmemmap_optimize_enabled))
		return false;

	if (!hugetlb_vmemmap_optimizable(h))
		return false;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
		pmd_t *pmdp, pmd;
		struct page *vmemmap_page;
		unsigned long vaddr = (unsigned long)head;

		/*
		 * Only the vmemmap page's vmemmap page can be self-hosted.
		 * Walking the page tables to find the backing page of the
		 * vmemmap page.
		 */
		pmdp = pmd_off_k(vaddr);
		/*
		 * The READ_ONCE() is used to stabilize *pmdp in a register or
		 * on the stack so that it will stop changing under the code.
		 * The only concurrent operation where it can be changed is
		 * split_vmemmap_huge_pmd() (*pmdp will be stable after this
		 * operation).
		 */
		pmd = READ_ONCE(*pmdp);
		if (pmd_leaf(pmd))
			vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
		else
			vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
		/*
		 * Due to HugeTLB alignment requirements and the vmemmap pages
		 * being at the start of the hotplugged memory region in the
		 * memory_hotplug.memmap_on_memory case, checking whether any
		 * vmemmap page's vmemmap page is marked as VmemmapSelfHosted
		 * is sufficient.
		 *
		 * [                  hotplugged memory                  ]
		 * [        section        ][...][        section        ]
		 * [ vmemmap ][              usable memory               ]
		 *   ^   |     |                                        |
		 *   +---+     |                                        |
		 *     ^       |                                        |
		 *     +-------+                                        |
		 *          ^                                           |
		 *          +-------------------------------------------+
		 */
		if (PageVmemmapSelfHosted(vmemmap_page))
			return false;
	}

	return true;
}
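
/*
 * Note: PageVmemmapSelfHosted() means the vmemmap page itself lives inside
 * the hot-plugged memory range it describes (memory_hotplug.memmap_on_memory).
 * HugeTLB pages whose vmemmap is self-hosted are simply skipped here; only
 * hugepages whose vmemmap is backed by ordinary memory are optimized.
 */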
/**
 * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be optimized.
 *
 * This function only tries to optimize @head's vmemmap pages and does not
 * guarantee that the optimization will succeed after it returns. The caller
 * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
 * have been optimized.
 */
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!vmemmap_should_optimize(h, head))
		return;

	static_branch_inc(&hugetlb_optimize_vmemmap_key);

	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_start, @vmemmap_end) is mapped to.
	 */
	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	else
		SetHPageVmemmapOptimized(head);
}
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
	{
		.procname	= "hugetlb_optimize_vmemmap",
		.data		= &vmemmap_optimize_enabled,
		.maxlen		= sizeof(vmemmap_optimize_enabled),
		.mode		= 0644,
		.proc_handler	= proc_dobool,
	},
	{ }
};
static int __init hugetlb_vmemmap_init(void)
{
	/* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
	BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE);

	if (IS_ENABLED(CONFIG_PROC_SYSCTL)) {
		const struct hstate *h;

		for_each_hstate(h) {
			if (hugetlb_vmemmap_optimizable(h)) {
				register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
				break;
			}
		}
	}
	return 0;
}
late_initcall(hugetlb_vmemmap_init);