// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */

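/*
 * With a virtually contiguous vmemmap, the conversion helpers reduce to
 * plain pointer arithmetic, e.g. (illustrative, mirroring the
 * CONFIG_SPARSEMEM_VMEMMAP definitions in asm-generic/memory_model.h):
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */
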
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/bootmem_info.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of walked pte.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or is mapped from.
 */
struct vmemmap_remap_walk {
	void (*remap_pte)(pte_t *pte, unsigned long addr,
			  struct vmemmap_remap_walk *walk);
	unsigned long nr_walked;
	struct page *reuse_page;
	unsigned long reuse_addr;
	struct list_head *vmemmap_pages;
};

static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *page = pmd_page(*pmd);
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(page + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_leaf(*pmd))) {
		/*
		 * Higher order allocations from buddy allocator must be able
		 * to be treated as independent small pages (as they can be
		 * freed individually).
		 */
		if (!PageReserved(page))
			split_page(page, get_order(PMD_SIZE));

		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

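/*
 * vmemmap areas are typically mapped with huge PMDs where the architecture
 * supports it.  Before individual vmemmap base pages can be remapped, a
 * leaf PMD must be split into a PTE table covering the same range; the
 * helper below only performs the split when the PMD is actually a leaf.
 */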
static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	int leaf;

	spin_lock(&init_mm.page_table_lock);
	leaf = pmd_leaf(*pmd);
	spin_unlock(&init_mm.page_table_lock);

	if (!leaf)
		return 0;

	return __split_vmemmap_huge_pmd(pmd, start);
}

static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in table walk before we start
	 * remapping (which is calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}

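/*
 * The vmemmap_pmd/pud/p4d_range() walkers below follow the usual kernel
 * page-table-walk idiom: clamp each sub-range with pXd_addr_end() and
 * advance with "pXd++, addr = next" until the end of the range is reached.
 */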
static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		int ret;

		ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
		if (ret)
			return ret;

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
	 * belongs to the range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);

	return 0;
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator, so free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid values
 * would be caught by free_tail_pages_check(); to avoid its "corrupted
 * mapping in tail page" report, we need to reset at least 3 struct pages
 * (one head struct page struct and two tail struct page structs).
 */
#define NR_RESET_STRUCT_PAGE	3

static inline void reset_struct_pages(struct page *start)
{
	int i;
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	for (i = 0; i < NR_RESET_STRUCT_PAGE; i++)
		memcpy(start + i, from, sizeof(*from));
}

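/*
 * Note that reset_struct_pages() takes its template from
 * start + NR_RESET_STRUCT_PAGE: after the copy_page() in
 * vmemmap_restore_pte() below, those entries hold ordinary tail struct
 * pages and thus already carry the clean state the first entries need.
 */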
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range is mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_free(unsigned long start, unsigned long end,
		       unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * In order to make the remapping routine most efficient for huge
	 * pages, the routine of vmemmap page table walking has the following
	 * rules (see more details from the vmemmap_pte_range()):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   should be continuous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed.  These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}

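/*
 * Rough caller sketch for vmemmap_remap_free() above, modelled on
 * mm/hugetlb_vmemmap.c (names illustrative, not part of this file):
 * everything past the first vmemmap page of a HugeTLB page is remapped
 * onto that page and handed back to the allocator:
 *
 *	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
 *	vmemmap_end   = vmemmap_addr + nr_free_vmemmap_pages * PAGE_SIZE;
 *	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
 *	if (vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
 *		return;
 */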
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end)
 *			 to the pages which are from the @vmemmap_pages,
 *			 respectively.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in the vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}

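/*
 * Rough caller sketch for vmemmap_remap_alloc() above, again modelled on
 * mm/hugetlb_vmemmap.c: before an optimized HugeTLB page can be freed back
 * to the buddy allocator, its vmemmap must be re-populated (names
 * illustrative, gfp mask as used by that caller):
 *
 *	vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
 *			    GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
 */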
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

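	/*
	 * 1UL << find_first_bit(&nr_pfns, ...) is the largest power of two
	 * dividing nr_pfns, so the block is aligned to its own size;
	 * nr_align is then the number of pfns skipped to reach that
	 * alignment.  E.g. a 2M (512 pfn) request starting at an unaligned
	 * pfn is padded up to the next 512-pfn boundary.
	 */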
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call to this page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE
			 * paths, and through vmemmap_populate_compound_pages()
			 * when slab is available.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}

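/*
 * An architecture without huge vmemmap mappings can implement its
 * vmemmap_populate() as a thin wrapper around the helper above, e.g.
 * (sketch of what several arches do):
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */
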
/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each successful range onlining. Thus the value of @nr_range
 * at section memmap populate corresponds to the in-progress range
 * being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

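/*
 * Worked example of the dedup scheme below, assuming x86-64 with a 64-byte
 * struct page: a 2M compound page spans 512 base pages, i.e. 32K (eight
 * vmemmap pages) of struct page data.  Only the first two vmemmap pages
 * (head page plus one tail page) get distinct backing; the remaining six
 * are mapped to the tail page.  See Documentation/vm/vmemmap_dedup.rst.
 */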
static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(*pte));
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next = addr, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/vm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(*pte));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

* __meminit
__populate_section_memmap(unsigned long pfn
,
776 unsigned long nr_pages
, int nid
, struct vmem_altmap
*altmap
,
777 struct dev_pagemap
*pgmap
)
779 unsigned long start
= (unsigned long) pfn_to_page(pfn
);
780 unsigned long end
= start
+ nr_pages
* sizeof(struct page
);
783 if (WARN_ON_ONCE(!IS_ALIGNED(pfn
, PAGES_PER_SUBSECTION
) ||
784 !IS_ALIGNED(nr_pages
, PAGES_PER_SUBSECTION
)))
787 if (is_power_of_2(sizeof(struct page
)) &&
788 pgmap
&& pgmap_vmemmap_nr(pgmap
) > 1 && !altmap
)
789 r
= vmemmap_populate_compound_pages(pfn
, start
, end
, nid
, pgmap
);
791 r
= vmemmap_populate(start
, end
, nid
, altmap
);
796 return pfn_to_page(pfn
);