/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K  16
#define PAGE_SHIFT_16M  24
#define PAGE_SHIFT_16G  34

unsigned int HPAGE_SHIFT;
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES       128
struct psize_gpages {
        u64 gpage_list[MAX_NUMBER_GPAGES];
        unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES       1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
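
/*
 * Rough flow: the early-boot code (device-tree scan for 16G pages on hash
 * MMUs, command-line parsing in reserve_hugetlb_gpages() on FSL below) calls
 * add_gpage() to record the physical addresses of gigantic pages in
 * gpage_freearray, and alloc_bootmem_huge_page() later drains that array onto
 * the generic huge_boot_pages list, one entry per reserved page.
 */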
#define hugepd_none(hpd)        ((hpd).pd == 0)
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */

/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
 * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
 */
int pmd_huge(pmd_t pmd)
{
        /* leaf pte for huge page, bottom two bits != 00 */
        return ((pmd_val(pmd) & 0x3) != 0x0);
}

int pud_huge(pud_t pud)
{
        /* leaf pte for huge page, bottom two bits != 00 */
        return ((pud_val(pud) & 0x3) != 0x0);
}

int pgd_huge(pgd_t pgd)
{
        /* leaf pte for huge page, bottom two bits != 00 */
        return ((pgd_val(pgd) & 0x3) != 0x0);
}

int pmd_huge_support(void)
{
        return 1;
}
#else
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

int pgd_huge(pgd_t pgd)
{
        return 0;
}

int pmd_huge_support(void)
{
        return 0;
}
#endif
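
/*
 * In other words (see also the "4 cases" comment above
 * find_linux_pte_or_hugepte() below): on BOOK3S 64 an entry whose low two
 * bits are non-zero is a leaf huge PTE at that level, while an entry with low
 * bits 00 is either empty, a pointer to the next table, or a hugepd pointer.
 */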
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        /* Only called for hugetlbfs pages, hence can ignore THP */
        return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned pdshift, unsigned pshift)
{
        struct kmem_cache *cachep;
        pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
        int i;
        int num_hugepd = 1 << (pshift - pdshift);
        cachep = hugepte_cache;
#else
        cachep = PGT_CACHE(pdshift - pshift);
#endif

        new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                else
                        /* We use the old format for PPC_FSL_BOOK3E */
                        hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        hpdp->pd = 0;
                kmem_cache_free(cachep, new);
        }
#else
        if (!hugepd_none(*hpdp))
                kmem_cache_free(cachep, new);
        else {
#ifdef CONFIG_PPC_BOOK3S_64
                hpdp->pd = (unsigned long)new |
                           (shift_to_mmu_psize(pshift) << 2);
#else
                hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
        }
#endif
        spin_unlock(&mm->page_table_lock);
        return 0;
}
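
/*
 * Worked example for the FSL case above: if the huge page shift exceeds the
 * directory shift by 2, num_hugepd = 1 << 2 = 4, so four consecutive
 * directory entries are filled and all of them point at the same hugepte
 * block allocated from hugepte_cache.
 */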
/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz-1);
        pg = pgd_offset(mm, addr);

        if (pshift == PGDIR_SHIFT)
                /* 16GB huge page */
                return (pte_t *) pg;
        else if (pshift > PUD_SHIFT)
                /*
                 * We need to use hugepd table
                 */
                hpdp = (hugepd_t *)pg;
        else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT)
                        hpdp = (hugepd_t *)pu;
                else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
                        else
                                hpdp = (hugepd_t *)pm;
                }
        }
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(hpdp, addr, pdshift);
}
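
/*
 * Example placement on BOOK3S 64, assuming a 64K base page size (matching the
 * shifts defined at the top of this file): a 16M page has
 * pshift == 24 == PMD_SHIFT and its leaf PTE is stored directly in the PMD,
 * while a 16G page has pshift == 34 == PGDIR_SHIFT and is stored in the PGD;
 * intermediate sizes go through a hugepd table.
 */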
#else

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz-1);
        pg = pgd_offset(mm, addr);

        if (pshift >= HUGEPD_PGD_SHIFT) {
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift >= HUGEPD_PUD_SHIFT) {
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        hpdp = (hugepd_t *)pm;
                }
        }

        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(hpdp, addr, pdshift);
}
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
        int i;

        if (addr == 0)
                return;

        gpage_freearray[idx].nr_gpages = number_of_pages;

        for (i = 0; i < number_of_pages; i++) {
                gpage_freearray[idx].gpage_list[i] = addr;
                addr += page_size;
        }
}
/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        int idx = shift_to_mmu_psize(huge_page_shift(hstate));
        int nr_gpages = gpage_freearray[idx].nr_gpages;

        if (nr_gpages == 0)
                return 0;

#ifdef CONFIG_HIGHMEM
        /*
         * If gpages can be in highmem we can't use the trick of storing the
         * data structure in the page; allocate space for this
         */
        m = alloc_bootmem(sizeof(struct huge_bootmem_page));
        m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
        m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

        list_add(&m->list, &huge_boot_pages);
        gpage_freearray[idx].nr_gpages = nr_gpages;
        gpage_freearray[idx].gpage_list[nr_gpages] = 0;
        m->hstate = hstate;

        return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val,
                                       const char *unused)
{
        static phys_addr_t size;
        unsigned long npages;

        /*
         * The hugepagesz and hugepages cmdline options are interleaved.  We
         * use the size variable to keep track of whether or not this was done
         * properly and skip over instances where it is incorrect.  Other
         * command-line parsing code will issue warnings, so we don't need to.
         */
        if ((strcmp(param, "default_hugepagesz") == 0) ||
            (strcmp(param, "hugepagesz") == 0)) {
                size = memparse(val, NULL);
        } else if (strcmp(param, "hugepages") == 0) {
                if (size != 0) {
                        if (sscanf(val, "%lu", &npages) <= 0)
                                npages = 0;
                        gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
                        size = 0;
                }
        }
        return 0;
}
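
/*
 * Illustrative command line (assuming the platform supports a 1G page size):
 * "hugepagesz=1G hugepages=2" records 2 in gpage_npages[] for the 1G psize.
 * A "hugepages=" count is only honoured when it follows a valid
 * "hugepagesz=" option, which is what the static 'size' tracking above checks.
 */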
/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
        static __initdata char cmdline[COMMAND_LINE_SIZE];
        phys_addr_t size, base;
        int i;

        strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
        parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
                   &do_gpage_early_setup);

        /*
         * Walk gpage list in reverse, allocating larger page sizes first.
         * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
         * When we reach the point in the list where pages are no longer
         * considered gpages, we're done.
         */
        for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
                if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
                        continue;
                else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
                        break;

                size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
                base = memblock_alloc_base(size * gpage_npages[i], size,
                                           MEMBLOCK_ALLOC_ANYWHERE);
                add_gpage(base, size, gpage_npages[i]);
        }
}
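
/*
 * For instance, assuming the default MAX_ORDER of 11 and 4K base pages, the
 * loop above stops as soon as it reaches sizes below 8M (shift < 23): those
 * can be satisfied by the buddy allocator later, so only the truly gigantic
 * sizes are carved out of memblock here.
 */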
#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;

        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(hugepte_cache, batch->ptes[i]);

        free_page((unsigned long)batch);
}
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &__get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm),
                          cpumask_of(smp_processor_id()))) {
                kmem_cache_free(hugepte_cache, hugepte);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
}
#endif
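
/*
 * The batching above defers freeing: if another CPU might be walking these
 * page tables locklessly (find_linux_pte_or_hugepte() or the gup_hugepte()
 * fast path below), the hugepte block is queued on a per-cpu page and only
 * returned to the kmem cache from an RCU callback; the single-user,
 * single-CPU case can free immediately.
 */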
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
        /* Note: On fsl the hpdp may be the first of several */
        num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
        unsigned int shift = hugepd_shift(*hpdp);
#endif

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                hpdp->pd = 0;

#ifdef CONFIG_PPC_FSL_BOOK3E
        hugepd_free(tlb, hugepte);
#else
        pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}
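
/*
 * The floor/ceiling checks above follow the same convention as the generic
 * free_pgd_range(): the hugepd is only torn down when the whole region
 * covered by this directory entry lies inside [floor, ceiling).
 */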
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!is_hugepd(pmd)) {
                        /*
                         * if it is not hugepd pointer, we should already find
                         * it cleared.
                         */
                        WARN_ON(!pmd_none_or_clear_bad(pmd));
                        continue;
                }
#ifdef CONFIG_PPC_FSL_BOOK3E
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(pud)) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
#ifdef CONFIG_PPC_FSL_BOOK3E
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(pgd)) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
#ifdef CONFIG_PPC_FSL_BOOK3E
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *ptep;
        struct page *page;
        unsigned shift;
        unsigned long mask;
        /*
         * Transparent hugepages are handled by generic code. We can skip them
         * here.
         */
        ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

        /* Verify it is a huge page else bail. */
        if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
                return ERR_PTR(-EINVAL);

        mask = (1UL << shift) - 1;
        page = pte_page(*ptep);
        if (page)
                page += (address & mask) / PAGE_SIZE;

        return page;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        BUG();
        return NULL;
}
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz-1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}
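
/*
 * Example: with sz = 16M, addr = 0x1030000 rounds up to __boundary =
 * 0x2000000; the "- 1" comparison keeps the result correct even when
 * addr + sz wraps around to 0 at the top of the address space.
 */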
int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
               unsigned long addr, unsigned long end,
               int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(*hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
        unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

        return 1UL << mmu_psize_to_shift(psize);
#else
        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        return huge_page_size(hstate_vma(vma));
#endif
}
static inline bool is_power_of_4(unsigned long x)
{
        if (is_power_of_2(x))
                return (__ilog2(x) % 2) ? false : true;
        return false;
}
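
/*
 * e.g. 16M (2^24) is a power of 4 while 8M (2^23) is not; the FSL Book3E
 * check in add_huge_page_size() below relies on this, presumably because the
 * e500 TLB only implements power-of-4 page sizes.
 */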
static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
        if ((size < PAGE_SIZE) || !is_power_of_4(size))
                return -EINVAL;
#else
        if (!is_power_of_2(size)
            || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
                return -EINVAL;
#endif

        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
                return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
        /* Disable support for 64K huge pages when 64K SPU local store
         * support is enabled as the current implementation conflicts.
         */
        if (shift == PAGE_SHIFT_64K)
                return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if huge page size has already been setup */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}
static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0)
                printk(KERN_WARNING "Invalid huge page size specified(%llu)\n",
                       size);

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                /* Don't treat normal page sizes as huge... */
                if (shift != PAGE_SHIFT)
                        if (add_huge_page_size(1ULL << shift) < 0)
                                continue;
        }

        /*
         * Create a kmem cache for hugeptes.  The bottom bits in the pte have
         * size information encoded in them, so align them to allow this
         */
        hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
                                          HUGEPD_SHIFT_MASK + 1, 0, NULL);
        if (hugepte_cache == NULL)
                panic("%s: Unable to create kmem cache for hugeptes\n",
                      __func__);

        /* Default hpage size = 4M */
        if (mmu_psize_defs[MMU_PAGE_4M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
        else
                panic("%s: Unable to set default huge page size\n", __func__);

        return 0;
}
#else
static int __init hugetlbpage_init(void)
{
        int psize;

        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;

                if (shift < PMD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PUD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
                /*
                 * if we have pdshift and shift value same, we don't
                 * use pgt cache for hugepd.
                 */
                if (pdshift != shift) {
                        pgtable_cache_add(pdshift - shift, NULL);
                        if (!PGT_CACHE(pdshift - shift))
                                panic("hugetlbpage_init(): could not create "
                                      "pgtable cache for %d bit pagesize\n",
                                      shift);
                }
        }

        /* Set default large page size. Currently, we pick 16M or 1M
         * depending on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

        return 0;
}
#endif
module_init(hugetlbpage_init);
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
                        start = kmap_atomic(page+i);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start);
                }
        }
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, bottom two bits != 00
 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it.
 */

pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
        pgd_t pgd, *pgdp;
        pud_t pud, *pudp;
        pmd_t pmd, *pmdp;
        pte_t *ret_pte;
        hugepd_t *hpdp = NULL;
        unsigned pdshift = PGDIR_SHIFT;

        if (shift)
                *shift = 0;

        pgdp = pgdir + pgd_index(ea);
        pgd  = ACCESS_ONCE(*pgdp);
        /*
         * Always operate on the local stack value. This makes sure the
         * value doesn't get updated by a parallel THP split/collapse,
         * page fault or a page unmap. The returned pte_t * is still not
         * stable, so it must be rechecked by the caller for those
         * conditions.
         */
        if (pgd_none(pgd))
                return NULL;
        else if (pgd_huge(pgd)) {
                ret_pte = (pte_t *) pgdp;
                goto out;
        } else if (is_hugepd(&pgd))
                hpdp = (hugepd_t *)&pgd;
        else {
                /*
                 * Even if we end up with an unmap, the pgtable will not
                 * be freed, because we do an rcu free and here we are
                 * irq disabled
                 */
                pdshift = PUD_SHIFT;
                pudp = pud_offset(&pgd, ea);
                pud  = ACCESS_ONCE(*pudp);

                if (pud_none(pud))
                        return NULL;
                else if (pud_huge(pud)) {
                        ret_pte = (pte_t *) pudp;
                        goto out;
                } else if (is_hugepd(&pud))
                        hpdp = (hugepd_t *)&pud;
                else {
                        pdshift = PMD_SHIFT;
                        pmdp = pmd_offset(&pud, ea);
                        pmd  = ACCESS_ONCE(*pmdp);
                        /*
                         * A hugepage collapse is captured by pmd_none, because
                         * it marks the pmd none and does a hpte invalidate.
                         *
                         * A hugepage split is captured by pmd_trans_splitting,
                         * because we mark the pmd trans splitting and do a
                         * hpte invalidate.
                         */
                        if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                                return NULL;

                        if (pmd_huge(pmd) || pmd_large(pmd)) {
                                ret_pte = (pte_t *) pmdp;
                                goto out;
                        } else if (is_hugepd(&pmd))
                                hpdp = (hugepd_t *)&pmd;
                        else
                                return pte_offset_kernel(&pmd, ea);
                }
        }
        if (!hpdp)
                return NULL;

        ret_pte = hugepte_offset(hpdp, ea, pdshift);
        pdshift = hugepd_shift(*hpdp);
out:
        if (shift)
                *shift = pdshift;
        return ret_pte;
}
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
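
/*
 * As the comments inside the function note, callers are expected to be in a
 * context where the page tables cannot be freed underneath them (page-table
 * pages are RCU-freed and the lookup runs with interrupts disabled), and they
 * must re-validate the returned PTE since it can still change concurrently.
 */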
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask;
        unsigned long pte_end;
        struct page *head, *page, *tail;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz-1);
        if (pte_end < end)
                end = pte_end;

        pte = ACCESS_ONCE(*ptep);
        mask = _PAGE_PRESENT | _PAGE_USER;
        if (write)
                mask |= _PAGE_RW;

        if ((pte_val(pte) & mask) != mask)
                return 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        /*
         * check for splitting here
         */
        if (pmd_trans_splitting(pte_pmd(pte)))
                return 0;
#endif

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
        tail = page;
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        /*
         * Any tail page needs its mapcount reference taken before we
         * return.
         */
        while (refs--) {
                if (PageTail(tail))
                        get_huge_page_tail(tail);
                tail++;
        }

        return 1;
}