/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Index to the 4K subpage of the huge page mapping @address. */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb entry, that is, a normal
 * hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

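/*
 * pud_huge() returns 1 if @pud maps a 1GB hugetlb page, i.e. if the
 * PSE bit is set; only the PSE bit is tested here.
 */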
int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
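/*
 * Search bottom-up: look for a free, huge-page-aligned range of @len
 * bytes between the process's mmap base and the top of its address
 * space (the 32-bit task size for compat syscalls).
 */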
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);
	info.high_limit = in_compat_syscall() ?
			task_size_32bit() : task_size_64bit();
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

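/*
 * Search top-down: look for a free, huge-page-aligned range below the
 * mmap base, falling back to a bottom-up search if that fails.
 */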
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

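/*
 * Arch hook for hugetlb mmap(): validate @len and any MAP_FIXED or
 * hinted address, then pick the bottom-up or top-down helper to match
 * the mm's regular get_unmapped_area() layout.
 */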
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
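/*
 * Parse the hugepagesz= boot option, e.g. "hugepagesz=2M" or
 * "hugepagesz=1G". 1GB pages are accepted only when the CPU supports
 * them (X86_FEATURE_GBPAGES).
 */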
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
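/*
 * Register the 1GB hstate at boot even without hugepagesz=1G on the
 * command line: with compaction or CMA available, gigantic pages can
 * still be allocated at runtime.
 */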
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif /* CONFIG_X86_64 */