/*
 * arch/x86/mm/pgtable.c
 */
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

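/*
 * Allocate a page of PTEs for a kernel mapping.  Kernel page tables are
 * never in highmem, so a plain zeroed page from the lowmem allocator is
 * sufficient.
 */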
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

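/*
 * Allocate a page of PTEs for a user mapping.  With CONFIG_HIGHPTE the
 * page may come from highmem (it is mapped with kmap_atomic when it is
 * accessed); pgtable_page_ctor() sets up the struct page for use as a
 * page table.
 */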
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

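/*
 * Free a PTE page through the mmu_gather batching machinery, telling
 * any paravirt backend that the page is no longer a page table before
 * it goes back to the allocator.
 */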
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
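/* Same as __pte_free_tlb, but for a pmd page (PAE and 64-bit only). */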
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
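/* pud pages only exist with 4-level page tables, i.e. on 64-bit. */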
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

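/*
 * All pgds are kept on pgd_list so that changes to the kernel mappings
 * can be propagated to every page table in the system; callers must
 * hold pgd_lock.
 */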
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#ifdef CONFIG_X86_64
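/*
 * On 64-bit, a fresh pgd is put on pgd_list, its user half is cleared
 * and its kernel half is copied from init_level4_pgt.
 */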
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	unsigned long flags;

	if (!pgd)
		return NULL;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);

	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}

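/* Unlink the pgd from pgd_list and return its page to the allocator. */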
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long flags;

	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	free_page((unsigned long)pgd);
}
#else	/* !CONFIG_X86_64 */
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

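/*
 * Set up a fresh pgd: clear the user part and, where the kernel part is
 * shared, clone the kernel references from swapper_pg_dir; otherwise
 * put the pgd on pgd_list so that kernel mapping updates can find it.
 */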
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/*
	 * If the pgd points to a shared pagetable level (either the
	 * ptes in non-PAE, or shared PMD in PAE), then just copy the
	 * references from swapper_pg_dir.
	 */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 USER_PTRS_PER_PGD,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}

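/* Undo pgd_ctor: take the pgd back off pgd_list if it was added. */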
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}

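/*
 * Install a pmd page into a PAE pgd.  The pud level is folded on PAE,
 * so this actually writes a PDPT entry; see the comments below for why
 * only _PAGE_PRESENT is set and why cr3 must be reloaded.
 */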
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/*
	 * Note: almost everything apart from _PAGE_PRESENT is
	 * reserved at the pmd (PDPT) level.
	 */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else	/* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */

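/*
 * Allocate and initialize a pgd for a 32-bit mm: run the constructor
 * and, on PAE, prepopulate the pmds; on any failure everything is
 * unwound and NULL is returned.
 */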
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that pgd_prepopulate_pmd() and paravirt_alloc_pmd() can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

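/* Free a 32-bit pgd, including any preallocated pmds still attached. */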
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}
#endif	/* CONFIG_X86_64 */