#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

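/*
 * Allocate a zeroed page to hold kernel ptes.  __GFP_REPEAT asks the
 * allocator to retry harder before giving up.
 */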
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

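/*
 * Allocate a pte page for user mappings.  With CONFIG_HIGHPTE the page
 * may come from highmem, which is why callers get a struct page
 * (pgtable_t) rather than a kernel virtual address; it must be mapped
 * with kmap_atomic() before use.  pgtable_page_ctor() prepares the
 * page for use as a pagetable (split pagetable-lock initialization).
 */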
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

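/*
 * Free a pte page that is being torn down.  pgtable_page_dtor() undoes
 * pgtable_page_ctor(), paravirt_release_pte() tells any hypervisor the
 * pfn no longer holds a pagetable, and tlb_remove_page() defers the
 * actual freeing until the mmu_gather's TLB flush, so no CPU can still
 * be walking the page.
 */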
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

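/*
 * The same deferred-free scheme applies to the upper levels, which
 * only exist when the pagetable has more than two levels (PAE or
 * 64-bit).
 */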
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif /* PAGETABLE_LEVELS > 3 */
#endif /* PAGETABLE_LEVELS > 2 */

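/*
 * All unshared pgds are kept on pgd_list so that changes to the kernel
 * mapping can be propagated to every pagetable (see the larger comment
 * below).  The pgd page's otherwise-unused lru list_head links it in;
 * callers must hold pgd_lock.
 */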
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

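/*
 * Number of pgd entries that are private to each pagetable.  When the
 * kernel pmd is shared, only the user portion (entries below
 * KERNEL_PGD_BOUNDARY) is per-process; otherwise every entry is.
 */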
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

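/*
 * Set up a freshly allocated pgd: clear the user entries and make the
 * kernel half match init_mm's swapper_pg_dir.
 */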
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, KERNEL_PGD_BOUNDARY*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}

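/*
 * Undo pgd_ctor: unlink the pgd from pgd_list.  Nothing to do when the
 * kernel pmd is shared, since such pgds are never put on the list.
 */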
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif /* CONFIG_X86_PAE */

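/*
 * Allocate and initialize a new pgd.  mm->pgd is set before the ctor
 * runs so that the pmd allocation paths called underneath can find it.
 * On prepopulation failure everything done so far is unwound and NULL
 * is returned.
 */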
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pmd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

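/*
 * Release a pgd: free any preallocated pmds still attached, take the
 * pgd off pgd_list, then free the page itself.
 */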
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}

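/*
 * Called from the fault path when the access/dirty flags of a present
 * pte must be updated.  The pte is only rewritten (and its TLB entry
 * flushed) if the new entry differs and the fault was a write; the
 * return value tells the caller whether anything changed.
 */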
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

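/*
 * Atomically clear the accessed bit of a pte, reporting its previous
 * state.  pte_update() notifies any paravirt backend of the change.
 * No TLB flush is done here; ptep_clear_flush_young() below adds one.
 */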
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

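/*
 * As above, but also flush the TLB entry so the hardware can set the
 * accessed bit again the next time the page is touched.
 */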
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}