/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters. It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here to point the hardware to the
 * actual Guest pages when running the Guest. :*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include "lg.h"

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest. If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review lguest.c's "Page Table Handling" (with diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables. Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's. (See what I mean about weird naming? Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code. There are seven
 * parts to this:
 *  (i) Setting up a page table entry for the Guest when it faults,
 *  (ii) Setting up the page table entry for the Guest stack,
 *  (iii) Setting up a page table entry when the Guest tells us it has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
 :*/

/* Pages are 4k long, and each page table entry is 4 bytes long, giving us 1024
 * (or 2^10) entries per page. */
#define PTES_PER_PAGE_SHIFT 10
#define PTES_PER_PAGE (1 << PTES_PER_PAGE_SHIFT)

/* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page. */
#define SWITCHER_PGD_INDEX (PTES_PER_PAGE - 1)

/* We actually need a separate PTE page for each CPU. Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(spte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)

/*H:320 With our shadow and Guest types established, we need to deal with
 * them: the page table code is curly enough to need helper functions to keep
 * it clear and clean.
 *
 * The first helper takes a virtual address, and says which entry in the top
 * level page table deals with that address. Since each top level entry deals
 * with 4M, this effectively divides by 4M. */
static unsigned vaddr_to_pgd_index(unsigned long vaddr)
{
        return vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
}
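/* (A worked example, assuming the usual x86 PAGE_SHIFT of 12: virtual
 * address 0x0804a123 >> 22 = 32, so the 32nd top-level entry covers it; the
 * next ten bits (0x4a = 74) pick the PTE within that entry's PTE page, and
 * the low twelve bits (0x123) are the offset within the 4k page.) */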

/* There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry for that address. Since we keep track of several page
 * tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one). */
static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
{
        unsigned int index = vaddr_to_pgd_index(vaddr);

        /* We kill any Guest trying to touch the Switcher addresses. */
        if (index >= SWITCHER_PGD_INDEX) {
                kill_guest(lg, "attempt to access switcher pages");
                index = 0;
        }
        /* Return a pointer to the index'th pgd entry for the i'th page table. */
        return &lg->pgdirs[i].pgdir[index];
}

/* This routine then takes the PGD entry given above, which contains the
 * address of the PTE page. It then returns a pointer to the PTE entry for the
 * given address. */
static spte_t *spte_addr(struct lguest *lg, spgd_t spgd, unsigned long vaddr)
{
        spte_t *page = __va(spgd.pfn << PAGE_SHIFT);
        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(spgd.flags & _PAGE_PRESENT));
        return &page[(vaddr >> PAGE_SHIFT) % PTES_PER_PAGE];
}
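/* (Chaining these two helpers gives a complete shadow lookup. A sketch, not
 * real code:
 *
 *      spgd_t *spgd = spgd_addr(lg, lg->pgdidx, vaddr);
 *      if (spgd->flags & _PAGE_PRESENT)
 *              spte = spte_addr(lg, *spgd, vaddr);
 *
 * which is exactly the walk demand_page() below performs when it fixes up a
 * fault.) */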

/* These two functions are just like the above two, except they access the
 * Guest page tables. Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
{
        unsigned int index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
        return lg->pgdirs[lg->pgdidx].cr3 + index * sizeof(gpgd_t);
}

static unsigned long gpte_addr(struct lguest *lg,
                               gpgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = gpgd.pfn << PAGE_SHIFT;
        BUG_ON(!(gpgd.flags & _PAGE_PRESENT));
        return gpage + ((vaddr>>PAGE_SHIFT) % PTES_PER_PAGE) * sizeof(gpte_t);
}
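/* (Note these two return Guest addresses, not pointers we can dereference:
 * demand_page() below fetches the actual entries with lgread_u32() and wraps
 * the raw words with mkgpgd()/mkgpte().) */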

/*H:350 This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number. It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to
 * put that back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
        struct page *page;
        /* This value indicates failure. */
        unsigned long ret = -1UL;

        /* get_user_pages() is a complex interface: it gets the "struct
         * vm_area_struct" and "struct page" associated with a range of pages.
         * It also needs the task's mmap_sem held, and is not very quick.
         * It returns the number of pages it got. */
        down_read(&current->mm->mmap_sem);
        if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
                           1, write, 1, &page, NULL) == 1)
                ret = page_to_pfn(page);
        up_read(&current->mm->mmap_sem);
        return ret;
}

/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky. The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number. */
static spte_t gpte_to_spte(struct lguest *lg, gpte_t gpte, int write)
{
        spte_t spte;
        unsigned long pfn;

        /* The Guest sets the global flag, because it thinks that it is using
         * PGE. We only told it to use PGE so it would tell us whether it was
         * flushing a kernel mapping or a userspace mapping. We don't actually
         * use the global bit, so throw it away. */
        spte.flags = (gpte.flags & ~_PAGE_GLOBAL);

        /* We need a temporary "unsigned long" variable to hold the answer from
         * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
         * fit in spte.pfn. get_pfn() finds the real physical number of the
         * page, given the virtual number. */
        pfn = get_pfn(gpte.pfn, write);
        if (pfn == -1UL) {
                kill_guest(lg, "failed to get page %u", gpte.pfn);
                /* When we destroy the Guest, we'll go through the shadow page
                 * tables and release_pte() them. Make sure we don't think
                 * this one is valid! */
                spte.flags = 0;
        }
        /* Now we assign the page number, and our shadow PTE is complete. */
        spte.pfn = pfn;
        return spte;
}
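/* (For illustration only: a Guest PTE with page number 0x123 and flags
 * _PAGE_PRESENT|_PAGE_RW|_PAGE_GLOBAL becomes a shadow PTE whose pfn is
 * get_pfn(0x123, 1) and whose flags are _PAGE_PRESENT|_PAGE_RW: the same
 * flags minus the global bit, with the Launcher-virtual page number swapped
 * for the real physical one.) */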

/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(spte_t pte)
{
        /* Remember that get_user_pages() took a reference to the page, in
         * get_pfn()? We have to put it back now. */
        if (pte.flags & _PAGE_PRESENT)
                put_page(pfn_to_page(pte.pfn));
}
/*:*/

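/* These two sanity checks protect us from the Guest: a page frame number at
 * or above pfn_limit would reach beyond the memory the Launcher mapped for
 * the Guest (into the Launcher itself), and flag bits we don't handle
 * (write-through or 4MB-page bits in a PTE, anything outside the normal
 * _PAGE_TABLE set in a PGD) would confuse the shadow code. */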
static void check_gpte(struct lguest *lg, gpte_t gpte)
{
        if ((gpte.flags & (_PAGE_PWT|_PAGE_PSE)) || gpte.pfn >= lg->pfn_limit)
                kill_guest(lg, "bad page table entry");
}

static void check_gpgd(struct lguest *lg, gpgd_t gpgd)
{
        if ((gpgd.flags & ~_PAGE_TABLE) || gpgd.pfn >= lg->pfn_limit)
                kill_guest(lg, "bad page directory entry");
}

/*H:330
 * (i) Setting up a page table entry for the Guest when it faults
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here. That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true. */
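/* (The errcode is the hardware page fault error code: 1 means the fault was
 * on a present page, 2 means it was a write, and 4 means it came from
 * userspace. The checks below only care about the write and user bits.) */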
int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
{
        gpgd_t gpgd;
        spgd_t *spgd;
        unsigned long gpte_ptr;
        gpte_t gpte;
        spte_t *spte;

        /* First step: get the top-level Guest page table entry. */
        gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
        /* Toplevel not present? We can't map it in. */
        if (!(gpgd.flags & _PAGE_PRESENT))
                return 0;

        /* Now look at the matching shadow entry. */
        spgd = spgd_addr(lg, lg->pgdidx, vaddr);
        if (!(spgd->flags & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
                /* This is not really the Guest's fault, but killing it is
                 * simple for this corner case. */
                if (!ptepage) {
                        kill_guest(lg, "out of memory allocating pte page");
                        return 0;
                }
                /* We check that the Guest pgd is OK. */
                check_gpgd(lg, gpgd);
                /* And we copy the flags to the shadow PGD entry. The page
                 * number in the shadow PGD is the page we just allocated. */
                spgd->raw.val = (__pa(ptepage) | gpgd.flags);
        }

        /* OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later. */
        gpte_ptr = gpte_addr(lg, gpgd, vaddr);
        gpte = mkgpte(lgread_u32(lg, gpte_ptr));

        /* If this page isn't in the Guest page tables, we can't page it in. */
        if (!(gpte.flags & _PAGE_PRESENT))
                return 0;

        /* Check they're not trying to write to a page the Guest wants
         * read-only (errcode & 2 means it was a write). */
        if ((errcode & 2) && !(gpte.flags & _PAGE_RW))
                return 0;

        /* User access to a kernel page? (errcode & 4 means a userspace access) */
        if ((errcode & 4) && !(gpte.flags & _PAGE_USER))
                return 0;

        /* Check that the Guest PTE flags are OK, and the page number is below
         * the pfn_limit (ie. not mapping the Launcher binary). */
        check_gpte(lg, gpte);
        /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
        gpte.flags |= _PAGE_ACCESSED;
        if (errcode & 2)
                gpte.flags |= _PAGE_DIRTY;

        /* Get the pointer to the shadow PTE entry we're going to set. */
        spte = spte_addr(lg, *spgd, vaddr);
        /* If there was a valid shadow PTE entry here before, we release it.
         * This can happen with a write to a previously read-only entry. */
        release_pte(*spte);

        /* If this is a write, we insist that the Guest page is writable (the
         * final arg to gpte_to_spte()). */
        if (gpte.flags & _PAGE_DIRTY)
                *spte = gpte_to_spte(lg, gpte, 1);
        else {
                /* If this is a read, don't set the "writable" bit in the page
                 * table entry, even if the Guest says it's writable. That way
                 * we come back here when a write does actually occur, so we can
                 * update the Guest's _PAGE_DIRTY flag. */
                gpte_t ro_gpte = gpte;
                ro_gpte.flags &= ~_PAGE_RW;
                *spte = gpte_to_spte(lg, ro_gpte, 0);
        }

        /* Finally, we write the Guest PTE entry back: we've set the
         * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
        lgwrite_u32(lg, gpte_ptr, gpte.raw.val);

        /* We succeeded in mapping the page! */
        return 1;
}

/*H:360 (ii) Setting up the page table entry for the Guest stack.
 *
 * Remember pin_stack_pages() which makes sure the stack is mapped? It could
 * simply call demand_page(), but as we've seen that logic is quite long, and
 * usually the stack pages are already mapped anyway, so it's not required.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable? */
static int page_writable(struct lguest *lg, unsigned long vaddr)
{
        spgd_t *spgd;
        unsigned long flags;

        /* Look at the top level entry: is it present? */
        spgd = spgd_addr(lg, lg->pgdidx, vaddr);
        if (!(spgd->flags & _PAGE_PRESENT))
                return 0;

        /* Check the flags on the pte entry itself: it must be present and
         * writable. */
        flags = spte_addr(lg, *spgd, vaddr)->flags;
        return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write"). */
void pin_page(struct lguest *lg, unsigned long vaddr)
{
        if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
                kill_guest(lg, "bad stack page %#lx", vaddr);
}
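/* For the curious: pin_stack_pages() lives elsewhere in lguest and simply
 * loops over the Guest's kernel stack pages, calling pin_page() on each.
 * A sketch of the idea, with "stack_top" and "num_stack_pages" as
 * illustrative names rather than real fields:
 *
 *      for (i = 0; i < num_stack_pages; i++)
 *              pin_page(lg, stack_top - i * PAGE_SIZE);
 */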

/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(struct lguest *lg, spgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (spgd->flags & _PAGE_PRESENT) {
                unsigned int i;
                /* Converting the pfn to find the actual PTE page is easy: turn
                 * the page number into a physical address, then convert to a
                 * virtual address (easy for kernel pages like this one). */
                spte_t *ptepage = __va(spgd->pfn << PAGE_SHIFT);
                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTES_PER_PAGE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PGD entry so we never release it twice. */
                spgd->raw.val = 0;
        }
}

/*H:440 (v) Flushing (throwing away) page tables,
 *
 * We saw flush_user_mappings() called when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
        unsigned int i;
        /* Release every pgd entry up to the kernel's address. */
        for (i = 0; i < vaddr_to_pgd_index(lg->page_offset); i++)
                release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}

/* The Guest also has a hypercall to do this manually: it's used when a large
 * number of mappings have been changed. */
void guest_pagetable_flush_user(struct lguest *lg)
{
        /* Drop the userspace part of the current page table. */
        flush_user_mappings(lg, lg->pgdidx);
}
/*:*/

/* We keep several page tables. This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
        unsigned int i;
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].cr3 == pgtable)
                        break;
        return i;
}
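/* (Note the "not found" convention: if nothing matches, the loop falls off
 * the end and i equals ARRAY_SIZE(lg->pgdirs), which is exactly what the
 * callers below test against.) */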

/*H:435 And this is us, creating the new page directory. If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir. */
static unsigned int new_pgdir(struct lguest *lg,
                              unsigned long cr3,
                              int *blank_pgdir)
{
        unsigned int next;

        /* We pick one entry at random to throw out. Choosing the Least
         * Recently Used might be better, but this is easy. */
        next = random32() % ARRAY_SIZE(lg->pgdirs);
        /* If it's never been allocated at all before, try now. */
        if (!lg->pgdirs[next].pgdir) {
                lg->pgdirs[next].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
                /* If the allocation fails, just keep using the one we have */
                if (!lg->pgdirs[next].pgdir)
                        next = lg->pgdidx;
                else
                        /* This is a blank page, so there are no kernel
                         * mappings: caller must map the stack! */
                        *blank_pgdir = 1;
        }
        /* Record which Guest toplevel this shadows. */
        lg->pgdirs[next].cr3 = cr3;
        /* Release all the non-kernel mappings. */
        flush_user_mappings(lg, next);

        return next;
}

/*H:430 (iv) Switching page tables
 *
 * This is what happens when the Guest changes page tables (ie. changes the
 * top-level pgdir). This happens on almost every context switch. */
void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
{
        int newpgdir, repin = 0;

        /* Look to see if we have this one already. */
        newpgdir = find_pgdir(lg, pgtable);
        /* If not, we allocate or mug an existing one: if it's a fresh one,
         * repin gets set to 1. */
        if (newpgdir == ARRAY_SIZE(lg->pgdirs))
                newpgdir = new_pgdir(lg, pgtable, &repin);
        /* Change the current pgd index to the new one. */
        lg->pgdidx = newpgdir;
        /* If it was completely blank, we map in the Guest kernel stack */
        if (repin)
                pin_stack_pages(lg);
}

/*H:470 Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables. This is used when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
        unsigned int i, j;

        /* Every shadow pagetable this Guest has */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir)
                        /* Every PGD entry except the Switcher at the top */
                        for (j = 0; j < SWITCHER_PGD_INDEX; j++)
                                release_pgd(lg, lg->pgdirs[i].pgdir + j);
}

/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping. Since kernel mappings are in every page table, it's easiest to
 * throw them all away. This is amazingly slow, but thankfully rare. */
void guest_pagetable_clear_all(struct lguest *lg)
{
        release_all_pagetables(lg);
        /* We need the Guest kernel stack mapped again. */
        pin_stack_pages(lg);
}

/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in. We need to do this anyway:
 * the Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway. This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lguest *lg, int idx,
                       unsigned long vaddr, gpte_t gpte)
{
        /* Look up the matching shadow page directory entry. */
        spgd_t *spgd = spgd_addr(lg, idx, vaddr);

        /* If the top level isn't present, there's no entry to update. */
        if (spgd->flags & _PAGE_PRESENT) {
                /* Otherwise, we start by releasing the existing entry. */
                spte_t *spte = spte_addr(lg, *spgd, vaddr);
                release_pte(*spte);

                /* If they're setting this entry as dirty or accessed, we might
                 * as well put that entry they've given us in now. This shaves
                 * 10% off a copy-on-write micro-benchmark. */
                if (gpte.flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
                        check_gpte(lg, gpte);
                        *spte = gpte_to_spte(lg, gpte, gpte.flags&_PAGE_DIRTY);
                } else
                        /* Otherwise we can demand_page() it in later. */
                        spte->raw.val = 0;
        }
}

/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few). Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes. So when the page table above that address changes, we update
 * all the page tables, not just the current one. This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings. This speeds up context switch immensely. */
void guest_set_pte(struct lguest *lg,
                   unsigned long cr3, unsigned long vaddr, gpte_t gpte)
{
        /* Kernel mappings must be changed on all top levels. Slow, but
         * doesn't happen often. */
        if (vaddr >= lg->page_offset) {
                unsigned int i;
                for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                        if (lg->pgdirs[i].pgdir)
                                do_set_pte(lg, i, vaddr, gpte);
        } else {
                /* Is this page table one we have a shadow for? */
                int pgdir = find_pgdir(lg, cr3);
                if (pgdir != ARRAY_SIZE(lg->pgdirs))
                        /* If so, do the update. */
                        do_set_pte(lg, pgdir, vaddr, gpte);
        }
}
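/* (For the curious: the Guest reaches guest_set_pte() via the LHCALL_SET_PTE
 * hypercall, dispatched from hypercalls.c, and guest_set_pmd() below via
 * LHCALL_SET_PMD the same way.) */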

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us it has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed. When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
{
        int pgdir;

        /* The kernel seems to try to initialize this early on: we ignore its
         * attempts to map over the Switcher. */
        if (idx >= SWITCHER_PGD_INDEX)
                return;

        /* If they're talking about a page table we have a shadow for... */
        pgdir = find_pgdir(lg, cr3);
        if (pgdir < ARRAY_SIZE(lg->pgdirs))
                /* ... throw it away. */
                release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}

/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is. We set some things up here: */
int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
        /* In flush_user_mappings() we loop from 0 to
         * "vaddr_to_pgd_index(lg->page_offset)". This assumes it won't hit
         * the Switcher mappings, so check that now. */
        if (vaddr_to_pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
                return -EINVAL;
        /* We start on the first shadow page table, and give it a blank PGD
         * page. */
        lg->pgdidx = 0;
        lg->pgdirs[lg->pgdidx].cr3 = pgtable;
        lg->pgdirs[lg->pgdidx].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
        if (!lg->pgdirs[lg->pgdidx].pgdir)
                return -ENOMEM;
        return 0;
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
        unsigned int i;

        /* Throw away all page table pages. */
        release_all_pagetables(lg);
        /* Now free the top levels: free_page() can handle 0 just fine. */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be available to the
 * Guest (and not the pages for other CPUs). We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in. */
void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
{
        spte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
        spgd_t switcher_pgd;
        spte_t regs_pte;

        /* Make the last PGD entry for this Guest point to the Switcher's PTE
         * page for this CPU (with appropriate flags). */
        switcher_pgd.pfn = __pa(switcher_pte_page) >> PAGE_SHIFT;
        switcher_pgd.flags = _PAGE_KERNEL;
        lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

        /* We also change the Switcher PTE page. When we're running the Guest,
         * we want the Guest's "regs" page to appear where the first Switcher
         * page for this CPU is. This is an optimization: when the Switcher
         * saves the Guest registers, it saves them into the first page of this
         * CPU's "struct lguest_pages": if we make sure the Guest's register
         * page is already mapped there, we don't have to copy them out
         * again. */
        regs_pte.pfn = __pa(lg->regs_page) >> PAGE_SHIFT;
        regs_pte.flags = _PAGE_KERNEL;
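        /* ("pages" is the host virtual address of this CPU's "struct
         * lguest_pages", which lives in the Switcher's top 4MB; dividing by
         * PAGE_SIZE and taking the remainder mod PTES_PER_PAGE gives its
         * slot in this Switcher PTE page.) */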
        switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTES_PER_PAGE]
                = regs_pte;
}
/*:*/

static void free_switcher_pte_pages(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                free_page((long)switcher_pte_page(i));
}

/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
                                              struct page *switcher_page[],
                                              unsigned int pages)
{
        unsigned int i;
        spte_t *pte = switcher_pte_page(cpu);

        /* The first entries are easy: they map the Switcher code. */
        for (i = 0; i < pages; i++) {
                pte[i].pfn = page_to_pfn(switcher_page[i]);
                pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
        }

        /* The only other thing we map is this CPU's pair of pages. */
        i = pages + cpu*2;

        /* First page (Guest registers) is writable from the Guest */
        pte[i].pfn = page_to_pfn(switcher_page[i]);
        pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW;
        /* The second page contains the "struct lguest_ro_state", and is
         * read-only. */
        pte[i+1].pfn = page_to_pfn(switcher_page[i+1]);
        pte[i+1].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
}
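/* (So with a one-page Switcher, CPU 1's PTE page ends up with entry 0
 * mapping the Switcher code, entry 3 (pages + cpu*2 = 1 + 2) mapping its
 * writable register page, and entry 4 its read-only state page. Every other
 * entry stays zero, so one CPU's Guest never sees another CPU's pages.) */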

/*H:510 At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU. */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                switcher_pte_page(i) = (spte_t *)get_zeroed_page(GFP_KERNEL);
                if (!switcher_pte_page(i)) {
                        free_switcher_pte_pages();
                        return -ENOMEM;
                }
                populate_switcher_pte_page(i, switcher_page, pages);
        }
        return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
        free_switcher_pte_pages();
}