/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here to point the hardware to the
 * actual Guest pages when running the Guest. :*/
/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include "lg.h"
/*M:008 We hold references to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root. :*/
/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest.  If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review lguest.c's "Page Table Handling" (with diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Setting up a page table entry for the Guest when it faults,
 *  (ii) Setting up the page table entry for the Guest stack,
 *  (iii) Setting up a page table entry when the Guest tells us it has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
:*/
/* Pages are 4k long, and each page table entry is 4 bytes long, giving us 1024
 * (or 2^10) entries per page. */
#define PTES_PER_PAGE_SHIFT 10
#define PTES_PER_PAGE (1 << PTES_PER_PAGE_SHIFT)
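
/* Purely as an illustration (nothing in the driver uses this helper): with
 * these constants a 32-bit virtual address splits into a 10-bit top-level
 * index, a 10-bit second-level index and a 12-bit offset within the page.
 * The name "example_split_vaddr" is made up for this sketch. */
static inline void example_split_vaddr(unsigned long vaddr,
                                       unsigned int *pgd_idx,
                                       unsigned int *pte_idx,
                                       unsigned int *offset)
{
        /* Top 10 bits: which top-level entry covers this 4MB region. */
        *pgd_idx = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
        /* Middle 10 bits: which PTE within that region's PTE page. */
        *pte_idx = (vaddr >> PAGE_SHIFT) % PTES_PER_PAGE;
        /* Bottom 12 bits: the byte offset within the 4k page. */
        *offset = vaddr % PAGE_SIZE;
}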
/* 1024 entries in a page table page map 1024 pages: 4MB.  The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page. */
#define SWITCHER_PGD_INDEX (PTES_PER_PAGE - 1)
/* We actually need a separate PTE page for each CPU.  Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(spte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
/*H:320 With our shadow and Guest types established, we need to deal with
 * them: the page table code is curly enough to need helper functions to keep
 * it clear and clean.
 *
 * The first helper takes a virtual address, and says which entry in the top
 * level page table deals with that address.  Since each top level entry deals
 * with 4M, this effectively divides by 4M. */
static unsigned vaddr_to_pgd_index(unsigned long vaddr)
{
        return vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
}
/* There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry for that address.  Since we keep track of several page
 * tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one). */
static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
{
        unsigned int index = vaddr_to_pgd_index(vaddr);

        /* We kill any Guest trying to touch the Switcher addresses. */
        if (index >= SWITCHER_PGD_INDEX) {
                kill_guest(lg, "attempt to access switcher pages");
                index = 0;
        }
        /* Return a pointer to the index'th pgd entry for the i'th page table. */
        return &lg->pgdirs[i].pgdir[index];
}
/* This routine then takes the PGD entry given above, which contains the
 * address of the PTE page.  It then returns a pointer to the PTE entry for the
 * given address. */
static spte_t *spte_addr(struct lguest *lg, spgd_t spgd, unsigned long vaddr)
{
        spte_t *page = __va(spgd.pfn << PAGE_SHIFT);
        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(spgd.flags & _PAGE_PRESENT));
        return &page[(vaddr >> PAGE_SHIFT) % PTES_PER_PAGE];
}
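
/* Purely as an illustration (the driver doesn't use this helper): chaining the
 * two helpers above walks a shadow page table from Guest virtual address to
 * host-physical address.  The name "example_shadow_walk" is made up, and a
 * real caller would also check the PTE's _PAGE_PRESENT flag. */
static inline unsigned long example_shadow_walk(struct lguest *lg,
                                                unsigned long vaddr)
{
        /* Top level first: which 4MB region, in the current shadow table. */
        spgd_t *spgd = spgd_addr(lg, lg->pgdidx, vaddr);

        if (!(spgd->flags & _PAGE_PRESENT))
                return 0;
        /* Second level: the shadow PTE holds the real page frame number. */
        return ((unsigned long)spte_addr(lg, *spgd, vaddr)->pfn << PAGE_SHIFT)
                + (vaddr % PAGE_SIZE);
}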
/* These two functions are just like the above two, except they access the
 * Guest page tables.  Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
{
        unsigned int index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
        return lg->pgdirs[lg->pgdidx].cr3 + index * sizeof(gpgd_t);
}
static unsigned long gpte_addr(struct lguest *lg,
                               gpgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = gpgd.pfn << PAGE_SHIFT;
        BUG_ON(!(gpgd.flags & _PAGE_PRESENT));
        return gpage + ((vaddr >> PAGE_SHIFT) % PTES_PER_PAGE) * sizeof(gpte_t);
}
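
/* Purely as an illustration (the driver doesn't use this helper): the two
 * Guest-side helpers above only compute addresses inside the Guest, so we
 * still have to fetch the entries with lgread_u32() and wrap them with
 * mkgpgd()/mkgpte(), exactly as demand_page() does below.  The name
 * "example_guest_walk" is made up, and this assumes the "raw.val" view of
 * gpte_t used elsewhere in this file. */
static inline gpte_t example_guest_walk(struct lguest *lg, unsigned long vaddr)
{
        /* Read the Guest's top-level entry out of Guest memory. */
        gpgd_t gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
        gpte_t gpte;

        /* Hand back an empty (not-present) PTE unless the Guest mapped it. */
        gpte.raw.val = 0;
        if (gpgd.flags & _PAGE_PRESENT)
                gpte = mkgpte(lgread_u32(lg, gpte_addr(lg, gpgd, vaddr)));
        return gpte;
}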
/*H:350 This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put it
 * back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
        struct page *page;
        /* This value indicates failure. */
        unsigned long ret = -1UL;

        /* get_user_pages() is a complex interface: it gets the "struct
         * vm_area_struct" and "struct page" associated with a range of pages.
         * It also needs the task's mmap_sem held, and is not very quick.
         * It returns the number of pages it got. */
        down_read(&current->mm->mmap_sem);
        if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
                           1, write, 1, &page, NULL) == 1)
                ret = page_to_pfn(page);
        up_read(&current->mm->mmap_sem);
        return ret;
}
/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number. */
static spte_t gpte_to_spte(struct lguest *lg, gpte_t gpte, int write)
{
        spte_t spte;
        unsigned long pfn;

        /* The Guest sets the global flag, because it thinks that it is using
         * PGE.  We only told it to use PGE so it would tell us whether it was
         * flushing a kernel mapping or a userspace mapping.  We don't actually
         * use the global bit, so throw it away. */
        spte.flags = (gpte.flags & ~_PAGE_GLOBAL);

        /* We need a temporary "unsigned long" variable to hold the answer from
         * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
         * fit in spte.pfn.  get_pfn() finds the real physical number of the
         * page, given the virtual number. */
        pfn = get_pfn(gpte.pfn, write);
        if (pfn == -1UL) {
                kill_guest(lg, "failed to get page %u", gpte.pfn);
                /* When we destroy the Guest, we'll go through the shadow page
                 * tables and release_pte() them.  Make sure we don't think
                 * this one is valid! */
                spte.flags = 0;
        }
        /* Now we assign the page number, and our shadow PTE is complete. */
        spte.pfn = pfn;
        return spte;
}
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(spte_t pte)
{
        /* Remember that get_user_pages() took a reference to the page, in
         * get_pfn()?  We have to put it back now. */
        if (pte.flags & _PAGE_PRESENT)
                put_page(pfn_to_page(pte.pfn));
}
static void check_gpte(struct lguest *lg, gpte_t gpte)
{
        if ((gpte.flags & (_PAGE_PWT|_PAGE_PSE)) || gpte.pfn >= lg->pfn_limit)
                kill_guest(lg, "bad page table entry");
}

static void check_gpgd(struct lguest *lg, gpgd_t gpgd)
{
        if ((gpgd.flags & ~_PAGE_TABLE) || gpgd.pfn >= lg->pfn_limit)
                kill_guest(lg, "bad page directory entry");
}
/*H:330
 * (i) Setting up a page table entry for the Guest when it faults
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true. */
int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
{
        gpgd_t gpgd;
        spgd_t *spgd;
        unsigned long gpte_ptr;
        gpte_t gpte;
        spte_t *spte;

        /* First step: get the top-level Guest page table entry. */
        gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
        /* Toplevel not present?  We can't map it in. */
        if (!(gpgd.flags & _PAGE_PRESENT))
                return 0;
        /* Now look at the matching shadow entry. */
        spgd = spgd_addr(lg, lg->pgdidx, vaddr);
        if (!(spgd->flags & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
                /* This is not really the Guest's fault, but killing it is
                 * simple for this corner case. */
                if (!ptepage) {
                        kill_guest(lg, "out of memory allocating pte page");
                        return 0;
                }
                /* We check that the Guest pgd is OK. */
                check_gpgd(lg, gpgd);
                /* And we copy the flags to the shadow PGD entry.  The page
                 * number in the shadow PGD is the page we just allocated. */
                spgd->raw.val = (__pa(ptepage) | gpgd.flags);
        }
        /* OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later. */
        gpte_ptr = gpte_addr(lg, gpgd, vaddr);
        gpte = mkgpte(lgread_u32(lg, gpte_ptr));

        /* If this page isn't in the Guest page tables, we can't page it in. */
        if (!(gpte.flags & _PAGE_PRESENT))
                return 0;

        /* Check they're not trying to write to a page the Guest wants
         * read-only (bit 2 of errcode == write). */
        if ((errcode & 2) && !(gpte.flags & _PAGE_RW))
                return 0;

        /* User access to a kernel page? (bit 3 == user access) */
        if ((errcode & 4) && !(gpte.flags & _PAGE_USER))
                return 0;
        /* Check that the Guest PTE flags are OK, and the page number is below
         * the pfn_limit (ie. not mapping the Launcher binary). */
        check_gpte(lg, gpte);
        /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
        gpte.flags |= _PAGE_ACCESSED;
        if (errcode & 2)
                gpte.flags |= _PAGE_DIRTY;

        /* Get the pointer to the shadow PTE entry we're going to set. */
        spte = spte_addr(lg, *spgd, vaddr);
        /* If there was a valid shadow PTE entry here before, we release it.
         * This can happen with a write to a previously read-only entry. */
        release_pte(*spte);

        /* If this is a write, we insist that the Guest page is writable (the
         * final arg to gpte_to_spte()). */
        if (gpte.flags & _PAGE_DIRTY)
                *spte = gpte_to_spte(lg, gpte, 1);
        else {
                /* If this is a read, don't set the "writable" bit in the page
                 * table entry, even if the Guest says it's writable.  That way
                 * we come back here when a write does actually occur, so we can
                 * update the Guest's _PAGE_DIRTY flag. */
                gpte_t ro_gpte = gpte;
                ro_gpte.flags &= ~_PAGE_RW;
                *spte = gpte_to_spte(lg, ro_gpte, 0);
        }

        /* Finally, we write the Guest PTE entry back: we've set the
         * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
        lgwrite_u32(lg, gpte_ptr, gpte.raw.val);

        /* We succeeded in mapping the page! */
        return 1;
}
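
/* Purely as an illustration (the driver just tests the bits inline): the only
 * parts of the x86 page fault error code demand_page() cares about are the
 * ones the comments above call "bit 2" (write) and "bit 3" (user access).
 * These helper names are made up for this sketch. */
static inline int example_fault_was_write(int errcode)
{
        return errcode & 2;     /* set when the faulting access was a write */
}

static inline int example_fault_was_user(int errcode)
{
        return errcode & 4;     /* set when the access came from usermode */
}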
/*H:360 (ii) Setting up the page table entry for the Guest stack.
 *
 * Remember pin_stack_pages() which makes sure the stack is mapped?  It could
 * simply call demand_page(), but as we've seen that logic is quite long, and
 * usually the stack pages are already mapped anyway, so it's not required.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable? */
static int page_writable(struct lguest *lg, unsigned long vaddr)
{
        spgd_t *spgd;
        unsigned long flags;

        /* Look at the top level entry: is it present? */
        spgd = spgd_addr(lg, lg->pgdidx, vaddr);
        if (!(spgd->flags & _PAGE_PRESENT))
                return 0;

        /* Check the flags on the pte entry itself: it must be present and
         * writable. */
        flags = spte_addr(lg, *spgd, vaddr)->flags;
        return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}
/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write"). */
void pin_page(struct lguest *lg, unsigned long vaddr)
{
        if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
                kill_guest(lg, "bad stack page %#lx", vaddr);
}
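
/* Purely as an illustration (the real pin_stack_pages() lives in another
 * file): a caller pinning a few pages at and below the Guest's stack pointer
 * would look roughly like this.  The name, the "stack" argument and the page
 * count are made up for this sketch. */
static inline void example_pin_stack(struct lguest *lg, unsigned long stack,
                                     unsigned int num_pages)
{
        unsigned int i;

        /* The stack grows down, so pin the page "stack" is on and the ones
         * below it. */
        for (i = 0; i < num_pages; i++)
                pin_page(lg, stack - i * PAGE_SIZE);
}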
/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(struct lguest *lg, spgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (spgd->flags & _PAGE_PRESENT) {
                unsigned int i;
                /* Converting the pfn to find the actual PTE page is easy: turn
                 * the page number into a physical address, then convert to a
                 * virtual address (easy for kernel pages like this one). */
                spte_t *ptepage = __va(spgd->pfn << PAGE_SHIFT);
                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTES_PER_PAGE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PGD entry so we never release it twice. */
                spgd->raw.val = 0;
        }
}
/*H:440 (v) Flushing (throwing away) page tables,
 *
 * We saw flush_user_mappings() called when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
        unsigned int i;
        /* Release every pgd entry up to the kernel's address. */
        for (i = 0; i < vaddr_to_pgd_index(lg->page_offset); i++)
                release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}
/* The Guest also has a hypercall to do this manually: it's used when a large
 * number of mappings have been changed. */
void guest_pagetable_flush_user(struct lguest *lg)
{
        /* Drop the userspace part of the current page table. */
        flush_user_mappings(lg, lg->pgdidx);
}
/* We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
        unsigned int i;
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].cr3 == pgtable)
                        break;
        return i;
}
/*H:435 And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir. */
static unsigned int new_pgdir(struct lguest *lg,
                              unsigned long cr3,
                              int *blank_pgdir)
{
        unsigned int next;

        /* We pick one entry at random to throw out.  Choosing the Least
         * Recently Used might be better, but this is easy. */
        next = random32() % ARRAY_SIZE(lg->pgdirs);
        /* If it's never been allocated at all before, try now. */
        if (!lg->pgdirs[next].pgdir) {
                lg->pgdirs[next].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
                /* If the allocation fails, just keep using the one we have */
                if (!lg->pgdirs[next].pgdir)
                        next = lg->pgdidx;
                else
                        /* This is a blank page, so there are no kernel
                         * mappings: caller must map the stack! */
                        *blank_pgdir = 1;
        }
        /* Record which Guest toplevel this shadows. */
        lg->pgdirs[next].cr3 = cr3;
        /* Release all the non-kernel mappings. */
        flush_user_mappings(lg, next);

        return next;
}
/*H:430 (iv) Switching page tables
 *
 * This is what happens when the Guest changes page tables (ie. changes the
 * top-level pgdir).  This happens on almost every context switch. */
void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
{
        int newpgdir, repin = 0;

        /* Look to see if we have this one already. */
        newpgdir = find_pgdir(lg, pgtable);
        /* If not, we allocate or mug an existing one: if it's a fresh one,
         * repin gets set to 1. */
        if (newpgdir == ARRAY_SIZE(lg->pgdirs))
                newpgdir = new_pgdir(lg, pgtable, &repin);
        /* Change the current pgd index to the new one. */
        lg->pgdidx = newpgdir;
        /* If it was completely blank, we map in the Guest kernel stack */
        if (repin)
                pin_stack_pages(lg);
}
/*H:470 Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables.  This is used when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
        unsigned int i, j;

        /* Every shadow pagetable this Guest has */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir)
                        /* Every PGD entry except the Switcher at the top */
                        for (j = 0; j < SWITCHER_PGD_INDEX; j++)
                                release_pgd(lg, lg->pgdirs[i].pgdir + j);
}
/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This is amazingly slow, but thankfully rare. */
void guest_pagetable_clear_all(struct lguest *lg)
{
        release_all_pagetables(lg);
        /* We need the Guest kernel stack mapped again. */
        pin_stack_pages(lg);
}
/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately. */
static void do_set_pte(struct lguest *lg, int idx,
                       unsigned long vaddr, gpte_t gpte)
{
        /* Look up the matching shadow page directory entry. */
        spgd_t *spgd = spgd_addr(lg, idx, vaddr);

        /* If the top level isn't present, there's no entry to update. */
        if (spgd->flags & _PAGE_PRESENT) {
                /* Otherwise, we start by releasing the existing entry. */
                spte_t *spte = spte_addr(lg, *spgd, vaddr);
                release_pte(*spte);

                /* If they're setting this entry as dirty or accessed, we might
                 * as well put that entry they've given us in now.  This shaves
                 * 10% off a copy-on-write micro-benchmark. */
                if (gpte.flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
                        check_gpte(lg, gpte);
                        *spte = gpte_to_spte(lg, gpte, gpte.flags & _PAGE_DIRTY);
                } else
                        /* Otherwise we can demand_page() it in later. */
                        spte->raw.val = 0;
        }
}
/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switch immensely. */
void guest_set_pte(struct lguest *lg,
                   unsigned long cr3, unsigned long vaddr, gpte_t gpte)
{
        /* Kernel mappings must be changed on all top levels.  Slow, but
         * doesn't happen often. */
        if (vaddr >= lg->page_offset) {
                unsigned int i;
                for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                        if (lg->pgdirs[i].pgdir)
                                do_set_pte(lg, i, vaddr, gpte);
        } else {
                /* Is this page table one we have a shadow for? */
                int pgdir = find_pgdir(lg, cr3);
                if (pgdir != ARRAY_SIZE(lg->pgdirs))
                        /* If so, do the update. */
                        do_set_pte(lg, pgdir, vaddr, gpte);
        }
}
/*H:400
 * (iii) Setting up a page table entry when the Guest tells us it has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry: */
void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
{
        int pgdir;

        /* The kernel seems to try to initialize this early on: we ignore its
         * attempts to map over the Switcher. */
        if (idx >= SWITCHER_PGD_INDEX)
                return;

        /* If they're talking about a page table we have a shadow for... */
        pgdir = find_pgdir(lg, cr3);
        if (pgdir < ARRAY_SIZE(lg->pgdirs))
                /* ... throw it away. */
                release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}
/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is.  We set some things up here: */
int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
        /* In flush_user_mappings() we loop from 0 to
         * "vaddr_to_pgd_index(lg->page_offset)".  This assumes it won't hit
         * the Switcher mappings, so check that now. */
        if (vaddr_to_pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
                return -EINVAL;
        /* We start on the first shadow page table, and give it a blank PGD
         * page. */
        lg->pgdidx = 0;
        lg->pgdirs[lg->pgdidx].cr3 = pgtable;
        lg->pgdirs[lg->pgdidx].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
        if (!lg->pgdirs[lg->pgdidx].pgdir)
                return -ENOMEM;
        return 0;
}
/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
        unsigned int i;

        /* Throw away all page table pages. */
        release_all_pagetables(lg);
        /* Now free the top levels: free_page() can handle 0 just fine. */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                free_page((long)lg->pgdirs[i].pgdir);
}
/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be available to the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in. */
void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
{
        spte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
        spgd_t switcher_pgd;
        spte_t regs_pte;

        /* Make the last PGD entry for this Guest point to the Switcher's PTE
         * page for this CPU (with appropriate flags). */
        switcher_pgd.pfn = __pa(switcher_pte_page) >> PAGE_SHIFT;
        switcher_pgd.flags = _PAGE_KERNEL;
        lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

        /* We also change the Switcher PTE page.  When we're running the Guest,
         * we want the Guest's "regs" page to appear where the first Switcher
         * page for this CPU is.  This is an optimization: when the Switcher
         * saves the Guest registers, it saves them into the first page of this
         * CPU's "struct lguest_pages": if we make sure the Guest's register
         * page is already mapped there, we don't have to copy them out
         * again. */
        regs_pte.pfn = __pa(lg->regs_page) >> PAGE_SHIFT;
        regs_pte.flags = _PAGE_KERNEL;
        switcher_pte_page[(unsigned long)pages / PAGE_SIZE % PTES_PER_PAGE]
                = regs_pte;
}
static void free_switcher_pte_pages(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                free_page((long)switcher_pte_page(i));
}
/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
                                              struct page *switcher_page[],
                                              unsigned int pages)
{
        unsigned int i;
        spte_t *pte = switcher_pte_page(cpu);

        /* The first entries are easy: they map the Switcher code. */
        for (i = 0; i < pages; i++) {
                pte[i].pfn = page_to_pfn(switcher_page[i]);
                pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
        }

        /* The only other thing we map is this CPU's pair of pages. */
        i = pages + cpu*2;
        /* First page (Guest registers) is writable from the Guest */
        pte[i].pfn = page_to_pfn(switcher_page[i]);
        pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW;
        /* The second page contains the "struct lguest_ro_state", and is
         * read-only. */
        pte[i+1].pfn = page_to_pfn(switcher_page[i+1]);
        pte[i+1].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
}
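
/* Purely as an illustration (the driver computes this inline above): for a
 * Switcher of "pages" code pages, each CPU's PTE page ends up laid out as
 * entries [0 .. pages-1] mapping the Switcher code, entry pages + cpu*2
 * mapping that CPU's writable register page, and the entry after that mapping
 * its read-only "struct lguest_ro_state".  The helper name is made up. */
static inline unsigned int example_regs_pte_index(unsigned int cpu,
                                                  unsigned int pages)
{
        /* Index of the writable "regs" entry for this CPU. */
        return pages + cpu * 2;
}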
/*H:510 At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU. */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                switcher_pte_page(i) = (spte_t *)get_zeroed_page(GFP_KERNEL);
                if (!switcher_pte_page(i)) {
                        free_switcher_pte_pages();
                        return -ENOMEM;
                }
                populate_switcher_pte_page(i, switcher_page, pages);
        }
        return 0;
}
/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
        free_switcher_pte_pages();
}
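
/* Purely as an illustration (the real callers live elsewhere in the driver):
 * roughly how these entry points pair up over the module's lifetime.  The
 * function name and arguments are made up for this sketch. */
static inline int example_pagetable_lifecycle(struct lguest *lg,
                                              struct page **switcher_page,
                                              unsigned int pages,
                                              unsigned long guest_toplevel)
{
        int err;

        /* Once, at module load: the per-CPU Switcher PTE pages. */
        err = init_pagetables(switcher_page, pages);
        if (err)
                return err;
        /* Once per Guest: start shadowing the toplevel the Launcher gave us. */
        err = init_guest_pagetable(lg, guest_toplevel);
        if (err) {
                free_pagetables();
                return err;
        }
        /* ... the Guest runs ...  Then tear down in the reverse order. */
        free_guest_pagetable(lg);
        free_pagetables();
        return 0;
}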