/*
 * The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual-to-physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest.
 */
/* Copyright (C) Rusty Russell IBM Corporation 2013.
 * GPL v2 and any later version */
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"
/*
 * We hold references to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root.
 */
/*
 * We use two-level page tables for the Guest, or three-level with PAE.  If
 * you're not entirely comfortable with virtual addresses, physical addresses
 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
 * Table Handling" (with diagrams!).
 */
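/*
 * As a quick worked example (mine, not from boot.c; assuming 4K pages and
 * no PAE): the virtual address 0xC0101234 decomposes as
 *
 *	pgd_index = 0xC0101234 >> 22          = 768
 *	pte_index = (0xC0101234 >> 12) & 1023 = 257
 *	offset    = 0xC0101234 & 0xFFF        = 0x234
 *
 * so the hardware walks pgdir[768] to find a PTE page, picks entry 257 of
 * that page, then adds the 0x234 byte offset.
 */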
/*
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 */
/*
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
 */
/*
 * The Switcher uses the complete top PTE page.  That's 1024 PTE entries (4MB)
 * or 512 PTE entries with PAE (2MB).
 */
#define SWITCHER_PGD_INDEX	(PTRS_PER_PGD - 1)
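/*
 * To check the arithmetic in the comment above: one PTE page holds
 * PTRS_PER_PTE entries, each mapping a 4K page, so a full PTE page covers
 * 1024 * 4K = 4MB (or, with PAE, 512 * 4K = 2MB).  And with a non-PAE
 * PTRS_PER_PGD of 1024, index 1023 is the top PGD slot, covering virtual
 * addresses 0xFFC00000 and up.
 */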
/*
 * For PAE we need the PMD index as well.  We use the last 2MB, so we
 * will need the last pmd entry of the last pmd page.
 */
#ifdef CONFIG_X86_PAE
#define CHECK_GPGD_MASK _PAGE_PRESENT
#else
#define CHECK_GPGD_MASK _PAGE_TABLE
#endif
/*
 * The page table code is curly enough to need helper functions to keep it
 * clear and clean.  The kernel itself provides many of them; one advantage
 * of insisting that the Guest and Host use the same CONFIG_X86_PAE setting.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one).
 */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
	unsigned int index = pgd_index(vaddr);

	/* Return a pointer to the index'th pgd entry for the i'th page table. */
	return &cpu->lg->pgdirs[i].pgdir[index];
}
/*
 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page.  It then returns a pointer to the PMD entry for the
 * given address.
 */
#ifdef CONFIG_X86_PAE
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
	unsigned int index = pmd_index(vaddr);
	pmd_t *page;

	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

	return &page[index];
}
#endif
/*
 * This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns a
 * pointer to the PTE entry for the given address.
 */
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

	/* You should never call this if the PMD entry wasn't valid */
	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif

	return &page[pte_index(vaddr)];
}
/*
 * These functions are just like the above, except they access the Guest
 * page tables.  Hence they return a Guest address.
 */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned int index = vaddr >> (PGDIR_SHIFT);
	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}
#ifdef CONFIG_X86_PAE
/* Follow the PGD to the PMD. */
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + pmd_index(vaddr) * sizeof(pmd_t);
}
/* Follow the PMD to the PTE. */
static unsigned long gpte_addr(struct lg_cpu *cpu,
			       pmd_t gpmd, unsigned long vaddr)
{
	unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;

	BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
	return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#else
/* Follow the PGD to the PTE (no mid-level for !PAE). */
static unsigned long gpte_addr(struct lg_cpu *cpu,
			       pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#endif
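/*
 * Note the difference from the s* helpers above: these return Guest
 * *addresses*, not pointers we can dereference.  A sketch of the idiom
 * (this exact pairing appears in demand_page() below):
 *
 *	pgd_t gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
 *	pte_t gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
 */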
/*
 * get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting).
 */
/*
 * This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back.
 */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;

	/* gup me one page at this address please! */
	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
		return page_to_pfn(page);

	/* This value indicates failure. */
	return -1UL;
}
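/*
 * A sketch of the gup contract we rely on here: get_user_pages_fast()
 * returns how many pages it pinned, and each pinned page must eventually
 * be released with put_page().  So a pin-then-release round trip for one
 * Launcher address looks like (illustrative only):
 *
 *	struct page *page;
 *	if (get_user_pages_fast(addr & PAGE_MASK, 1, 1, &page) == 1)
 *		put_page(page);
 *
 * In our case the put_page() happens much later, in release_pte().
 */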
/*
 * Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number.
 */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
	unsigned long pfn, base, flags;

	/*
	 * The Guest sets the global flag, because it thinks that it is using
	 * PGE.  We only told it to use PGE so it would tell us whether it was
	 * flushing a kernel mapping or a userspace mapping.  We don't actually
	 * use the global bit, so throw it away.
	 */
	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

	/* The Guest's pages are offset inside the Launcher. */
	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

	/*
	 * We need a temporary "unsigned long" variable to hold the answer from
	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
	 * page, given the virtual number.
	 */
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
		/*
		 * When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them.  Make sure we don't think
		 * this one is valid!
		 */
		flags = 0;
	}
	/* Now we assemble our shadow PTE from the page number and flags. */
	return pfn_pte(pfn, __pgprot(flags));
}
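/*
 * A worked example (the mem_base value is made up): if the Launcher mapped
 * the Guest's memory at its own virtual address 0x10000000, then base =
 * 0x10000000 / 4096 = 0x10000, and a Guest PTE pointing at guest pfn 5
 * becomes a gup of the Launcher's virtual pfn 0x10005.  The shadow PTE then
 * holds whatever real physical pfn gup pinned, plus the Guest's flags
 * (minus _PAGE_GLOBAL).
 */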
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
	/*
	 * Remember that get_user_pages_fast() took a reference to the page, in
	 * get_pfn()?  We have to put it back now.
	 */
	if (pte_flags(pte) & _PAGE_PRESENT)
		put_page(pte_page(pte));
}
static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
{
	/* We don't handle large pages. */
	if (pte_flags(gpte) & _PAGE_PSE)
		return false;

	return (pte_pfn(gpte) >= cpu->lg->pfn_limit
		&& pte_pfn(gpte) < cpu->lg->device_limit);
}
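/*
 * For example (limits made up): with pfn_limit = 0x8000 (128MB of Guest
 * RAM) and device_limit = 0x8800, a Guest PTE for pfn 0x8010 is neither
 * normal RAM nor an error: it falls in the I/O window, so demand_page()
 * below bounces the access out to the Launcher instead of killing the
 * Guest.
 */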
static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
	if ((pte_flags(gpte) & _PAGE_PSE) ||
	    pte_pfn(gpte) >= cpu->lg->pfn_limit) {
		kill_guest(cpu, "bad page table entry");
		return false;
	}
	return true;
}
static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
	if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
	    (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
		kill_guest(cpu, "bad page directory entry");
		return false;
	}
	return true;
}
#ifdef CONFIG_X86_PAE
static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
	    (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
		kill_guest(cpu, "bad page middle directory entry");
		return false;
	}
	return true;
}
#endif
/*
 * This is the core routine to walk the shadow page tables and find the page
 * table entry for a specific address.
 *
 * If allocate is set, then we allocate any missing levels, setting the flags
 * on the new page directory and mid-level directories using the arguments
 * (which are copied from the Guest's page table entries).
 */
static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
			int pgd_flags, int pmd_flags)
{
	pgd_t *spgd;
	/* Mid level for PAE. */
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif

	/* Get top level entry. */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage;

		/* If they didn't want us to allocate anything, stop. */
		if (!allocate)
			return NULL;

		ptepage = get_zeroed_page(GFP_KERNEL);
		/*
		 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return NULL;
		}

		/*
		 * And we copy the flags to the shadow PGD entry.  The page
		 * number in the shadow PGD is the page we just allocated.
		 */
		set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
	}

	/*
	 * Intel's Physical Address Extension actually uses three levels of
	 * page tables, so we need to look in the mid-level.
	 */
#ifdef CONFIG_X86_PAE
	/* Now look at the mid-level shadow entry. */
	spmd = spmd_addr(cpu, *spgd, vaddr);

	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage;

		/* If they didn't want us to allocate anything, stop. */
		if (!allocate)
			return NULL;

		ptepage = get_zeroed_page(GFP_KERNEL);
		/*
		 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pmd page");
			return NULL;
		}

		/*
		 * And we copy the flags to the shadow PMD entry.  The page
		 * number in the shadow PMD is the page we just allocated.
		 */
		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
	}
#endif

	/* Get the pointer to the shadow PTE entry we're going to set. */
	return spte_addr(cpu, *spgd, vaddr);
}
/*
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true.  Otherwise, it was a real fault and we need to tell the Guest.
 *
 * There's a corner case: they're trying to access memory between
 * pfn_limit and device_limit, which is I/O memory.  In this case, we
 * return false and set @iomem to the physical address, so the
 * Launcher can handle the instruction manually.
 */
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
		 unsigned long *iomem)
{
	unsigned long gpte_ptr;
	pte_t gpte;
	pte_t *spte;
	pmd_t gpmd;
	pgd_t gpgd;

	*iomem = 0;

	/* We never demand page the Switcher, so trying is a mistake. */
	if (vaddr >= switcher_addr)
		return false;

	/* First step: get the top-level Guest page table entry. */
	if (unlikely(cpu->linear_pages)) {
		/* Faking up a linear mapping. */
		gpgd = __pgd(CHECK_GPGD_MASK);
	} else {
		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
		/* Toplevel not present?  We can't map it in. */
		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
			return false;

		/*
		 * This kills the Guest if it has weird flags or tries to
		 * refer to a "physical" address outside the bounds.
		 */
		if (!check_gpgd(cpu, gpgd))
			return false;
	}

	/* This "mid-level" entry is only used for non-linear, PAE mode. */
	gpmd = __pmd(_PAGE_TABLE);

#ifdef CONFIG_X86_PAE
	if (likely(!cpu->linear_pages)) {
		gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
		/* Middle level not present?  We can't map it in. */
		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
			return false;

		/*
		 * This kills the Guest if it has weird flags or tries to
		 * refer to a "physical" address outside the bounds.
		 */
		if (!check_gpmd(cpu, gpmd))
			return false;
	}

	/*
	 * OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later.
	 */
	gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
#else
	/*
	 * OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later.
	 */
	gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif

	if (unlikely(cpu->linear_pages)) {
		/* Linear?  Make up a PTE which points to same page. */
		gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
	} else {
		/* Read the actual PTE value. */
		gpte = lgread(cpu, gpte_ptr, pte_t);
	}

	/* If this page isn't in the Guest page tables, we can't page it in. */
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		return false;

	/*
	 * Check they're not trying to write to a page the Guest wants
	 * read-only (bit 2 of errcode == write).
	 */
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
		return false;

	/* User access to a kernel-only page? (bit 3 == user access) */
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
		return false;

	/* If they're accessing io memory, we expect a fault. */
	if (gpte_in_iomem(cpu, gpte)) {
		*iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
		return false;
	}

	/*
	 * Check that the Guest PTE flags are OK, and the page number is below
	 * the pfn_limit (ie. not mapping the Launcher binary).
	 */
	if (!check_gpte(cpu, gpte))
		return false;

	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
	gpte = pte_mkyoung(gpte);
	if (errcode & 2)
		gpte = pte_mkdirty(gpte);

	/* Get the pointer to the shadow PTE entry we're going to set. */
	spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
	if (!spte)
		return false;

	/*
	 * If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry.
	 */
	release_pte(*spte);

	/*
	 * If this is a write, we insist that the Guest page is writable (the
	 * final arg to gpte_to_spte()).
	 */
	if (pte_dirty(gpte))
		*spte = gpte_to_spte(cpu, gpte, 1);
	else
		/*
		 * If this is a read, don't set the "writable" bit in the page
		 * table entry, even if the Guest says it's writable.  That way
		 * we will come back here when a write does actually occur, so
		 * we can update the Guest's _PAGE_DIRTY flag.
		 */
		set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

	/*
	 * Finally, we write the Guest PTE entry back: we've set the
	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
	 */
	if (likely(!cpu->linear_pages))
		lgwrite(cpu, gpte_ptr, pte_t, gpte);

	/*
	 * The fault is fixed, the page table is populated, the mapping
	 * manipulated, the result returned and the code complete.  A small
	 * delay and a trace of alliteration are the only indications the Guest
	 * has that a page fault occurred at all.
	 */
	return true;
}
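/*
 * A sketch of the errcode we just decoded (the hardware page fault error
 * code; the comments above count bits from 1, so "bit 2" is the value-2
 * bit and "bit 3" is the value-4 bit):
 *
 *	value 1: fault was on a present page (protection, not missing)
 *	value 2: fault was a write
 *	value 4: fault came from userspace
 *
 * That's why pin_page() below fakes a write fault by passing errcode 2.
 */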
/*
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable?
 */
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
	pte_t *spte;
	unsigned long flags;

	/* You can't put your stack in the Switcher! */
	if (vaddr >= switcher_addr)
		return false;

	/* If there's no shadow PTE, it's not writable. */
	spte = find_spte(cpu, vaddr, false, 0, 0);
	if (!spte)
		return false;

	/*
	 * Check the flags on the pte entry itself: it must be present and
	 * writable.
	 */
	flags = pte_flags(*spte);
	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}
/*
 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write").
 */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned long iomem;

	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
		kill_guest(cpu, "bad stack page %#lx", vaddr);
}
#ifdef CONFIG_X86_PAE
static void release_pmd(pmd_t *spmd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pmd_flags(*spmd) & _PAGE_PRESENT) {
		unsigned int i;
		pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PMD entry so we never release it twice. */
		set_pmd(spmd, __pmd(0));
	}
}
static void release_pgd(pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

		for (i = 0; i < PTRS_PER_PMD; i++)
			release_pmd(&pmdpage[i]);

		/* Now we can free the page of PMDs */
		free_page((long)pmdpage);
		/* And zero out the PGD entry so we never release it twice. */
		set_pgd(spgd, __pgd(0));
	}
}
#else /* !CONFIG_X86_PAE */
/*
 * If we chase down the release_pgd() code, the non-PAE version looks like
 * this.  The PAE version is almost identical, but instead of calling
 * release_pte() it calls release_pmd(), which looks much like this.
 */
static void release_pgd(pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		/*
		 * Converting the pfn to find the actual PTE page is easy: turn
		 * the page number into a physical address, then convert to a
		 * virtual address (easy for kernel pages like this one).
		 */
		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		set_pgd(spgd, __pgd(0));
	}
}
#endif
/*
 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address.
 */
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;

	/* Release every pgd entry up to the kernel's address. */
	for (i = 0; i < pgd_index(lg->kernel_address); i++)
		release_pgd(lg->pgdirs[idx].pgdir + i);
}
/*
 * (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed.
 */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
	/* Drop the userspace part of the current page table. */
	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/* We walk down the guest page tables to get a guest-physical address */
bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
{
	pgd_t gpgd;
	pte_t gpte;
#ifdef CONFIG_X86_PAE
	pmd_t gpmd;
#endif

	/* Still not set up?  Just map 1:1. */
	if (unlikely(cpu->linear_pages)) {
		*paddr = vaddr;
		return true;
	}

	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present?  We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
		goto fail;

#ifdef CONFIG_X86_PAE
	gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
	if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
		goto fail;
	gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
#endif
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		goto fail;

	*paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
	return true;

fail:
	*paddr = -1UL;
	return false;
}
/*
 * This is the version we normally use: kills the Guest if it uses a
 * bad address.
 */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned long paddr;

	if (!__guest_pa(cpu, vaddr, &paddr))
		kill_guest(cpu, "Bad address %#lx", vaddr);
	return paddr;
}
/*
 * We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us.
 */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
			break;
	return i;
}
/*
 * And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;

	/*
	 * We pick one entry at random to throw out.  Choosing the Least
	 * Recently Used might be better, but this is easy.
	 */
	next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else {
			/*
			 * This is a blank page, so there are no kernel
			 * mappings: caller must map the stack!
			 */
			*blank_pgdir = 1;
		}
	}
	/* Record which Guest toplevel this shadows. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);

	/* This hasn't run on any CPU at all. */
	cpu->lg->pgdirs[next].last_host_cpu = -1;

	return next;
}
/*
 * We do need the Switcher code mapped at all times, so we allocate that
 * part of the Guest page table here.  We map the Switcher code immediately,
 * but defer mapping of the guest register page and IDT/LDT etc page until
 * just before we run the guest in map_switcher_in_guest().
 *
 * We *could* do this setup in map_switcher_in_guest(), but at that point
 * we have interrupts disabled, and allocating pages like that is fraught: we
 * can't sleep if we need to free up some memory.
 */
static bool allocate_switcher_mapping(struct lg_cpu *cpu)
{
	int i;

	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
		pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
				       CHECK_GPGD_MASK, _PAGE_TABLE);
		if (!pte)
			return false;

		/*
		 * Map the switcher page if not already there.  It might
		 * already be there because we call allocate_switcher_mapping()
		 * in guest_set_pgd() just in case it did discard our Switcher
		 * mapping, but it probably didn't.
		 */
		if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
			/* Get a reference to the Switcher page. */
			get_page(lg_switcher_pages[0]);
			/* Create a read-only, executable, kernel-style PTE */
			set_pte(pte,
				mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
		}
	}
	cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
	return true;
}
/*
 * Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings.  This is used
 * when we destroy the Guest.
 */
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
		if (!lg->pgdirs[i].pgdir)
			continue;

		/* Every PGD entry. */
		for (j = 0; j < PTRS_PER_PGD; j++)
			release_pgd(lg->pgdirs[i].pgdir + j);
		lg->pgdirs[i].switcher_mapped = false;
		lg->pgdirs[i].last_host_cpu = -1;
	}
}
/*
 * We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare.
 */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
	release_all_pagetables(cpu->lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(cpu);
	/* And we need Switcher allocated. */
	if (!allocate_switcher_mapping(cpu))
		kill_guest(cpu, "Cannot populate switcher mapping");
}
/*
 * (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch.
 */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/*
	 * The very first time they call this, we're actually running without
	 * any page tables; we've been making it up.  Throw them away now.
	 */
	if (unlikely(cpu->linear_pages)) {
		release_all_pagetables(cpu->lg);
		cpu->linear_pages = false;
		/* Force allocation of a new pgdir. */
		newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
	} else {
		/* Look to see if we have this one already. */
		newpgdir = find_pgdir(cpu->lg, pgtable);
	}

	/*
	 * If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1.
	 */
	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
		newpgdir = new_pgdir(cpu, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	cpu->cpu_pgd = newpgdir;

	/*
	 * If it was completely blank, we map in the Guest kernel stack and
	 * the Switcher.
	 */
	if (repin)
		pin_stack_pages(cpu);

	if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
		if (!allocate_switcher_mapping(cpu))
			kill_guest(cpu, "Cannot populate switcher mapping");
	}
}
/*
 * Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind.
 */
/*
 * This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void __guest_set_pte(struct lg_cpu *cpu, int idx,
			    unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif

	/* If the top level isn't present, there's no entry to update. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
#ifdef CONFIG_X86_PAE
		spmd = spmd_addr(cpu, *spgd, vaddr);
		if (pmd_flags(*spmd) & _PAGE_PRESENT) {
#endif
			/* Otherwise, start by releasing the existing entry. */
			pte_t *spte = spte_addr(cpu, *spgd, vaddr);
			release_pte(*spte);

			/*
			 * If they're setting this entry as dirty or accessed,
			 * we might as well put that entry they've given us in
			 * now.  This shaves 10% off a copy-on-write
			 * micro-benchmark.
			 */
			if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
			    && !gpte_in_iomem(cpu, gpte)) {
				if (!check_gpte(cpu, gpte))
					return;
				set_pte(spte,
					gpte_to_spte(cpu, gpte,
						pte_flags(gpte) & _PAGE_DIRTY));
			} else {
				/*
				 * Otherwise kill it and we can demand_page()
				 * it in later.
				 */
				set_pte(spte, __pte(0));
			}
#ifdef CONFIG_X86_PAE
		}
#endif
	}
}
/*
 * Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switch immensely.
 */
void guest_set_pte(struct lg_cpu *cpu,
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
	/* We don't let you remap the Switcher; we need it to get back! */
	if (vaddr >= switcher_addr) {
		kill_guest(cpu, "attempt to set pte into Switcher pages");
		return;
	}

	/*
	 * Kernel mappings must be changed on all top levels.  Slow, but doesn't
	 * happen often.
	 */
	if (vaddr >= cpu->lg->kernel_address) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
			if (cpu->lg->pgdirs[i].pgdir)
				__guest_set_pte(cpu, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
			/* If so, do the update. */
			__guest_set_pte(cpu, pgdir, vaddr, gpte);
	}
}
/*
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
	int pgdir;

	if (idx > PTRS_PER_PGD) {
		kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
			   idx, PTRS_PER_PGD);
		return;
	}

	/* If they're talking about a page table we have a shadow for... */
	pgdir = find_pgdir(lg, gpgdir);
	if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
		/* ... throw it away. */
		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
		/* That might have been the Switcher mapping, remap it. */
		if (!allocate_switcher_mapping(&lg->cpus[0])) {
			kill_guest(&lg->cpus[0],
				   "Cannot populate switcher mapping");
		}
		lg->pgdirs[pgdir].last_host_cpu = -1;
	}
}
#ifdef CONFIG_X86_PAE
/* For setting a mid-level, we just throw everything away.  It's easy. */
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
{
	guest_pagetable_clear_all(&lg->cpus[0]);
}
#endif
/*
 * (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, we initialize a shadow page table which
 * we will populate on future faults.  The Guest doesn't have any actual
 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
 * for the moment.
 *
 * We do need the Switcher to be mapped at all times, so we allocate that
 * part of the Guest page table here.
 */
int init_guest_pagetable(struct lguest *lg)
{
	struct lg_cpu *cpu = &lg->cpus[0];
	int allocated = 0;

	/* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
	if (!allocated)
		return -ENOMEM;

	/* We start with a linear mapping until the Guest initializes. */
	cpu->linear_pages = true;

	/* Allocate the page tables for the Switcher. */
	if (!allocate_switcher_mapping(cpu)) {
		release_all_pagetables(lg);
		return -ENOMEM;
	}

	return 0;
}
/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
	/*
	 * We tell the Guest that it can't use the virtual addresses
	 * used by the Switcher.  This trick is equivalent to 4GB -
	 * switcher_addr.
	 */
	u32 top = ~switcher_addr + 1;
	/* We get the kernel address: above this is all kernel memory. */
	if (get_user(cpu->lg->kernel_address,
		     &cpu->lg->lguest_data->kernel_address)
		/*
		 * We tell the Guest that it can't use the top virtual
		 * addresses (used by the Switcher).
		 */
	    || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
		return;
	}
	/*
	 * In flush_user_mappings() we loop from 0 to
	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
	 * Switcher mappings, so check that now.
	 */
	if (cpu->lg->kernel_address >= switcher_addr)
		kill_guest(cpu, "bad kernel address %#lx",
			   cpu->lg->kernel_address);
}
/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}
/*
 * This clears the Switcher mappings for cpu #i.
 */
static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
{
	unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
	pte_t *pte;

	/* Clear the mappings for both pages. */
	pte = find_spte(cpu, base, false, 0, 0);
	release_pte(*pte);
	set_pte(pte, __pte(0));

	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
	release_pte(*pte);
	set_pte(pte, __pte(0));
}
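/*
 * The layout this assumes, with one Switcher code page followed by two
 * pages per Host CPU (so cpu #i's pair starts at PAGE_SIZE + i*2 pages):
 *
 *	switcher_addr + 0*PAGE_SIZE: Switcher code (lg_switcher_pages[0])
 *	switcher_addr + 1*PAGE_SIZE: cpu 0's regs page
 *	switcher_addr + 2*PAGE_SIZE: cpu 0's read-only (IDT/GDT etc.) page
 *	switcher_addr + 3*PAGE_SIZE: cpu 1's regs page, and so on.
 */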
/*
 * (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the Guest
 * (and not the pages for other CPUs).
 *
 * The pages for the pagetables have all been allocated before: we just need
 * to make sure the actual PTEs are up-to-date for the CPU we're about to run
 * on.
 */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	unsigned long base;
	struct page *percpu_switcher_page, *regs_page;
	pte_t *pte;
	struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];

	/* Switcher page should always be mapped by now! */
	BUG_ON(!pgdir->switcher_mapped);

	/*
	 * Remember that we have two pages for each Host CPU, so we can run a
	 * Guest on each CPU without them interfering.  We need to make sure
	 * those pages are mapped correctly in the Guest, but since we usually
	 * run on the same CPU, we cache that, and only update the mappings
	 * when we move.
	 */
	if (pgdir->last_host_cpu == raw_smp_processor_id())
		return;

	/* -1 means unknown so we remove everything. */
	if (pgdir->last_host_cpu == -1) {
		unsigned int i;
		for_each_possible_cpu(i)
			remove_switcher_percpu_map(cpu, i);
	} else {
		/* We know exactly what CPU mapping to remove. */
		remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
	}

	/*
	 * When we're running the Guest, we want the Guest's "regs" page to
	 * appear where the first Switcher page for this CPU is.  This is an
	 * optimization: when the Switcher saves the Guest registers, it saves
	 * them into the first page of this CPU's "struct lguest_pages": if we
	 * make sure the Guest's register page is already mapped there, we
	 * don't have to copy them out again.
	 */
	/* Find the shadow PTE for this regs page. */
	base = switcher_addr + PAGE_SIZE
		+ raw_smp_processor_id() * sizeof(struct lguest_pages);
	pte = find_spte(cpu, base, false, 0, 0);
	regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
	get_page(regs_page);
	set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));

	/*
	 * We map the second page of the struct lguest_pages read-only in
	 * the Guest: the IDT, GDT and other things it's not supposed to
	 * change.
	 */
	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
	percpu_switcher_page
		= lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
	get_page(percpu_switcher_page);
	set_pte(pte, mk_pte(percpu_switcher_page,
			    __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));

	pgdir->last_host_cpu = raw_smp_processor_id();
}
/*
 * We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in keeping shadow page tables
 * in sync with the Guest's page tables is there for one reason: for most
 * Guests this page table dance determines how bad performance will be.  This
 * is why Xen uses exotic direct Guest pagetable manipulation, and why both
 * Intel and AMD have implemented shadow page table support directly into
 * hardware.
 *
 * There is just one file remaining in the Host.
 */