arch/x86/xen/mmu.c
1 /*
2 * Xen mmu operations
4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
41 #include <linux/sched.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/module.h>
47 #include <linux/gfp.h>
48 #include <linux/memblock.h>
49 #include <linux/seq_file.h>
50 #include <linux/crash_dump.h>
52 #include <trace/events/xen.h>
54 #include <asm/pgtable.h>
55 #include <asm/tlbflush.h>
56 #include <asm/fixmap.h>
57 #include <asm/mmu_context.h>
58 #include <asm/setup.h>
59 #include <asm/paravirt.h>
60 #include <asm/e820.h>
61 #include <asm/linkage.h>
62 #include <asm/page.h>
63 #include <asm/init.h>
64 #include <asm/pat.h>
65 #include <asm/smp.h>
67 #include <asm/xen/hypercall.h>
68 #include <asm/xen/hypervisor.h>
70 #include <xen/xen.h>
71 #include <xen/page.h>
72 #include <xen/interface/xen.h>
73 #include <xen/interface/hvm/hvm_op.h>
74 #include <xen/interface/version.h>
75 #include <xen/interface/memory.h>
76 #include <xen/hvc-console.h>
78 #include "multicalls.h"
79 #include "mmu.h"
80 #include "debugfs.h"
83 * Protects atomic reservation decrease/increase against concurrent increases.
84 * Also protects non-atomic updates of current_pages and balloon lists.
86 DEFINE_SPINLOCK(xen_reservation_lock);
88 #ifdef CONFIG_X86_32
90 * Identity map, in addition to plain kernel map. This needs to be
91 * large enough to allocate the page table pages needed to map the rest.
92 * Each page can map 2MB.
94 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
95 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
96 #endif
97 #ifdef CONFIG_X86_64
98 /* l3 pud for userspace vsyscall mapping */
99 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
100 #endif /* CONFIG_X86_64 */
103 * Note about cr3 (pagetable base) values:
105 * xen_cr3 contains the current logical cr3 value; it contains the
106 * last set cr3. This may not be the current effective cr3, because
107 * its update may be being lazily deferred. However, a vcpu looking
108 * at its own cr3 can use this value knowing that everything will
109 * be self-consistent.
111 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
112 * hypercall to set the vcpu cr3 is complete (so it may be a little
113 * out of date, but it will never be set early). If one vcpu is
114 * looking at another vcpu's cr3 value, it should use this variable.
116 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
117 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
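#if 0
/*
 * Illustrative sketch, not part of the original file: per the note above,
 * code inspecting a *different* vcpu must compare against xen_current_cr3,
 * because xen_cr3 may run ahead of the hardware while the update is still
 * queued in a lazy multicall batch.  "example_cpu_uses_pgd" is hypothetical.
 */
static bool example_cpu_uses_pgd(int cpu, pgd_t *pgd)
{
	return per_cpu(xen_current_cr3, cpu) == __pa(pgd);
}
#endif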
121 * Just beyond the highest usermode address. STACK_TOP_MAX has a
122 * redzone above it, so round it up to a PGD boundary.
124 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
126 unsigned long arbitrary_virt_to_mfn(void *vaddr)
128 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
130 return PFN_DOWN(maddr.maddr);
133 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
135 unsigned long address = (unsigned long)vaddr;
136 unsigned int level;
137 pte_t *pte;
138 unsigned offset;
141 * if the PFN is in the linear mapped vaddr range, we can just use
142 * the (quick) virt_to_machine() p2m lookup
144 if (virt_addr_valid(vaddr))
145 return virt_to_machine(vaddr);
147 /* otherwise we have to do a (slower) full page-table walk */
149 pte = lookup_address(address, &level);
150 BUG_ON(pte == NULL);
151 offset = address & ~PAGE_MASK;
152 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
154 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
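#if 0
/*
 * Illustrative sketch, not part of the original file: both lookup paths of
 * arbitrary_virt_to_machine().  A lowmem/linear-mapped address takes the
 * quick virt_to_machine() p2m lookup; a vmalloc'd address falls back to the
 * slower lookup_address() page-table walk.  "example_lookup" is hypothetical.
 */
static void example_lookup(void)
{
	static char buf[64];			/* linear map: fast path */
	void *vbuf = vmalloc(PAGE_SIZE);	/* vmalloc: slow path */

	pr_info("lowmem  maddr %llx\n",
		(unsigned long long)arbitrary_virt_to_machine(buf).maddr);
	if (vbuf)
		pr_info("vmalloc maddr %llx\n",
			(unsigned long long)arbitrary_virt_to_machine(vbuf).maddr);
	vfree(vbuf);
}
#endif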
156 void make_lowmem_page_readonly(void *vaddr)
158 pte_t *pte, ptev;
159 unsigned long address = (unsigned long)vaddr;
160 unsigned int level;
162 pte = lookup_address(address, &level);
163 if (pte == NULL)
164 return; /* vaddr missing */
166 ptev = pte_wrprotect(*pte);
168 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
169 BUG();
172 void make_lowmem_page_readwrite(void *vaddr)
174 pte_t *pte, ptev;
175 unsigned long address = (unsigned long)vaddr;
176 unsigned int level;
178 pte = lookup_address(address, &level);
179 if (pte == NULL)
180 return; /* vaddr missing */
182 ptev = pte_mkwrite(*pte);
184 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
185 BUG();
189 static bool xen_page_pinned(void *ptr)
191 struct page *page = virt_to_page(ptr);
193 return PagePinned(page);
196 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
198 struct multicall_space mcs;
199 struct mmu_update *u;
201 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
203 mcs = xen_mc_entry(sizeof(*u));
204 u = mcs.args;
206 /* ptep might be kmapped when using 32-bit HIGHPTE */
207 u->ptr = virt_to_machine(ptep).maddr;
208 u->val = pte_val_ma(pteval);
210 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
212 xen_mc_issue(PARAVIRT_LAZY_MMU);
214 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
216 static void xen_extend_mmu_update(const struct mmu_update *update)
218 struct multicall_space mcs;
219 struct mmu_update *u;
221 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
223 if (mcs.mc != NULL) {
224 mcs.mc->args[1]++;
225 } else {
226 mcs = __xen_mc_entry(sizeof(*u));
227 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
230 u = mcs.args;
231 *u = *update;
234 static void xen_extend_mmuext_op(const struct mmuext_op *op)
236 struct multicall_space mcs;
237 struct mmuext_op *u;
239 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
241 if (mcs.mc != NULL) {
242 mcs.mc->args[1]++;
243 } else {
244 mcs = __xen_mc_entry(sizeof(*u));
245 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
248 u = mcs.args;
249 *u = *op;
252 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
254 struct mmu_update u;
256 preempt_disable();
258 xen_mc_batch();
260 /* ptr may be ioremapped for 64-bit pagetable setup */
261 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
262 u.val = pmd_val_ma(val);
263 xen_extend_mmu_update(&u);
265 xen_mc_issue(PARAVIRT_LAZY_MMU);
267 preempt_enable();
270 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
272 trace_xen_mmu_set_pmd(ptr, val);
274 /* If page is not pinned, we can just update the entry
275 directly */
276 if (!xen_page_pinned(ptr)) {
277 *ptr = val;
278 return;
281 xen_set_pmd_hyper(ptr, val);
285 * Associate a virtual page frame with a given physical page frame
286 * and protection flags for that frame.
288 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
290 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
293 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
295 struct mmu_update u;
297 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
298 return false;
300 xen_mc_batch();
302 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
303 u.val = pte_val_ma(pteval);
304 xen_extend_mmu_update(&u);
306 xen_mc_issue(PARAVIRT_LAZY_MMU);
308 return true;
311 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
313 if (!xen_batched_set_pte(ptep, pteval)) {
315 * Could call native_set_pte() here and trap and
316 * emulate the PTE write but with 32-bit guests this
317 * needs two traps (one for each of the two 32-bit
318 * words in the PTE) so do one hypercall directly
319 * instead.
321 struct mmu_update u;
323 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
324 u.val = pte_val_ma(pteval);
325 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
329 static void xen_set_pte(pte_t *ptep, pte_t pteval)
331 trace_xen_mmu_set_pte(ptep, pteval);
332 __xen_set_pte(ptep, pteval);
335 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
336 pte_t *ptep, pte_t pteval)
338 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
339 __xen_set_pte(ptep, pteval);
342 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
343 unsigned long addr, pte_t *ptep)
345 /* Just return the pte as-is. We preserve the bits on commit */
346 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
347 return *ptep;
350 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
351 pte_t *ptep, pte_t pte)
353 struct mmu_update u;
355 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
356 xen_mc_batch();
358 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
359 u.val = pte_val_ma(pte);
360 xen_extend_mmu_update(&u);
362 xen_mc_issue(PARAVIRT_LAZY_MMU);
365 /* Assume pteval_t is equivalent to all the other *val_t types. */
366 static pteval_t pte_mfn_to_pfn(pteval_t val)
368 if (val & _PAGE_PRESENT) {
369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
370 unsigned long pfn = mfn_to_pfn(mfn);
372 pteval_t flags = val & PTE_FLAGS_MASK;
373 if (unlikely(pfn == ~0))
374 val = flags & ~_PAGE_PRESENT;
375 else
376 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
379 return val;
382 static pteval_t pte_pfn_to_mfn(pteval_t val)
384 if (val & _PAGE_PRESENT) {
385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
386 pteval_t flags = val & PTE_FLAGS_MASK;
387 unsigned long mfn;
389 if (!xen_feature(XENFEAT_auto_translated_physmap))
390 mfn = get_phys_to_machine(pfn);
391 else
392 mfn = pfn;
394 * If there's no mfn for the pfn, then just create an
395 * empty non-present pte. Unfortunately this loses
396 * information about the original pfn, so
397 * pte_mfn_to_pfn is asymmetric.
399 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
400 mfn = 0;
401 flags = 0;
402 } else {
404 * Paramount to do this test _after_ the
405 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
406 * IDENTITY_FRAME_BIT resolves to true.
408 mfn &= ~FOREIGN_FRAME_BIT;
409 if (mfn & IDENTITY_FRAME_BIT) {
410 mfn &= ~IDENTITY_FRAME_BIT;
411 flags |= _PAGE_IOMAP;
414 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
417 return val;
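#if 0
/*
 * Illustrative sketch, not part of the original file: the two helpers above
 * are deliberately asymmetric.  A present pte whose pfn has no backing mfn
 * (e.g. a ballooned-out page) is converted to an empty, non-present pte, so
 * the original pfn cannot be recovered on the way back.  "example_roundtrip"
 * is hypothetical.
 */
static void example_roundtrip(unsigned long pfn)
{
	pteval_t pte = ((pteval_t)pfn << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_RW;
	pteval_t machine = pte_pfn_to_mfn(pte);	 /* pfn -> mfn, flags kept */
	pteval_t back = pte_mfn_to_pfn(machine); /* mfn -> pfn, flags kept */

	pr_info("pfn %lx -> machine pte %llx -> back %llx\n",
		pfn, (unsigned long long)machine, (unsigned long long)back);
}
#endif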
420 static pteval_t iomap_pte(pteval_t val)
422 if (val & _PAGE_PRESENT) {
423 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
424 pteval_t flags = val & PTE_FLAGS_MASK;
426 /* We assume the pte frame number is a MFN, so
427 just use it as-is. */
428 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
431 return val;
434 __visible pteval_t xen_pte_val(pte_t pte)
436 pteval_t pteval = pte.pte;
437 #if 0
438 /* If this is a WC pte, convert back from Xen WC to Linux WC */
439 if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
440 WARN_ON(!pat_enabled);
441 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
443 #endif
444 if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
445 return pteval;
447 return pte_mfn_to_pfn(pteval);
449 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
451 __visible pgdval_t xen_pgd_val(pgd_t pgd)
453 return pte_mfn_to_pfn(pgd.pgd);
455 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
458 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
459 * are reserved for now, to correspond to the Intel-reserved PAT
460 * types.
462 * We expect Linux's PAT set as follows:
464 * Idx PTE flags Linux Xen Default
465 * 0 WB WB WB
466 * 1 PWT WC WT WT
467 * 2 PCD UC- UC- UC-
468 * 3 PCD PWT UC UC UC
469 * 4 PAT WB WC WB
470 * 5 PAT PWT WC WP WT
471 * 6 PAT PCD UC- rsv UC-
472 * 7 PAT PCD PWT UC rsv UC
475 void xen_set_pat(u64 pat)
477 /* We expect Linux to use a PAT setting of
478 * UC UC- WC WB (ignoring the PAT flag) */
479 WARN_ON(pat != 0x0007010600070106ull);
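/*
 * Worked decode of the constant above (editorial note, not in the original
 * file): each PAT entry occupies one byte of the MSR, entry 0 in the lowest
 * byte, and the relevant type encodings are 0=UC, 1=WC, 6=WB, 7=UC-.  So
 * 0x0007010600070106 gives entries 0..7 = WB, WC, UC-, UC, WB, WC, UC-, UC,
 * i.e. the "UC UC- WC WB" layout (repeated) that the comment expects.
 */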
482 __visible pte_t xen_make_pte(pteval_t pte)
484 phys_addr_t addr = (pte & PTE_PFN_MASK);
485 #if 0
486 /* If Linux is trying to set a WC pte, then map to the Xen WC.
487 * If _PAGE_PAT is set, then it probably means it is really
488 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
489 * things work out OK...
491 * (We should never see kernel mappings with _PAGE_PSE set,
492 * but we could see hugetlbfs mappings, I think.).
494 if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
495 if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
496 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
498 #endif
500 * Unprivileged domains are allowed to do IOMAP mappings for
501 * PCI passthrough, but not map ISA space. The ISA
502 * mappings are just dummy local mappings to keep other
503 * parts of the kernel happy.
505 if (unlikely(pte & _PAGE_IOMAP) &&
506 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
507 pte = iomap_pte(pte);
508 } else {
509 pte &= ~_PAGE_IOMAP;
510 pte = pte_pfn_to_mfn(pte);
513 return native_make_pte(pte);
515 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
517 __visible pgd_t xen_make_pgd(pgdval_t pgd)
519 pgd = pte_pfn_to_mfn(pgd);
520 return native_make_pgd(pgd);
522 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
524 __visible pmdval_t xen_pmd_val(pmd_t pmd)
526 return pte_mfn_to_pfn(pmd.pmd);
528 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
530 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
532 struct mmu_update u;
534 preempt_disable();
536 xen_mc_batch();
538 /* ptr may be ioremapped for 64-bit pagetable setup */
539 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
540 u.val = pud_val_ma(val);
541 xen_extend_mmu_update(&u);
543 xen_mc_issue(PARAVIRT_LAZY_MMU);
545 preempt_enable();
548 static void xen_set_pud(pud_t *ptr, pud_t val)
550 trace_xen_mmu_set_pud(ptr, val);
552 /* If page is not pinned, we can just update the entry
553 directly */
554 if (!xen_page_pinned(ptr)) {
555 *ptr = val;
556 return;
559 xen_set_pud_hyper(ptr, val);
562 #ifdef CONFIG_X86_PAE
563 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
565 trace_xen_mmu_set_pte_atomic(ptep, pte);
566 set_64bit((u64 *)ptep, native_pte_val(pte));
569 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
571 trace_xen_mmu_pte_clear(mm, addr, ptep);
572 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
573 native_pte_clear(mm, addr, ptep);
576 static void xen_pmd_clear(pmd_t *pmdp)
578 trace_xen_mmu_pmd_clear(pmdp);
579 set_pmd(pmdp, __pmd(0));
581 #endif /* CONFIG_X86_PAE */
583 __visible pmd_t xen_make_pmd(pmdval_t pmd)
585 pmd = pte_pfn_to_mfn(pmd);
586 return native_make_pmd(pmd);
588 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
590 #if PAGETABLE_LEVELS == 4
591 __visible pudval_t xen_pud_val(pud_t pud)
593 return pte_mfn_to_pfn(pud.pud);
595 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
597 __visible pud_t xen_make_pud(pudval_t pud)
599 pud = pte_pfn_to_mfn(pud);
601 return native_make_pud(pud);
603 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
605 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
607 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
608 unsigned offset = pgd - pgd_page;
609 pgd_t *user_ptr = NULL;
611 if (offset < pgd_index(USER_LIMIT)) {
612 struct page *page = virt_to_page(pgd_page);
613 user_ptr = (pgd_t *)page->private;
614 if (user_ptr)
615 user_ptr += offset;
618 return user_ptr;
621 static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
623 struct mmu_update u;
625 u.ptr = virt_to_machine(ptr).maddr;
626 u.val = pgd_val_ma(val);
627 xen_extend_mmu_update(&u);
631 * Raw hypercall-based set_pgd, intended for use in early boot before
632 * there's a page structure. This implies:
633 * 1. The only existing pagetable is the kernel's
634 * 2. It is always pinned
635 * 3. It has no user pagetable attached to it
637 static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
639 preempt_disable();
641 xen_mc_batch();
643 __xen_set_pgd_hyper(ptr, val);
645 xen_mc_issue(PARAVIRT_LAZY_MMU);
647 preempt_enable();
650 static void xen_set_pgd(pgd_t *ptr, pgd_t val)
652 pgd_t *user_ptr = xen_get_user_pgd(ptr);
654 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
656 /* If page is not pinned, we can just update the entry
657 directly */
658 if (!xen_page_pinned(ptr)) {
659 *ptr = val;
660 if (user_ptr) {
661 WARN_ON(xen_page_pinned(user_ptr));
662 *user_ptr = val;
664 return;
667 /* If it's pinned, then we can at least batch the kernel and
668 user updates together. */
669 xen_mc_batch();
671 __xen_set_pgd_hyper(ptr, val);
672 if (user_ptr)
673 __xen_set_pgd_hyper(user_ptr, val);
675 xen_mc_issue(PARAVIRT_LAZY_MMU);
677 #endif /* PAGETABLE_LEVELS == 4 */
680 * (Yet another) pagetable walker. This one is intended for pinning a
681 * pagetable. This means that it walks a pagetable and calls the
682 * callback function on each page it finds making up the page table,
683 * at every level. It walks the entire pagetable, but it only bothers
684 * pinning pte pages which are below limit. In the normal case this
685 * will be STACK_TOP_MAX, but at boot we need to pin up to
686 * FIXADDR_TOP.
688 * For 32-bit the important bit is that we don't pin beyond there,
689 * because then we start getting into Xen's ptes.
691 * For 64-bit, we must skip the Xen hole in the middle of the address
692 * space, just after the big x86-64 virtual hole.
694 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
695 int (*func)(struct mm_struct *mm, struct page *,
696 enum pt_level),
697 unsigned long limit)
699 int flush = 0;
700 unsigned hole_low, hole_high;
701 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
702 unsigned pgdidx, pudidx, pmdidx;
704 /* The limit is the last byte to be touched */
705 limit--;
706 BUG_ON(limit >= FIXADDR_TOP);
708 if (xen_feature(XENFEAT_auto_translated_physmap))
709 return 0;
712 * 64-bit has a great big hole in the middle of the address
713 * space, which contains the Xen mappings. On 32-bit these
714 * will end up making a zero-sized hole and so this is a no-op.
716 hole_low = pgd_index(USER_LIMIT);
717 hole_high = pgd_index(PAGE_OFFSET);
719 pgdidx_limit = pgd_index(limit);
720 #if PTRS_PER_PUD > 1
721 pudidx_limit = pud_index(limit);
722 #else
723 pudidx_limit = 0;
724 #endif
725 #if PTRS_PER_PMD > 1
726 pmdidx_limit = pmd_index(limit);
727 #else
728 pmdidx_limit = 0;
729 #endif
731 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
732 pud_t *pud;
734 if (pgdidx >= hole_low && pgdidx < hole_high)
735 continue;
737 if (!pgd_val(pgd[pgdidx]))
738 continue;
740 pud = pud_offset(&pgd[pgdidx], 0);
742 if (PTRS_PER_PUD > 1) /* not folded */
743 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
745 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
746 pmd_t *pmd;
748 if (pgdidx == pgdidx_limit &&
749 pudidx > pudidx_limit)
750 goto out;
752 if (pud_none(pud[pudidx]))
753 continue;
755 pmd = pmd_offset(&pud[pudidx], 0);
757 if (PTRS_PER_PMD > 1) /* not folded */
758 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
760 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
761 struct page *pte;
763 if (pgdidx == pgdidx_limit &&
764 pudidx == pudidx_limit &&
765 pmdidx > pmdidx_limit)
766 goto out;
768 if (pmd_none(pmd[pmdidx]))
769 continue;
771 pte = pmd_page(pmd[pmdidx]);
772 flush |= (*func)(mm, pte, PT_PTE);
777 out:
778 /* Do the top level last, so that the callbacks can use it as
779 a cue to do final things like tlb flushes. */
780 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
782 return flush;
785 static int xen_pgd_walk(struct mm_struct *mm,
786 int (*func)(struct mm_struct *mm, struct page *,
787 enum pt_level),
788 unsigned long limit)
790 return __xen_pgd_walk(mm, mm->pgd, func, limit);
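#if 0
/*
 * Illustrative sketch, not part of the original file: the shape of a walker
 * callback.  xen_pgd_walk() invokes it once per pagetable page at every
 * level and ORs the return values together; a non-zero result tells the
 * caller a TLB flush is needed.  "example_count_page" is hypothetical and
 * assumes the enum pt_level values from mmu.h (PT_PGD..PT_PTE).
 */
static int example_count_page(struct mm_struct *mm, struct page *page,
			      enum pt_level level)
{
	static atomic_t seen[PT_PTE + 1];

	atomic_inc(&seen[level]);
	return 0;				/* no flush required */
}

/* Usage: xen_pgd_walk(&init_mm, example_count_page, FIXADDR_TOP); */
#endif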
793 /* If we're using split pte locks, then take the page's lock and
794 return a pointer to it. Otherwise return NULL. */
795 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
797 spinlock_t *ptl = NULL;
799 #if USE_SPLIT_PTE_PTLOCKS
800 ptl = ptlock_ptr(page);
801 spin_lock_nest_lock(ptl, &mm->page_table_lock);
802 #endif
804 return ptl;
807 static void xen_pte_unlock(void *v)
809 spinlock_t *ptl = v;
810 spin_unlock(ptl);
813 static void xen_do_pin(unsigned level, unsigned long pfn)
815 struct mmuext_op op;
817 op.cmd = level;
818 op.arg1.mfn = pfn_to_mfn(pfn);
820 xen_extend_mmuext_op(&op);
823 static int xen_pin_page(struct mm_struct *mm, struct page *page,
824 enum pt_level level)
826 unsigned pgfl = TestSetPagePinned(page);
827 int flush;
829 if (pgfl)
830 flush = 0; /* already pinned */
831 else if (PageHighMem(page))
832 /* kmaps need flushing if we found an unpinned
833 highpage */
834 flush = 1;
835 else {
836 void *pt = lowmem_page_address(page);
837 unsigned long pfn = page_to_pfn(page);
838 struct multicall_space mcs = __xen_mc_entry(0);
839 spinlock_t *ptl;
841 flush = 0;
844 * We need to hold the pagetable lock between the time
845 * we make the pagetable RO and when we actually pin
846 * it. If we don't, then other users may come in and
847 * attempt to update the pagetable by writing it,
848 * which will fail because the memory is RO but not
849 * pinned, so Xen won't do the trap'n'emulate.
851 * If we're using split pte locks, we can't hold the
852 * entire pagetable's worth of locks during the
853 * traverse, because we may wrap the preempt count (8
854 * bits). The solution is to mark RO and pin each PTE
855 * page while holding the lock. This means the number
856 * of locks we end up holding is never more than a
857 * batch size (~32 entries, at present).
859 * If we're not using split pte locks, we needn't pin
860 * the PTE pages independently, because we're
861 * protected by the overall pagetable lock.
863 ptl = NULL;
864 if (level == PT_PTE)
865 ptl = xen_pte_lock(page, mm);
867 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
868 pfn_pte(pfn, PAGE_KERNEL_RO),
869 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
871 if (ptl) {
872 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
874 /* Queue a deferred unlock for when this batch
875 is completed. */
876 xen_mc_callback(xen_pte_unlock, ptl);
880 return flush;
883 /* This is called just after a mm has been created, but it has not
884 been used yet. We need to make sure that its pagetable is all
885 read-only, and can be pinned. */
886 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
888 trace_xen_mmu_pgd_pin(mm, pgd);
890 xen_mc_batch();
892 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
893 /* re-enable interrupts for flushing */
894 xen_mc_issue(0);
896 kmap_flush_unused();
898 xen_mc_batch();
901 #ifdef CONFIG_X86_64
903 pgd_t *user_pgd = xen_get_user_pgd(pgd);
905 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
907 if (user_pgd) {
908 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
909 xen_do_pin(MMUEXT_PIN_L4_TABLE,
910 PFN_DOWN(__pa(user_pgd)));
913 #else /* CONFIG_X86_32 */
914 #ifdef CONFIG_X86_PAE
915 /* Need to make sure unshared kernel PMD is pinnable */
916 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
917 PT_PMD);
918 #endif
919 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
920 #endif /* CONFIG_X86_64 */
921 xen_mc_issue(0);
924 static void xen_pgd_pin(struct mm_struct *mm)
926 __xen_pgd_pin(mm, mm->pgd);
930 * On save, we need to pin all pagetables to make sure they get their
931 * mfns turned into pfns. Search the list for any unpinned pgds and pin
932 * them (unpinned pgds are not currently in use, probably because the
933 * process is under construction or destruction).
935 * Expected to be called in stop_machine() ("equivalent to taking
936 * every spinlock in the system"), so the locking doesn't really
937 * matter all that much.
939 void xen_mm_pin_all(void)
941 struct page *page;
943 spin_lock(&pgd_lock);
945 list_for_each_entry(page, &pgd_list, lru) {
946 if (!PagePinned(page)) {
947 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
948 SetPageSavePinned(page);
952 spin_unlock(&pgd_lock);
956 * The init_mm pagetable is really pinned as soon as it's created, but
957 * that's before we have page structures to store the bits. So do all
958 * the book-keeping now.
960 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
961 enum pt_level level)
963 SetPagePinned(page);
964 return 0;
967 static void __init xen_mark_init_mm_pinned(void)
969 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
972 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
973 enum pt_level level)
975 unsigned pgfl = TestClearPagePinned(page);
977 if (pgfl && !PageHighMem(page)) {
978 void *pt = lowmem_page_address(page);
979 unsigned long pfn = page_to_pfn(page);
980 spinlock_t *ptl = NULL;
981 struct multicall_space mcs;
984 * Do the converse to pin_page. If we're using split
985 * pte locks, we must be holding the lock while
986 * the pte page is unpinned but still RO to prevent
987 * concurrent updates from seeing it in this
988 * partially-pinned state.
990 if (level == PT_PTE) {
991 ptl = xen_pte_lock(page, mm);
993 if (ptl)
994 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
997 mcs = __xen_mc_entry(0);
999 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1000 pfn_pte(pfn, PAGE_KERNEL),
1001 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1003 if (ptl) {
1004 /* unlock when batch completed */
1005 xen_mc_callback(xen_pte_unlock, ptl);
1009 return 0; /* never need to flush on unpin */
1012 /* Release a pagetable's pages back as normal RW */
1013 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
1015 trace_xen_mmu_pgd_unpin(mm, pgd);
1017 xen_mc_batch();
1019 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1021 #ifdef CONFIG_X86_64
1023 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1025 if (user_pgd) {
1026 xen_do_pin(MMUEXT_UNPIN_TABLE,
1027 PFN_DOWN(__pa(user_pgd)));
1028 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
1031 #endif
1033 #ifdef CONFIG_X86_PAE
1034 /* Need to make sure unshared kernel PMD is unpinned */
1035 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
1036 PT_PMD);
1037 #endif
1039 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
1041 xen_mc_issue(0);
1044 static void xen_pgd_unpin(struct mm_struct *mm)
1046 __xen_pgd_unpin(mm, mm->pgd);
1050 * On resume, undo any pinning done at save, so that the rest of the
1051 * kernel doesn't see any unexpected pinned pagetables.
1053 void xen_mm_unpin_all(void)
1055 struct page *page;
1057 spin_lock(&pgd_lock);
1059 list_for_each_entry(page, &pgd_list, lru) {
1060 if (PageSavePinned(page)) {
1061 BUG_ON(!PagePinned(page));
1062 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
1063 ClearPageSavePinned(page);
1067 spin_unlock(&pgd_lock);
1070 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1072 spin_lock(&next->page_table_lock);
1073 xen_pgd_pin(next);
1074 spin_unlock(&next->page_table_lock);
1077 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1079 spin_lock(&mm->page_table_lock);
1080 xen_pgd_pin(mm);
1081 spin_unlock(&mm->page_table_lock);
1085 #ifdef CONFIG_SMP
1086 /* Another cpu may still have its %cr3 pointing at the pagetable, so
1087 we need to repoint it somewhere else before we can unpin it. */
1088 static void drop_other_mm_ref(void *info)
1090 struct mm_struct *mm = info;
1091 struct mm_struct *active_mm;
1093 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
1095 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
1096 leave_mm(smp_processor_id());
1098 /* If this cpu still has a stale cr3 reference, then make sure
1099 it has been flushed. */
1100 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
1101 load_cr3(swapper_pg_dir);
1104 static void xen_drop_mm_ref(struct mm_struct *mm)
1106 cpumask_var_t mask;
1107 unsigned cpu;
1109 if (current->active_mm == mm) {
1110 if (current->mm == mm)
1111 load_cr3(swapper_pg_dir);
1112 else
1113 leave_mm(smp_processor_id());
1116 /* Get the "official" set of cpus referring to our pagetable. */
1117 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1118 for_each_online_cpu(cpu) {
1119 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1120 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1121 continue;
1122 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1124 return;
1126 cpumask_copy(mask, mm_cpumask(mm));
1128 /* It's possible that a vcpu may have a stale reference to our
1129 cr3, because it's in lazy mode, and it hasn't yet flushed
1130 its set of pending hypercalls. In this case, we can
1131 look at its actual current cr3 value, and force it to flush
1132 if needed. */
1133 for_each_online_cpu(cpu) {
1134 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1135 cpumask_set_cpu(cpu, mask);
1138 if (!cpumask_empty(mask))
1139 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1140 free_cpumask_var(mask);
1142 #else
1143 static void xen_drop_mm_ref(struct mm_struct *mm)
1145 if (current->active_mm == mm)
1146 load_cr3(swapper_pg_dir);
1148 #endif
1151 * While a process runs, Xen pins its pagetables, which means that the
1152 * hypervisor forces it to be read-only, and it controls all updates
1153 * to it. This means that all pagetable updates have to go via the
1154 * hypervisor, which is moderately expensive.
1156 * Since we're pulling the pagetable down, we switch to use init_mm,
1157 * unpin the old process's pagetable and mark it all read-write, which
1158 * allows further operations on it to be simple memory accesses.
1160 * The only subtle point is that another CPU may be still using the
1161 * pagetable because of lazy tlb flushing. This means we need to
1162 * switch all CPUs off this pagetable before we can unpin it.
1164 static void xen_exit_mmap(struct mm_struct *mm)
1166 get_cpu(); /* make sure we don't move around */
1167 xen_drop_mm_ref(mm);
1168 put_cpu();
1170 spin_lock(&mm->page_table_lock);
1172 /* pgd may not be pinned in the error exit path of execve */
1173 if (xen_page_pinned(mm->pgd))
1174 xen_pgd_unpin(mm);
1176 spin_unlock(&mm->page_table_lock);
1179 static void xen_post_allocator_init(void);
1181 #ifdef CONFIG_X86_64
1182 static void __init xen_cleanhighmap(unsigned long vaddr,
1183 unsigned long vaddr_end)
1185 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1186 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1188 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1189 * We include the PMD passed in on _both_ boundaries. */
1190 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1191 pmd++, vaddr += PMD_SIZE) {
1192 if (pmd_none(*pmd))
1193 continue;
1194 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1195 set_pmd(pmd, __pmd(0));
1197 /* In case we did something silly, we should crash in this function
1198 * instead of somewhere later and be confusing. */
1199 xen_mc_flush();
1201 static void __init xen_pagetable_p2m_copy(void)
1203 unsigned long size;
1204 unsigned long addr;
1205 unsigned long new_mfn_list;
1207 if (xen_feature(XENFEAT_auto_translated_physmap))
1208 return;
1210 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1212 new_mfn_list = xen_revector_p2m_tree();
1213 /* No memory or already called. */
1214 if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
1215 return;
1217 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1218 memset((void *)xen_start_info->mfn_list, 0xff, size);
1220 /* We should be in __ka space. */
1221 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1222 addr = xen_start_info->mfn_list;
1223 /* We round up to the PMD, which means that if anybody at this stage is
1224 * using the __ka address of xen_start_info or xen_start_info->shared_info
1225 * they are going to crash. Fortunately we have already revectored
1226 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1227 size = roundup(size, PMD_SIZE);
1228 xen_cleanhighmap(addr, addr + size);
1230 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1231 memblock_free(__pa(xen_start_info->mfn_list), size);
1232 /* And revector! Bye bye old array */
1233 xen_start_info->mfn_list = new_mfn_list;
1235 /* At this stage, cleanup_highmap has already cleaned __ka space
1236 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1237 * the ramdisk). We continue on, erasing PMD entries that point to page
1238 * tables - do note that they are accessible at this stage via __va.
1239 * For good measure we also round up to the PMD - which means that if
1240 * anybody is using the __ka address of the initial boot-stack - and tries
1241 * to use it - they are going to crash. The xen_start_info has been
1242 * taken care of already in xen_setup_kernel_pagetable. */
1243 addr = xen_start_info->pt_base;
1244 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1246 xen_cleanhighmap(addr, addr + size);
1247 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1248 #ifdef DEBUG
1249 /* This is superfluous and not necessary, but you know what,
1250 * let's do it. The MODULES_VADDR -> MODULES_END should be clear of
1251 * anything at this stage. */
1252 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1253 #endif
1255 #endif
1257 static void __init xen_pagetable_init(void)
1259 paging_init();
1260 xen_setup_shared_info();
1261 #ifdef CONFIG_X86_64
1262 xen_pagetable_p2m_copy();
1263 #endif
1264 xen_post_allocator_init();
1266 static void xen_write_cr2(unsigned long cr2)
1268 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1271 static unsigned long xen_read_cr2(void)
1273 return this_cpu_read(xen_vcpu)->arch.cr2;
1276 unsigned long xen_read_cr2_direct(void)
1278 return this_cpu_read(xen_vcpu_info.arch.cr2);
1281 void xen_flush_tlb_all(void)
1283 struct mmuext_op *op;
1284 struct multicall_space mcs;
1286 trace_xen_mmu_flush_tlb_all(0);
1288 preempt_disable();
1290 mcs = xen_mc_entry(sizeof(*op));
1292 op = mcs.args;
1293 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1294 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1296 xen_mc_issue(PARAVIRT_LAZY_MMU);
1298 preempt_enable();
1300 static void xen_flush_tlb(void)
1302 struct mmuext_op *op;
1303 struct multicall_space mcs;
1305 trace_xen_mmu_flush_tlb(0);
1307 preempt_disable();
1309 mcs = xen_mc_entry(sizeof(*op));
1311 op = mcs.args;
1312 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1313 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1315 xen_mc_issue(PARAVIRT_LAZY_MMU);
1317 preempt_enable();
1320 static void xen_flush_tlb_single(unsigned long addr)
1322 struct mmuext_op *op;
1323 struct multicall_space mcs;
1325 trace_xen_mmu_flush_tlb_single(addr);
1327 preempt_disable();
1329 mcs = xen_mc_entry(sizeof(*op));
1330 op = mcs.args;
1331 op->cmd = MMUEXT_INVLPG_LOCAL;
1332 op->arg1.linear_addr = addr & PAGE_MASK;
1333 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1335 xen_mc_issue(PARAVIRT_LAZY_MMU);
1337 preempt_enable();
1340 static void xen_flush_tlb_others(const struct cpumask *cpus,
1341 struct mm_struct *mm, unsigned long start,
1342 unsigned long end)
1344 struct {
1345 struct mmuext_op op;
1346 #ifdef CONFIG_SMP
1347 DECLARE_BITMAP(mask, num_processors);
1348 #else
1349 DECLARE_BITMAP(mask, NR_CPUS);
1350 #endif
1351 } *args;
1352 struct multicall_space mcs;
1354 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
1356 if (cpumask_empty(cpus))
1357 return; /* nothing to do */
1359 mcs = xen_mc_entry(sizeof(*args));
1360 args = mcs.args;
1361 args->op.arg2.vcpumask = to_cpumask(args->mask);
1363 /* Remove us, and any offline CPUS. */
1364 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1365 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1367 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1368 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
1369 args->op.cmd = MMUEXT_INVLPG_MULTI;
1370 args->op.arg1.linear_addr = start;
1373 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1375 xen_mc_issue(PARAVIRT_LAZY_MMU);
1378 static unsigned long xen_read_cr3(void)
1380 return this_cpu_read(xen_cr3);
1383 static void set_current_cr3(void *v)
1385 this_cpu_write(xen_current_cr3, (unsigned long)v);
1388 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1390 struct mmuext_op op;
1391 unsigned long mfn;
1393 trace_xen_mmu_write_cr3(kernel, cr3);
1395 if (cr3)
1396 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1397 else
1398 mfn = 0;
1400 WARN_ON(mfn == 0 && kernel);
1402 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1403 op.arg1.mfn = mfn;
1405 xen_extend_mmuext_op(&op);
1407 if (kernel) {
1408 this_cpu_write(xen_cr3, cr3);
1410 /* Update xen_current_cr3 once the batch has actually
1411 been submitted. */
1412 xen_mc_callback(set_current_cr3, (void *)cr3);
1415 static void xen_write_cr3(unsigned long cr3)
1417 BUG_ON(preemptible());
1419 xen_mc_batch(); /* disables interrupts */
1421 /* Update while interrupts are disabled, so it's atomic with
1422 respect to IPIs */
1423 this_cpu_write(xen_cr3, cr3);
1425 __xen_write_cr3(true, cr3);
1427 #ifdef CONFIG_X86_64
1429 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1430 if (user_pgd)
1431 __xen_write_cr3(false, __pa(user_pgd));
1432 else
1433 __xen_write_cr3(false, 0);
1435 #endif
1437 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1440 #ifdef CONFIG_X86_64
1442 * At the start of the day - when Xen launches a guest, it has already
1443 * built pagetables for the guest. We diligently look over them
1444 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1445 * init_level4_pgt and its friends. Then when we are happy we load
1446 * the new init_level4_pgt - and continue on.
1448 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1449 * up the rest of the pagetables. When it has completed it loads the cr3.
1450 * N.B. that baremetal would start at 'start_kernel' (and the early
1451 * #PF handler would create bootstrap pagetables) - so we are running
1452 * with the same assumptions as what to do when write_cr3 is executed
1453 * at this point.
1455 * Since there are no user-page tables at all, we have two variants
1456 * of xen_write_cr3 - the early bootup (this one), and the late one
1457 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1458 * the Linux kernel and user-space are both in ring 3 while the
1459 * hypervisor is in ring 0.
1461 static void __init xen_write_cr3_init(unsigned long cr3)
1463 BUG_ON(preemptible());
1465 xen_mc_batch(); /* disables interrupts */
1467 /* Update while interrupts are disabled, so it's atomic with
1468 respect to IPIs */
1469 this_cpu_write(xen_cr3, cr3);
1471 __xen_write_cr3(true, cr3);
1473 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1475 #endif
1477 static int xen_pgd_alloc(struct mm_struct *mm)
1479 pgd_t *pgd = mm->pgd;
1480 int ret = 0;
1482 BUG_ON(PagePinned(virt_to_page(pgd)));
1484 #ifdef CONFIG_X86_64
1486 struct page *page = virt_to_page(pgd);
1487 pgd_t *user_pgd;
1489 BUG_ON(page->private != 0);
1491 ret = -ENOMEM;
1493 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1494 page->private = (unsigned long)user_pgd;
1496 if (user_pgd != NULL) {
1497 user_pgd[pgd_index(VSYSCALL_START)] =
1498 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1499 ret = 0;
1502 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1504 #endif
1506 return ret;
1509 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1511 #ifdef CONFIG_X86_64
1512 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1514 if (user_pgd)
1515 free_page((unsigned long)user_pgd);
1516 #endif
1519 #ifdef CONFIG_X86_32
1520 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1522 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1523 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1524 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1525 pte_val_ma(pte));
1527 return pte;
1529 #else /* CONFIG_X86_64 */
1530 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1532 return pte;
1534 #endif /* CONFIG_X86_64 */
1537 * Init-time set_pte while constructing initial pagetables, which
1538 * doesn't allow RO page table pages to be remapped RW.
1540 * If there is no MFN for this PFN then this page is initially
1541 * ballooned out so clear the PTE (as in decrease_reservation() in
1542 * drivers/xen/balloon.c).
1544 * Many of these PTE updates are done on unpinned and writable pages
1545 * and doing a hypercall for these is unnecessary and expensive. At
1546 * this point it is not possible to tell if a page is pinned or not,
1547 * so always write the PTE directly and rely on Xen trapping and
1548 * emulating any updates as necessary.
1550 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1552 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1553 pte = mask_rw_pte(ptep, pte);
1554 else
1555 pte = __pte_ma(0);
1557 native_set_pte(ptep, pte);
1560 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1562 struct mmuext_op op;
1563 op.cmd = cmd;
1564 op.arg1.mfn = pfn_to_mfn(pfn);
1565 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1566 BUG();
1569 /* Early in boot, while setting up the initial pagetable, assume
1570 everything is pinned. */
1571 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1573 #ifdef CONFIG_FLATMEM
1574 BUG_ON(mem_map); /* should only be used early */
1575 #endif
1576 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1577 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1580 /* Used for pmd and pud */
1581 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1583 #ifdef CONFIG_FLATMEM
1584 BUG_ON(mem_map); /* should only be used early */
1585 #endif
1586 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1589 /* Early release_pte assumes that all pts are pinned, since there's
1590 only init_mm and anything attached to that is pinned. */
1591 static void __init xen_release_pte_init(unsigned long pfn)
1593 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1594 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1597 static void __init xen_release_pmd_init(unsigned long pfn)
1599 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1602 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1604 struct multicall_space mcs;
1605 struct mmuext_op *op;
1607 mcs = __xen_mc_entry(sizeof(*op));
1608 op = mcs.args;
1609 op->cmd = cmd;
1610 op->arg1.mfn = pfn_to_mfn(pfn);
1612 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1615 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1617 struct multicall_space mcs;
1618 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1620 mcs = __xen_mc_entry(0);
1621 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1622 pfn_pte(pfn, prot), 0);
1625 /* This needs to make sure the new pte page is pinned iff it's being
1626 attached to a pinned pagetable. */
1627 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1628 unsigned level)
1630 bool pinned = PagePinned(virt_to_page(mm->pgd));
1632 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1634 if (pinned) {
1635 struct page *page = pfn_to_page(pfn);
1637 SetPagePinned(page);
1639 if (!PageHighMem(page)) {
1640 xen_mc_batch();
1642 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1644 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1645 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1647 xen_mc_issue(PARAVIRT_LAZY_MMU);
1648 } else {
1649 /* make sure there are no stray mappings of
1650 this page */
1651 kmap_flush_unused();
1656 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1658 xen_alloc_ptpage(mm, pfn, PT_PTE);
1661 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1663 xen_alloc_ptpage(mm, pfn, PT_PMD);
1666 /* This should never happen until we're OK to use struct page */
1667 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1669 struct page *page = pfn_to_page(pfn);
1670 bool pinned = PagePinned(page);
1672 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1674 if (pinned) {
1675 if (!PageHighMem(page)) {
1676 xen_mc_batch();
1678 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1679 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1681 __set_pfn_prot(pfn, PAGE_KERNEL);
1683 xen_mc_issue(PARAVIRT_LAZY_MMU);
1685 ClearPagePinned(page);
1689 static void xen_release_pte(unsigned long pfn)
1691 xen_release_ptpage(pfn, PT_PTE);
1694 static void xen_release_pmd(unsigned long pfn)
1696 xen_release_ptpage(pfn, PT_PMD);
1699 #if PAGETABLE_LEVELS == 4
1700 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1702 xen_alloc_ptpage(mm, pfn, PT_PUD);
1705 static void xen_release_pud(unsigned long pfn)
1707 xen_release_ptpage(pfn, PT_PUD);
1709 #endif
1711 void __init xen_reserve_top(void)
1713 #ifdef CONFIG_X86_32
1714 unsigned long top = HYPERVISOR_VIRT_START;
1715 struct xen_platform_parameters pp;
1717 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1718 top = pp.virt_start;
1720 reserve_top_address(-top);
1721 #endif /* CONFIG_X86_32 */
1725 * Like __va(), but returns the address in the kernel mapping (which is
1726 * all we have until the physical memory mapping has been set up).
1728 static void *__ka(phys_addr_t paddr)
1730 #ifdef CONFIG_X86_64
1731 return (void *)(paddr + __START_KERNEL_map);
1732 #else
1733 return __va(paddr);
1734 #endif
1737 /* Convert a machine address to physical address */
1738 static unsigned long m2p(phys_addr_t maddr)
1740 phys_addr_t paddr;
1742 maddr &= PTE_PFN_MASK;
1743 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1745 return paddr;
1748 /* Convert a machine address to kernel virtual */
1749 static void *m2v(phys_addr_t maddr)
1751 return __ka(m2p(maddr));
1754 /* Set the page permissions on identity-mapped pages */
1755 static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
1757 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1758 pte_t pte = pfn_pte(pfn, prot);
1760 /* For PVH no need to set R/O or R/W to pin them or unpin them. */
1761 if (xen_feature(XENFEAT_auto_translated_physmap))
1762 return;
1764 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1765 BUG();
1767 static void set_page_prot(void *addr, pgprot_t prot)
1769 return set_page_prot_flags(addr, prot, UVMF_NONE);
1771 #ifdef CONFIG_X86_32
1772 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1774 unsigned pmdidx, pteidx;
1775 unsigned ident_pte;
1776 unsigned long pfn;
1778 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1779 PAGE_SIZE);
1781 ident_pte = 0;
1782 pfn = 0;
1783 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1784 pte_t *pte_page;
1786 /* Reuse or allocate a page of ptes */
1787 if (pmd_present(pmd[pmdidx]))
1788 pte_page = m2v(pmd[pmdidx].pmd);
1789 else {
1790 /* Check for free pte pages */
1791 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1792 break;
1794 pte_page = &level1_ident_pgt[ident_pte];
1795 ident_pte += PTRS_PER_PTE;
1797 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1800 /* Install mappings */
1801 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1802 pte_t pte;
1804 #ifdef CONFIG_X86_32
1805 if (pfn > max_pfn_mapped)
1806 max_pfn_mapped = pfn;
1807 #endif
1809 if (!pte_none(pte_page[pteidx]))
1810 continue;
1812 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1813 pte_page[pteidx] = pte;
1817 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1818 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1820 set_page_prot(pmd, PAGE_KERNEL_RO);
1822 #endif
1823 void __init xen_setup_machphys_mapping(void)
1825 struct xen_machphys_mapping mapping;
1827 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1828 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1829 machine_to_phys_nr = mapping.max_mfn + 1;
1830 } else {
1831 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1833 #ifdef CONFIG_X86_32
1834 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1835 < machine_to_phys_mapping);
1836 #endif
1839 #ifdef CONFIG_X86_64
1840 static void convert_pfn_mfn(void *v)
1842 pte_t *pte = v;
1843 int i;
1845 /* All levels are converted the same way, so just treat them
1846 as ptes. */
1847 for (i = 0; i < PTRS_PER_PTE; i++)
1848 pte[i] = xen_make_pte(pte[i].pte);
1850 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1851 unsigned long addr)
1853 if (*pt_base == PFN_DOWN(__pa(addr))) {
1854 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1855 clear_page((void *)addr);
1856 (*pt_base)++;
1858 if (*pt_end == PFN_DOWN(__pa(addr))) {
1859 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1860 clear_page((void *)addr);
1861 (*pt_end)--;
1865 * Set up the initial kernel pagetable.
1867 * We can construct this by grafting the Xen provided pagetable into
1868 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1869 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1870 * means that only the kernel has a physical mapping to start with -
1871 * but that's enough to get __va working. We need to fill in the rest
1872 * of the physical mapping once some sort of allocator has been set
1873 * up.
1874 * NOTE: for PVH, the page tables are native.
1876 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1878 pud_t *l3;
1879 pmd_t *l2;
1880 unsigned long addr[3];
1881 unsigned long pt_base, pt_end;
1882 unsigned i;
1884 /* max_pfn_mapped is the last pfn mapped in the initial memory
1885 * mappings. Considering that on Xen after the kernel mappings we
1886 * have the mappings of some pages that don't exist in pfn space, we
1887 * set max_pfn_mapped to the last real pfn mapped. */
1888 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1890 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1891 pt_end = pt_base + xen_start_info->nr_pt_frames;
1893 /* Zap identity mapping */
1894 init_level4_pgt[0] = __pgd(0);
1896 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1897 /* Pre-constructed entries are in pfn, so convert to mfn */
1898 /* L4[272] -> level3_ident_pgt
1899 * L4[511] -> level3_kernel_pgt */
1900 convert_pfn_mfn(init_level4_pgt);
1902 /* L3_i[0] -> level2_ident_pgt */
1903 convert_pfn_mfn(level3_ident_pgt);
1904 /* L3_k[510] -> level2_kernel_pgt
1905 * L3_i[511] -> level2_fixmap_pgt */
1906 convert_pfn_mfn(level3_kernel_pgt);
1908 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1909 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1910 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1912 addr[0] = (unsigned long)pgd;
1913 addr[1] = (unsigned long)l3;
1914 addr[2] = (unsigned long)l2;
1915 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1916 * Both L4[272][0] and L4[511][511] have entries that point to the same
1917 * L2 (PMD) tables. Meaning that if you modify it in __va space
1918 * it will also be modified in the __ka space! (But if you just
1919 * modify the PMD table to point to other PTE's or none, then you
1920 * are OK - which is what cleanup_highmap does) */
1921 copy_page(level2_ident_pgt, l2);
1922 /* Graft it onto L4[511][511] */
1923 copy_page(level2_kernel_pgt, l2);
1925 /* Get [511][510] and graft that in level2_fixmap_pgt */
1926 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1927 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1928 copy_page(level2_fixmap_pgt, l2);
1929 /* Note that we don't do anything with level1_fixmap_pgt which
1930 * we don't need. */
1931 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1932 /* Make pagetable pieces RO */
1933 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1934 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1935 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1936 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1937 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1938 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1939 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1941 /* Pin down new L4 */
1942 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1943 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1945 /* Unpin Xen-provided one */
1946 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1949 * At this stage there can be no user pgd, and no page
1950 * structure to attach it to, so make sure we just set kernel
1951 * pgd.
1953 xen_mc_batch();
1954 __xen_write_cr3(true, __pa(init_level4_pgt));
1955 xen_mc_issue(PARAVIRT_LAZY_CPU);
1956 } else
1957 native_write_cr3(__pa(init_level4_pgt));
1959 /* We can't easily rip out the L3 and L2, as the Xen pagetables are
1960 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1961 * the initial domain. For guests using the toolstack, they are in:
1962 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1963 * rip out the [L4] (pgd), but for guests we shave off three pages.
1965 for (i = 0; i < ARRAY_SIZE(addr); i++)
1966 check_pt_base(&pt_base, &pt_end, addr[i]);
1968 /* Our (by three pages) smaller Xen pagetable that we are using */
1969 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
1970 /* Revector the xen_start_info */
1971 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1973 #else /* !CONFIG_X86_64 */
1974 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1975 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1977 static void __init xen_write_cr3_init(unsigned long cr3)
1979 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1981 BUG_ON(read_cr3() != __pa(initial_page_table));
1982 BUG_ON(cr3 != __pa(swapper_pg_dir));
1985 * We are switching to swapper_pg_dir for the first time (from
1986 * initial_page_table) and therefore need to mark that page
1987 * read-only and then pin it.
1989 * Xen disallows sharing of kernel PMDs for PAE
1990 * guests. Therefore we must copy the kernel PMD from
1991 * initial_page_table into a new kernel PMD to be used in
1992 * swapper_pg_dir.
1994 swapper_kernel_pmd =
1995 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1996 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
1997 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1998 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1999 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2001 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2002 xen_write_cr3(cr3);
2003 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2005 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2006 PFN_DOWN(__pa(initial_page_table)));
2007 set_page_prot(initial_page_table, PAGE_KERNEL);
2008 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2010 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2013 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2015 pmd_t *kernel_pmd;
2017 initial_kernel_pmd =
2018 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2020 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2021 xen_start_info->nr_pt_frames * PAGE_SIZE +
2022 512*1024);
2024 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2025 copy_page(initial_kernel_pmd, kernel_pmd);
2027 xen_map_identity_early(initial_kernel_pmd, max_pfn);
2029 copy_page(initial_page_table, pgd);
2030 initial_page_table[KERNEL_PGD_BOUNDARY] =
2031 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2033 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2034 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2035 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2037 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2039 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2040 PFN_DOWN(__pa(initial_page_table)));
2041 xen_write_cr3(__pa(initial_page_table));
2043 memblock_reserve(__pa(xen_start_info->pt_base),
2044 xen_start_info->nr_pt_frames * PAGE_SIZE);
2046 #endif /* CONFIG_X86_64 */
2048 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2050 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2052 pte_t pte;
2054 phys >>= PAGE_SHIFT;
2056 switch (idx) {
2057 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2058 case FIX_RO_IDT:
2059 #ifdef CONFIG_X86_32
2060 case FIX_WP_TEST:
2061 case FIX_VDSO:
2062 # ifdef CONFIG_HIGHMEM
2063 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2064 # endif
2065 #else
2066 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
2067 case VVAR_PAGE:
2068 #endif
2069 case FIX_TEXT_POKE0:
2070 case FIX_TEXT_POKE1:
2071 /* All local page mappings */
2072 pte = pfn_pte(phys, prot);
2073 break;
2075 #ifdef CONFIG_X86_LOCAL_APIC
2076 case FIX_APIC_BASE: /* maps dummy local APIC */
2077 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2078 break;
2079 #endif
2081 #ifdef CONFIG_X86_IO_APIC
2082 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2083 /*
2084 * We just don't map the IO APIC - all access is via
2085 * hypercalls. Keep the address in the pte for reference.
2086 */
2087 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2088 break;
2089 #endif
2091 case FIX_PARAVIRT_BOOTMAP:
2092 /* This is an MFN, but it isn't an IO mapping from the
2093 IO domain */
2094 pte = mfn_pte(phys, prot);
2095 break;
2097 default:
2098 /* By default, set_fixmap is used for hardware mappings */
2099 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2100 break;
2103 __native_set_fixmap(idx, pte);
2105 #ifdef CONFIG_X86_64
2106 /* Replicate changes to map the vsyscall page into the user
2107 pagetable vsyscall mapping. */
2108 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
2109 idx == VVAR_PAGE) {
2110 unsigned long vaddr = __fix_to_virt(idx);
2111 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2113 #endif
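/*
 * Sketch of the distinction used throughout the switch above (editor
 * addition): the "local page mappings" slots are built with pfn_pte(), so
 * the pseudo-physical frame is translated to a machine frame when the pte
 * is installed, while the hardware/default slots are built with mfn_pte(),
 * whose argument is already a machine frame and is inserted untranslated
 * (the default case additionally sets _PAGE_IOMAP):
 *
 *	pte = pfn_pte(pfn, prot);	// pfn -> mfn via the p2m on insert
 *	pte = mfn_pte(mfn, prot);	// mfn used as-is, no p2m lookup
 */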
2116 static void __init xen_post_allocator_init(void)
2118 if (xen_feature(XENFEAT_auto_translated_physmap))
2119 return;
2121 pv_mmu_ops.set_pte = xen_set_pte;
2122 pv_mmu_ops.set_pmd = xen_set_pmd;
2123 pv_mmu_ops.set_pud = xen_set_pud;
2124 #if PAGETABLE_LEVELS == 4
2125 pv_mmu_ops.set_pgd = xen_set_pgd;
2126 #endif
2128 /* This will work as long as patching hasn't happened yet
2129 (which it hasn't) */
2130 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2131 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2132 pv_mmu_ops.release_pte = xen_release_pte;
2133 pv_mmu_ops.release_pmd = xen_release_pmd;
2134 #if PAGETABLE_LEVELS == 4
2135 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2136 pv_mmu_ops.release_pud = xen_release_pud;
2137 #endif
2139 #ifdef CONFIG_X86_64
2140 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2141 SetPagePinned(virt_to_page(level3_user_vsyscall));
2142 #endif
2143 xen_mark_init_mm_pinned();
2146 static void xen_leave_lazy_mmu(void)
2148 preempt_disable();
2149 xen_mc_flush();
2150 paravirt_leave_lazy_mmu();
2151 preempt_enable();
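/*
 * Illustrative use of the lazy MMU mode these hooks implement (editor
 * addition): generic pagetable code batches updates roughly as
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, pte++)
 *		set_pte_at(mm, addr, pte, newpte);
 *	arch_leave_lazy_mmu_mode();
 *
 * Under Xen each set_pte_at() is queued as a multicall, and the leave hook
 * (xen_leave_lazy_mmu) issues the queued batch via xen_mc_flush().
 */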
2154 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2155 .read_cr2 = xen_read_cr2,
2156 .write_cr2 = xen_write_cr2,
2158 .read_cr3 = xen_read_cr3,
2159 .write_cr3 = xen_write_cr3_init,
2161 .flush_tlb_user = xen_flush_tlb,
2162 .flush_tlb_kernel = xen_flush_tlb,
2163 .flush_tlb_single = xen_flush_tlb_single,
2164 .flush_tlb_others = xen_flush_tlb_others,
2166 .pte_update = paravirt_nop,
2167 .pte_update_defer = paravirt_nop,
2169 .pgd_alloc = xen_pgd_alloc,
2170 .pgd_free = xen_pgd_free,
2172 .alloc_pte = xen_alloc_pte_init,
2173 .release_pte = xen_release_pte_init,
2174 .alloc_pmd = xen_alloc_pmd_init,
2175 .release_pmd = xen_release_pmd_init,
2177 .set_pte = xen_set_pte_init,
2178 .set_pte_at = xen_set_pte_at,
2179 .set_pmd = xen_set_pmd_hyper,
2181 .ptep_modify_prot_start = __ptep_modify_prot_start,
2182 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2184 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2185 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2187 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2188 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2190 #ifdef CONFIG_X86_PAE
2191 .set_pte_atomic = xen_set_pte_atomic,
2192 .pte_clear = xen_pte_clear,
2193 .pmd_clear = xen_pmd_clear,
2194 #endif /* CONFIG_X86_PAE */
2195 .set_pud = xen_set_pud_hyper,
2197 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2198 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2200 #if PAGETABLE_LEVELS == 4
2201 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2202 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2203 .set_pgd = xen_set_pgd_hyper,
2205 .alloc_pud = xen_alloc_pmd_init,
2206 .release_pud = xen_release_pmd_init,
2207 #endif /* PAGETABLE_LEVELS == 4 */
2209 .activate_mm = xen_activate_mm,
2210 .dup_mmap = xen_dup_mmap,
2211 .exit_mmap = xen_exit_mmap,
2213 .lazy_mode = {
2214 .enter = paravirt_enter_lazy_mmu,
2215 .leave = xen_leave_lazy_mmu,
2216 .flush = paravirt_flush_lazy_mmu,
2219 .set_fixmap = xen_set_fixmap,
2222 void __init xen_init_mmu_ops(void)
2224 x86_init.paging.pagetable_init = xen_pagetable_init;
2226 /* Optimization: we could keep the HVM flush_tlb_others here, but it has
2227 * no idea which VCPUs are descheduled and so would IPI them needlessly.
2228 * Xen knows which are descheduled, so let the hypervisor do the job.
2229 */
2230 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2231 pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
2232 return;
2234 pv_mmu_ops = xen_mmu_ops;
2236 memset(dummy_mapping, 0xff, PAGE_SIZE);
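/*
 * Sketch of how the ops installed here are reached (editor addition,
 * simplified): once pv_mmu_ops = xen_mmu_ops has taken effect, a generic
 * call such as
 *
 *	set_pte(ptep, pteval);
 *
 * is dispatched by the paravirt layer to pv_mmu_ops.set_pte(), i.e.
 * xen_set_pte_init() during early boot and xen_set_pte() once
 * xen_post_allocator_init() has switched the pointers over. For
 * auto-translated guests only flush_tlb_others is replaced, as handled by
 * the early return above.
 */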
2239 /* Protected by xen_reservation_lock. */
2240 #define MAX_CONTIG_ORDER 9 /* 2MB */
2241 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
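/*
 * Editor's note on the sizing: MAX_CONTIG_ORDER of 9 allows at most
 * 1 << 9 = 512 frames, i.e. 512 * 4 KiB = 2 MiB per exchanged region (hence
 * the "2MB" comment), and discontig_frames holds 512 frame numbers,
 * 4 KiB of .bss on 64-bit.
 */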
2243 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
2244 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2245 unsigned long *in_frames,
2246 unsigned long *out_frames)
2248 int i;
2249 struct multicall_space mcs;
2251 xen_mc_batch();
2252 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2253 mcs = __xen_mc_entry(0);
2255 if (in_frames)
2256 in_frames[i] = virt_to_mfn(vaddr);
2258 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2259 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2261 if (out_frames)
2262 out_frames[i] = virt_to_pfn(vaddr);
2264 xen_mc_issue(0);
2267 /*
2268 * Update the pfn-to-mfn mappings for a virtual address range, either to
2269 * point to an array of mfns, or contiguously from a single starting
2270 * mfn.
2271 */
2272 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2273 unsigned long *mfns,
2274 unsigned long first_mfn)
2276 unsigned i, limit;
2277 unsigned long mfn;
2279 xen_mc_batch();
2281 limit = 1u << order;
2282 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2283 struct multicall_space mcs;
2284 unsigned flags;
2286 mcs = __xen_mc_entry(0);
2287 if (mfns)
2288 mfn = mfns[i];
2289 else
2290 mfn = first_mfn + i;
2292 if (i < (limit - 1))
2293 flags = 0;
2294 else {
2295 if (order == 0)
2296 flags = UVMF_INVLPG | UVMF_ALL;
2297 else
2298 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2301 MULTI_update_va_mapping(mcs.mc, vaddr,
2302 mfn_pte(mfn, PAGE_KERNEL), flags);
2304 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2307 xen_mc_issue(0);
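/*
 * Worked example of the flags logic above (editor addition): for order == 2
 * the loop issues four updates; entries 0-2 carry flags == 0 and only the
 * final entry carries UVMF_TLB_FLUSH | UVMF_ALL, so the TLBs of all VCPUs
 * are flushed exactly once, after the last PTE is written. For a single
 * page (order == 0) the cheaper UVMF_INVLPG | UVMF_ALL is used instead.
 */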
2310 /*
2311 * Perform the hypercall to exchange a region of our pfns to point to
2312 * memory with the required contiguous alignment. Takes the pfns as
2313 * input, and populates mfns as output.
2314 *
2315 * Returns a success code indicating whether the hypervisor was able to
2316 * satisfy the request or not.
2317 */
2318 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2319 unsigned long *pfns_in,
2320 unsigned long extents_out,
2321 unsigned int order_out,
2322 unsigned long *mfns_out,
2323 unsigned int address_bits)
2325 long rc;
2326 int success;
2328 struct xen_memory_exchange exchange = {
2329 .in = {
2330 .nr_extents = extents_in,
2331 .extent_order = order_in,
2332 .extent_start = pfns_in,
2333 .domid = DOMID_SELF
2335 .out = {
2336 .nr_extents = extents_out,
2337 .extent_order = order_out,
2338 .extent_start = mfns_out,
2339 .address_bits = address_bits,
2340 .domid = DOMID_SELF
2344 BUG_ON(extents_in << order_in != extents_out << order_out);
2346 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2347 success = (exchange.nr_exchanged == extents_in);
2349 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2350 BUG_ON(success && (rc != 0));
2352 return success;
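/*
 * Worked instance of the extent identity asserted above (editor addition):
 * the caller below exchanges 1UL << order input extents of order 0 for one
 * output extent of the requested order, e.g. for order == 9:
 *
 *	extents_in  << order_in  == 512 << 0 == 512 frames
 *	extents_out << order_out ==   1 << 9 == 512 frames
 *
 * so the same number of frames flows in and out, which is exactly what the
 * BUG_ON() checks.
 */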
2355 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2356 unsigned int address_bits,
2357 dma_addr_t *dma_handle)
2359 unsigned long *in_frames = discontig_frames, out_frame;
2360 unsigned long flags;
2361 int success;
2362 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
2364 /*
2365 * Currently an auto-translated guest will not perform I/O, nor will
2366 * it require PAE page directories below 4GB. Therefore any calls to
2367 * this function are redundant and can be ignored.
2368 */
2370 if (xen_feature(XENFEAT_auto_translated_physmap))
2371 return 0;
2373 if (unlikely(order > MAX_CONTIG_ORDER))
2374 return -ENOMEM;
2376 memset((void *) vstart, 0, PAGE_SIZE << order);
2378 spin_lock_irqsave(&xen_reservation_lock, flags);
2380 /* 1. Zap current PTEs, remembering MFNs. */
2381 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2383 /* 2. Get a new contiguous memory extent. */
2384 out_frame = virt_to_pfn(vstart);
2385 success = xen_exchange_memory(1UL << order, 0, in_frames,
2386 1, order, &out_frame,
2387 address_bits);
2389 /* 3. Map the new extent in place of old pages. */
2390 if (success)
2391 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2392 else
2393 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2395 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2397 *dma_handle = virt_to_machine(vstart).maddr;
2398 return success ? 0 : -ENOMEM;
2400 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
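/*
 * Illustrative caller (editor addition; the buffer and address width are
 * hypothetical, but this mirrors how the Xen swiotlb code uses the
 * interface): make a freshly allocated region machine-contiguous and
 * addressable with 32 bits:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, order);
 *	dma_addr_t dma_handle;
 *	int rc = xen_create_contiguous_region(virt_to_phys((void *)buf),
 *					      order, 32, &dma_handle);
 *	if (rc)
 *		... fall back: the region keeps its original, scattered MFNs
 */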
2402 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2404 unsigned long *out_frames = discontig_frames, in_frame;
2405 unsigned long flags;
2406 int success;
2407 unsigned long vstart;
2409 if (xen_feature(XENFEAT_auto_translated_physmap))
2410 return;
2412 if (unlikely(order > MAX_CONTIG_ORDER))
2413 return;
2415 vstart = (unsigned long)phys_to_virt(pstart);
2416 memset((void *) vstart, 0, PAGE_SIZE << order);
2418 spin_lock_irqsave(&xen_reservation_lock, flags);
2420 /* 1. Find start MFN of contiguous extent. */
2421 in_frame = virt_to_mfn(vstart);
2423 /* 2. Zap current PTEs. */
2424 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2426 /* 3. Do the exchange for non-contiguous MFNs. */
2427 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2428 0, out_frames, 0);
2430 /* 4. Map new pages in place of old pages. */
2431 if (success)
2432 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2433 else
2434 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2436 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2438 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2440 #ifdef CONFIG_XEN_PVHVM
2441 #ifdef CONFIG_PROC_VMCORE
2442 /*
2443 * This function is used in two contexts:
2444 * - the kdump kernel has to check whether a pfn of the crashed kernel
2445 * was a ballooned page. vmcore uses this function to decide
2446 * whether to access a pfn of the crashed kernel.
2447 * - the kexec kernel has to check whether a pfn was ballooned by the
2448 * previous kernel. If the pfn is ballooned, handle it properly.
2449 * Returns 0 if the pfn is not backed by a RAM page; the caller may
2450 * then handle the pfn specially.
2451 */
2452 static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2454 struct xen_hvm_get_mem_type a = {
2455 .domid = DOMID_SELF,
2456 .pfn = pfn,
2458 int ram;
2460 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2461 return -ENXIO;
2463 switch (a.mem_type) {
2464 case HVMMEM_mmio_dm:
2465 ram = 0;
2466 break;
2467 case HVMMEM_ram_rw:
2468 case HVMMEM_ram_ro:
2469 default:
2470 ram = 1;
2471 break;
2474 return ram;
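/*
 * Illustrative consumer (editor addition; the call sites live in
 * fs/proc/vmcore.c): once this hook is registered via
 * register_oldmem_pfn_is_ram() below, the vmcore read path does roughly
 *
 *	if (pfn_is_ram(pfn) == 0)
 *		memset(buf, 0, nr_bytes);	// ballooned: nothing to read
 *	else
 *		copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
 *
 * so returning 0 here keeps the kdump kernel from touching pages the
 * crashed kernel had handed back to the hypervisor.
 */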
2476 #endif
2478 static void xen_hvm_exit_mmap(struct mm_struct *mm)
2480 struct xen_hvm_pagetable_dying a;
2481 int rc;
2483 a.domid = DOMID_SELF;
2484 a.gpa = __pa(mm->pgd);
2485 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2486 WARN_ON_ONCE(rc < 0);
2489 static int is_pagetable_dying_supported(void)
2491 struct xen_hvm_pagetable_dying a;
2492 int rc = 0;
2494 a.domid = DOMID_SELF;
2495 a.gpa = 0x00;
2496 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2497 if (rc < 0) {
2498 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2499 return 0;
2501 return 1;
2504 void __init xen_hvm_init_mmu_ops(void)
2506 if (is_pagetable_dying_supported())
2507 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2508 #ifdef CONFIG_PROC_VMCORE
2509 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2510 #endif
2512 #endif
2514 #define REMAP_BATCH_SIZE 16
2516 struct remap_data {
2517 unsigned long mfn;
2518 pgprot_t prot;
2519 struct mmu_update *mmu_update;
2522 static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2523 unsigned long addr, void *data)
2525 struct remap_data *rmd = data;
2526 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2528 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2529 rmd->mmu_update->val = pte_val_ma(pte);
2530 rmd->mmu_update++;
2532 return 0;
2535 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2536 unsigned long addr,
2537 xen_pfn_t mfn, int nr,
2538 pgprot_t prot, unsigned domid,
2539 struct page **pages)
2542 struct remap_data rmd;
2543 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2544 int batch;
2545 unsigned long range;
2546 int err = 0;
2548 if (xen_feature(XENFEAT_auto_translated_physmap))
2549 return -EINVAL;
2551 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2553 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2555 rmd.mfn = mfn;
2556 rmd.prot = prot;
2558 while (nr) {
2559 batch = min(REMAP_BATCH_SIZE, nr);
2560 range = (unsigned long)batch << PAGE_SHIFT;
2562 rmd.mmu_update = mmu_update;
2563 err = apply_to_page_range(vma->vm_mm, addr, range,
2564 remap_area_mfn_pte_fn, &rmd);
2565 if (err)
2566 goto out;
2568 err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
2569 if (err < 0)
2570 goto out;
2572 nr -= batch;
2573 addr += range;
2576 err = 0;
2577 out:
2579 xen_flush_tlb_all();
2581 return err;
2583 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
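/*
 * Illustrative caller (editor addition; this is the privcmd-style pattern
 * for mapping a foreign domain's frames into a userspace VMA, with mfn, nr
 * and domid supplied by the toolstack):
 *
 *	vma->vm_flags |= VM_IO | VM_PFNMAP;	// required by the BUG_ON above
 *	rc = xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr,
 *					vma->vm_page_prot, domid, NULL);
 *
 * Each of the nr frames is mapped with the given protection plus
 * _PAGE_IOMAP, in batches of up to REMAP_BATCH_SIZE updates per
 * HYPERVISOR_mmu_update() call.
 */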
2585 /* Returns 0 on success. */
2586 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2587 int numpgs, struct page **pages)
2589 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2590 return 0;
2592 return -EINVAL;
2594 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);