arch/x86/xen/mmu_pv.c
1 // SPDX-License-Identifier: GPL-2.0
3 /*
4 * Xen mmu operations
6 * This file contains the various mmu fetch and update operations.
7 * The most important job they must perform is the mapping between the
8 * domain's pfn and the overall machine mfns.
10 * Xen allows guests to directly update the pagetable, in a controlled
11 * fashion. In other words, the guest modifies the same pagetable
12 * that the CPU actually uses, which eliminates the overhead of having
13 * a separate shadow pagetable.
15 * In order to allow this, it falls on the guest domain to map its
16 * notion of a "physical" pfn - which is just a domain-local linear
17 * address - into a real "machine address" which the CPU's MMU can
18 * use.
20 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
21 * inserted directly into the pagetable. When creating a new
22 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
23 * when reading the content back with __(pgd|pmd|pte)_val, it converts
24 * the mfn back into a pfn.
26 * The other constraint is that all pages which make up a pagetable
27 * must be mapped read-only in the guest. This prevents uncontrolled
28 * guest updates to the pagetable. Xen strictly enforces this, and
29 * will disallow any pagetable update which will end up mapping a
30 * pagetable page RW, and will disallow using any writable page as a
31 * pagetable.
33 * Naively, when loading %cr3 with the base of a new pagetable, Xen
34 * would need to validate the whole pagetable before going on.
35 * Naturally, this is quite slow. The solution is to "pin" a
36 * pagetable, which enforces all the constraints on the pagetable even
37  * when it is not actively in use.  This means that Xen can be assured
38 * that it is still valid when you do load it into %cr3, and doesn't
39 * need to revalidate it.
41 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
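/*
 * A minimal sketch of the conversion described above (editor's illustration,
 * not part of the build):
 *
 *	pte_t pte = mfn_pte(pfn_to_mfn(pfn), PAGE_KERNEL);   - pfn -> mfn on write
 *	unsigned long back = mfn_to_pfn(pte_mfn(pte));        - mfn -> pfn on read
 *
 * xen_make_pte() / xen_pte_val() below do exactly this via pte_pfn_to_mfn()
 * and pte_mfn_to_pfn().
 */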
43 #include <linux/sched/mm.h>
44 #include <linux/highmem.h>
45 #include <linux/debugfs.h>
46 #include <linux/bug.h>
47 #include <linux/vmalloc.h>
48 #include <linux/export.h>
49 #include <linux/init.h>
50 #include <linux/gfp.h>
51 #include <linux/memblock.h>
52 #include <linux/seq_file.h>
53 #include <linux/crash_dump.h>
54 #include <linux/pgtable.h>
55 #ifdef CONFIG_KEXEC_CORE
56 #include <linux/kexec.h>
57 #endif
59 #include <trace/events/xen.h>
61 #include <asm/tlbflush.h>
62 #include <asm/fixmap.h>
63 #include <asm/mmu_context.h>
64 #include <asm/setup.h>
65 #include <asm/paravirt.h>
66 #include <asm/e820/api.h>
67 #include <asm/linkage.h>
68 #include <asm/page.h>
69 #include <asm/init.h>
70 #include <asm/memtype.h>
71 #include <asm/smp.h>
72 #include <asm/tlb.h>
74 #include <asm/xen/hypercall.h>
75 #include <asm/xen/hypervisor.h>
77 #include <xen/xen.h>
78 #include <xen/page.h>
79 #include <xen/interface/xen.h>
80 #include <xen/interface/hvm/hvm_op.h>
81 #include <xen/interface/version.h>
82 #include <xen/interface/memory.h>
83 #include <xen/hvc-console.h>
85 #include "multicalls.h"
86 #include "mmu.h"
87 #include "debugfs.h"
89 /* l3 pud for userspace vsyscall mapping */
90 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
93 * Protects atomic reservation decrease/increase against concurrent increases.
94 * Also protects non-atomic updates of current_pages and balloon lists.
96 static DEFINE_SPINLOCK(xen_reservation_lock);
99 * Note about cr3 (pagetable base) values:
101 * xen_cr3 contains the current logical cr3 value; it contains the
102 * last set cr3. This may not be the current effective cr3, because
103 * its update may be being lazily deferred. However, a vcpu looking
104  * at its own cr3 can use this value knowing that everything will
105 * be self-consistent.
107 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
108 * hypercall to set the vcpu cr3 is complete (so it may be a little
109 * out of date, but it will never be set early). If one vcpu is
110 * looking at another vcpu's cr3 value, it should use this variable.
112 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
113 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
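/*
 * Example of the distinction (editor's note): a remote reader such as
 * xen_drop_mm_ref() below checks per_cpu(xen_current_cr3, cpu) rather than
 * xen_cr3, because only the former is guaranteed to reflect a completed
 * hypercall.
 */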
115 static phys_addr_t xen_pt_base, xen_pt_size __initdata;
117 static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready);
120 * Just beyond the highest usermode address. STACK_TOP_MAX has a
121 * redzone above it, so round it up to a PGD boundary.
123 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
125 void make_lowmem_page_readonly(void *vaddr)
127 pte_t *pte, ptev;
128 unsigned long address = (unsigned long)vaddr;
129 unsigned int level;
131 pte = lookup_address(address, &level);
132 if (pte == NULL)
133 return; /* vaddr missing */
135 ptev = pte_wrprotect(*pte);
137 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
138 BUG();
141 void make_lowmem_page_readwrite(void *vaddr)
143 pte_t *pte, ptev;
144 unsigned long address = (unsigned long)vaddr;
145 unsigned int level;
147 pte = lookup_address(address, &level);
148 if (pte == NULL)
149 return; /* vaddr missing */
151 ptev = pte_mkwrite(*pte);
153 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
154 BUG();
159 * During early boot all page table pages are pinned, but we do not have struct
160 * pages, so return true until struct pages are ready.
162 static bool xen_page_pinned(void *ptr)
164 if (static_branch_likely(&xen_struct_pages_ready)) {
165 struct page *page = virt_to_page(ptr);
167 return PagePinned(page);
169 return true;
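/*
 * Append an mmu_update to the current multicall batch.  If the previous
 * entry in the batch is already an mmu_update hypercall, just bump its
 * count and tack the new update onto its argument area instead of
 * emitting a second hypercall.
 */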
172 static void xen_extend_mmu_update(const struct mmu_update *update)
174 struct multicall_space mcs;
175 struct mmu_update *u;
177 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
179 if (mcs.mc != NULL) {
180 mcs.mc->args[1]++;
181 } else {
182 mcs = __xen_mc_entry(sizeof(*u));
183 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
186 u = mcs.args;
187 *u = *update;
190 static void xen_extend_mmuext_op(const struct mmuext_op *op)
192 struct multicall_space mcs;
193 struct mmuext_op *u;
195 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
197 if (mcs.mc != NULL) {
198 mcs.mc->args[1]++;
199 } else {
200 mcs = __xen_mc_entry(sizeof(*u));
201 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
204 u = mcs.args;
205 *u = *op;
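/*
 * Typical usage of the helpers above (editor's sketch, preemption and
 * tracing omitted) - this is the pattern xen_set_pmd_hyper() and
 * xen_set_pud_hyper() below follow:
 *
 *	struct mmu_update u;
 *
 *	xen_mc_batch();
 *	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
 *	u.val = pmd_val_ma(val);
 *	xen_extend_mmu_update(&u);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */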
208 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
210 struct mmu_update u;
212 preempt_disable();
214 xen_mc_batch();
216 /* ptr may be ioremapped for 64-bit pagetable setup */
217 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
218 u.val = pmd_val_ma(val);
219 xen_extend_mmu_update(&u);
221 xen_mc_issue(PARAVIRT_LAZY_MMU);
223 preempt_enable();
226 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
228 trace_xen_mmu_set_pmd(ptr, val);
230 /* If page is not pinned, we can just update the entry
231 directly */
232 if (!xen_page_pinned(ptr)) {
233 *ptr = val;
234 return;
237 xen_set_pmd_hyper(ptr, val);
241 * Associate a virtual page frame with a given physical page frame
242 * and protection flags for that frame.
244 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
246 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
249 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
251 struct mmu_update u;
253 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
254 return false;
256 xen_mc_batch();
258 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
259 u.val = pte_val_ma(pteval);
260 xen_extend_mmu_update(&u);
262 xen_mc_issue(PARAVIRT_LAZY_MMU);
264 return true;
267 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
269 if (!xen_batched_set_pte(ptep, pteval)) {
271 * Could call native_set_pte() here and trap and
272 * emulate the PTE write, but a hypercall is much cheaper.
274 struct mmu_update u;
276 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
277 u.val = pte_val_ma(pteval);
278 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
282 static void xen_set_pte(pte_t *ptep, pte_t pteval)
284 trace_xen_mmu_set_pte(ptep, pteval);
285 __xen_set_pte(ptep, pteval);
288 pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
289 unsigned long addr, pte_t *ptep)
291 /* Just return the pte as-is. We preserve the bits on commit */
292 trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
293 return *ptep;
296 void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
297 pte_t *ptep, pte_t pte)
299 struct mmu_update u;
301 trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
302 xen_mc_batch();
304 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
305 u.val = pte_val_ma(pte);
306 xen_extend_mmu_update(&u);
308 xen_mc_issue(PARAVIRT_LAZY_MMU);
311 /* Assume pteval_t is equivalent to all the other *val_t types. */
312 static pteval_t pte_mfn_to_pfn(pteval_t val)
314 if (val & _PAGE_PRESENT) {
315 unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
316 unsigned long pfn = mfn_to_pfn(mfn);
318 pteval_t flags = val & PTE_FLAGS_MASK;
319 if (unlikely(pfn == ~0))
320 val = flags & ~_PAGE_PRESENT;
321 else
322 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
325 return val;
328 static pteval_t pte_pfn_to_mfn(pteval_t val)
330 if (val & _PAGE_PRESENT) {
331 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
332 pteval_t flags = val & PTE_FLAGS_MASK;
333 unsigned long mfn;
335 mfn = __pfn_to_mfn(pfn);
338 * If there's no mfn for the pfn, then just create an
339 * empty non-present pte. Unfortunately this loses
340 * information about the original pfn, so
341 * pte_mfn_to_pfn is asymmetric.
343 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
344 mfn = 0;
345 flags = 0;
346 } else
347 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
348 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
351 return val;
354 __visible pteval_t xen_pte_val(pte_t pte)
356 pteval_t pteval = pte.pte;
358 return pte_mfn_to_pfn(pteval);
360 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
362 __visible pgdval_t xen_pgd_val(pgd_t pgd)
364 return pte_mfn_to_pfn(pgd.pgd);
366 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
368 __visible pte_t xen_make_pte(pteval_t pte)
370 pte = pte_pfn_to_mfn(pte);
372 return native_make_pte(pte);
374 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
376 __visible pgd_t xen_make_pgd(pgdval_t pgd)
378 pgd = pte_pfn_to_mfn(pgd);
379 return native_make_pgd(pgd);
381 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
383 __visible pmdval_t xen_pmd_val(pmd_t pmd)
385 return pte_mfn_to_pfn(pmd.pmd);
387 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
389 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
391 struct mmu_update u;
393 preempt_disable();
395 xen_mc_batch();
397 /* ptr may be ioremapped for 64-bit pagetable setup */
398 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
399 u.val = pud_val_ma(val);
400 xen_extend_mmu_update(&u);
402 xen_mc_issue(PARAVIRT_LAZY_MMU);
404 preempt_enable();
407 static void xen_set_pud(pud_t *ptr, pud_t val)
409 trace_xen_mmu_set_pud(ptr, val);
411 /* If page is not pinned, we can just update the entry
412 directly */
413 if (!xen_page_pinned(ptr)) {
414 *ptr = val;
415 return;
418 xen_set_pud_hyper(ptr, val);
421 __visible pmd_t xen_make_pmd(pmdval_t pmd)
423 pmd = pte_pfn_to_mfn(pmd);
424 return native_make_pmd(pmd);
426 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
428 __visible pudval_t xen_pud_val(pud_t pud)
430 return pte_mfn_to_pfn(pud.pud);
432 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
434 __visible pud_t xen_make_pud(pudval_t pud)
436 pud = pte_pfn_to_mfn(pud);
438 return native_make_pud(pud);
440 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
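/*
 * Under 64-bit Xen PV the kernel and user space both run in ring 3, so each
 * process has a second, user-mode pgd.  xen_pgd_alloc() stashes that page in
 * page->private of the kernel pgd's struct page; return a pointer to the
 * corresponding slot in it, or NULL if there is none.
 */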
442 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
444 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
445 unsigned offset = pgd - pgd_page;
446 pgd_t *user_ptr = NULL;
448 if (offset < pgd_index(USER_LIMIT)) {
449 struct page *page = virt_to_page(pgd_page);
450 user_ptr = (pgd_t *)page->private;
451 if (user_ptr)
452 user_ptr += offset;
455 return user_ptr;
458 static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
460 struct mmu_update u;
462 u.ptr = virt_to_machine(ptr).maddr;
463 u.val = p4d_val_ma(val);
464 xen_extend_mmu_update(&u);
468  * Raw hypercall-based set_p4d, intended for use early in boot before
469 * there's a page structure. This implies:
470 * 1. The only existing pagetable is the kernel's
471 * 2. It is always pinned
472 * 3. It has no user pagetable attached to it
474 static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
476 preempt_disable();
478 xen_mc_batch();
480 __xen_set_p4d_hyper(ptr, val);
482 xen_mc_issue(PARAVIRT_LAZY_MMU);
484 preempt_enable();
487 static void xen_set_p4d(p4d_t *ptr, p4d_t val)
489 pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
490 pgd_t pgd_val;
492 trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
494 /* If page is not pinned, we can just update the entry
495 directly */
496 if (!xen_page_pinned(ptr)) {
497 *ptr = val;
498 if (user_ptr) {
499 WARN_ON(xen_page_pinned(user_ptr));
500 pgd_val.pgd = p4d_val_ma(val);
501 *user_ptr = pgd_val;
503 return;
506 /* If it's pinned, then we can at least batch the kernel and
507 user updates together. */
508 xen_mc_batch();
510 __xen_set_p4d_hyper(ptr, val);
511 if (user_ptr)
512 __xen_set_p4d_hyper((p4d_t *)user_ptr, val);
514 xen_mc_issue(PARAVIRT_LAZY_MMU);
517 #if CONFIG_PGTABLE_LEVELS >= 5
518 __visible p4dval_t xen_p4d_val(p4d_t p4d)
520 return pte_mfn_to_pfn(p4d.p4d);
522 PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);
524 __visible p4d_t xen_make_p4d(p4dval_t p4d)
526 p4d = pte_pfn_to_mfn(p4d);
528 return native_make_p4d(p4d);
530 PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
531 #endif /* CONFIG_PGTABLE_LEVELS >= 5 */
533 static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
534 void (*func)(struct mm_struct *mm, struct page *,
535 enum pt_level),
536 bool last, unsigned long limit)
538 int i, nr;
540 nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
541 for (i = 0; i < nr; i++) {
542 if (!pmd_none(pmd[i]))
543 (*func)(mm, pmd_page(pmd[i]), PT_PTE);
547 static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
548 void (*func)(struct mm_struct *mm, struct page *,
549 enum pt_level),
550 bool last, unsigned long limit)
552 int i, nr;
554 nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
555 for (i = 0; i < nr; i++) {
556 pmd_t *pmd;
558 if (pud_none(pud[i]))
559 continue;
561 pmd = pmd_offset(&pud[i], 0);
562 if (PTRS_PER_PMD > 1)
563 (*func)(mm, virt_to_page(pmd), PT_PMD);
564 xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
568 static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
569 void (*func)(struct mm_struct *mm, struct page *,
570 enum pt_level),
571 bool last, unsigned long limit)
573 pud_t *pud;
576 if (p4d_none(*p4d))
577 return;
579 pud = pud_offset(p4d, 0);
580 if (PTRS_PER_PUD > 1)
581 (*func)(mm, virt_to_page(pud), PT_PUD);
582 xen_pud_walk(mm, pud, func, last, limit);
586 * (Yet another) pagetable walker. This one is intended for pinning a
587 * pagetable. This means that it walks a pagetable and calls the
588 * callback function on each page it finds making up the page table,
589 * at every level. It walks the entire pagetable, but it only bothers
590 * pinning pte pages which are below limit. In the normal case this
591 * will be STACK_TOP_MAX, but at boot we need to pin up to
592 * FIXADDR_TOP.
594 * We must skip the Xen hole in the middle of the address space, just after
595 * the big x86-64 virtual hole.
597 static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
598 void (*func)(struct mm_struct *mm, struct page *,
599 enum pt_level),
600 unsigned long limit)
602 int i, nr;
603 unsigned hole_low = 0, hole_high = 0;
605 /* The limit is the last byte to be touched */
606 limit--;
607 BUG_ON(limit >= FIXADDR_TOP);
610 * 64-bit has a great big hole in the middle of the address
611 * space, which contains the Xen mappings.
613 hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
614 hole_high = pgd_index(GUARD_HOLE_END_ADDR);
616 nr = pgd_index(limit) + 1;
617 for (i = 0; i < nr; i++) {
618 p4d_t *p4d;
620 if (i >= hole_low && i < hole_high)
621 continue;
623 if (pgd_none(pgd[i]))
624 continue;
626 p4d = p4d_offset(&pgd[i], 0);
627 xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
630 /* Do the top level last, so that the callbacks can use it as
631 a cue to do final things like tlb flushes. */
632 (*func)(mm, virt_to_page(pgd), PT_PGD);
635 static void xen_pgd_walk(struct mm_struct *mm,
636 void (*func)(struct mm_struct *mm, struct page *,
637 enum pt_level),
638 unsigned long limit)
640 __xen_pgd_walk(mm, mm->pgd, func, limit);
643 /* If we're using split pte locks, then take the page's lock and
644 return a pointer to it. Otherwise return NULL. */
645 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
647 spinlock_t *ptl = NULL;
649 #if USE_SPLIT_PTE_PTLOCKS
650 ptl = ptlock_ptr(page);
651 spin_lock_nest_lock(ptl, &mm->page_table_lock);
652 #endif
654 return ptl;
657 static void xen_pte_unlock(void *v)
659 spinlock_t *ptl = v;
660 spin_unlock(ptl);
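/* Queue a single pin/unpin mmuext op for the given pfn in the current batch. */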
663 static void xen_do_pin(unsigned level, unsigned long pfn)
665 struct mmuext_op op;
667 op.cmd = level;
668 op.arg1.mfn = pfn_to_mfn(pfn);
670 xen_extend_mmuext_op(&op);
673 static void xen_pin_page(struct mm_struct *mm, struct page *page,
674 enum pt_level level)
676 unsigned pgfl = TestSetPagePinned(page);
678 if (!pgfl) {
679 void *pt = lowmem_page_address(page);
680 unsigned long pfn = page_to_pfn(page);
681 struct multicall_space mcs = __xen_mc_entry(0);
682 spinlock_t *ptl;
685 * We need to hold the pagetable lock between the time
686 * we make the pagetable RO and when we actually pin
687 * it. If we don't, then other users may come in and
688 * attempt to update the pagetable by writing it,
689 * which will fail because the memory is RO but not
690 * pinned, so Xen won't do the trap'n'emulate.
692 * If we're using split pte locks, we can't hold the
693 * entire pagetable's worth of locks during the
694 * traverse, because we may wrap the preempt count (8
695 * bits). The solution is to mark RO and pin each PTE
696 * page while holding the lock. This means the number
697 * of locks we end up holding is never more than a
698 * batch size (~32 entries, at present).
700 * If we're not using split pte locks, we needn't pin
701 * the PTE pages independently, because we're
702 * protected by the overall pagetable lock.
704 ptl = NULL;
705 if (level == PT_PTE)
706 ptl = xen_pte_lock(page, mm);
708 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
709 pfn_pte(pfn, PAGE_KERNEL_RO),
710 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
712 if (ptl) {
713 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
715 /* Queue a deferred unlock for when this batch
716 is completed. */
717 xen_mc_callback(xen_pte_unlock, ptl);
722 /* This is called just after a mm has been created, but it has not
723 been used yet. We need to make sure that its pagetable is all
724 read-only, and can be pinned. */
725 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
727 pgd_t *user_pgd = xen_get_user_pgd(pgd);
729 trace_xen_mmu_pgd_pin(mm, pgd);
731 xen_mc_batch();
733 __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
735 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
737 if (user_pgd) {
738 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
739 xen_do_pin(MMUEXT_PIN_L4_TABLE,
740 PFN_DOWN(__pa(user_pgd)));
743 xen_mc_issue(0);
746 static void xen_pgd_pin(struct mm_struct *mm)
748 __xen_pgd_pin(mm, mm->pgd);
752 * On save, we need to pin all pagetables to make sure they get their
753 * mfns turned into pfns. Search the list for any unpinned pgds and pin
754 * them (unpinned pgds are not currently in use, probably because the
755 * process is under construction or destruction).
757 * Expected to be called in stop_machine() ("equivalent to taking
758 * every spinlock in the system"), so the locking doesn't really
759 * matter all that much.
761 void xen_mm_pin_all(void)
763 struct page *page;
765 spin_lock(&pgd_lock);
767 list_for_each_entry(page, &pgd_list, lru) {
768 if (!PagePinned(page)) {
769 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
770 SetPageSavePinned(page);
774 spin_unlock(&pgd_lock);
777 static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
778 enum pt_level level)
780 SetPagePinned(page);
784  * The init_mm pagetable is really pinned as soon as it's created, but
785 * that's before we have page structures to store the bits. So do all
786 * the book-keeping now once struct pages for allocated pages are
787 * initialized. This happens only after memblock_free_all() is called.
789 static void __init xen_after_bootmem(void)
791 static_branch_enable(&xen_struct_pages_ready);
792 SetPagePinned(virt_to_page(level3_user_vsyscall));
793 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
796 static void xen_unpin_page(struct mm_struct *mm, struct page *page,
797 enum pt_level level)
799 unsigned pgfl = TestClearPagePinned(page);
801 if (pgfl) {
802 void *pt = lowmem_page_address(page);
803 unsigned long pfn = page_to_pfn(page);
804 spinlock_t *ptl = NULL;
805 struct multicall_space mcs;
808 * Do the converse to pin_page. If we're using split
809  * pte locks, we must hold the lock while
810 * the pte page is unpinned but still RO to prevent
811 * concurrent updates from seeing it in this
812 * partially-pinned state.
814 if (level == PT_PTE) {
815 ptl = xen_pte_lock(page, mm);
817 if (ptl)
818 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
821 mcs = __xen_mc_entry(0);
823 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
824 pfn_pte(pfn, PAGE_KERNEL),
825 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
827 if (ptl) {
828 /* unlock when batch completed */
829 xen_mc_callback(xen_pte_unlock, ptl);
834 /* Release a pagetable's pages back as normal RW */
835 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
837 pgd_t *user_pgd = xen_get_user_pgd(pgd);
839 trace_xen_mmu_pgd_unpin(mm, pgd);
841 xen_mc_batch();
843 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
845 if (user_pgd) {
846 xen_do_pin(MMUEXT_UNPIN_TABLE,
847 PFN_DOWN(__pa(user_pgd)));
848 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
851 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
853 xen_mc_issue(0);
856 static void xen_pgd_unpin(struct mm_struct *mm)
858 __xen_pgd_unpin(mm, mm->pgd);
862 * On resume, undo any pinning done at save, so that the rest of the
863 * kernel doesn't see any unexpected pinned pagetables.
865 void xen_mm_unpin_all(void)
867 struct page *page;
869 spin_lock(&pgd_lock);
871 list_for_each_entry(page, &pgd_list, lru) {
872 if (PageSavePinned(page)) {
873 BUG_ON(!PagePinned(page));
874 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
875 ClearPageSavePinned(page);
879 spin_unlock(&pgd_lock);
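/*
 * Pin the pagetable whenever an mm is activated or duplicated, under the
 * mm's page_table_lock.
 */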
882 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
884 spin_lock(&next->page_table_lock);
885 xen_pgd_pin(next);
886 spin_unlock(&next->page_table_lock);
889 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
891 spin_lock(&mm->page_table_lock);
892 xen_pgd_pin(mm);
893 spin_unlock(&mm->page_table_lock);
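/*
 * Drop this CPU's reference to an mm: switch to init_mm if the mm is the
 * one currently loaded, and flush any batched hypercalls that might still
 * refer to its cr3.
 */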
896 static void drop_mm_ref_this_cpu(void *info)
898 struct mm_struct *mm = info;
900 if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
901 leave_mm(smp_processor_id());
904 * If this cpu still has a stale cr3 reference, then make sure
905 * it has been flushed.
907 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
908 xen_mc_flush();
911 #ifdef CONFIG_SMP
913  * Another cpu may still have its %cr3 pointing at the pagetable, so
914 * we need to repoint it somewhere else before we can unpin it.
916 static void xen_drop_mm_ref(struct mm_struct *mm)
918 cpumask_var_t mask;
919 unsigned cpu;
921 drop_mm_ref_this_cpu(mm);
923 /* Get the "official" set of cpus referring to our pagetable. */
924 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
925 for_each_online_cpu(cpu) {
926 if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
927 continue;
928 smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
930 return;
934 * It's possible that a vcpu may have a stale reference to our
935  * cr3, because it's in lazy mode and hasn't yet flushed
936  * its set of pending hypercalls.  In this case, we can
937 * look at its actual current cr3 value, and force it to flush
938 * if needed.
940 cpumask_clear(mask);
941 for_each_online_cpu(cpu) {
942 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
943 cpumask_set_cpu(cpu, mask);
946 smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
947 free_cpumask_var(mask);
949 #else
950 static void xen_drop_mm_ref(struct mm_struct *mm)
952 drop_mm_ref_this_cpu(mm);
954 #endif
957  * While a process runs, Xen pins its pagetables, which means that the
958  * hypervisor forces them to be read-only and controls all updates
959  * to them.  This means that all pagetable updates have to go via the
960  * hypervisor, which is moderately expensive.
962  * Since we're pulling the pagetable down, we switch to init_mm,
963  * unpin the old process's pagetable and mark it all read-write, which
964  * allows further operations on it to be simple memory accesses.
966  * The only subtle point is that another CPU may still be using the
967  * pagetable because of lazy tlb flushing.  This means we need to
968  * switch all CPUs off this pagetable before we can unpin it.
970 static void xen_exit_mmap(struct mm_struct *mm)
972 get_cpu(); /* make sure we don't move around */
973 xen_drop_mm_ref(mm);
974 put_cpu();
976 spin_lock(&mm->page_table_lock);
978 /* pgd may not be pinned in the error exit path of execve */
979 if (xen_page_pinned(mm->pgd))
980 xen_pgd_unpin(mm);
982 spin_unlock(&mm->page_table_lock);
985 static void xen_post_allocator_init(void);
987 static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
989 struct mmuext_op op;
991 op.cmd = cmd;
992 op.arg1.mfn = pfn_to_mfn(pfn);
993 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
994 BUG();
997 static void __init xen_cleanhighmap(unsigned long vaddr,
998 unsigned long vaddr_end)
1000 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1001 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1003 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1004 * We include the PMD passed in on _both_ boundaries. */
1005 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1006 pmd++, vaddr += PMD_SIZE) {
1007 if (pmd_none(*pmd))
1008 continue;
1009 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1010 set_pmd(pmd, __pmd(0));
1012     /* In case we did something silly, we want to crash in this function
1013      * rather than somewhere later where it would be confusing. */
1014 xen_mc_flush();
1018 * Make a page range writeable and free it.
1020 static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1022 void *vaddr = __va(paddr);
1023 void *vaddr_end = vaddr + size;
1025 for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1026 make_lowmem_page_readwrite(vaddr);
1028 memblock_free(paddr, size);
1031 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
1033 unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1035 if (unpin)
1036 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
1037 ClearPagePinned(virt_to_page(__va(pa)));
1038 xen_free_ro_pages(pa, PAGE_SIZE);
1041 static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
1043 unsigned long pa;
1044 pte_t *pte_tbl;
1045 int i;
1047 if (pmd_large(*pmd)) {
1048 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1049 xen_free_ro_pages(pa, PMD_SIZE);
1050 return;
1053 pte_tbl = pte_offset_kernel(pmd, 0);
1054 for (i = 0; i < PTRS_PER_PTE; i++) {
1055 if (pte_none(pte_tbl[i]))
1056 continue;
1057 pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
1058 xen_free_ro_pages(pa, PAGE_SIZE);
1060 set_pmd(pmd, __pmd(0));
1061 xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
1064 static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
1066 unsigned long pa;
1067 pmd_t *pmd_tbl;
1068 int i;
1070 if (pud_large(*pud)) {
1071 pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1072 xen_free_ro_pages(pa, PUD_SIZE);
1073 return;
1076 pmd_tbl = pmd_offset(pud, 0);
1077 for (i = 0; i < PTRS_PER_PMD; i++) {
1078 if (pmd_none(pmd_tbl[i]))
1079 continue;
1080 xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
1082 set_pud(pud, __pud(0));
1083 xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
1086 static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
1088 unsigned long pa;
1089 pud_t *pud_tbl;
1090 int i;
1092 if (p4d_large(*p4d)) {
1093 pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
1094 xen_free_ro_pages(pa, P4D_SIZE);
1095 return;
1098 pud_tbl = pud_offset(p4d, 0);
1099 for (i = 0; i < PTRS_PER_PUD; i++) {
1100 if (pud_none(pud_tbl[i]))
1101 continue;
1102 xen_cleanmfnmap_pud(pud_tbl + i, unpin);
1104 set_p4d(p4d, __p4d(0));
1105 xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
1109 * Since it is well isolated we can (and since it is perhaps large we should)
1110 * also free the page tables mapping the initial P->M table.
1112 static void __init xen_cleanmfnmap(unsigned long vaddr)
1114 pgd_t *pgd;
1115 p4d_t *p4d;
1116 bool unpin;
1118 unpin = (vaddr == 2 * PGDIR_SIZE);
1119 vaddr &= PMD_MASK;
1120 pgd = pgd_offset_k(vaddr);
1121 p4d = p4d_offset(pgd, 0);
1122 if (!p4d_none(*p4d))
1123 xen_cleanmfnmap_p4d(p4d, unpin);
1126 static void __init xen_pagetable_p2m_free(void)
1128 unsigned long size;
1129 unsigned long addr;
1131 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1133 /* No memory or already called. */
1134 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
1135 return;
1137 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1138 memset((void *)xen_start_info->mfn_list, 0xff, size);
1140 addr = xen_start_info->mfn_list;
1142 * We could be in __ka space.
1143  * We round up to the PMD, which means that if anybody at this stage is
1144  * using the __ka address of xen_start_info or
1145  * xen_start_info->shared_info they are going to crash.  Fortunately
1146 * we have already revectored in xen_setup_kernel_pagetable.
1148 size = roundup(size, PMD_SIZE);
1150 if (addr >= __START_KERNEL_map) {
1151 xen_cleanhighmap(addr, addr + size);
1152 size = PAGE_ALIGN(xen_start_info->nr_pages *
1153 sizeof(unsigned long));
1154 memblock_free(__pa(addr), size);
1155 } else {
1156 xen_cleanmfnmap(addr);
1160 static void __init xen_pagetable_cleanhighmap(void)
1162 unsigned long size;
1163 unsigned long addr;
1165 /* At this stage, cleanup_highmap has already cleaned __ka space
1166 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1167 * the ramdisk). We continue on, erasing PMD entries that point to page
1168 * tables - do note that they are accessible at this stage via __va.
1169 * As Xen is aligning the memory end to a 4MB boundary, for good
1170 * measure we also round up to PMD_SIZE * 2 - which means that if
1171      * anybody is using a __ka address for the initial boot-stack - and tries
1172      * to use it - they are going to crash.  The xen_start_info has been
1173 * taken care of already in xen_setup_kernel_pagetable. */
1174 addr = xen_start_info->pt_base;
1175 size = xen_start_info->nr_pt_frames * PAGE_SIZE;
1177 xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
1178 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1181 static void __init xen_pagetable_p2m_setup(void)
1183 xen_vmalloc_p2m_tree();
1185 xen_pagetable_p2m_free();
1187 xen_pagetable_cleanhighmap();
1189 /* And revector! Bye bye old array */
1190 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1193 static void __init xen_pagetable_init(void)
1195 paging_init();
1196 xen_post_allocator_init();
1198 xen_pagetable_p2m_setup();
1200 /* Allocate and initialize top and mid mfn levels for p2m structure */
1201 xen_build_mfn_list_list();
1203 /* Remap memory freed due to conflicts with E820 map */
1204 xen_remap_memory();
1205 xen_setup_mfn_list_list();
1207 static void xen_write_cr2(unsigned long cr2)
1209 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
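/*
 * The TLB flush operations below are issued as mmuext ops through the
 * multicall machinery, so flushes requested while in lazy MMU mode are
 * batched with other pending hypercalls.
 */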
1212 static noinline void xen_flush_tlb(void)
1214 struct mmuext_op *op;
1215 struct multicall_space mcs;
1217 preempt_disable();
1219 mcs = xen_mc_entry(sizeof(*op));
1221 op = mcs.args;
1222 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1223 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1225 xen_mc_issue(PARAVIRT_LAZY_MMU);
1227 preempt_enable();
1230 static void xen_flush_tlb_one_user(unsigned long addr)
1232 struct mmuext_op *op;
1233 struct multicall_space mcs;
1235 trace_xen_mmu_flush_tlb_one_user(addr);
1237 preempt_disable();
1239 mcs = xen_mc_entry(sizeof(*op));
1240 op = mcs.args;
1241 op->cmd = MMUEXT_INVLPG_LOCAL;
1242 op->arg1.linear_addr = addr & PAGE_MASK;
1243 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1245 xen_mc_issue(PARAVIRT_LAZY_MMU);
1247 preempt_enable();
1250 static void xen_flush_tlb_others(const struct cpumask *cpus,
1251 const struct flush_tlb_info *info)
1253 struct {
1254 struct mmuext_op op;
1255 DECLARE_BITMAP(mask, NR_CPUS);
1256 } *args;
1257 struct multicall_space mcs;
1258 const size_t mc_entry_size = sizeof(args->op) +
1259 sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
1261 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
1263 if (cpumask_empty(cpus))
1264 return; /* nothing to do */
1266 mcs = xen_mc_entry(mc_entry_size);
1267 args = mcs.args;
1268 args->op.arg2.vcpumask = to_cpumask(args->mask);
1270     /* Remove ourselves, and any offline CPUs. */
1271 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1272 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1274 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1275 if (info->end != TLB_FLUSH_ALL &&
1276 (info->end - info->start) <= PAGE_SIZE) {
1277 args->op.cmd = MMUEXT_INVLPG_MULTI;
1278 args->op.arg1.linear_addr = info->start;
1281 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1283 xen_mc_issue(PARAVIRT_LAZY_MMU);
1286 static unsigned long xen_read_cr3(void)
1288 return this_cpu_read(xen_cr3);
1291 static void set_current_cr3(void *v)
1293 this_cpu_write(xen_current_cr3, (unsigned long)v);
1296 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1298 struct mmuext_op op;
1299 unsigned long mfn;
1301 trace_xen_mmu_write_cr3(kernel, cr3);
1303 if (cr3)
1304 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1305 else
1306 mfn = 0;
1308 WARN_ON(mfn == 0 && kernel);
1310 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1311 op.arg1.mfn = mfn;
1313 xen_extend_mmuext_op(&op);
1315 if (kernel) {
1316 this_cpu_write(xen_cr3, cr3);
1318 /* Update xen_current_cr3 once the batch has actually
1319 been submitted. */
1320 xen_mc_callback(set_current_cr3, (void *)cr3);
1323 static void xen_write_cr3(unsigned long cr3)
1325 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1327 BUG_ON(preemptible());
1329 xen_mc_batch(); /* disables interrupts */
1331     /* Update while interrupts are disabled, so it's atomic with
1332        respect to IPIs */
1333 this_cpu_write(xen_cr3, cr3);
1335 __xen_write_cr3(true, cr3);
1337 if (user_pgd)
1338 __xen_write_cr3(false, __pa(user_pgd));
1339 else
1340 __xen_write_cr3(false, 0);
1342 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1346 * At the start of the day - when Xen launches a guest, it has already
1347 * built pagetables for the guest. We diligently look over them
1348  * in xen_setup_kernel_pagetable and graft them as appropriate into the
1349 * init_top_pgt and its friends. Then when we are happy we load
1350 * the new init_top_pgt - and continue on.
1352 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1353 * up the rest of the pagetables. When it has completed it loads the cr3.
1354 * N.B. that baremetal would start at 'start_kernel' (and the early
1355 * #PF handler would create bootstrap pagetables) - so we are running
1356 * with the same assumptions as what to do when write_cr3 is executed
1357 * at this point.
1359 * Since there are no user-page tables at all, we have two variants
1360 * of xen_write_cr3 - the early bootup (this one), and the late one
1361 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1362 * the Linux kernel and user-space are both in ring 3 while the
1363 * hypervisor is in ring 0.
1365 static void __init xen_write_cr3_init(unsigned long cr3)
1367 BUG_ON(preemptible());
1369 xen_mc_batch(); /* disables interrupts */
1371     /* Update while interrupts are disabled, so it's atomic with
1372        respect to IPIs */
1373 this_cpu_write(xen_cr3, cr3);
1375 __xen_write_cr3(true, cr3);
1377 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
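/*
 * Allocate the shadow user-mode pgd for a newly created kernel pgd and
 * record it in page->private so xen_get_user_pgd() can find it later.
 */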
1380 static int xen_pgd_alloc(struct mm_struct *mm)
1382 pgd_t *pgd = mm->pgd;
1383 struct page *page = virt_to_page(pgd);
1384 pgd_t *user_pgd;
1385 int ret = -ENOMEM;
1387 BUG_ON(PagePinned(virt_to_page(pgd)));
1388 BUG_ON(page->private != 0);
1390 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1391 page->private = (unsigned long)user_pgd;
1393 if (user_pgd != NULL) {
1394 #ifdef CONFIG_X86_VSYSCALL_EMULATION
1395 user_pgd[pgd_index(VSYSCALL_ADDR)] =
1396 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1397 #endif
1398 ret = 0;
1401 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1403 return ret;
1406 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1408 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1410 if (user_pgd)
1411 free_page((unsigned long)user_pgd);
1415 * Init-time set_pte while constructing initial pagetables, which
1416 * doesn't allow RO page table pages to be remapped RW.
1418 * If there is no MFN for this PFN then this page is initially
1419 * ballooned out so clear the PTE (as in decrease_reservation() in
1420 * drivers/xen/balloon.c).
1422 * Many of these PTE updates are done on unpinned and writable pages
1423 * and doing a hypercall for these is unnecessary and expensive. At
1424 * this point it is not possible to tell if a page is pinned or not,
1425 * so always write the PTE directly and rely on Xen trapping and
1426 * emulating any updates as necessary.
1428 __visible pte_t xen_make_pte_init(pteval_t pte)
1430 unsigned long pfn;
1433 * Pages belonging to the initial p2m list mapped outside the default
1434 * address range must be mapped read-only. This region contains the
1435 * page tables for mapping the p2m list, too, and page tables MUST be
1436 * mapped read-only.
1438 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1439 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1440 pfn >= xen_start_info->first_p2m_pfn &&
1441 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1442 pte &= ~_PAGE_RW;
1444 pte = pte_pfn_to_mfn(pte);
1445 return native_make_pte(pte);
1447 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1449 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1451 __xen_set_pte(ptep, pte);
1454 /* Early in boot, while setting up the initial pagetable, assume
1455 everything is pinned. */
1456 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1458 #ifdef CONFIG_FLATMEM
1459 BUG_ON(mem_map); /* should only be used early */
1460 #endif
1461 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1462 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1465 /* Used for pmd and pud */
1466 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1468 #ifdef CONFIG_FLATMEM
1469 BUG_ON(mem_map); /* should only be used early */
1470 #endif
1471 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1474 /* Early release_pte assumes that all pts are pinned, since there's
1475 only init_mm and anything attached to that is pinned. */
1476 static void __init xen_release_pte_init(unsigned long pfn)
1478 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1479 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1482 static void __init xen_release_pmd_init(unsigned long pfn)
1484 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
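/*
 * Batched helpers used by the ptpage alloc/release paths below: queue a
 * single pin/unpin mmuext op, or a va-mapping protection change, in the
 * current multicall batch.
 */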
1487 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1489 struct multicall_space mcs;
1490 struct mmuext_op *op;
1492 mcs = __xen_mc_entry(sizeof(*op));
1493 op = mcs.args;
1494 op->cmd = cmd;
1495 op->arg1.mfn = pfn_to_mfn(pfn);
1497 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1500 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1502 struct multicall_space mcs;
1503 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1505 mcs = __xen_mc_entry(0);
1506 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1507 pfn_pte(pfn, prot), 0);
1510 /* This needs to make sure the new pte page is pinned iff it's being
1511 attached to a pinned pagetable. */
1512 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1513 unsigned level)
1515 bool pinned = xen_page_pinned(mm->pgd);
1517 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1519 if (pinned) {
1520 struct page *page = pfn_to_page(pfn);
1522 if (static_branch_likely(&xen_struct_pages_ready))
1523 SetPagePinned(page);
1525 xen_mc_batch();
1527 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1529 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1530 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1532 xen_mc_issue(PARAVIRT_LAZY_MMU);
1536 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1538 xen_alloc_ptpage(mm, pfn, PT_PTE);
1541 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1543 xen_alloc_ptpage(mm, pfn, PT_PMD);
1546 /* This should never happen until we're OK to use struct page */
1547 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1549 struct page *page = pfn_to_page(pfn);
1550 bool pinned = PagePinned(page);
1552 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1554 if (pinned) {
1555 xen_mc_batch();
1557 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1558 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1560 __set_pfn_prot(pfn, PAGE_KERNEL);
1562 xen_mc_issue(PARAVIRT_LAZY_MMU);
1564 ClearPagePinned(page);
1568 static void xen_release_pte(unsigned long pfn)
1570 xen_release_ptpage(pfn, PT_PTE);
1573 static void xen_release_pmd(unsigned long pfn)
1575 xen_release_ptpage(pfn, PT_PMD);
1578 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1580 xen_alloc_ptpage(mm, pfn, PT_PUD);
1583 static void xen_release_pud(unsigned long pfn)
1585 xen_release_ptpage(pfn, PT_PUD);
1589  * Like __va(), but returns the address in the kernel mapping (which is
1590  * all we have until the physical memory mapping has been set up).
1592 static void * __init __ka(phys_addr_t paddr)
1594 return (void *)(paddr + __START_KERNEL_map);
1597 /* Convert a machine address to physical address */
1598 static unsigned long __init m2p(phys_addr_t maddr)
1600 phys_addr_t paddr;
1602 maddr &= XEN_PTE_MFN_MASK;
1603 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1605 return paddr;
1608 /* Convert a machine address to kernel virtual */
1609 static void * __init m2v(phys_addr_t maddr)
1611 return __ka(m2p(maddr));
1614 /* Set the page permissions on identity-mapped pages */
1615 static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1616 unsigned long flags)
1618 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1619 pte_t pte = pfn_pte(pfn, prot);
1621 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1622 BUG();
1624 static void __init set_page_prot(void *addr, pgprot_t prot)
1626 return set_page_prot_flags(addr, prot, UVMF_NONE);
1629 void __init xen_setup_machphys_mapping(void)
1631 struct xen_machphys_mapping mapping;
1633 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1634 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1635 machine_to_phys_nr = mapping.max_mfn + 1;
1636 } else {
1637 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1641 static void __init convert_pfn_mfn(void *v)
1643 pte_t *pte = v;
1644 int i;
1646 /* All levels are converted the same way, so just treat them
1647 as ptes. */
1648 for (i = 0; i < PTRS_PER_PTE; i++)
1649 pte[i] = xen_make_pte(pte[i].pte);
1651 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1652 unsigned long addr)
1654 if (*pt_base == PFN_DOWN(__pa(addr))) {
1655 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1656 clear_page((void *)addr);
1657 (*pt_base)++;
1659 if (*pt_end == PFN_DOWN(__pa(addr))) {
1660 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1661 clear_page((void *)addr);
1662 (*pt_end)--;
1666 * Set up the initial kernel pagetable.
1668 * We can construct this by grafting the Xen provided pagetable into
1669 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1670 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1671 * kernel has a physical mapping to start with - but that's enough to
1672 * get __va working. We need to fill in the rest of the physical
1673 * mapping once some sort of allocator has been set up.
1675 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1677 pud_t *l3;
1678 pmd_t *l2;
1679 unsigned long addr[3];
1680 unsigned long pt_base, pt_end;
1681 unsigned i;
1683 /* max_pfn_mapped is the last pfn mapped in the initial memory
1684 * mappings. Considering that on Xen after the kernel mappings we
1685 * have the mappings of some pages that don't exist in pfn space, we
1686 * set max_pfn_mapped to the last real pfn mapped. */
1687 if (xen_start_info->mfn_list < __START_KERNEL_map)
1688 max_pfn_mapped = xen_start_info->first_p2m_pfn;
1689 else
1690 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1692 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1693 pt_end = pt_base + xen_start_info->nr_pt_frames;
1695 /* Zap identity mapping */
1696 init_top_pgt[0] = __pgd(0);
1698 /* Pre-constructed entries are in pfn, so convert to mfn */
1699 /* L4[273] -> level3_ident_pgt */
1700 /* L4[511] -> level3_kernel_pgt */
1701 convert_pfn_mfn(init_top_pgt);
1703 /* L3_i[0] -> level2_ident_pgt */
1704 convert_pfn_mfn(level3_ident_pgt);
1705 /* L3_k[510] -> level2_kernel_pgt */
1706 /* L3_k[511] -> level2_fixmap_pgt */
1707 convert_pfn_mfn(level3_kernel_pgt);
1709 /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
1710 convert_pfn_mfn(level2_fixmap_pgt);
1712 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1713 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1714 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1716 addr[0] = (unsigned long)pgd;
1717 addr[1] = (unsigned long)l3;
1718 addr[2] = (unsigned long)l2;
1719     /* Graft it onto L4[273][0].  Note that we are creating an aliasing problem:
1720      * Both L4[273][0] and L4[511][510] have entries that point to the same
1721      * L2 (PMD) tables.  Meaning that if you modify it in __va space
1722      * it will also be modified in the __ka space! (But if you just
1723 * modify the PMD table to point to other PTE's or none, then you
1724 * are OK - which is what cleanup_highmap does) */
1725 copy_page(level2_ident_pgt, l2);
1726 /* Graft it onto L4[511][510] */
1727 copy_page(level2_kernel_pgt, l2);
1730 * Zap execute permission from the ident map. Due to the sharing of
1731 * L1 entries we need to do this in the L2.
1733 if (__supported_pte_mask & _PAGE_NX) {
1734 for (i = 0; i < PTRS_PER_PMD; ++i) {
1735 if (pmd_none(level2_ident_pgt[i]))
1736 continue;
1737 level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
1741 /* Copy the initial P->M table mappings if necessary. */
1742 i = pgd_index(xen_start_info->mfn_list);
1743 if (i && i < pgd_index(__START_KERNEL_map))
1744 init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1746 /* Make pagetable pieces RO */
1747 set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
1748 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1749 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1750 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1751 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1752 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1753 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1755 for (i = 0; i < FIXMAP_PMD_NUM; i++) {
1756 set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
1757 PAGE_KERNEL_RO);
1760 /* Pin down new L4 */
1761 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1762 PFN_DOWN(__pa_symbol(init_top_pgt)));
1764 /* Unpin Xen-provided one */
1765 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1768 * At this stage there can be no user pgd, and no page structure to
1769      * attach it to, so make sure we just set the kernel pgd.
1771 xen_mc_batch();
1772 __xen_write_cr3(true, __pa(init_top_pgt));
1773 xen_mc_issue(PARAVIRT_LAZY_CPU);
1775     /* We can't rip out L3 and L2 that easily, as the Xen pagetables are
1776      * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1777      * the initial domain.  For guests using the toolstack, they are in
1778      * [L4], [L3], [L2], [L1], [L1] order.  So for dom0 we can only
1779      * rip out the [L4] (pgd), but for guests we shave off three pages.
1781 for (i = 0; i < ARRAY_SIZE(addr); i++)
1782 check_pt_base(&pt_base, &pt_end, addr[i]);
1784 /* Our (by three pages) smaller Xen pagetable that we are using */
1785 xen_pt_base = PFN_PHYS(pt_base);
1786 xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
1787 memblock_reserve(xen_pt_base, xen_pt_size);
1789 /* Revector the xen_start_info */
1790 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1794 * Read a value from a physical address.
1796 static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
1798 unsigned long *vaddr;
1799 unsigned long val;
1801 vaddr = early_memremap_ro(addr, sizeof(val));
1802 val = *vaddr;
1803 early_memunmap(vaddr, sizeof(val));
1804 return val;
1808 * Translate a virtual address to a physical one without relying on mapped
1809 * page tables. Don't rely on big pages being aligned in (guest) physical
1810 * space!
1812 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
1814 phys_addr_t pa;
1815 pgd_t pgd;
1816 pud_t pud;
1817 pmd_t pmd;
1818 pte_t pte;
1820 pa = read_cr3_pa();
1821 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
1822 sizeof(pgd)));
1823 if (!pgd_present(pgd))
1824 return 0;
1826 pa = pgd_val(pgd) & PTE_PFN_MASK;
1827 pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
1828 sizeof(pud)));
1829 if (!pud_present(pud))
1830 return 0;
1831 pa = pud_val(pud) & PTE_PFN_MASK;
1832 if (pud_large(pud))
1833 return pa + (vaddr & ~PUD_MASK);
1835 pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
1836 sizeof(pmd)));
1837 if (!pmd_present(pmd))
1838 return 0;
1839 pa = pmd_val(pmd) & PTE_PFN_MASK;
1840 if (pmd_large(pmd))
1841 return pa + (vaddr & ~PMD_MASK);
1843 pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
1844 sizeof(pte)));
1845 if (!pte_present(pte))
1846 return 0;
1847 pa = pte_pfn(pte) << PAGE_SHIFT;
1849 return pa | (vaddr & ~PAGE_MASK);
1853 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
1854 * this area.
1856 void __init xen_relocate_p2m(void)
1858 phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
1859 unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
1860 int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
1861 pte_t *pt;
1862 pmd_t *pmd;
1863 pud_t *pud;
1864 pgd_t *pgd;
1865 unsigned long *new_p2m;
1867 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1868 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
1869 n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
1870 n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
1871 n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
1872 n_frames = n_pte + n_pt + n_pmd + n_pud;
1874 new_area = xen_find_free_area(PFN_PHYS(n_frames));
1875 if (!new_area) {
1876 xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
1877 BUG();
1881      * Set up the page tables for addressing the new p2m list.
1882 * We have asked the hypervisor to map the p2m list at the user address
1883 * PUD_SIZE. It may have done so, or it may have used a kernel space
1884 * address depending on the Xen version.
1885 * To avoid any possible virtual address collision, just use
1886 * 2 * PUD_SIZE for the new area.
1888 pud_phys = new_area;
1889 pmd_phys = pud_phys + PFN_PHYS(n_pud);
1890 pt_phys = pmd_phys + PFN_PHYS(n_pmd);
1891 p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
1893 pgd = __va(read_cr3_pa());
1894 new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
1895 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
1896 pud = early_memremap(pud_phys, PAGE_SIZE);
1897 clear_page(pud);
1898 for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
1899 idx_pmd++) {
1900 pmd = early_memremap(pmd_phys, PAGE_SIZE);
1901 clear_page(pmd);
1902 for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
1903 idx_pt++) {
1904 pt = early_memremap(pt_phys, PAGE_SIZE);
1905 clear_page(pt);
1906 for (idx_pte = 0;
1907 idx_pte < min(n_pte, PTRS_PER_PTE);
1908 idx_pte++) {
1909 pt[idx_pte] = pfn_pte(p2m_pfn,
1910 PAGE_KERNEL);
1911 p2m_pfn++;
1913 n_pte -= PTRS_PER_PTE;
1914 early_memunmap(pt, PAGE_SIZE);
1915 make_lowmem_page_readonly(__va(pt_phys));
1916 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
1917 PFN_DOWN(pt_phys));
1918 pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
1919 pt_phys += PAGE_SIZE;
1921 n_pt -= PTRS_PER_PMD;
1922 early_memunmap(pmd, PAGE_SIZE);
1923 make_lowmem_page_readonly(__va(pmd_phys));
1924 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
1925 PFN_DOWN(pmd_phys));
1926 pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
1927 pmd_phys += PAGE_SIZE;
1929 n_pmd -= PTRS_PER_PUD;
1930 early_memunmap(pud, PAGE_SIZE);
1931 make_lowmem_page_readonly(__va(pud_phys));
1932 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
1933 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
1934 pud_phys += PAGE_SIZE;
1937 /* Now copy the old p2m info to the new area. */
1938 memcpy(new_p2m, xen_p2m_addr, size);
1939 xen_p2m_addr = new_p2m;
1941 /* Release the old p2m list and set new list info. */
1942 p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
1943 BUG_ON(!p2m_pfn);
1944 p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
1946 if (xen_start_info->mfn_list < __START_KERNEL_map) {
1947 pfn = xen_start_info->first_p2m_pfn;
1948 pfn_end = xen_start_info->first_p2m_pfn +
1949 xen_start_info->nr_p2m_frames;
1950 set_pgd(pgd + 1, __pgd(0));
1951 } else {
1952 pfn = p2m_pfn;
1953 pfn_end = p2m_pfn_end;
1956 memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
1957 while (pfn < pfn_end) {
1958 if (pfn == p2m_pfn) {
1959 pfn = p2m_pfn_end;
1960 continue;
1962 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1963 pfn++;
1966 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1967 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
1968 xen_start_info->nr_p2m_frames = n_frames;
1971 void __init xen_reserve_special_pages(void)
1973 phys_addr_t paddr;
1975 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
1976 if (xen_start_info->store_mfn) {
1977 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
1978 memblock_reserve(paddr, PAGE_SIZE);
1980 if (!xen_initial_domain()) {
1981 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
1982 memblock_reserve(paddr, PAGE_SIZE);
1986 void __init xen_pt_check_e820(void)
1988 if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
1989 xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
1990 BUG();
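/*
 * Backing page for fixmap slots that are not really mapped under Xen
 * (local APIC, IO APIC); filled with 0xff in xen_init_mmu_ops().
 */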
1994 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1996 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1998 pte_t pte;
2000 phys >>= PAGE_SHIFT;
2002 switch (idx) {
2003 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2004 #ifdef CONFIG_X86_VSYSCALL_EMULATION
2005 case VSYSCALL_PAGE:
2006 #endif
2007 /* All local page mappings */
2008 pte = pfn_pte(phys, prot);
2009 break;
2011 #ifdef CONFIG_X86_LOCAL_APIC
2012 case FIX_APIC_BASE: /* maps dummy local APIC */
2013 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2014 break;
2015 #endif
2017 #ifdef CONFIG_X86_IO_APIC
2018 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2020 * We just don't map the IO APIC - all access is via
2021 * hypercalls. Keep the address in the pte for reference.
2023 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2024 break;
2025 #endif
2027 case FIX_PARAVIRT_BOOTMAP:
2028 /* This is an MFN, but it isn't an IO mapping from the
2029 IO domain */
2030 pte = mfn_pte(phys, prot);
2031 break;
2033 default:
2034 /* By default, set_fixmap is used for hardware mappings */
2035 pte = mfn_pte(phys, prot);
2036 break;
2039 __native_set_fixmap(idx, pte);
2041 #ifdef CONFIG_X86_VSYSCALL_EMULATION
2042 /* Replicate changes to map the vsyscall page into the user
2043 pagetable vsyscall mapping. */
2044 if (idx == VSYSCALL_PAGE) {
2045 unsigned long vaddr = __fix_to_virt(idx);
2046 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2048 #endif
2051 static void __init xen_post_allocator_init(void)
2053 pv_ops.mmu.set_pte = xen_set_pte;
2054 pv_ops.mmu.set_pmd = xen_set_pmd;
2055 pv_ops.mmu.set_pud = xen_set_pud;
2056 pv_ops.mmu.set_p4d = xen_set_p4d;
2058 /* This will work as long as patching hasn't happened yet
2059 (which it hasn't) */
2060 pv_ops.mmu.alloc_pte = xen_alloc_pte;
2061 pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
2062 pv_ops.mmu.release_pte = xen_release_pte;
2063 pv_ops.mmu.release_pmd = xen_release_pmd;
2064 pv_ops.mmu.alloc_pud = xen_alloc_pud;
2065 pv_ops.mmu.release_pud = xen_release_pud;
2066 pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2068 pv_ops.mmu.write_cr3 = &xen_write_cr3;
2069 }
2071 static void xen_leave_lazy_mmu(void)
2073 preempt_disable();
2074 xen_mc_flush();
2075 paravirt_leave_lazy_mmu();
2076 preempt_enable();
2077 }
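/*
 * Editor's note (illustrative sketch, not taken from a specific caller):
 * the xen_mc_flush() above is needed because generic code may have queued
 * pagetable updates as multicalls while in lazy MMU mode, roughly:
 *
 *	arch_enter_lazy_mmu_mode();
 *	set_pte_at(mm, addr, ptep, pte);	// queued as a multicall
 *	arch_leave_lazy_mmu_mode();		// reaches xen_leave_lazy_mmu()
 *						// via the lazy_mode.leave hook
 */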
2079 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2080 .read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2),
2081 .write_cr2 = xen_write_cr2,
2083 .read_cr3 = xen_read_cr3,
2084 .write_cr3 = xen_write_cr3_init,
2086 .flush_tlb_user = xen_flush_tlb,
2087 .flush_tlb_kernel = xen_flush_tlb,
2088 .flush_tlb_one_user = xen_flush_tlb_one_user,
2089 .flush_tlb_others = xen_flush_tlb_others,
2090 .tlb_remove_table = tlb_remove_table,
2092 .pgd_alloc = xen_pgd_alloc,
2093 .pgd_free = xen_pgd_free,
2095 .alloc_pte = xen_alloc_pte_init,
2096 .release_pte = xen_release_pte_init,
2097 .alloc_pmd = xen_alloc_pmd_init,
2098 .release_pmd = xen_release_pmd_init,
2100 .set_pte = xen_set_pte_init,
2101 .set_pmd = xen_set_pmd_hyper,
2103 .ptep_modify_prot_start = __ptep_modify_prot_start,
2104 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2106 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2107 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2109 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2110 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2112 .set_pud = xen_set_pud_hyper,
2114 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2115 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2117 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2118 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2119 .set_p4d = xen_set_p4d_hyper,
2121 .alloc_pud = xen_alloc_pmd_init,
2122 .release_pud = xen_release_pmd_init,
2124 #if CONFIG_PGTABLE_LEVELS >= 5
2125 .p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
2126 .make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
2127 #endif
2129 .activate_mm = xen_activate_mm,
2130 .dup_mmap = xen_dup_mmap,
2131 .exit_mmap = xen_exit_mmap,
2133 .lazy_mode = {
2134 .enter = paravirt_enter_lazy_mmu,
2135 .leave = xen_leave_lazy_mmu,
2136 .flush = paravirt_flush_lazy_mmu,
2137 },
2139 .set_fixmap = xen_set_fixmap,
2140 };
2142 void __init xen_init_mmu_ops(void)
2144 x86_init.paging.pagetable_init = xen_pagetable_init;
2145 x86_init.hyper.init_after_bootmem = xen_after_bootmem;
2147 pv_ops.mmu = xen_mmu_ops;
2149 memset(dummy_mapping, 0xff, PAGE_SIZE);
2150 }
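/*
 * Editor's note: dummy_mapping, filled with 0xff above, is the page that
 * xen_set_fixmap() points the local APIC and IO-APIC fixmap slots at, so
 * those fixmap addresses stay mapped without touching real hardware.
 */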
2152 /* Protected by xen_reservation_lock. */
2153 #define MAX_CONTIG_ORDER 9 /* 2MB */
2154 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
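/*
 * Editor's note: 1 << MAX_CONTIG_ORDER is 512 frames, i.e. 512 * 4 KiB =
 * 2 MiB (the "2MB" above); discontig_frames holds one frame number per
 * page of the largest exchangeable region.
 */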
2156 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
2157 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2158 unsigned long *in_frames,
2159 unsigned long *out_frames)
2161 int i;
2162 struct multicall_space mcs;
2164 xen_mc_batch();
2165 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2166 mcs = __xen_mc_entry(0);
2168 if (in_frames)
2169 in_frames[i] = virt_to_mfn(vaddr);
2171 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2172 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2174 if (out_frames)
2175 out_frames[i] = virt_to_pfn(vaddr);
2176 }
2177 xen_mc_issue(0);
2178 }
2180 /*
2181 * Update the pfn-to-mfn mappings for a virtual address range, either to
2182 * point to an array of mfns, or contiguously from a single starting
2183 * mfn.
2184 */
2185 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2186 unsigned long *mfns,
2187 unsigned long first_mfn)
2189 unsigned i, limit;
2190 unsigned long mfn;
2192 xen_mc_batch();
2194 limit = 1u << order;
2195 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2196 struct multicall_space mcs;
2197 unsigned flags;
2199 mcs = __xen_mc_entry(0);
2200 if (mfns)
2201 mfn = mfns[i];
2202 else
2203 mfn = first_mfn + i;
2205 if (i < (limit - 1))
2206 flags = 0;
2207 else {
2208 if (order == 0)
2209 flags = UVMF_INVLPG | UVMF_ALL;
2210 else
2211 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2212 }
2214 MULTI_update_va_mapping(mcs.mc, vaddr,
2215 mfn_pte(mfn, PAGE_KERNEL), flags);
2217 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2218 }
2220 xen_mc_issue(0);
2221 }
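/*
 * Editor's note: the two calling conventions, as used by
 * xen_create_contiguous_region() and xen_destroy_contiguous_region() below:
 *
 *	xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
 *		// contiguous: map out_frame, out_frame + 1, ...
 *	xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
 *		// scattered: map in_frames[0], in_frames[1], ...
 */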
2223 /*
2224 * Perform the hypercall to exchange a region of our pfns to point to
2225 * memory with the required contiguous alignment. Takes the pfns as
2226 * input, and populates mfns as output.
2227 *
2228 * Returns a success code indicating whether the hypervisor was able to
2229 * satisfy the request or not.
2230 */
2231 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2232 unsigned long *pfns_in,
2233 unsigned long extents_out,
2234 unsigned int order_out,
2235 unsigned long *mfns_out,
2236 unsigned int address_bits)
2238 long rc;
2239 int success;
2241 struct xen_memory_exchange exchange = {
2242 .in = {
2243 .nr_extents = extents_in,
2244 .extent_order = order_in,
2245 .extent_start = pfns_in,
2246 .domid = DOMID_SELF
2247 },
2248 .out = {
2249 .nr_extents = extents_out,
2250 .extent_order = order_out,
2251 .extent_start = mfns_out,
2252 .address_bits = address_bits,
2253 .domid = DOMID_SELF
2254 }
2255 };
2257 BUG_ON(extents_in << order_in != extents_out << order_out);
2259 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2260 success = (exchange.nr_exchanged == extents_in);
2262 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2263 BUG_ON(success && (rc != 0));
2265 return success;
2266 }
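/*
 * Editor's note (worked example): for order 9 the caller below passes
 *
 *	xen_exchange_memory(512, 0, in_frames,	// 512 order-0 extents in
 *			    1, 9, &out_frame,	// one order-9 extent out
 *			    address_bits);
 *
 * which satisfies the BUG_ON() above, since 512 << 0 == 1 << 9.
 */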
2268 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2269 unsigned int address_bits,
2270 dma_addr_t *dma_handle)
2272 unsigned long *in_frames = discontig_frames, out_frame;
2273 unsigned long flags;
2274 int success;
2275 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
2277 /*
2278 * Currently an auto-translated guest will not perform I/O, nor will
2279 * it require PAE page directories below 4GB. Therefore any calls to
2280 * this function are redundant and can be ignored.
2281 */
2283 if (unlikely(order > MAX_CONTIG_ORDER))
2284 return -ENOMEM;
2286 memset((void *) vstart, 0, PAGE_SIZE << order);
2288 spin_lock_irqsave(&xen_reservation_lock, flags);
2290 /* 1. Zap current PTEs, remembering MFNs. */
2291 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2293 /* 2. Get a new contiguous memory extent. */
2294 out_frame = virt_to_pfn(vstart);
2295 success = xen_exchange_memory(1UL << order, 0, in_frames,
2296 1, order, &out_frame,
2297 address_bits);
2299 /* 3. Map the new extent in place of old pages. */
2300 if (success)
2301 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2302 else
2303 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2305 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2307 *dma_handle = virt_to_machine(vstart).maddr;
2308 return success ? 0 : -ENOMEM;
2309 }
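/*
 * Editor's note (hypothetical usage sketch; the buffer and variable names
 * are made up, the call pattern only mirrors how DMA code might use this
 * interface):
 *
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, order);
 *	dma_addr_t dma_handle;
 *	int rc;
 *
 *	rc = xen_create_contiguous_region(virt_to_phys(buf), order,
 *					  address_bits, &dma_handle);
 *	// rc == 0 on success; on failure the original frames are remapped
 *	// in place, so the buffer stays usable, just not machine-contiguous.
 */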
2311 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2313 unsigned long *out_frames = discontig_frames, in_frame;
2314 unsigned long flags;
2315 int success;
2316 unsigned long vstart;
2318 if (unlikely(order > MAX_CONTIG_ORDER))
2319 return;
2321 vstart = (unsigned long)phys_to_virt(pstart);
2322 memset((void *) vstart, 0, PAGE_SIZE << order);
2324 spin_lock_irqsave(&xen_reservation_lock, flags);
2326 /* 1. Find start MFN of contiguous extent. */
2327 in_frame = virt_to_mfn(vstart);
2329 /* 2. Zap current PTEs. */
2330 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2332 /* 3. Do the exchange for non-contiguous MFNs. */
2333 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2334 0, out_frames, 0);
2336 /* 4. Map new pages in place of old pages. */
2337 if (success)
2338 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2339 else
2340 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2342 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2343 }
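/*
 * Editor's note: this is the inverse exchange of the function above: one
 * order-N machine extent goes in and 1 << N order-0 frames come out (for
 * order 9, one 2 MiB extent is traded back for 512 single frames), and
 * address_bits is passed as 0 since the replacement frames need no
 * placement restriction.
 */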
2345 static noinline void xen_flush_tlb_all(void)
2347 struct mmuext_op *op;
2348 struct multicall_space mcs;
2350 preempt_disable();
2352 mcs = xen_mc_entry(sizeof(*op));
2354 op = mcs.args;
2355 op->cmd = MMUEXT_TLB_FLUSH_ALL;
2356 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
2358 xen_mc_issue(PARAVIRT_LAZY_MMU);
2360 preempt_enable();
2361 }
2363 #define REMAP_BATCH_SIZE 16
2365 struct remap_data {
2366 xen_pfn_t *pfn;
2367 bool contiguous;
2368 bool no_translate;
2369 pgprot_t prot;
2370 struct mmu_update *mmu_update;
2371 };
2373 static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
2375 struct remap_data *rmd = data;
2376 pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
2378 /*
2379 * If the range is contiguous, advance the pfn value itself; otherwise
2380 * advance the pointer to the next pfn in the array.
2381 */
2382 if (rmd->contiguous)
2383 (*rmd->pfn)++;
2384 else
2385 rmd->pfn++;
2387 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2388 rmd->mmu_update->ptr |= rmd->no_translate ?
2389 MMU_PT_UPDATE_NO_TRANSLATE :
2390 MMU_NORMAL_PT_UPDATE;
2391 rmd->mmu_update->val = pte_val_ma(pte);
2392 rmd->mmu_update++;
2394 return 0;
2395 }
2397 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
2398 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
2399 unsigned int domid, bool no_translate, struct page **pages)
2401 int err = 0;
2402 struct remap_data rmd;
2403 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2404 unsigned long range;
2405 int mapped = 0;
2407 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2409 rmd.pfn = pfn;
2410 rmd.prot = prot;
2411 /*
2412 * We use err_ptr to indicate whether we are doing a contiguous
2413 * or a discontiguous mapping.
2414 */
2415 rmd.contiguous = !err_ptr;
2416 rmd.no_translate = no_translate;
2418 while (nr) {
2419 int index = 0;
2420 int done = 0;
2421 int batch = min(REMAP_BATCH_SIZE, nr);
2422 int batch_left = batch;
2424 range = (unsigned long)batch << PAGE_SHIFT;
2426 rmd.mmu_update = mmu_update;
2427 err = apply_to_page_range(vma->vm_mm, addr, range,
2428 remap_area_pfn_pte_fn, &rmd);
2429 if (err)
2430 goto out;
2432 /*
2433 * We record the error for each page that gives an error, but
2434 * continue mapping until the whole set is done.
2435 */
2436 do {
2437 int i;
2439 err = HYPERVISOR_mmu_update(&mmu_update[index],
2440 batch_left, &done, domid);
2442 /*
2443 * @err_ptr may be the same buffer as @pfn, so
2444 * only clear it after each chunk of @pfn has
2445 * been consumed.
2446 */
2447 if (err_ptr) {
2448 for (i = index; i < index + done; i++)
2449 err_ptr[i] = 0;
2450 }
2451 if (err < 0) {
2452 if (!err_ptr)
2453 goto out;
2454 err_ptr[i] = err;
2455 done++; /* Skip failed frame. */
2456 } else
2457 mapped += done;
2458 batch_left -= done;
2459 index += done;
2460 } while (batch_left);
2462 nr -= batch;
2463 addr += range;
2464 if (err_ptr)
2465 err_ptr += batch;
2466 cond_resched();
2467 }
2468 out:
2470 xen_flush_tlb_all();
2472 return err < 0 ? err : mapped;
2473 }
2474 EXPORT_SYMBOL_GPL(xen_remap_pfn);
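/*
 * Editor's note (hypothetical call; parameter usage is inferred from the
 * function above rather than from a specific caller):
 *
 *	mapped = xen_remap_pfn(vma, vma->vm_start, gfn_array, nr,
 *			       err_array, vma->vm_page_prot,
 *			       domid, false, NULL);
 *
 * With err_array non-NULL each frame comes from gfn_array[] and records
 * its own error code; with err_array == NULL the range is mapped
 * contiguously starting at *gfn_array and the first error aborts the map.
 */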
2476 #ifdef CONFIG_KEXEC_CORE
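/*
 * Editor's note: for a PV domain the guest's "physical" addresses are
 * pseudo-physical, so the vmcoreinfo note is reported as a machine
 * address below; otherwise the ordinary physical address suffices.
 */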
2477 phys_addr_t paddr_vmcoreinfo_note(void)
2479 if (xen_pv_domain())
2480 return virt_to_machine(vmcoreinfo_note).maddr;
2481 else
2482 return __pa(vmcoreinfo_note);
2483 }
2484 #endif /* CONFIG_KEXEC_CORE */