4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
41 #include <linux/sched.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/module.h>
47 #include <linux/gfp.h>
48 #include <linux/memblock.h>
49 #include <linux/seq_file.h>
50 #include <linux/crash_dump.h>
52 #include <trace/events/xen.h>
54 #include <asm/pgtable.h>
55 #include <asm/tlbflush.h>
56 #include <asm/fixmap.h>
57 #include <asm/mmu_context.h>
58 #include <asm/setup.h>
59 #include <asm/paravirt.h>
61 #include <asm/linkage.h>
67 #include <asm/xen/hypercall.h>
68 #include <asm/xen/hypervisor.h>
72 #include <xen/interface/xen.h>
73 #include <xen/interface/hvm/hvm_op.h>
74 #include <xen/interface/version.h>
75 #include <xen/interface/memory.h>
76 #include <xen/hvc-console.h>
78 #include "multicalls.h"
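/*
 * Illustrative sketch of the pfn <-> mfn round trip described in the
 * header comment. It assumes the pfn_to_mfn()/mfn_to_pfn() helpers from
 * <asm/xen/page.h>; the function name show_pfn_round_trip is hypothetical
 * and nothing in this file calls it.
 */
static inline void __maybe_unused show_pfn_round_trip(unsigned long pfn)
{
	/* guest-"physical" pfn -> machine mfn via the p2m lookup */
	unsigned long mfn = pfn_to_mfn(pfn);
	/* machine mfn -> guest pfn via the shared m2p table */
	unsigned long back = mfn_to_pfn(mfn);

	/* for an ordinary RAM page the round trip is the identity */
	WARN_ON(back != pfn);
}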
83 * Protects atomic reservation decrease/increase against concurrent increases.
84 * Also protects non-atomic updates of current_pages and balloon lists.
86 DEFINE_SPINLOCK(xen_reservation_lock);
90 * Identity map, in addition to plain kernel map. This needs to be
91 * large enough to allocate page table pages to allocate the rest.
92 * Each page can map 2MB.
94 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
95 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
98 /* l3 pud for userspace vsyscall mapping */
99 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
100 #endif /* CONFIG_X86_64 */
103 * Note about cr3 (pagetable base) values:
105 * xen_cr3 contains the current logical cr3 value; it contains the
106 * last set cr3. This may not be the current effective cr3, because
107 * its update may be being lazily deferred. However, a vcpu looking
108 * at its own cr3 can use this value knowing that everything will
109 * be self-consistent.
111 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
112 * hypercall to set the vcpu cr3 is complete (so it may be a little
113 * out of date, but it will never be set early). If one vcpu is
114 * looking at another vcpu's cr3 value, it should use this variable.
116 DEFINE_PER_CPU(unsigned long, xen_cr3);	/* cr3 stored as physaddr */
117 DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */
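/*
 * Illustrative helper (hypothetical name, not used elsewhere): per the
 * note above, a vcpu that wants to know whether another vcpu is still
 * running on a given pagetable must look at xen_current_cr3, not xen_cr3.
 */
static inline bool __maybe_unused xen_vcpu_running_on_pgd(int cpu, pgd_t *pgd)
{
	/* xen_current_cr3 is stored as a physical address */
	return per_cpu(xen_current_cr3, cpu) == __pa(pgd);
}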
119 static phys_addr_t xen_pt_base, xen_pt_size __initdata;
122 * Just beyond the highest usermode address. STACK_TOP_MAX has a
123 * redzone above it, so round it up to a PGD boundary.
125 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
127 unsigned long arbitrary_virt_to_mfn(void *vaddr)
129 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
131 return PFN_DOWN(maddr.maddr);
134 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
136 unsigned long address = (unsigned long)vaddr;
142 * if the PFN is in the linear mapped vaddr range, we can just use
143 * the (quick) virt_to_machine() p2m lookup
145 if (virt_addr_valid(vaddr))
146 return virt_to_machine(vaddr);
148 /* otherwise we have to do a (slower) full page-table walk */
150 pte = lookup_address(address, &level);
152 offset = address & ~PAGE_MASK;
153 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
155 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
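/*
 * Illustrative usage sketch (hypothetical helper, never called): the two
 * lookup paths described above. Direct-mapped kernel memory can use the
 * quick p2m lookup; vmalloc/ioremap addresses need the pagetable walk.
 */
static void __maybe_unused show_maddr_lookup(void *lowmem_ptr, void *vmalloc_ptr)
{
	xmaddr_t fast = virt_to_machine(lowmem_ptr);		/* p2m lookup */
	xmaddr_t slow = arbitrary_virt_to_machine(vmalloc_ptr);	/* pagetable walk */

	(void)fast;
	(void)slow;
}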
157 void make_lowmem_page_readonly(void *vaddr)
160 unsigned long address = (unsigned long)vaddr;
163 pte = lookup_address(address, &level);
165 return; /* vaddr missing */
167 ptev = pte_wrprotect(*pte);
169 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
173 void make_lowmem_page_readwrite(void *vaddr)
176 unsigned long address = (unsigned long)vaddr;
179 pte = lookup_address(address, &level);
181 return; /* vaddr missing */
183 ptev = pte_mkwrite(*pte);
185 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
190 static bool xen_page_pinned(void *ptr)
192 struct page *page = virt_to_page(ptr);
194 return PagePinned(page);
197 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
199 struct multicall_space mcs;
200 struct mmu_update *u;
202 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
204 mcs = xen_mc_entry(sizeof(*u));
207 /* ptep might be kmapped when using 32-bit HIGHPTE */
208 u->ptr = virt_to_machine(ptep).maddr;
209 u->val = pte_val_ma(pteval);
211 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
213 xen_mc_issue(PARAVIRT_LAZY_MMU);
215 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
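/*
 * Illustrative sketch of the multicall batching pattern that
 * xen_set_domain_pte() above relies on: xen_mc_entry() opens a batch and
 * reserves argument space, the MULTI_* wrapper queues the hypercall, and
 * xen_mc_issue() flushes immediately unless a lazy-MMU batch is open.
 * The helper name update_one_pte_batched is hypothetical.
 */
static void __maybe_unused update_one_pte_batched(pte_t *ptep, pte_t pteval)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));		/* opens the per-cpu batch */
	u = mcs.args;
	u->ptr = virt_to_machine(ptep).maddr;	/* machine address of the pte slot */
	u->val = pte_val_ma(pteval);		/* new value, in mfn terms */

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);	/* flush now unless in lazy MMU mode */
}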
217 static void xen_extend_mmu_update(const struct mmu_update *update)
219 struct multicall_space mcs;
220 struct mmu_update *u;
222 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
224 if (mcs.mc != NULL) {
227 mcs = __xen_mc_entry(sizeof(*u));
228 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
235 static void xen_extend_mmuext_op(const struct mmuext_op *op)
237 struct multicall_space mcs;
240 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
242 if (mcs.mc != NULL) {
245 mcs = __xen_mc_entry(sizeof(*u));
246 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
253 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
261 /* ptr may be ioremapped for 64-bit pagetable setup */
262 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
263 u.val = pmd_val_ma(val);
264 xen_extend_mmu_update(&u);
266 xen_mc_issue(PARAVIRT_LAZY_MMU);
271 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
273 trace_xen_mmu_set_pmd(ptr, val);
275 /* If page is not pinned, we can just update the entry
277 if (!xen_page_pinned(ptr)) {
282 xen_set_pmd_hyper(ptr, val);
286 * Associate a virtual page frame with a given physical page frame
287 * and protection flags for that frame.
289 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
291 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
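/*
 * Illustrative usage (hypothetical caller): set_pte_mfn() installs a pte
 * built directly from a machine frame number, so no pfn->mfn translation
 * is applied on the way in.
 */
static void __maybe_unused map_machine_frame(unsigned long vaddr, unsigned long mfn)
{
	set_pte_mfn(vaddr, mfn, PAGE_KERNEL);
}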
294 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
298 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
303 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
304 u.val = pte_val_ma(pteval);
305 xen_extend_mmu_update(&u);
307 xen_mc_issue(PARAVIRT_LAZY_MMU);
312 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
314 if (!xen_batched_set_pte(ptep, pteval)) {
316 * Could call native_set_pte() here and trap and
317 * emulate the PTE write but with 32-bit guests this
318 * needs two traps (one for each of the two 32-bit
319 * words in the PTE) so do one hypercall directly
324 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
325 u.val = pte_val_ma(pteval);
326 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
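/*
 * Illustrative sketch of why xen_batched_set_pte() checks for
 * PARAVIRT_LAZY_MMU: a caller that brackets a run of updates with
 * arch_enter/leave_lazy_mmu_mode() gets them queued into one multicall
 * instead of paying a hypercall per pte. The loop below is a hypothetical
 * caller, not something this file does itself.
 */
static void __maybe_unused set_ptes_lazily(pte_t *ptep, pte_t *vals, int n)
{
	int i;

	arch_enter_lazy_mmu_mode();	/* subsequent set_pte()s are queued */
	for (i = 0; i < n; i++)
		set_pte(ptep + i, vals[i]);
	arch_leave_lazy_mmu_mode();	/* flush the queued mmu_update batch */
}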
330 static void xen_set_pte(pte_t *ptep, pte_t pteval)
332 trace_xen_mmu_set_pte(ptep, pteval);
333 __xen_set_pte(ptep, pteval);
336 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
337 pte_t *ptep, pte_t pteval)
339 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
340 __xen_set_pte(ptep, pteval);
343 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
344 unsigned long addr, pte_t *ptep)
346 /* Just return the pte as-is. We preserve the bits on commit */
347 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
351 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
352 pte_t *ptep, pte_t pte)
356 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
359 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
360 u.val = pte_val_ma(pte);
361 xen_extend_mmu_update(&u);
363 xen_mc_issue(PARAVIRT_LAZY_MMU);
366 /* Assume pteval_t is equivalent to all the other *val_t types. */
367 static pteval_t pte_mfn_to_pfn(pteval_t val)
369 if (val & _PAGE_PRESENT) {
370 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
371 unsigned long pfn = mfn_to_pfn(mfn);
373 pteval_t flags = val & PTE_FLAGS_MASK;
374 if (unlikely(pfn == ~0))
375 val = flags & ~_PAGE_PRESENT;
377 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
383 static pteval_t pte_pfn_to_mfn(pteval_t val)
385 if (val & _PAGE_PRESENT) {
386 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
387 pteval_t flags = val & PTE_FLAGS_MASK;
390 if (!xen_feature(XENFEAT_auto_translated_physmap))
391 mfn = __pfn_to_mfn(pfn);
395 * If there's no mfn for the pfn, then just create an
396 * empty non-present pte. Unfortunately this loses
397 * information about the original pfn, so
398 * pte_mfn_to_pfn is asymmetric.
400 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
404 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
405 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
411 __visible pteval_t xen_pte_val(pte_t pte)
413 pteval_t pteval = pte.pte;
415 return pte_mfn_to_pfn(pteval);
417 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
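/*
 * Illustrative sketch of the asymmetry noted above: a pfn with no backing
 * mfn (e.g. a ballooned-out page) becomes an empty non-present pte, so the
 * original pfn cannot be recovered by converting back. Hypothetical helper,
 * never called.
 */
static void __maybe_unused show_p2m_asymmetry(pteval_t val)
{
	/* pfn -> mfn: a missing p2m entry yields a non-present, empty pte */
	pteval_t mval = pte_pfn_to_mfn(val);
	/* mfn -> pfn: cannot tell which pfn the guest originally used */
	pteval_t back = pte_mfn_to_pfn(mval);

	(void)back;
}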
419 __visible pgdval_t xen_pgd_val(pgd_t pgd)
421 return pte_mfn_to_pfn(pgd.pgd);
423 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
425 __visible pte_t xen_make_pte(pteval_t pte)
427 pte = pte_pfn_to_mfn(pte);
429 return native_make_pte(pte);
431 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
433 __visible pgd_t xen_make_pgd(pgdval_t pgd)
435 pgd = pte_pfn_to_mfn(pgd);
436 return native_make_pgd(pgd);
438 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
440 __visible pmdval_t xen_pmd_val(pmd_t pmd)
442 return pte_mfn_to_pfn(pmd.pmd);
444 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
446 static void xen_set_pud_hyper(pud_t
*ptr
, pud_t val
)
454 /* ptr may be ioremapped for 64-bit pagetable setup */
455 u
.ptr
= arbitrary_virt_to_machine(ptr
).maddr
;
456 u
.val
= pud_val_ma(val
);
457 xen_extend_mmu_update(&u
);
459 xen_mc_issue(PARAVIRT_LAZY_MMU
);
464 static void xen_set_pud(pud_t
*ptr
, pud_t val
)
466 trace_xen_mmu_set_pud(ptr
, val
);
468 /* If page is not pinned, we can just update the entry
470 if (!xen_page_pinned(ptr
)) {
475 xen_set_pud_hyper(ptr
, val
);
478 #ifdef CONFIG_X86_PAE
479 static void xen_set_pte_atomic(pte_t
*ptep
, pte_t pte
)
481 trace_xen_mmu_set_pte_atomic(ptep
, pte
);
482 set_64bit((u64
*)ptep
, native_pte_val(pte
));
485 static void xen_pte_clear(struct mm_struct
*mm
, unsigned long addr
, pte_t
*ptep
)
487 trace_xen_mmu_pte_clear(mm
, addr
, ptep
);
488 if (!xen_batched_set_pte(ptep
, native_make_pte(0)))
489 native_pte_clear(mm
, addr
, ptep
);
492 static void xen_pmd_clear(pmd_t
*pmdp
)
494 trace_xen_mmu_pmd_clear(pmdp
);
495 set_pmd(pmdp
, __pmd(0));
497 #endif /* CONFIG_X86_PAE */
499 __visible pmd_t
xen_make_pmd(pmdval_t pmd
)
501 pmd
= pte_pfn_to_mfn(pmd
);
502 return native_make_pmd(pmd
);
504 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd
);
506 #if CONFIG_PGTABLE_LEVELS == 4
507 __visible pudval_t
xen_pud_val(pud_t pud
)
509 return pte_mfn_to_pfn(pud
.pud
);
511 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val
);
513 __visible pud_t
xen_make_pud(pudval_t pud
)
515 pud
= pte_pfn_to_mfn(pud
);
517 return native_make_pud(pud
);
519 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud
);
521 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
523 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
524 unsigned offset = pgd - pgd_page;
525 pgd_t *user_ptr = NULL;
527 if (offset < pgd_index(USER_LIMIT)) {
528 struct page *page = virt_to_page(pgd_page);
529 user_ptr = (pgd_t *)page->private;
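/*
 * Illustrative sketch (hypothetical helper): on 64-bit PV guests each
 * kernel pgd page carries a companion user pgd, stashed in page->private
 * by xen_pgd_alloc() further down. Callers such as xen_set_pgd() use
 * xen_get_user_pgd() to keep the two in step.
 */
static void __maybe_unused show_user_pgd(pgd_t *pgd)
{
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	/* entries at or above USER_LIMIT exist only in the kernel pgd */
	if (user_pgd)
		pr_debug("kernel pgd %p has user pgd %p\n", pgd, user_pgd);
}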
537 static void __xen_set_pgd_hyper(pgd_t
*ptr
, pgd_t val
)
541 u
.ptr
= virt_to_machine(ptr
).maddr
;
542 u
.val
= pgd_val_ma(val
);
543 xen_extend_mmu_update(&u
);
547 * Raw hypercall-based set_pgd, intended for in early boot before
548 * there's a page structure. This implies:
549 * 1. The only existing pagetable is the kernel's
550 * 2. It is always pinned
551 * 3. It has no user pagetable attached to it
553 static void __init
xen_set_pgd_hyper(pgd_t
*ptr
, pgd_t val
)
559 __xen_set_pgd_hyper(ptr
, val
);
561 xen_mc_issue(PARAVIRT_LAZY_MMU
);
566 static void xen_set_pgd(pgd_t
*ptr
, pgd_t val
)
568 pgd_t
*user_ptr
= xen_get_user_pgd(ptr
);
570 trace_xen_mmu_set_pgd(ptr
, user_ptr
, val
);
572 /* If page is not pinned, we can just update the entry
574 if (!xen_page_pinned(ptr
)) {
577 WARN_ON(xen_page_pinned(user_ptr
));
583 /* If it's pinned, then we can at least batch the kernel and
584 user updates together. */
587 __xen_set_pgd_hyper(ptr
, val
);
589 __xen_set_pgd_hyper(user_ptr
, val
);
591 xen_mc_issue(PARAVIRT_LAZY_MMU
);
593 #endif /* CONFIG_PGTABLE_LEVELS == 4 */
596 * (Yet another) pagetable walker. This one is intended for pinning a
597 * pagetable. This means that it walks a pagetable and calls the
598 * callback function on each page it finds making up the page table,
599 * at every level. It walks the entire pagetable, but it only bothers
600 * pinning pte pages which are below limit. In the normal case this
601 * will be STACK_TOP_MAX, but at boot we need to pin up to
604 * For 32-bit the important bit is that we don't pin beyond there,
605 * because then we start getting into Xen's ptes.
607 * For 64-bit, we must skip the Xen hole in the middle of the address
608 * space, just after the big x86-64 virtual hole.
610 static int __xen_pgd_walk(struct mm_struct
*mm
, pgd_t
*pgd
,
611 int (*func
)(struct mm_struct
*mm
, struct page
*,
616 unsigned hole_low
, hole_high
;
617 unsigned pgdidx_limit
, pudidx_limit
, pmdidx_limit
;
618 unsigned pgdidx
, pudidx
, pmdidx
;
620 /* The limit is the last byte to be touched */
622 BUG_ON(limit
>= FIXADDR_TOP
);
624 if (xen_feature(XENFEAT_auto_translated_physmap
))
628 * 64-bit has a great big hole in the middle of the address
629 * space, which contains the Xen mappings. On 32-bit these
630 * will end up making a zero-sized hole and so this is a no-op.
632 hole_low
= pgd_index(USER_LIMIT
);
633 hole_high
= pgd_index(PAGE_OFFSET
);
635 pgdidx_limit
= pgd_index(limit
);
637 pudidx_limit
= pud_index(limit
);
642 pmdidx_limit
= pmd_index(limit
);
647 for (pgdidx
= 0; pgdidx
<= pgdidx_limit
; pgdidx
++) {
650 if (pgdidx
>= hole_low
&& pgdidx
< hole_high
)
653 if (!pgd_val(pgd
[pgdidx
]))
656 pud
= pud_offset(&pgd
[pgdidx
], 0);
658 if (PTRS_PER_PUD
> 1) /* not folded */
659 flush
|= (*func
)(mm
, virt_to_page(pud
), PT_PUD
);
661 for (pudidx
= 0; pudidx
< PTRS_PER_PUD
; pudidx
++) {
664 if (pgdidx
== pgdidx_limit
&&
665 pudidx
> pudidx_limit
)
668 if (pud_none(pud
[pudidx
]))
671 pmd
= pmd_offset(&pud
[pudidx
], 0);
673 if (PTRS_PER_PMD
> 1) /* not folded */
674 flush
|= (*func
)(mm
, virt_to_page(pmd
), PT_PMD
);
676 for (pmdidx
= 0; pmdidx
< PTRS_PER_PMD
; pmdidx
++) {
679 if (pgdidx
== pgdidx_limit
&&
680 pudidx
== pudidx_limit
&&
681 pmdidx
> pmdidx_limit
)
684 if (pmd_none(pmd
[pmdidx
]))
687 pte
= pmd_page(pmd
[pmdidx
]);
688 flush
|= (*func
)(mm
, pte
, PT_PTE
);
694 /* Do the top level last, so that the callbacks can use it as
695 a cue to do final things like tlb flushes. */
696 flush
|= (*func
)(mm
, virt_to_page(pgd
), PT_PGD
);
701 static int xen_pgd_walk(struct mm_struct *mm,
702 int (*func)(struct mm_struct *mm, struct page *,
706 return __xen_pgd_walk(mm, mm->pgd, func, limit);
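/*
 * Illustrative sketch of how the walker is used (cf. xen_pin_page() and
 * xen_unpin_page() below): the callback sees every pagetable page once,
 * together with its pt_level, and its return value is OR'ed into a "needs
 * TLB flush" flag. Both helpers here are hypothetical; the enum pt_level
 * type is assumed from this file's mmu.h.
 */
static int __maybe_unused count_pt_page(struct mm_struct *mm, struct page *page,
					enum pt_level level)
{
	static atomic_t nr_pt_pages;

	atomic_inc(&nr_pt_pages);	/* one pagetable page visited */
	return 0;			/* no flush requested */
}

static void __maybe_unused count_pagetable_pages(struct mm_struct *mm)
{
	xen_pgd_walk(mm, count_pt_page, USER_LIMIT);
}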
709 /* If we're using split pte locks, then take the page's lock and
710 return a pointer to it. Otherwise return NULL. */
711 static spinlock_t
*xen_pte_lock(struct page
*page
, struct mm_struct
*mm
)
713 spinlock_t
*ptl
= NULL
;
715 #if USE_SPLIT_PTE_PTLOCKS
716 ptl
= ptlock_ptr(page
);
717 spin_lock_nest_lock(ptl
, &mm
->page_table_lock
);
723 static void xen_pte_unlock(void *v
)
729 static void xen_do_pin(unsigned level
, unsigned long pfn
)
734 op
.arg1
.mfn
= pfn_to_mfn(pfn
);
736 xen_extend_mmuext_op(&op
);
739 static int xen_pin_page(struct mm_struct
*mm
, struct page
*page
,
742 unsigned pgfl
= TestSetPagePinned(page
);
746 flush
= 0; /* already pinned */
747 else if (PageHighMem(page
))
748 /* kmaps need flushing if we found an unpinned
752 void *pt
= lowmem_page_address(page
);
753 unsigned long pfn
= page_to_pfn(page
);
754 struct multicall_space mcs
= __xen_mc_entry(0);
760 * We need to hold the pagetable lock between the time
761 * we make the pagetable RO and when we actually pin
762 * it. If we don't, then other users may come in and
763 * attempt to update the pagetable by writing it,
764 * which will fail because the memory is RO but not
765 * pinned, so Xen won't do the trap'n'emulate.
767 * If we're using split pte locks, we can't hold the
768 * entire pagetable's worth of locks during the
769 * traverse, because we may wrap the preempt count (8
770 * bits). The solution is to mark RO and pin each PTE
771 * page while holding the lock. This means the number
772 * of locks we end up holding is never more than a
773 * batch size (~32 entries, at present).
775 * If we're not using split pte locks, we needn't pin
776 * the PTE pages independently, because we're
777 * protected by the overall pagetable lock.
781 ptl
= xen_pte_lock(page
, mm
);
783 MULTI_update_va_mapping(mcs
.mc
, (unsigned long)pt
,
784 pfn_pte(pfn
, PAGE_KERNEL_RO
),
785 level
== PT_PGD
? UVMF_TLB_FLUSH
: 0);
788 xen_do_pin(MMUEXT_PIN_L1_TABLE
, pfn
);
790 /* Queue a deferred unlock for when this batch
792 xen_mc_callback(xen_pte_unlock
, ptl
);
799 /* This is called just after a mm has been created, but it has not
800 been used yet. We need to make sure that its pagetable is all
801 read-only, and can be pinned. */
802 static void __xen_pgd_pin(struct mm_struct
*mm
, pgd_t
*pgd
)
804 trace_xen_mmu_pgd_pin(mm
, pgd
);
808 if (__xen_pgd_walk(mm
, pgd
, xen_pin_page
, USER_LIMIT
)) {
809 /* re-enable interrupts for flushing */
819 pgd_t
*user_pgd
= xen_get_user_pgd(pgd
);
821 xen_do_pin(MMUEXT_PIN_L4_TABLE
, PFN_DOWN(__pa(pgd
)));
824 xen_pin_page(mm
, virt_to_page(user_pgd
), PT_PGD
);
825 xen_do_pin(MMUEXT_PIN_L4_TABLE
,
826 PFN_DOWN(__pa(user_pgd
)));
829 #else /* CONFIG_X86_32 */
830 #ifdef CONFIG_X86_PAE
831 /* Need to make sure unshared kernel PMD is pinnable */
832 xen_pin_page(mm
, pgd_page(pgd
[pgd_index(TASK_SIZE
)]),
835 xen_do_pin(MMUEXT_PIN_L3_TABLE
, PFN_DOWN(__pa(pgd
)));
836 #endif /* CONFIG_X86_64 */
840 static void xen_pgd_pin(struct mm_struct
*mm
)
842 __xen_pgd_pin(mm
, mm
->pgd
);
846 * On save, we need to pin all pagetables to make sure they get their
847 * mfns turned into pfns. Search the list for any unpinned pgds and pin
848 * them (unpinned pgds are not currently in use, probably because the
849 * process is under construction or destruction).
851 * Expected to be called in stop_machine() ("equivalent to taking
852 * every spinlock in the system"), so the locking doesn't really
853 * matter all that much.
855 void xen_mm_pin_all(void)
859 spin_lock(&pgd_lock
);
861 list_for_each_entry(page
, &pgd_list
, lru
) {
862 if (!PagePinned(page
)) {
863 __xen_pgd_pin(&init_mm
, (pgd_t
*)page_address(page
));
864 SetPageSavePinned(page
);
868 spin_unlock(&pgd_lock
);
872 * The init_mm pagetable is really pinned as soon as it's created, but
873 * that's before we have page structures to store the bits. So do all
874 * the book-keeping now.
876 static int __init
xen_mark_pinned(struct mm_struct
*mm
, struct page
*page
,
883 static void __init
xen_mark_init_mm_pinned(void)
885 xen_pgd_walk(&init_mm
, xen_mark_pinned
, FIXADDR_TOP
);
888 static int xen_unpin_page(struct mm_struct
*mm
, struct page
*page
,
891 unsigned pgfl
= TestClearPagePinned(page
);
893 if (pgfl
&& !PageHighMem(page
)) {
894 void *pt
= lowmem_page_address(page
);
895 unsigned long pfn
= page_to_pfn(page
);
896 spinlock_t
*ptl
= NULL
;
897 struct multicall_space mcs
;
900 * Do the converse to pin_page. If we're using split
901 * pte locks, we must be holding the lock while
902 * the pte page is unpinned but still RO to prevent
903 * concurrent updates from seeing it in this
904 * partially-pinned state.
906 if (level
== PT_PTE
) {
907 ptl
= xen_pte_lock(page
, mm
);
910 xen_do_pin(MMUEXT_UNPIN_TABLE
, pfn
);
913 mcs
= __xen_mc_entry(0);
915 MULTI_update_va_mapping(mcs
.mc
, (unsigned long)pt
,
916 pfn_pte(pfn
, PAGE_KERNEL
),
917 level
== PT_PGD
? UVMF_TLB_FLUSH
: 0);
920 /* unlock when batch completed */
921 xen_mc_callback(xen_pte_unlock
, ptl
);
925 return 0; /* never need to flush on unpin */
928 /* Release a pagetable's pages back as normal RW */
929 static void __xen_pgd_unpin(struct mm_struct
*mm
, pgd_t
*pgd
)
931 trace_xen_mmu_pgd_unpin(mm
, pgd
);
935 xen_do_pin(MMUEXT_UNPIN_TABLE
, PFN_DOWN(__pa(pgd
)));
939 pgd_t
*user_pgd
= xen_get_user_pgd(pgd
);
942 xen_do_pin(MMUEXT_UNPIN_TABLE
,
943 PFN_DOWN(__pa(user_pgd
)));
944 xen_unpin_page(mm
, virt_to_page(user_pgd
), PT_PGD
);
949 #ifdef CONFIG_X86_PAE
950 /* Need to make sure unshared kernel PMD is unpinned */
951 xen_unpin_page(mm
, pgd_page(pgd
[pgd_index(TASK_SIZE
)]),
955 __xen_pgd_walk(mm
, pgd
, xen_unpin_page
, USER_LIMIT
);
960 static void xen_pgd_unpin(struct mm_struct
*mm
)
962 __xen_pgd_unpin(mm
, mm
->pgd
);
966 * On resume, undo any pinning done at save, so that the rest of the
967 * kernel doesn't see any unexpected pinned pagetables.
969 void xen_mm_unpin_all(void)
973 spin_lock(&pgd_lock
);
975 list_for_each_entry(page
, &pgd_list
, lru
) {
976 if (PageSavePinned(page
)) {
977 BUG_ON(!PagePinned(page
));
978 __xen_pgd_unpin(&init_mm
, (pgd_t
*)page_address(page
));
979 ClearPageSavePinned(page
);
983 spin_unlock(&pgd_lock
);
986 static void xen_activate_mm(struct mm_struct
*prev
, struct mm_struct
*next
)
988 spin_lock(&next
->page_table_lock
);
990 spin_unlock(&next
->page_table_lock
);
993 static void xen_dup_mmap(struct mm_struct
*oldmm
, struct mm_struct
*mm
)
995 spin_lock(&mm
->page_table_lock
);
997 spin_unlock(&mm
->page_table_lock
);
1002 /* Another cpu may still have its %cr3 pointing at the pagetable, so
1003 we need to repoint it somewhere else before we can unpin it. */
1004 static void drop_other_mm_ref(void *info
)
1006 struct mm_struct
*mm
= info
;
1007 struct mm_struct
*active_mm
;
1009 active_mm
= this_cpu_read(cpu_tlbstate
.active_mm
);
1011 if (active_mm
== mm
&& this_cpu_read(cpu_tlbstate
.state
) != TLBSTATE_OK
)
1012 leave_mm(smp_processor_id());
1014 /* If this cpu still has a stale cr3 reference, then make sure
1015 it has been flushed. */
1016 if (this_cpu_read(xen_current_cr3
) == __pa(mm
->pgd
))
1017 load_cr3(swapper_pg_dir
);
1020 static void xen_drop_mm_ref(struct mm_struct
*mm
)
1025 if (current
->active_mm
== mm
) {
1026 if (current
->mm
== mm
)
1027 load_cr3(swapper_pg_dir
);
1029 leave_mm(smp_processor_id());
1032 /* Get the "official" set of cpus referring to our pagetable. */
1033 if (!alloc_cpumask_var(&mask
, GFP_ATOMIC
)) {
1034 for_each_online_cpu(cpu
) {
1035 if (!cpumask_test_cpu(cpu
, mm_cpumask(mm
))
1036 && per_cpu(xen_current_cr3
, cpu
) != __pa(mm
->pgd
))
1038 smp_call_function_single(cpu
, drop_other_mm_ref
, mm
, 1);
1042 cpumask_copy(mask
, mm_cpumask(mm
));
1044 /* It's possible that a vcpu may have a stale reference to our
1045 cr3, because it's in lazy mode, and it hasn't yet flushed
1046 its set of pending hypercalls. In this case, we can
1047 look at its actual current cr3 value, and force it to flush
1049 for_each_online_cpu(cpu
) {
1050 if (per_cpu(xen_current_cr3
, cpu
) == __pa(mm
->pgd
))
1051 cpumask_set_cpu(cpu
, mask
);
1054 if (!cpumask_empty(mask
))
1055 smp_call_function_many(mask
, drop_other_mm_ref
, mm
, 1);
1056 free_cpumask_var(mask
);
1059 static void xen_drop_mm_ref(struct mm_struct
*mm
)
1061 if (current
->active_mm
== mm
)
1062 load_cr3(swapper_pg_dir
);
1067 * While a process runs, Xen pins its pagetables, which means that the
1068 * hypervisor forces it to be read-only, and it controls all updates
1069 * to it. This means that all pagetable updates have to go via the
1070 * hypervisor, which is moderately expensive.
1072 * Since we're pulling the pagetable down, we switch to use init_mm,
1073 * unpin old process pagetable and mark it all read-write, which
1074 * allows further operations on it to be simple memory accesses.
1076 * The only subtle point is that another CPU may be still using the
1077 * pagetable because of lazy tlb flushing. This means we need to
1078 * switch all CPUs off this pagetable before we can unpin it.
1080 static void xen_exit_mmap(struct mm_struct
*mm
)
1082 get_cpu(); /* make sure we don't move around */
1083 xen_drop_mm_ref(mm
);
1086 spin_lock(&mm
->page_table_lock
);
1088 /* pgd may not be pinned in the error exit path of execve */
1089 if (xen_page_pinned(mm
->pgd
))
1092 spin_unlock(&mm
->page_table_lock
);
1095 static void xen_post_allocator_init(void);
1097 static void __init
pin_pagetable_pfn(unsigned cmd
, unsigned long pfn
)
1099 struct mmuext_op op
;
1102 op
.arg1
.mfn
= pfn_to_mfn(pfn
);
1103 if (HYPERVISOR_mmuext_op(&op
, 1, NULL
, DOMID_SELF
))
1107 #ifdef CONFIG_X86_64
1108 static void __init
xen_cleanhighmap(unsigned long vaddr
,
1109 unsigned long vaddr_end
)
1111 unsigned long kernel_end
= roundup((unsigned long)_brk_end
, PMD_SIZE
) - 1;
1112 pmd_t
*pmd
= level2_kernel_pgt
+ pmd_index(vaddr
);
1114 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1115 * We include the PMD passed in on _both_ boundaries. */
1116 for (; vaddr
<= vaddr_end
&& (pmd
< (level2_kernel_pgt
+ PAGE_SIZE
));
1117 pmd
++, vaddr
+= PMD_SIZE
) {
1120 if (vaddr
< (unsigned long) _text
|| vaddr
> kernel_end
)
1121 set_pmd(pmd
, __pmd(0));
1123 /* In case we did something silly, we should crash in this function
1124 * instead of somewhere later and be confusing. */
1129 * Make a page range writeable and free it.
1131 static void __init
xen_free_ro_pages(unsigned long paddr
, unsigned long size
)
1133 void *vaddr
= __va(paddr
);
1134 void *vaddr_end
= vaddr
+ size
;
1136 for (; vaddr
< vaddr_end
; vaddr
+= PAGE_SIZE
)
1137 make_lowmem_page_readwrite(vaddr
);
1139 memblock_free(paddr
, size
);
1142 static void __init
xen_cleanmfnmap_free_pgtbl(void *pgtbl
, bool unpin
)
1144 unsigned long pa
= __pa(pgtbl
) & PHYSICAL_PAGE_MASK
;
1147 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE
, PFN_DOWN(pa
));
1148 ClearPagePinned(virt_to_page(__va(pa
)));
1149 xen_free_ro_pages(pa
, PAGE_SIZE
);
1153 * Since it is well isolated we can (and since it is perhaps large we should)
1154 * also free the page tables mapping the initial P->M table.
1156 static void __init
xen_cleanmfnmap(unsigned long vaddr
)
1158 unsigned long va
= vaddr
& PMD_MASK
;
1160 pgd_t
*pgd
= pgd_offset_k(va
);
1161 pud_t
*pud_page
= pud_offset(pgd
, 0);
1168 unpin
= (vaddr
== 2 * PGDIR_SIZE
);
1169 set_pgd(pgd
, __pgd(0));
1171 pud
= pud_page
+ pud_index(va
);
1172 if (pud_none(*pud
)) {
1174 } else if (pud_large(*pud
)) {
1175 pa
= pud_val(*pud
) & PHYSICAL_PAGE_MASK
;
1176 xen_free_ro_pages(pa
, PUD_SIZE
);
1179 pmd
= pmd_offset(pud
, va
);
1180 if (pmd_large(*pmd
)) {
1181 pa
= pmd_val(*pmd
) & PHYSICAL_PAGE_MASK
;
1182 xen_free_ro_pages(pa
, PMD_SIZE
);
1183 } else if (!pmd_none(*pmd
)) {
1184 pte
= pte_offset_kernel(pmd
, va
);
1185 set_pmd(pmd
, __pmd(0));
1186 for (i
= 0; i
< PTRS_PER_PTE
; ++i
) {
1187 if (pte_none(pte
[i
]))
1189 pa
= pte_pfn(pte
[i
]) << PAGE_SHIFT
;
1190 xen_free_ro_pages(pa
, PAGE_SIZE
);
1192 xen_cleanmfnmap_free_pgtbl(pte
, unpin
);
1197 set_pud(pud
, __pud(0));
1198 xen_cleanmfnmap_free_pgtbl(pmd
, unpin
);
1201 } while (pud_index(va
) || pmd_index(va
));
1202 xen_cleanmfnmap_free_pgtbl(pud_page
, unpin
);
1205 static void __init
xen_pagetable_p2m_free(void)
1210 size
= PAGE_ALIGN(xen_start_info
->nr_pages
* sizeof(unsigned long));
1212 /* No memory or already called. */
1213 if ((unsigned long)xen_p2m_addr
== xen_start_info
->mfn_list
)
1216 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1217 memset((void *)xen_start_info
->mfn_list
, 0xff, size
);
1219 addr
= xen_start_info
->mfn_list
;
1221 * We could be in __ka space.
1222 * We roundup to the PMD, which means that if anybody at this stage is
1223 * using the __ka address of xen_start_info or
1224 * xen_start_info->shared_info they are going to crash. Fortunately
1225 * we have already revectored in xen_setup_kernel_pagetable and in
1226 * xen_setup_shared_info.
1228 size
= roundup(size
, PMD_SIZE
);
1230 if (addr
>= __START_KERNEL_map
) {
1231 xen_cleanhighmap(addr
, addr
+ size
);
1232 size
= PAGE_ALIGN(xen_start_info
->nr_pages
*
1233 sizeof(unsigned long));
1234 memblock_free(__pa(addr
), size
);
1236 xen_cleanmfnmap(addr
);
1240 static void __init
xen_pagetable_cleanhighmap(void)
1245 /* At this stage, cleanup_highmap has already cleaned __ka space
1246 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1247 * the ramdisk). We continue on, erasing PMD entries that point to page
1248 * tables - do note that they are accessible at this stage via __va.
1249 * For good measure we also round up to the PMD - which means that if
1250 * anybody is using a __ka address to the initial boot-stack - and tries
1251 * to use it - they are going to crash. The xen_start_info has been
1252 * taken care of already in xen_setup_kernel_pagetable. */
1253 addr
= xen_start_info
->pt_base
;
1254 size
= roundup(xen_start_info
->nr_pt_frames
* PAGE_SIZE
, PMD_SIZE
);
1256 xen_cleanhighmap(addr
, addr
+ size
);
1257 xen_start_info
->pt_base
= (unsigned long)__va(__pa(xen_start_info
->pt_base
));
1259 /* This is not strictly necessary, but let's do it anyway. The
1260 * MODULES_VADDR -> MODULES_END range should be clear of
1261 * anything at this stage. */
1262 xen_cleanhighmap(MODULES_VADDR
, roundup(MODULES_VADDR
, PUD_SIZE
) - 1);
1267 static void __init
xen_pagetable_p2m_setup(void)
1269 if (xen_feature(XENFEAT_auto_translated_physmap
))
1272 xen_vmalloc_p2m_tree();
1274 #ifdef CONFIG_X86_64
1275 xen_pagetable_p2m_free();
1277 xen_pagetable_cleanhighmap();
1279 /* And revector! Bye bye old array */
1280 xen_start_info
->mfn_list
= (unsigned long)xen_p2m_addr
;
1283 static void __init
xen_pagetable_init(void)
1286 xen_post_allocator_init();
1288 xen_pagetable_p2m_setup();
1290 /* Allocate and initialize top and mid mfn levels for p2m structure */
1291 xen_build_mfn_list_list();
1293 /* Remap memory freed due to conflicts with E820 map */
1294 if (!xen_feature(XENFEAT_auto_translated_physmap
))
1297 xen_setup_shared_info();
1299 static void xen_write_cr2(unsigned long cr2
)
1301 this_cpu_read(xen_vcpu
)->arch
.cr2
= cr2
;
1304 static unsigned long xen_read_cr2(void)
1306 return this_cpu_read(xen_vcpu
)->arch
.cr2
;
1309 unsigned long xen_read_cr2_direct(void)
1311 return this_cpu_read(xen_vcpu_info
.arch
.cr2
);
1314 void xen_flush_tlb_all(void)
1316 struct mmuext_op
*op
;
1317 struct multicall_space mcs
;
1319 trace_xen_mmu_flush_tlb_all(0);
1323 mcs
= xen_mc_entry(sizeof(*op
));
1326 op
->cmd
= MMUEXT_TLB_FLUSH_ALL
;
1327 MULTI_mmuext_op(mcs
.mc
, op
, 1, NULL
, DOMID_SELF
);
1329 xen_mc_issue(PARAVIRT_LAZY_MMU
);
1333 static void xen_flush_tlb(void)
1335 struct mmuext_op
*op
;
1336 struct multicall_space mcs
;
1338 trace_xen_mmu_flush_tlb(0);
1342 mcs
= xen_mc_entry(sizeof(*op
));
1345 op
->cmd
= MMUEXT_TLB_FLUSH_LOCAL
;
1346 MULTI_mmuext_op(mcs
.mc
, op
, 1, NULL
, DOMID_SELF
);
1348 xen_mc_issue(PARAVIRT_LAZY_MMU
);
1353 static void xen_flush_tlb_single(unsigned long addr
)
1355 struct mmuext_op
*op
;
1356 struct multicall_space mcs
;
1358 trace_xen_mmu_flush_tlb_single(addr
);
1362 mcs
= xen_mc_entry(sizeof(*op
));
1364 op
->cmd
= MMUEXT_INVLPG_LOCAL
;
1365 op
->arg1
.linear_addr
= addr
& PAGE_MASK
;
1366 MULTI_mmuext_op(mcs
.mc
, op
, 1, NULL
, DOMID_SELF
);
1368 xen_mc_issue(PARAVIRT_LAZY_MMU
);
1373 static void xen_flush_tlb_others(const struct cpumask
*cpus
,
1374 struct mm_struct
*mm
, unsigned long start
,
1378 struct mmuext_op op
;
1380 DECLARE_BITMAP(mask
, num_processors
);
1382 DECLARE_BITMAP(mask
, NR_CPUS
);
1385 struct multicall_space mcs
;
1387 trace_xen_mmu_flush_tlb_others(cpus
, mm
, start
, end
);
1389 if (cpumask_empty(cpus
))
1390 return; /* nothing to do */
1392 mcs
= xen_mc_entry(sizeof(*args
));
1394 args
->op
.arg2
.vcpumask
= to_cpumask(args
->mask
);
1396 /* Remove us, and any offline CPUS. */
1397 cpumask_and(to_cpumask(args
->mask
), cpus
, cpu_online_mask
);
1398 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args
->mask
));
1400 args
->op
.cmd
= MMUEXT_TLB_FLUSH_MULTI
;
1401 if (end
!= TLB_FLUSH_ALL
&& (end
- start
) <= PAGE_SIZE
) {
1402 args
->op
.cmd
= MMUEXT_INVLPG_MULTI
;
1403 args
->op
.arg1
.linear_addr
= start
;
1406 MULTI_mmuext_op(mcs
.mc
, &args
->op
, 1, NULL
, DOMID_SELF
);
1408 xen_mc_issue(PARAVIRT_LAZY_MMU
);
1411 static unsigned long xen_read_cr3(void)
1413 return this_cpu_read(xen_cr3
);
1416 static void set_current_cr3(void *v
)
1418 this_cpu_write(xen_current_cr3
, (unsigned long)v
);
1421 static void __xen_write_cr3(bool kernel
, unsigned long cr3
)
1423 struct mmuext_op op
;
1426 trace_xen_mmu_write_cr3(kernel
, cr3
);
1429 mfn
= pfn_to_mfn(PFN_DOWN(cr3
));
1433 WARN_ON(mfn
== 0 && kernel
);
1435 op
.cmd
= kernel
? MMUEXT_NEW_BASEPTR
: MMUEXT_NEW_USER_BASEPTR
;
1438 xen_extend_mmuext_op(&op
);
1441 this_cpu_write(xen_cr3
, cr3
);
1443 /* Update xen_current_cr3 once the batch has actually
1445 xen_mc_callback(set_current_cr3
, (void *)cr3
);
1448 static void xen_write_cr3(unsigned long cr3
)
1450 BUG_ON(preemptible());
1452 xen_mc_batch(); /* disables interrupts */
1454 /* Update while interrupts are disabled, so its atomic with
1456 this_cpu_write(xen_cr3
, cr3
);
1458 __xen_write_cr3(true, cr3
);
1460 #ifdef CONFIG_X86_64
1462 pgd_t
*user_pgd
= xen_get_user_pgd(__va(cr3
));
1464 __xen_write_cr3(false, __pa(user_pgd
));
1466 __xen_write_cr3(false, 0);
1470 xen_mc_issue(PARAVIRT_LAZY_CPU
); /* interrupts restored */
1473 #ifdef CONFIG_X86_64
1475 * At the start of the day - when Xen launches a guest, it has already
1476 * built pagetables for the guest. We diligently look over them
1477 * in xen_setup_kernel_pagetable and graft as appropriate them in the
1478 * init_level4_pgt and its friends. Then when we are happy we load
1479 * the new init_level4_pgt - and continue on.
1481 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1482 * up the rest of the pagetables. When it has completed it loads the cr3.
1483 * N.B. that baremetal would start at 'start_kernel' (and the early
1484 * #PF handler would create bootstrap pagetables) - so we are running
1485 * with the same assumptions as what to do when write_cr3 is executed
1488 * Since there are no user-page tables at all, we have two variants
1489 * of xen_write_cr3 - the early bootup (this one), and the late one
1490 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1491 * the Linux kernel and user-space are both in ring 3 while the
1492 * hypervisor is in ring 0.
1494 static void __init
xen_write_cr3_init(unsigned long cr3
)
1496 BUG_ON(preemptible());
1498 xen_mc_batch(); /* disables interrupts */
1500 /* Update while interrupts are disabled, so its atomic with
1502 this_cpu_write(xen_cr3
, cr3
);
1504 __xen_write_cr3(true, cr3
);
1506 xen_mc_issue(PARAVIRT_LAZY_CPU
); /* interrupts restored */
1510 static int xen_pgd_alloc(struct mm_struct
*mm
)
1512 pgd_t
*pgd
= mm
->pgd
;
1515 BUG_ON(PagePinned(virt_to_page(pgd
)));
1517 #ifdef CONFIG_X86_64
1519 struct page
*page
= virt_to_page(pgd
);
1522 BUG_ON(page
->private != 0);
1526 user_pgd
= (pgd_t
*)__get_free_page(GFP_KERNEL
| __GFP_ZERO
);
1527 page
->private = (unsigned long)user_pgd
;
1529 if (user_pgd
!= NULL
) {
1530 #ifdef CONFIG_X86_VSYSCALL_EMULATION
1531 user_pgd
[pgd_index(VSYSCALL_ADDR
)] =
1532 __pgd(__pa(level3_user_vsyscall
) | _PAGE_TABLE
);
1537 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd
))));
1544 static void xen_pgd_free(struct mm_struct
*mm
, pgd_t
*pgd
)
1546 #ifdef CONFIG_X86_64
1547 pgd_t
*user_pgd
= xen_get_user_pgd(pgd
);
1550 free_page((unsigned long)user_pgd
);
1554 #ifdef CONFIG_X86_32
1555 static pte_t __init
mask_rw_pte(pte_t
*ptep
, pte_t pte
)
1557 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1558 if (pte_val_ma(*ptep
) & _PAGE_PRESENT
)
1559 pte
= __pte_ma(((pte_val_ma(*ptep
) & _PAGE_RW
) | ~_PAGE_RW
) &
1564 #else /* CONFIG_X86_64 */
1565 static pte_t __init
mask_rw_pte(pte_t
*ptep
, pte_t pte
)
1569 if (xen_feature(XENFEAT_writable_page_tables
) ||
1570 xen_feature(XENFEAT_auto_translated_physmap
) ||
1571 xen_start_info
->mfn_list
>= __START_KERNEL_map
)
1575 * Pages belonging to the initial p2m list mapped outside the default
1576 * address range must be mapped read-only. This region contains the
1577 * page tables for mapping the p2m list, too, and page tables MUST be
1581 if (pfn
>= xen_start_info
->first_p2m_pfn
&&
1582 pfn
< xen_start_info
->first_p2m_pfn
+ xen_start_info
->nr_p2m_frames
)
1583 pte
= __pte_ma(pte_val_ma(pte
) & ~_PAGE_RW
);
1587 #endif /* CONFIG_X86_64 */
1590 * Init-time set_pte while constructing initial pagetables, which
1591 * doesn't allow RO page table pages to be remapped RW.
1593 * If there is no MFN for this PFN then this page is initially
1594 * ballooned out so clear the PTE (as in decrease_reservation() in
1595 * drivers/xen/balloon.c).
1597 * Many of these PTE updates are done on unpinned and writable pages
1598 * and doing a hypercall for these is unnecessary and expensive. At
1599 * this point it is not possible to tell if a page is pinned or not,
1600 * so always write the PTE directly and rely on Xen trapping and
1601 * emulating any updates as necessary.
1603 static void __init
xen_set_pte_init(pte_t
*ptep
, pte_t pte
)
1605 if (pte_mfn(pte
) != INVALID_P2M_ENTRY
)
1606 pte
= mask_rw_pte(ptep
, pte
);
1610 native_set_pte(ptep
, pte
);
1613 /* Early in boot, while setting up the initial pagetable, assume
1614 everything is pinned. */
1615 static void __init
xen_alloc_pte_init(struct mm_struct
*mm
, unsigned long pfn
)
1617 #ifdef CONFIG_FLATMEM
1618 BUG_ON(mem_map
); /* should only be used early */
1620 make_lowmem_page_readonly(__va(PFN_PHYS(pfn
)));
1621 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE
, pfn
);
1624 /* Used for pmd and pud */
1625 static void __init
xen_alloc_pmd_init(struct mm_struct
*mm
, unsigned long pfn
)
1627 #ifdef CONFIG_FLATMEM
1628 BUG_ON(mem_map
); /* should only be used early */
1630 make_lowmem_page_readonly(__va(PFN_PHYS(pfn
)));
1633 /* Early release_pte assumes that all pts are pinned, since there's
1634 only init_mm and anything attached to that is pinned. */
1635 static void __init
xen_release_pte_init(unsigned long pfn
)
1637 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE
, pfn
);
1638 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn
)));
1641 static void __init
xen_release_pmd_init(unsigned long pfn
)
1643 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn
)));
1646 static inline void __pin_pagetable_pfn(unsigned cmd
, unsigned long pfn
)
1648 struct multicall_space mcs
;
1649 struct mmuext_op
*op
;
1651 mcs
= __xen_mc_entry(sizeof(*op
));
1654 op
->arg1
.mfn
= pfn_to_mfn(pfn
);
1656 MULTI_mmuext_op(mcs
.mc
, mcs
.args
, 1, NULL
, DOMID_SELF
);
1659 static inline void __set_pfn_prot(unsigned long pfn
, pgprot_t prot
)
1661 struct multicall_space mcs
;
1662 unsigned long addr
= (unsigned long)__va(pfn
<< PAGE_SHIFT
);
1664 mcs
= __xen_mc_entry(0);
1665 MULTI_update_va_mapping(mcs
.mc
, (unsigned long)addr
,
1666 pfn_pte(pfn
, prot
), 0);
1669 /* This needs to make sure the new pte page is pinned iff it's being
1670 attached to a pinned pagetable. */
1671 static inline void xen_alloc_ptpage(struct mm_struct
*mm
, unsigned long pfn
,
1674 bool pinned
= PagePinned(virt_to_page(mm
->pgd
));
1676 trace_xen_mmu_alloc_ptpage(mm
, pfn
, level
, pinned
);
1679 struct page
*page
= pfn_to_page(pfn
);
1681 SetPagePinned(page
);
1683 if (!PageHighMem(page
)) {
1686 __set_pfn_prot(pfn
, PAGE_KERNEL_RO
);
1688 if (level
== PT_PTE
&& USE_SPLIT_PTE_PTLOCKS
)
1689 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE
, pfn
);
1691 xen_mc_issue(PARAVIRT_LAZY_MMU
);
1693 /* make sure there are no stray mappings of
1695 kmap_flush_unused();
1700 static void xen_alloc_pte(struct mm_struct
*mm
, unsigned long pfn
)
1702 xen_alloc_ptpage(mm
, pfn
, PT_PTE
);
1705 static void xen_alloc_pmd(struct mm_struct
*mm
, unsigned long pfn
)
1707 xen_alloc_ptpage(mm
, pfn
, PT_PMD
);
1710 /* This should never happen until we're OK to use struct page */
1711 static inline void xen_release_ptpage(unsigned long pfn
, unsigned level
)
1713 struct page
*page
= pfn_to_page(pfn
);
1714 bool pinned
= PagePinned(page
);
1716 trace_xen_mmu_release_ptpage(pfn
, level
, pinned
);
1719 if (!PageHighMem(page
)) {
1722 if (level
== PT_PTE
&& USE_SPLIT_PTE_PTLOCKS
)
1723 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE
, pfn
);
1725 __set_pfn_prot(pfn
, PAGE_KERNEL
);
1727 xen_mc_issue(PARAVIRT_LAZY_MMU
);
1729 ClearPagePinned(page
);
1733 static void xen_release_pte(unsigned long pfn
)
1735 xen_release_ptpage(pfn
, PT_PTE
);
1738 static void xen_release_pmd(unsigned long pfn
)
1740 xen_release_ptpage(pfn
, PT_PMD
);
1743 #if CONFIG_PGTABLE_LEVELS == 4
1744 static void xen_alloc_pud(struct mm_struct
*mm
, unsigned long pfn
)
1746 xen_alloc_ptpage(mm
, pfn
, PT_PUD
);
1749 static void xen_release_pud(unsigned long pfn
)
1751 xen_release_ptpage(pfn
, PT_PUD
);
1755 void __init
xen_reserve_top(void)
1757 #ifdef CONFIG_X86_32
1758 unsigned long top
= HYPERVISOR_VIRT_START
;
1759 struct xen_platform_parameters pp
;
1761 if (HYPERVISOR_xen_version(XENVER_platform_parameters
, &pp
) == 0)
1762 top
= pp
.virt_start
;
1764 reserve_top_address(-top
);
1765 #endif /* CONFIG_X86_32 */
1769 * Like __va(), but returns address in the kernel mapping (which is
1770 * all we have until the physical memory mapping has been set up).
1772 static void * __init
__ka(phys_addr_t paddr
)
1774 #ifdef CONFIG_X86_64
1775 return (void *)(paddr
+ __START_KERNEL_map
);
1781 /* Convert a machine address to physical address */
1782 static unsigned long __init m2p(phys_addr_t maddr)
1786 maddr &= PTE_PFN_MASK;
1787 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1792 /* Convert a machine address to kernel virtual */
1793 static void * __init m2v(phys_addr_t maddr)
1795 return __ka(m2p(maddr));
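/*
 * Illustrative usage (hypothetical helper): the Xen-built boot pagetables
 * hold machine addresses, so following an entry means converting it back
 * before dereferencing - the same m2v() pattern xen_setup_kernel_pagetable()
 * uses below.
 */
static pud_t * __init __maybe_unused follow_boot_pgd(pgd_t *pgd, unsigned long va)
{
	/* the pgd entry is mfn-based; m2v() yields a dereferenceable
	 * kernel-mapping pointer to the pud page it refers to */
	return (pud_t *)m2v(pgd[pgd_index(va)].pgd);
}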
1798 /* Set the page permissions on identity-mapped pages */
1799 static void __init
set_page_prot_flags(void *addr
, pgprot_t prot
,
1800 unsigned long flags
)
1802 unsigned long pfn
= __pa(addr
) >> PAGE_SHIFT
;
1803 pte_t pte
= pfn_pte(pfn
, prot
);
1805 /* For PVH no need to set R/O or R/W to pin them or unpin them. */
1806 if (xen_feature(XENFEAT_auto_translated_physmap
))
1809 if (HYPERVISOR_update_va_mapping((unsigned long)addr
, pte
, flags
))
1812 static void __init
set_page_prot(void *addr
, pgprot_t prot
)
1814 return set_page_prot_flags(addr
, prot
, UVMF_NONE
);
1816 #ifdef CONFIG_X86_32
1817 static void __init
xen_map_identity_early(pmd_t
*pmd
, unsigned long max_pfn
)
1819 unsigned pmdidx
, pteidx
;
1823 level1_ident_pgt
= extend_brk(sizeof(pte_t
) * LEVEL1_IDENT_ENTRIES
,
1828 for (pmdidx
= 0; pmdidx
< PTRS_PER_PMD
&& pfn
< max_pfn
; pmdidx
++) {
1831 /* Reuse or allocate a page of ptes */
1832 if (pmd_present(pmd
[pmdidx
]))
1833 pte_page
= m2v(pmd
[pmdidx
].pmd
);
1835 /* Check for free pte pages */
1836 if (ident_pte
== LEVEL1_IDENT_ENTRIES
)
1839 pte_page
= &level1_ident_pgt
[ident_pte
];
1840 ident_pte
+= PTRS_PER_PTE
;
1842 pmd
[pmdidx
] = __pmd(__pa(pte_page
) | _PAGE_TABLE
);
1845 /* Install mappings */
1846 for (pteidx
= 0; pteidx
< PTRS_PER_PTE
; pteidx
++, pfn
++) {
1849 if (pfn
> max_pfn_mapped
)
1850 max_pfn_mapped
= pfn
;
1852 if (!pte_none(pte_page
[pteidx
]))
1855 pte
= pfn_pte(pfn
, PAGE_KERNEL_EXEC
);
1856 pte_page
[pteidx
] = pte
;
1860 for (pteidx
= 0; pteidx
< ident_pte
; pteidx
+= PTRS_PER_PTE
)
1861 set_page_prot(&level1_ident_pgt
[pteidx
], PAGE_KERNEL_RO
);
1863 set_page_prot(pmd
, PAGE_KERNEL_RO
);
1866 void __init
xen_setup_machphys_mapping(void)
1868 struct xen_machphys_mapping mapping
;
1870 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping
, &mapping
) == 0) {
1871 machine_to_phys_mapping
= (unsigned long *)mapping
.v_start
;
1872 machine_to_phys_nr
= mapping
.max_mfn
+ 1;
1874 machine_to_phys_nr
= MACH2PHYS_NR_ENTRIES
;
1876 #ifdef CONFIG_X86_32
1877 WARN_ON((machine_to_phys_mapping
+ (machine_to_phys_nr
- 1))
1878 < machine_to_phys_mapping
);
1882 #ifdef CONFIG_X86_64
1883 static void __init
convert_pfn_mfn(void *v
)
1888 /* All levels are converted the same way, so just treat them
1890 for (i
= 0; i
< PTRS_PER_PTE
; i
++)
1891 pte
[i
] = xen_make_pte(pte
[i
].pte
);
1893 static void __init
check_pt_base(unsigned long *pt_base
, unsigned long *pt_end
,
1896 if (*pt_base
== PFN_DOWN(__pa(addr
))) {
1897 set_page_prot_flags((void *)addr
, PAGE_KERNEL
, UVMF_INVLPG
);
1898 clear_page((void *)addr
);
1901 if (*pt_end
== PFN_DOWN(__pa(addr
))) {
1902 set_page_prot_flags((void *)addr
, PAGE_KERNEL
, UVMF_INVLPG
);
1903 clear_page((void *)addr
);
1908 * Set up the initial kernel pagetable.
1910 * We can construct this by grafting the Xen provided pagetable into
1911 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1912 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1913 * kernel has a physical mapping to start with - but that's enough to
1914 * get __va working. We need to fill in the rest of the physical
1915 * mapping once some sort of allocator has been set up. NOTE: for
1916 * PVH, the page tables are native.
1918 void __init
xen_setup_kernel_pagetable(pgd_t
*pgd
, unsigned long max_pfn
)
1922 unsigned long addr
[3];
1923 unsigned long pt_base
, pt_end
;
1926 /* max_pfn_mapped is the last pfn mapped in the initial memory
1927 * mappings. Considering that on Xen after the kernel mappings we
1928 * have the mappings of some pages that don't exist in pfn space, we
1929 * set max_pfn_mapped to the last real pfn mapped. */
1930 if (xen_start_info
->mfn_list
< __START_KERNEL_map
)
1931 max_pfn_mapped
= xen_start_info
->first_p2m_pfn
;
1933 max_pfn_mapped
= PFN_DOWN(__pa(xen_start_info
->mfn_list
));
1935 pt_base
= PFN_DOWN(__pa(xen_start_info
->pt_base
));
1936 pt_end
= pt_base
+ xen_start_info
->nr_pt_frames
;
1938 /* Zap identity mapping */
1939 init_level4_pgt
[0] = __pgd(0);
1941 if (!xen_feature(XENFEAT_auto_translated_physmap
)) {
1942 /* Pre-constructed entries are in pfn, so convert to mfn */
1943 /* L4[272] -> level3_ident_pgt
1944 * L4[511] -> level3_kernel_pgt */
1945 convert_pfn_mfn(init_level4_pgt
);
1947 /* L3_i[0] -> level2_ident_pgt */
1948 convert_pfn_mfn(level3_ident_pgt
);
1949 /* L3_k[510] -> level2_kernel_pgt
1950 * L3_k[511] -> level2_fixmap_pgt */
1951 convert_pfn_mfn(level3_kernel_pgt
);
1953 /* L3_k[511][506] -> level1_fixmap_pgt */
1954 convert_pfn_mfn(level2_fixmap_pgt
);
1956 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1957 l3
= m2v(pgd
[pgd_index(__START_KERNEL_map
)].pgd
);
1958 l2
= m2v(l3
[pud_index(__START_KERNEL_map
)].pud
);
1960 addr
[0] = (unsigned long)pgd
;
1961 addr
[1] = (unsigned long)l3
;
1962 addr
[2] = (unsigned long)l2
;
1963 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1964 * Both L4[272][0] and L4[511][510] have entries that point to the same
1965 * L2 (PMD) tables. Meaning that if you modify it in __va space
1966 * it will also be modified in the __ka space! (But if you just
1967 * modify the PMD table to point to other PTE's or none, then you
1968 * are OK - which is what cleanup_highmap does) */
1969 copy_page(level2_ident_pgt
, l2
);
1970 /* Graft it onto L4[511][510] */
1971 copy_page(level2_kernel_pgt
, l2
);
1973 /* Copy the initial P->M table mappings if necessary. */
1974 i
= pgd_index(xen_start_info
->mfn_list
);
1975 if (i
&& i
< pgd_index(__START_KERNEL_map
))
1976 init_level4_pgt
[i
] = ((pgd_t
*)xen_start_info
->pt_base
)[i
];
1978 if (!xen_feature(XENFEAT_auto_translated_physmap
)) {
1979 /* Make pagetable pieces RO */
1980 set_page_prot(init_level4_pgt
, PAGE_KERNEL_RO
);
1981 set_page_prot(level3_ident_pgt
, PAGE_KERNEL_RO
);
1982 set_page_prot(level3_kernel_pgt
, PAGE_KERNEL_RO
);
1983 set_page_prot(level3_user_vsyscall
, PAGE_KERNEL_RO
);
1984 set_page_prot(level2_ident_pgt
, PAGE_KERNEL_RO
);
1985 set_page_prot(level2_kernel_pgt
, PAGE_KERNEL_RO
);
1986 set_page_prot(level2_fixmap_pgt
, PAGE_KERNEL_RO
);
1987 set_page_prot(level1_fixmap_pgt
, PAGE_KERNEL_RO
);
1989 /* Pin down new L4 */
1990 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE
,
1991 PFN_DOWN(__pa_symbol(init_level4_pgt
)));
1993 /* Unpin Xen-provided one */
1994 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE
, PFN_DOWN(__pa(pgd
)));
1997 * At this stage there can be no user pgd, and no page
1998 * structure to attach it to, so make sure we just set kernel
2002 __xen_write_cr3(true, __pa(init_level4_pgt
));
2003 xen_mc_issue(PARAVIRT_LAZY_CPU
);
2005 native_write_cr3(__pa(init_level4_pgt
));
2007 /* We can't easily rip out L3 and L2, as the Xen pagetables are
2008 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
2009 * the initial domain. For guests using the toolstack, they are in:
2010 * [L4], [L3], [L2], [L1], [L1], order .. So for dom0 we can only
2011 * rip out the [L4] (pgd), but for guests we shave off three pages.
2013 for (i
= 0; i
< ARRAY_SIZE(addr
); i
++)
2014 check_pt_base(&pt_base
, &pt_end
, addr
[i
]);
2016 /* Our (by three pages) smaller Xen pagetable that we are using */
2017 xen_pt_base
= PFN_PHYS(pt_base
);
2018 xen_pt_size
= (pt_end
- pt_base
) * PAGE_SIZE
;
2019 memblock_reserve(xen_pt_base
, xen_pt_size
);
2021 /* Revector the xen_start_info */
2022 xen_start_info
= (struct start_info
*)__va(__pa(xen_start_info
));
2026 * Read a value from a physical address.
2028 static unsigned long __init
xen_read_phys_ulong(phys_addr_t addr
)
2030 unsigned long *vaddr
;
2033 vaddr
= early_memremap_ro(addr
, sizeof(val
));
2035 early_memunmap(vaddr
, sizeof(val
));
2040 * Translate a virtual address to a physical one without relying on mapped
2043 static phys_addr_t __init
xen_early_virt_to_phys(unsigned long vaddr
)
2052 pgd
= native_make_pgd(xen_read_phys_ulong(pa
+ pgd_index(vaddr
) *
2054 if (!pgd_present(pgd
))
2057 pa
= pgd_val(pgd
) & PTE_PFN_MASK
;
2058 pud
= native_make_pud(xen_read_phys_ulong(pa
+ pud_index(vaddr
) *
2060 if (!pud_present(pud
))
2062 pa
= pud_pfn(pud
) << PAGE_SHIFT
;
2064 return pa
+ (vaddr
& ~PUD_MASK
);
2066 pmd
= native_make_pmd(xen_read_phys_ulong(pa
+ pmd_index(vaddr
) *
2068 if (!pmd_present(pmd
))
2070 pa
= pmd_pfn(pmd
) << PAGE_SHIFT
;
2072 return pa
+ (vaddr
& ~PMD_MASK
);
2074 pte
= native_make_pte(xen_read_phys_ulong(pa
+ pte_index(vaddr
) *
2076 if (!pte_present(pte
))
2078 pa
= pte_pfn(pte
) << PAGE_SHIFT
;
2080 return pa
| (vaddr
& ~PAGE_MASK
);
2084 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
2087 void __init
xen_relocate_p2m(void)
2089 phys_addr_t size
, new_area
, pt_phys
, pmd_phys
, pud_phys
;
2090 unsigned long p2m_pfn
, p2m_pfn_end
, n_frames
, pfn
, pfn_end
;
2091 int n_pte
, n_pt
, n_pmd
, n_pud
, idx_pte
, idx_pt
, idx_pmd
, idx_pud
;
2096 unsigned long *new_p2m
;
2098 size
= PAGE_ALIGN(xen_start_info
->nr_pages
* sizeof(unsigned long));
2099 n_pte
= roundup(size
, PAGE_SIZE
) >> PAGE_SHIFT
;
2100 n_pt
= roundup(size
, PMD_SIZE
) >> PMD_SHIFT
;
2101 n_pmd
= roundup(size
, PUD_SIZE
) >> PUD_SHIFT
;
2102 n_pud
= roundup(size
, PGDIR_SIZE
) >> PGDIR_SHIFT
;
2103 n_frames
= n_pte
+ n_pt
+ n_pmd
+ n_pud
;
2105 new_area
= xen_find_free_area(PFN_PHYS(n_frames
));
2107 xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2112 * Setup the page tables for addressing the new p2m list.
2113 * We have asked the hypervisor to map the p2m list at the user address
2114 * PUD_SIZE. It may have done so, or it may have used a kernel space
2115 * address depending on the Xen version.
2116 * To avoid any possible virtual address collision, just use
2117 * 2 * PUD_SIZE for the new area.
2119 pud_phys
= new_area
;
2120 pmd_phys
= pud_phys
+ PFN_PHYS(n_pud
);
2121 pt_phys
= pmd_phys
+ PFN_PHYS(n_pmd
);
2122 p2m_pfn
= PFN_DOWN(pt_phys
) + n_pt
;
2124 pgd
= __va(read_cr3());
2125 new_p2m
= (unsigned long *)(2 * PGDIR_SIZE
);
2126 for (idx_pud
= 0; idx_pud
< n_pud
; idx_pud
++) {
2127 pud
= early_memremap(pud_phys
, PAGE_SIZE
);
2129 for (idx_pmd
= 0; idx_pmd
< min(n_pmd
, PTRS_PER_PUD
);
2131 pmd
= early_memremap(pmd_phys
, PAGE_SIZE
);
2133 for (idx_pt
= 0; idx_pt
< min(n_pt
, PTRS_PER_PMD
);
2135 pt
= early_memremap(pt_phys
, PAGE_SIZE
);
2138 idx_pte
< min(n_pte
, PTRS_PER_PTE
);
2140 set_pte(pt
+ idx_pte
,
2141 pfn_pte(p2m_pfn
, PAGE_KERNEL
));
2144 n_pte
-= PTRS_PER_PTE
;
2145 early_memunmap(pt
, PAGE_SIZE
);
2146 make_lowmem_page_readonly(__va(pt_phys
));
2147 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE
,
2149 set_pmd(pmd
+ idx_pt
,
2150 __pmd(_PAGE_TABLE
| pt_phys
));
2151 pt_phys
+= PAGE_SIZE
;
2153 n_pt
-= PTRS_PER_PMD
;
2154 early_memunmap(pmd
, PAGE_SIZE
);
2155 make_lowmem_page_readonly(__va(pmd_phys
));
2156 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE
,
2157 PFN_DOWN(pmd_phys
));
2158 set_pud(pud
+ idx_pmd
, __pud(_PAGE_TABLE
| pmd_phys
));
2159 pmd_phys
+= PAGE_SIZE
;
2161 n_pmd
-= PTRS_PER_PUD
;
2162 early_memunmap(pud
, PAGE_SIZE
);
2163 make_lowmem_page_readonly(__va(pud_phys
));
2164 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE
, PFN_DOWN(pud_phys
));
2165 set_pgd(pgd
+ 2 + idx_pud
, __pgd(_PAGE_TABLE
| pud_phys
));
2166 pud_phys
+= PAGE_SIZE
;
2169 /* Now copy the old p2m info to the new area. */
2170 memcpy(new_p2m
, xen_p2m_addr
, size
);
2171 xen_p2m_addr
= new_p2m
;
2173 /* Release the old p2m list and set new list info. */
2174 p2m_pfn
= PFN_DOWN(xen_early_virt_to_phys(xen_start_info
->mfn_list
));
2176 p2m_pfn_end
= p2m_pfn
+ PFN_DOWN(size
);
2178 if (xen_start_info
->mfn_list
< __START_KERNEL_map
) {
2179 pfn
= xen_start_info
->first_p2m_pfn
;
2180 pfn_end
= xen_start_info
->first_p2m_pfn
+
2181 xen_start_info
->nr_p2m_frames
;
2182 set_pgd(pgd
+ 1, __pgd(0));
2185 pfn_end
= p2m_pfn_end
;
2188 memblock_free(PFN_PHYS(pfn
), PAGE_SIZE
* (pfn_end
- pfn
));
2189 while (pfn
< pfn_end
) {
2190 if (pfn
== p2m_pfn
) {
2194 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn
)));
2198 xen_start_info
->mfn_list
= (unsigned long)xen_p2m_addr
;
2199 xen_start_info
->first_p2m_pfn
= PFN_DOWN(new_area
);
2200 xen_start_info
->nr_p2m_frames
= n_frames
;
#else	/* !CONFIG_X86_64 */
static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);

static void __init xen_write_cr3_init(unsigned long cr3)
{
	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));

	BUG_ON(read_cr3() != __pa(initial_page_table));
	BUG_ON(cr3 != __pa(swapper_pg_dir));

	/*
	 * We are switching to swapper_pg_dir for the first time (from
	 * initial_page_table) and therefore need to mark that page
	 * read-only and then pin it.
	 *
	 * Xen disallows sharing of kernel PMDs for PAE
	 * guests. Therefore we must copy the kernel PMD from
	 * initial_page_table into a new kernel PMD to be used in
	 * swapper_pg_dir.
	 */
	swapper_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
	copy_page(swapper_kernel_pmd, initial_kernel_pmd);
	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);

	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	xen_write_cr3(cr3);
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	set_page_prot(initial_page_table, PAGE_KERNEL);
	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);

	pv_mmu_ops.write_cr3 = &xen_write_cr3;
}
/*
 * For 32 bit domains xen_start_info->pt_base is the pgd address which might
 * not be the first page table in the page table pool.
 * Iterate through the initial page tables to find the real page table base.
 */
static phys_addr_t xen_find_pt_base(pmd_t *pmd)
{
	phys_addr_t pt_base, paddr;
	unsigned pmdidx;

	pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));

	for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
		if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
			paddr = m2p(pmd[pmdidx].pmd);
			pt_base = min(pt_base, paddr);
		}

	return pt_base;
}
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);

	xen_pt_base = xen_find_pt_base(kernel_pmd);
	xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;

	initial_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

	max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);

	copy_page(initial_kernel_pmd, kernel_pmd);

	xen_map_identity_early(initial_kernel_pmd, max_pfn);

	copy_page(initial_page_table, pgd);
	initial_page_table[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);

	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	xen_write_cr3(__pa(initial_page_table));

	memblock_reserve(xen_pt_base, xen_pt_size);
}
#endif	/* CONFIG_X86_64 */
void __init xen_reserve_special_pages(void)
{
	phys_addr_t paddr;

	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
	if (xen_start_info->store_mfn) {
		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
		memblock_reserve(paddr, PAGE_SIZE);
	}
	if (!xen_initial_domain()) {
		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
		memblock_reserve(paddr, PAGE_SIZE);
	}
}
void __init xen_pt_check_e820(void)
{
	if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
		xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
		BUG();
	}
}
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;

static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
	case VSYSCALL_PAGE:
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

#ifdef CONFIG_X86_IO_APIC
	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
		/*
		 * We just don't map the IO APIC - all access is via
		 * hypercalls.  Keep the address in the pte for reference.
		 */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, prot);
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_VSYSCALL_EMULATION
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx == VSYSCALL_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}
static void __init xen_post_allocator_init(void)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if CONFIG_PGTABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if CONFIG_PGTABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	pv_mmu_ops.write_cr3 = &xen_write_cr3;
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}
static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}
static const struct pv_mmu_ops xen_mmu_ops __initconst = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3_init,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.release_pmd = xen_release_pmd_init,

	.set_pte = xen_set_pte_init,
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if CONFIG_PGTABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
		.flush = paravirt_flush_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_init = xen_pagetable_init;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	pv_mmu_ops = xen_mmu_ops;

	memset(dummy_mapping, 0xff, PAGE_SIZE);
}
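
/*
 * Illustrative note, not part of the original file: the expected call order
 * during PV boot is, roughly, that the early Xen setup code (e.g.
 * xen_start_kernel() in the enlighten code) installs these ops before any
 * pagetable work is done, and the generic x86 init path later invokes the
 * hook registered above:
 *
 *	xen_init_mmu_ops();			// pv_mmu_ops = xen_mmu_ops
 *	...					// early p2m / memory setup
 *	x86_init.paging.pagetable_init();	// ends up in xen_pagetable_init()
 *
 * The caller names are assumptions for this sketch; the exact call sites
 * live outside this file.
 */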
/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
				unsigned long *in_frames,
				unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}
/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
				mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}
/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents   = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid        = DOMID_SELF
		},
		.out = {
			.nr_extents   = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid        = DOMID_SELF
		}
	};

	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long  flags;
	int            success;
	unsigned long vstart = (unsigned long)phys_to_virt(pstart);

	/*
	 * Currently an auto-translated guest will not perform I/O, nor will
	 * it require PAE page directories below 4GB. Therefore any calls to
	 * this function are redundant and can be ignored.
	 */

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn(vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	*dma_handle = virt_to_machine(vstart).maddr;
	return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	unsigned long *out_frames = discontig_frames, in_frame;
	unsigned long  flags;
	int success;
	unsigned long vstart;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	vstart = (unsigned long)phys_to_virt(pstart);
	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn(vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
					0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
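
/*
 * Illustrative usage sketch (not part of the original file): callers such
 * as swiotlb-xen pair the two exports above to turn an ordinary allocation
 * into a machine-contiguous, address-limited buffer and to hand it back
 * again.  The buffer size, order and 32-bit limit below are assumptions
 * made only for this example:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);	// 4 pages
 *	dma_addr_t mach;
 *
 *	if (buf && xen_create_contiguous_region(virt_to_phys((void *)buf),
 *						2, 32, &mach) == 0) {
 *		// buf is now backed by 4 machine-contiguous frames below
 *		// 4GB; 'mach' is the machine address to program into a
 *		// device for DMA.  Note the region is zeroed on exchange.
 *		...
 *		xen_destroy_contiguous_region(virt_to_phys((void *)buf), 2);
 *	}
 *	if (buf)
 *		free_pages(buf, 2);
 */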
#ifdef CONFIG_XEN_PVHVM
#ifdef CONFIG_PROC_VMCORE
/*
 * This function is used in two contexts:
 * - the kdump kernel has to check whether a pfn of the crashed kernel
 *   was a ballooned page. vmcore is using this function to decide
 *   whether to access a pfn of the crashed kernel.
 * - the kexec kernel has to check whether a pfn was ballooned by the
 *   previous kernel. If the pfn is ballooned, handle it properly.
 * Returns 0 if the pfn is not backed by a RAM page; the caller may then
 * handle the pfn specially.
 */
static int xen_oldmem_pfn_is_ram(unsigned long pfn)
{
	struct xen_hvm_get_mem_type a = {
		.domid = DOMID_SELF,
		.pfn = pfn,
	};
	int ram;

	if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
		return -ENXIO;

	switch (a.mem_type) {
	case HVMMEM_mmio_dm:
		ram = 0;
		break;
	case HVMMEM_ram_rw:
	case HVMMEM_ram_ro:
	default:
		ram = 1;
		break;
	}

	return ram;
}
#endif
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
	struct xen_hvm_pagetable_dying a;
	int rc;

	a.domid = DOMID_SELF;
	a.gpa = __pa(mm->pgd);
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	WARN_ON_ONCE(rc < 0);
}
static int is_pagetable_dying_supported(void)
{
	struct xen_hvm_pagetable_dying a;
	int rc = 0;

	a.domid = DOMID_SELF;
	a.gpa = 0x00;
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	if (rc < 0) {
		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
		return 0;
	}
	return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
	if (is_pagetable_dying_supported())
		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
	register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
#endif
}
#endif
#define REMAP_BATCH_SIZE 16

struct remap_data {
	xen_pfn_t *mfn;
	bool contiguous;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

	/* If we have a contiguous range, just update the mfn itself,
	   else update pointer to be "next mfn". */
	if (rmd->contiguous)
		(*rmd->mfn)++;
	else
		rmd->mfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}
static int do_remap_gfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *gfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_XEN_PVH
		/* We need to update the local page tables and the xen HAP */
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);
#else
		return -EINVAL;
#endif
	}

	rmd.mfn = gfn;
	rmd.prot = prot;
	/* We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous mapping. */
	rmd.contiguous = !err_ptr;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		/* We record the error for each page that gives an error, but
		 * continue mapping until the whole set is done */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
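
/*
 * Illustrative usage sketch (not part of the original file): a privileged
 * backend's .mmap handler could use the range variant above to map 'nr'
 * consecutive foreign frames, starting at 'gfn' of domain 'domid', into a
 * userspace VMA.  'gfn', 'nr' and 'domid' are placeholders supplied by the
 * hypothetical caller; the VMA must already be VM_IO | VM_PFNMAP (see the
 * BUG_ON in do_remap_gfn()):
 *
 *	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY | VM_DONTEXPAND;
 *	ret = xen_remap_domain_gfn_range(vma, vma->vm_start, gfn, nr,
 *					 vma->vm_page_prot, domid, NULL);
 *	if (ret < 0)
 *		return ret;	// mapping failed part way through
 *	// on success ret is the number of frames actually mapped
 */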
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
	 * and it is otherwise very hard to detect later what actually caused
	 * the "wrong memory was mapped in" symptom.
	 */
	BUG_ON(err_ptr == NULL);
	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
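
/*
 * Illustrative usage sketch (not part of the original file): when the
 * foreign frames are scattered, the array variant takes one gfn per page
 * plus a parallel err_ptr array, so a single bad frame does not abort the
 * whole mapping.  The array size and 'domid' below are assumptions for the
 * example only; as noted in do_remap_gfn(), err_ptr may even alias gfn:
 *
 *	xen_pfn_t gfns[16];	// filled in by the caller
 *	int errs[16];
 *	int mapped, i;
 *
 *	mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, 16,
 *					    errs, vma->vm_page_prot,
 *					    domid, NULL);
 *	for (i = 0; i < 16; i++)
 *		if (errs[i])
 *			pr_warn("frame %d failed: %d\n", i, errs[i]);
 */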
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages)
{
	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

#ifdef CONFIG_XEN_PVH
	return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
#else
	return -EINVAL;
#endif
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);