Linux 4.13.16
arch/x86/xen/mmu.c (blob 3be06f3caf3c1e25aa318fb2fd0ae8706cc0a717)
#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"
/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);
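/*
 * Return the machine frame number backing @vaddr.  This works for any mapped
 * kernel virtual address, including ones outside the linear mapping (e.g.
 * vmalloc ranges), because arbitrary_virt_to_machine() falls back to a full
 * page-table walk when needed.
 */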
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
        xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

        return PFN_DOWN(maddr.maddr);
}
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;
        pte_t *pte;
        unsigned offset;

        /*
         * If the PFN is in the linear mapped vaddr range, we can just use
         * the (quick) virt_to_machine() p2m lookup.
         */
        if (virt_addr_valid(vaddr))
                return virt_to_machine(vaddr);

        /* Otherwise we have to do a (slower) full page-table walk. */
        pte = lookup_address(address, &level);
        BUG_ON(pte == NULL);
        offset = address & ~PAGE_MASK;
        return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
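/*
 * Ask the hypervisor to flush the TLB with a single MMUEXT_TLB_FLUSH_ALL
 * operation.  The op is queued via the multicall machinery (xen_mc_entry /
 * xen_mc_issue) so it can be coalesced with other pending updates when we
 * are in lazy MMU mode.
 */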
static void xen_flush_tlb_all(void)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        trace_xen_mmu_flush_tlb_all(0);

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_ALL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}
#define REMAP_BATCH_SIZE 16

struct remap_data {
        xen_pfn_t *mfn;
        bool contiguous;
        pgprot_t prot;
        struct mmu_update *mmu_update;
};
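/*
 * apply_to_page_range() callback: build one mmu_update entry per PTE.
 * Each entry records the machine address of the PTE slot and the new PTE
 * value; the updates themselves are applied later by HYPERVISOR_mmu_update()
 * in do_remap_gfn().
 */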
static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
{
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

        /*
         * If we have a contiguous range, just increment the mfn itself,
         * else advance the pointer to the next mfn.
         */
        if (rmd->contiguous)
                (*rmd->mfn)++;
        else
                rmd->mfn++;

        rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
        rmd->mmu_update->val = pte_val_ma(pte);
        rmd->mmu_update++;

        return 0;
}
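/*
 * Map @nr frames into @vma at @addr in batches of REMAP_BATCH_SIZE, with the
 * PTEs prepared by remap_area_mfn_pte_fn() and installed by the hypervisor.
 * When @err_ptr is NULL, @gfn names the start of a contiguous range and the
 * first hypercall error aborts the whole operation; otherwise @gfn is an
 * array, per-frame errors are recorded in @err_ptr and the remaining frames
 * are still attempted.  The VMA must have VM_PFNMAP and VM_IO set.  Returns
 * the number of frames mapped, or a negative error code.
 */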
static int do_remap_gfn(struct vm_area_struct *vma,
                        unsigned long addr,
                        xen_pfn_t *gfn, int nr,
                        int *err_ptr, pgprot_t prot,
                        unsigned domid,
                        struct page **pages)
{
        int err = 0;
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
        unsigned long range;
        int mapped = 0;

        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

        rmd.mfn = gfn;
        rmd.prot = prot;
        /*
         * We use err_ptr to indicate whether we are doing a contiguous
         * mapping or a discontiguous mapping.
         */
        rmd.contiguous = !err_ptr;

        while (nr) {
                int index = 0;
                int done = 0;
                int batch = min(REMAP_BATCH_SIZE, nr);
                int batch_left = batch;
                range = (unsigned long)batch << PAGE_SHIFT;

                rmd.mmu_update = mmu_update;
                err = apply_to_page_range(vma->vm_mm, addr, range,
                                          remap_area_mfn_pte_fn, &rmd);
                if (err)
                        goto out;

                /*
                 * We record the error for each page that gives an error, but
                 * continue mapping until the whole set is done.
                 */
                do {
                        int i;

                        err = HYPERVISOR_mmu_update(&mmu_update[index],
                                                    batch_left, &done, domid);

                        /*
                         * @err_ptr may be the same buffer as @gfn, so
                         * only clear it after each chunk of @gfn is
                         * used.
                         */
                        if (err_ptr) {
                                for (i = index; i < index + done; i++)
                                        err_ptr[i] = 0;
                        }
                        if (err < 0) {
                                if (!err_ptr)
                                        goto out;
                                err_ptr[i] = err;
                                done++; /* Skip failed frame. */
                        } else
                                mapped += done;
                        batch_left -= done;
                        index += done;
                } while (batch_left);

                nr -= batch;
                addr += range;
                if (err_ptr)
                        err_ptr += batch;
                cond_resched();
        }
out:

        xen_flush_tlb_all();

        return err < 0 ? err : mapped;
}
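/*
 * Remap a single contiguous range of @nr frames starting at @gfn into @vma.
 * No per-frame error reporting: the first failure aborts the mapping.
 */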
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t gfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages)
{
        return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
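/*
 * Remap an array of @nr individual frames from @gfn[] into @vma.  @err_ptr
 * must point to an array of @nr ints and receives 0 or a per-frame error for
 * every frame; it may alias @gfn, so callers can reuse one buffer for input
 * frames and output status.
 */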
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned domid, struct page **pages)
{
        /*
         * We BUG_ON because it is a programmer error to pass a NULL err_ptr,
         * and it is very hard to track down later why the wrong memory was
         * mapped in.
         */
        BUG_ON(err_ptr == NULL);
        return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
/* Returns: 0 on success. */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int numpgs, struct page **pages)
{
        if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);