/* arch/x86/xen/mmu.c */

#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
        xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

        return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;
        pte_t *pte;
        unsigned offset;

        /*
         * if the PFN is in the linear mapped vaddr range, we can just use
         * the (quick) virt_to_machine() p2m lookup
         */
        if (virt_addr_valid(vaddr))
                return virt_to_machine(vaddr);

        /* otherwise we have to do a (slower) full page-table walk */

        pte = lookup_address(address, &level);
        BUG_ON(pte == NULL);
        offset = address & ~PAGE_MASK;
        return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
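
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * code that must hand the hypervisor the machine address of a buffer that
 * may live outside the linear mapping (e.g. a vmalloc'ed page) cannot rely
 * on virt_to_machine() alone and would do something like:
 *
 *	xmaddr_t maddr = arbitrary_virt_to_machine(buf);
 *	op.host_addr = maddr.maddr;	// "op" and "host_addr" are illustrative only
 *
 * arbitrary_virt_to_mfn() is the same lookup reduced to the machine frame
 * number.
 */
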
static noinline void xen_flush_tlb_all(void)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_ALL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}
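
/*
 * Note on xen_flush_tlb_all() above: xen_mc_entry() reserves a slot in the
 * per-CPU multicall buffer, MULTI_mmuext_op() queues the MMUEXT_TLB_FLUSH_ALL
 * request into that slot, and xen_mc_issue(PARAVIRT_LAZY_MMU) either issues
 * the batch right away or leaves it queued when we are inside a lazy MMU
 * section.  Preemption is disabled across the sequence because the multicall
 * buffer is per-CPU.
 */
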
#define REMAP_BATCH_SIZE 16

struct remap_data {
        xen_pfn_t *pfn;
        bool contiguous;
        bool no_translate;
        pgprot_t prot;
        struct mmu_update *mmu_update;
};
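
/*
 * Callback invoked by apply_to_page_range() for every PTE in the range being
 * remapped.  It does not write the PTE itself: it only appends a
 * (PTE machine address, new PTE value) pair to the mmu_update array prepared
 * by do_remap_pfn(), so the real updates can be applied with one
 * HYPERVISOR_mmu_update() hypercall per batch.
 */
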
static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
{
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));

        /*
         * If we have a contiguous range, just update the pfn itself,
         * else update pointer to be "next pfn".
         */
        if (rmd->contiguous)
                (*rmd->pfn)++;
        else
                rmd->pfn++;

        rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
        rmd->mmu_update->ptr |= rmd->no_translate ?
                MMU_PT_UPDATE_NO_TRANSLATE :
                MMU_NORMAL_PT_UPDATE;
        rmd->mmu_update->val = pte_val_ma(pte);
        rmd->mmu_update++;

        return 0;
}
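
/*
 * Common worker for the exported remap helpers below.  It maps @nr frames
 * starting at @addr in @vma, in chunks of at most REMAP_BATCH_SIZE, and
 * returns the number of successfully mapped frames or a negative errno.
 * When @err_ptr is NULL, @pfn points to a single, contiguously increasing
 * frame number; otherwise @pfn is an array with one (possibly foreign)
 * frame per page, and per-frame errors are reported through @err_ptr.
 */
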
static int do_remap_pfn(struct vm_area_struct *vma,
                        unsigned long addr,
                        xen_pfn_t *pfn, int nr,
                        int *err_ptr, pgprot_t prot,
                        unsigned int domid,
                        bool no_translate,
                        struct page **pages)
{
        int err = 0;
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
        unsigned long range;
        int mapped = 0;

        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

        rmd.pfn = pfn;
        rmd.prot = prot;
        /*
         * We use the err_ptr to indicate whether we are doing a contiguous
         * or a discontiguous mapping.
         */
        rmd.contiguous = !err_ptr;
        rmd.no_translate = no_translate;

        while (nr) {
                int index = 0;
                int done = 0;
                int batch = min(REMAP_BATCH_SIZE, nr);
                int batch_left = batch;
                range = (unsigned long)batch << PAGE_SHIFT;

                rmd.mmu_update = mmu_update;
                err = apply_to_page_range(vma->vm_mm, addr, range,
                                          remap_area_pfn_pte_fn, &rmd);
                if (err)
                        goto out;

                /*
                 * We record the error for each page that gives an error,
                 * but continue mapping until the whole set is done.
                 */
                do {
                        int i;

                        err = HYPERVISOR_mmu_update(&mmu_update[index],
                                                    batch_left, &done, domid);

                        /*
                         * @err_ptr may be the same buffer as @gfn, so
                         * only clear it after each chunk of @gfn is
                         * used.
                         */
                        if (err_ptr) {
                                for (i = index; i < index + done; i++)
                                        err_ptr[i] = 0;
                        }
                        if (err < 0) {
                                if (!err_ptr)
                                        goto out;
                                err_ptr[i] = err;
                                done++; /* Skip failed frame. */
                        } else
                                mapped += done;
                        batch_left -= done;
                        index += done;
                } while (batch_left);

                nr -= batch;
                addr += range;
                if (err_ptr)
                        err_ptr += batch;
                cond_resched();
        }
out:

        xen_flush_tlb_all();

        return err < 0 ? err : mapped;
}
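
/*
 * The exported wrappers below differ only in how they drive do_remap_pfn():
 * xen_remap_domain_gfn_range() maps a contiguous run of guest frames,
 * xen_remap_domain_gfn_array() maps an arbitrary list of frames with
 * per-frame error reporting, and xen_remap_domain_mfn_array() maps raw
 * machine frames with no translation (PV only).
 */
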
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t gfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;

        return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
                            pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);

int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned domid, struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
                                                 prot, domid, pages);

        /*
         * We BUG_ON because it's a programmer error to pass a NULL err_ptr,
         * and it is quite hard to detect later that the actual cause of a
         * failure was "wrong memory was mapped in".
         */
        BUG_ON(err_ptr == NULL);
        return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
                            false, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
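
/*
 * A minimal usage sketch for xen_remap_domain_gfn_array() (hypothetical
 * caller modelled on a privcmd-style driver; names below are illustrative
 * only):
 *
 *	vma->vm_flags |= VM_IO | VM_PFNMAP;	// required by do_remap_pfn()
 *	rc = xen_remap_domain_gfn_array(vma, vma->vm_start,
 *					gfn_array, nr_frames,
 *					err_array, vma->vm_page_prot,
 *					foreign_domid, pages);
 *	// On the non-translated (PV) path, rc is the number of frames mapped
 *	// or a negative errno; err_array[i] holds the per-frame result.
 *
 * The pages argument is only consumed on auto-translated guests, where the
 * call is forwarded to xen_xlate_remap_gfn_array().
 */
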
int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *mfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned int domid, struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;

        return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
                            true, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);

/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int nr, struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return xen_xlate_unmap_gfn_range(vma, nr, pages);

        if (!pages)
                return 0;

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
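
/*
 * Note on xen_unmap_domain_gfn_range() above: on auto-translated guests the
 * pages backing the mapping must be released via xen_xlate_unmap_gfn_range().
 * On PV there is nothing to undo beyond the normal VMA teardown, so the call
 * succeeds only when no pages array was passed; supplying one on PV is
 * treated as a caller error (-EINVAL).
 */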