2 #include <asm/xen/page.h>
3 #include <asm/xen/hypercall.h>
4 #include <xen/interface/memory.h>
6 #include "multicalls.h"
/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);
15 unsigned long arbitrary_virt_to_mfn(void *vaddr
)
17 xmaddr_t maddr
= arbitrary_virt_to_machine(vaddr
);
19 return PFN_DOWN(maddr
.maddr
);
22 xmaddr_t
arbitrary_virt_to_machine(void *vaddr
)
24 unsigned long address
= (unsigned long)vaddr
;
30 * if the PFN is in the linear mapped vaddr range, we can just use
31 * the (quick) virt_to_machine() p2m lookup
33 if (virt_addr_valid(vaddr
))
34 return virt_to_machine(vaddr
);
36 /* otherwise we have to do a (slower) full page-table walk */
38 pte
= lookup_address(address
, &level
);
40 offset
= address
& ~PAGE_MASK
;
41 return XMADDR(((phys_addr_t
)pte_mfn(*pte
) << PAGE_SHIFT
) + offset
);
43 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine
);
45 static noinline
void xen_flush_tlb_all(void)
48 struct multicall_space mcs
;
52 mcs
= xen_mc_entry(sizeof(*op
));
55 op
->cmd
= MMUEXT_TLB_FLUSH_ALL
;
56 MULTI_mmuext_op(mcs
.mc
, op
, 1, NULL
, DOMID_SELF
);
58 xen_mc_issue(PARAVIRT_LAZY_MMU
);
63 #define REMAP_BATCH_SIZE 16
70 struct mmu_update
*mmu_update
;
73 static int remap_area_pfn_pte_fn(pte_t
*ptep
, pgtable_t token
,
74 unsigned long addr
, void *data
)
76 struct remap_data
*rmd
= data
;
77 pte_t pte
= pte_mkspecial(mfn_pte(*rmd
->pfn
, rmd
->prot
));
80 * If we have a contiguous range, just update the pfn itself,
81 * else update pointer to be "next pfn".
88 rmd
->mmu_update
->ptr
= virt_to_machine(ptep
).maddr
;
89 rmd
->mmu_update
->ptr
|= rmd
->no_translate
?
90 MMU_PT_UPDATE_NO_TRANSLATE
:
92 rmd
->mmu_update
->val
= pte_val_ma(pte
);
98 static int do_remap_pfn(struct vm_area_struct
*vma
,
100 xen_pfn_t
*pfn
, int nr
,
101 int *err_ptr
, pgprot_t prot
,
107 struct remap_data rmd
;
108 struct mmu_update mmu_update
[REMAP_BATCH_SIZE
];
112 BUG_ON(!((vma
->vm_flags
& (VM_PFNMAP
| VM_IO
)) == (VM_PFNMAP
| VM_IO
)));
117 * We use the err_ptr to indicate if there we are doing a contiguous
118 * mapping or a discontigious mapping.
120 rmd
.contiguous
= !err_ptr
;
121 rmd
.no_translate
= no_translate
;
126 int batch
= min(REMAP_BATCH_SIZE
, nr
);
127 int batch_left
= batch
;
128 range
= (unsigned long)batch
<< PAGE_SHIFT
;
130 rmd
.mmu_update
= mmu_update
;
131 err
= apply_to_page_range(vma
->vm_mm
, addr
, range
,
132 remap_area_pfn_pte_fn
, &rmd
);
136 /* We record the error for each page that gives an error, but
137 * continue mapping until the whole set is done */
141 err
= HYPERVISOR_mmu_update(&mmu_update
[index
],
142 batch_left
, &done
, domid
);
145 * @err_ptr may be the same buffer as @gfn, so
146 * only clear it after each chunk of @gfn is
150 for (i
= index
; i
< index
+ done
; i
++)
157 done
++; /* Skip failed frame. */
162 } while (batch_left
);
174 return err
< 0 ? err
: mapped
;
177 int xen_remap_domain_gfn_range(struct vm_area_struct
*vma
,
179 xen_pfn_t gfn
, int nr
,
180 pgprot_t prot
, unsigned domid
,
183 if (xen_feature(XENFEAT_auto_translated_physmap
))
186 return do_remap_pfn(vma
, addr
, &gfn
, nr
, NULL
, prot
, domid
, false,
189 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range
);
191 int xen_remap_domain_gfn_array(struct vm_area_struct
*vma
,
193 xen_pfn_t
*gfn
, int nr
,
194 int *err_ptr
, pgprot_t prot
,
195 unsigned domid
, struct page
**pages
)
197 if (xen_feature(XENFEAT_auto_translated_physmap
))
198 return xen_xlate_remap_gfn_array(vma
, addr
, gfn
, nr
, err_ptr
,
201 /* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
202 * and the consequences later is quite hard to detect what the actual
203 * cause of "wrong memory was mapped in".
205 BUG_ON(err_ptr
== NULL
);
206 return do_remap_pfn(vma
, addr
, gfn
, nr
, err_ptr
, prot
, domid
,
209 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array
);
211 int xen_remap_domain_mfn_array(struct vm_area_struct
*vma
,
213 xen_pfn_t
*mfn
, int nr
,
214 int *err_ptr
, pgprot_t prot
,
215 unsigned int domid
, struct page
**pages
)
217 if (xen_feature(XENFEAT_auto_translated_physmap
))
220 return do_remap_pfn(vma
, addr
, mfn
, nr
, err_ptr
, prot
, domid
,
223 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array
);
225 /* Returns: 0 success */
226 int xen_unmap_domain_gfn_range(struct vm_area_struct
*vma
,
227 int nr
, struct page
**pages
)
229 if (xen_feature(XENFEAT_auto_translated_physmap
))
230 return xen_xlate_unmap_gfn_range(vma
, nr
, pages
);
237 EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range
);