/*
 * MMU operations common to all auto-translated physmap guests.
 *
 * Copyright (C) 2015 Citrix Systems R&D Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
/* Callback invoked once per 4KB granule (gfn) by xen_for_each_gfn() */
typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
47 /* Break down the pages in 4KB chunk and call fn for each gfn */
48 static void xen_for_each_gfn(struct page
**pages
, unsigned nr_gfn
,
49 xen_gfn_fn_t fn
, void *data
)
51 unsigned long xen_pfn
= 0;
55 for (i
= 0; i
< nr_gfn
; i
++) {
56 if ((i
% XEN_PFN_PER_PAGE
) == 0) {
57 page
= pages
[i
/ XEN_PFN_PER_PAGE
];
58 xen_pfn
= page_to_xen_pfn(page
);
60 fn(pfn_to_gfn(xen_pfn
++), data
);
65 xen_pfn_t
*fgfn
; /* foreign domain's gfn */
66 int nr_fgfn
; /* Number of foreign gfn left to map */
69 struct vm_area_struct
*vma
;
72 struct xen_remap_gfn_info
*info
;
76 /* Hypercall parameters */
77 int h_errs
[XEN_PFN_PER_PAGE
];
78 xen_ulong_t h_idxs
[XEN_PFN_PER_PAGE
];
79 xen_pfn_t h_gpfns
[XEN_PFN_PER_PAGE
];
81 int h_iter
; /* Iterator */
84 static void setup_hparams(unsigned long gfn
, void *data
)
86 struct remap_data
*info
= data
;
88 info
->h_idxs
[info
->h_iter
] = *info
->fgfn
;
89 info
->h_gpfns
[info
->h_iter
] = gfn
;
90 info
->h_errs
[info
->h_iter
] = 0;
96 static int remap_pte_fn(pte_t
*ptep
, pgtable_t token
, unsigned long addr
,
99 struct remap_data
*info
= data
;
100 struct page
*page
= info
->pages
[info
->index
++];
101 pte_t pte
= pte_mkspecial(pfn_pte(page_to_pfn(page
), info
->prot
));
104 struct xen_add_to_physmap_range xatp
= {
106 .foreign_domid
= info
->domid
,
107 .space
= XENMAPSPACE_gmfn_foreign
,
110 nr_gfn
= min_t(typeof(info
->nr_fgfn
), XEN_PFN_PER_PAGE
, info
->nr_fgfn
);
111 info
->nr_fgfn
-= nr_gfn
;
114 xen_for_each_gfn(&page
, nr_gfn
, setup_hparams
, info
);
115 BUG_ON(info
->h_iter
!= nr_gfn
);
117 set_xen_guest_handle(xatp
.idxs
, info
->h_idxs
);
118 set_xen_guest_handle(xatp
.gpfns
, info
->h_gpfns
);
119 set_xen_guest_handle(xatp
.errs
, info
->h_errs
);
122 rc
= HYPERVISOR_memory_op(XENMEM_add_to_physmap_range
, &xatp
);
124 /* info->err_ptr expect to have one error status per Xen PFN */
125 for (i
= 0; i
< nr_gfn
; i
++) {
126 int err
= (rc
< 0) ? rc
: info
->h_errs
[i
];
128 *(info
->err_ptr
++) = err
;
134 * Note: The hypercall will return 0 in most of the case if even if
135 * all the fgmfn are not mapped. We still have to update the pte
136 * as the userspace may decide to continue.
139 set_pte_at(info
->vma
->vm_mm
, addr
, ptep
, pte
);
144 int xen_xlate_remap_gfn_array(struct vm_area_struct
*vma
,
146 xen_pfn_t
*gfn
, int nr
,
147 int *err_ptr
, pgprot_t prot
,
152 struct remap_data data
;
153 unsigned long range
= DIV_ROUND_UP(nr
, XEN_PFN_PER_PAGE
) << PAGE_SHIFT
;
155 /* Kept here for the purpose of making sure code doesn't break
157 BUG_ON(!((vma
->vm_flags
& (VM_PFNMAP
| VM_IO
)) == (VM_PFNMAP
| VM_IO
)));
166 data
.err_ptr
= err_ptr
;
169 err
= apply_to_page_range(vma
->vm_mm
, addr
, range
,
170 remap_pte_fn
, &data
);
171 return err
< 0 ? err
: data
.mapped
;
173 EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array
);
175 static void unmap_gfn(unsigned long gfn
, void *data
)
177 struct xen_remove_from_physmap xrp
;
179 xrp
.domid
= DOMID_SELF
;
181 (void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap
, &xrp
);
184 int xen_xlate_unmap_gfn_range(struct vm_area_struct
*vma
,
185 int nr
, struct page
**pages
)
187 xen_for_each_gfn(pages
, nr
, unmap_gfn
, NULL
);
191 EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range
);
193 struct map_balloon_pages
{
198 static void setup_balloon_gfn(unsigned long gfn
, void *data
)
200 struct map_balloon_pages
*info
= data
;
202 info
->pfns
[info
->idx
++] = gfn
;
206 * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
207 * @gfns: returns the array of corresponding GFNs
208 * @virt: returns the virtual address of the mapped region
209 * @nr_grant_frames: number of GFNs
210 * @return 0 on success, error otherwise
212 * This allocates a set of ballooned pages and maps them into the
213 * kernel's address space.
215 int __init
xen_xlate_map_ballooned_pages(xen_pfn_t
**gfns
, void **virt
,
216 unsigned long nr_grant_frames
)
221 struct map_balloon_pages data
;
223 unsigned long nr_pages
;
225 BUG_ON(nr_grant_frames
== 0);
226 nr_pages
= DIV_ROUND_UP(nr_grant_frames
, XEN_PFN_PER_PAGE
);
227 pages
= kcalloc(nr_pages
, sizeof(pages
[0]), GFP_KERNEL
);
231 pfns
= kcalloc(nr_grant_frames
, sizeof(pfns
[0]), GFP_KERNEL
);
236 rc
= alloc_xenballooned_pages(nr_pages
, pages
);
238 pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__
,
247 xen_for_each_gfn(pages
, nr_grant_frames
, setup_balloon_gfn
, &data
);
249 vaddr
= vmap(pages
, nr_pages
, 0, PAGE_KERNEL
);
251 pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__
,
253 free_xenballooned_pages(nr_pages
, pages
);
265 EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages
);