/******************************************************************************
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

MODULE_LICENSE("GPL");
#define PRIV_VMA_LOCKED ((void *)1)
static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);
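
/*
 * Copy a hypercall request from userspace and pass it, unmodified, to
 * the hypervisor via privcmd_call().
 */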
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		/* Next item */
		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}
struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};
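
/*
 * Per-entry callback for IOCTL_PRIVCMD_MMAP: map one contiguous chunk of
 * foreign frames into the caller's VMA, advancing st->va as we go.
 */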
static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}
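
/*
 * Handle IOCTL_PRIVCMD_MMAP: gather the privcmd_mmap_entry array from
 * userspace, then map each entry into the target VMA via mmap_mfn_range().
 */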
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space mfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_mfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};
/* auto translated dom0 note: if domU being created is PV, then mfn is
 * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page *cur_page = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_page = pages[st->index++];

	ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
					 st->vma->vm_page_prot, st->domain,
					 &cur_page);

	/* Store error code for second pass. */
	if (st->version == 1) {
		if (ret < 0) {
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the mfn (with its known limitations
			 * vis-a-vis 64 bit callers).
			 */
			*mfnp |= (ret == -ENOENT) ?
				 PRIVCMD_MMAPBATCH_PAGED_ERROR :
				 PRIVCMD_MMAPBATCH_MFN_ERROR;
		}
	} else { /* st->version == 2 */
		*((int *) mfnp) = ret;
	}

	/* And see if it affects the global_error. */
	if (ret < 0) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += PAGE_SIZE;

	return 0;
}
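
/*
 * Second-pass callback: copy per-frame error indications back to the
 * user-supplied array (encoded in the mfn for V1, as an int for V2).
 */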
static int mmap_return_errors(void *data, void *state)
{
	struct mmap_batch_state *st = state;

	if (st->version == 1) {
		xen_pfn_t mfnp = *((xen_pfn_t *) data);
		if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
			return __put_user(mfnp, st->user_mfn++);
		else
			st->user_mfn++;
	} else { /* st->version == 2 */
		int err = *((int *) data);
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages, 0);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}
static struct vm_operations_struct privcmd_vm_ops;
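
/*
 * Handle IOCTL_PRIVCMD_MMAPBATCH (version 1) and
 * IOCTL_PRIVCMD_MMAPBATCH_V2 (version 2).
 */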
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, m.num);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
			      &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_mfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
				     &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	up_write(&mm->mmap_sem);
	goto out;
}
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(udata, 2);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
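
/*
 * VMA close handler: for auto-translated domains, unmap any foreign
 * frames and return the ballooned pages backing the mapping.
 */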
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}
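
/*
 * Mappings are installed up front by the ioctls above, so a fault here
 * means the range was never successfully mapped: report SIGBUS.
 */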
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};
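
/* Set up a VMA suitable for the mapping ioctls above. */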
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}
static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}
const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};
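
/* Register the misc device that exposes the privcmd interface. */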
static int __init privcmd_init(void)
{
	int err;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	return 0;
}
static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}
module_init(privcmd_init);
module_exit(privcmd_exit);