/******************************************************************************
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */
9 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"
44 MODULE_LICENSE("GPL");
46 #define PRIV_VMA_LOCKED ((void *)1)
static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
                 "Maximum number of buffers per dm_op hypercall");
static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
                   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
                 "Maximum size of a dm_op hypercall buffer");
/* Per-fd state; tracks an optional restriction to a single domid. */
struct privcmd_data {
        domid_t domid;
};

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_hypercall hypercall;
        long ret;

        /* Disallow arbitrary hypercalls if restricted */
        if (data->domid != DOMID_INVALID)
                return -EPERM;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        xen_preemptible_hcall_begin();
        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);
        xen_preemptible_hcall_end();

        return ret;
}
static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        const void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                ret = 0;
                pageidx += size;
                data += size;
        }

fail:
        return ret;
}
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;

                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;
                pageidx += size;
        }

        return ret;
}
/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
                                struct list_head *pos,
                                int (*fn)(void *data, int nr, void *state),
                                void *state)
{
        void *pagedata;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        while (nelem) {
                int nr = (PAGE_SIZE/size);
                struct page *page;

                if (nr > nelem)
                        nr = nelem;
                pos = pos->next;
                page = list_entry(pos, struct page, lru);
                pagedata = page_address(page);
                ret = (*fn)(pagedata, nr, state);
                if (ret)
                        break;
                nelem -= nr;
        }

        return ret;
}
struct mmap_gfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_gfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_gfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain, NULL);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_gfn_state state;

        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
                return -EPERM;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
                        goto out_up;
                vma->vm_private_data = PRIV_VMA_LOCKED;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_gfn_range, &state);


out_up:
        up_write(&mm->mmap_sem);

out:
        free_page_list(&pagelist);

        return rc;
}
struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int index;
        /* A tristate:
         *      0 for no errors
         *      1 if at least one error has happened (and no
         *          -ENOENT errors have happened)
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
        int version;

        /* User-space gfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_gfn;
        /* User-space int array to store errors in the second pass for V2. */
        int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
        xen_pfn_t *gfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
        struct page **cur_pages = NULL;
        int ret;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                cur_pages = &pages[st->index];

        ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
                                         (int *)gfnp, st->vma->vm_page_prot,
                                         st->domain, cur_pages);

        /* Adjust the global_error? */
        if (ret != nr) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
                        /* Record that at least one error has happened. */
                        if (st->global_error == 0)
                                st->global_error = 1;
                }
        }
        st->va += XEN_PAGE_SIZE * nr;
        st->index += nr / XEN_PFN_PER_PAGE;

        return 0;
}
static int mmap_return_error(int err, struct mmap_batch_state *st)
{
        int ret;

        if (st->version == 1) {
                if (err) {
                        xen_pfn_t gfn;

                        ret = get_user(gfn, st->user_gfn);
                        if (ret < 0)
                                return ret;
                        /*
                         * V1 encodes the error codes in the 32bit top
                         * nibble of the gfn (with its known
                         * limitations vis-a-vis 64 bit callers).
                         */
                        gfn |= (err == -ENOENT) ?
                                PRIVCMD_MMAPBATCH_PAGED_ERROR :
                                PRIVCMD_MMAPBATCH_MFN_ERROR;
                        return __put_user(gfn, st->user_gfn++);
                } else
                        st->user_gfn++;
        } else { /* st->version == 2 */
                if (err)
                        return __put_user(err, st->user_err++);
                else
                        st->user_err++;
        }

        return 0;
}
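/*
 * Illustrative only: how a V1 (IOCTL_PRIVCMD_MMAPBATCH) caller is expected
 * to consume the encoding above.  A sketch, assuming the
 * PRIVCMD_MMAPBATCH_*_ERROR masks from the uapi header xen/privcmd.h;
 * retry_later() and give_up() stand in for whatever the caller does:
 *
 *      for (i = 0; i < m.num; i++) {
 *              xen_pfn_t gfn = arr[i];
 *
 *              if ((gfn & PRIVCMD_MMAPBATCH_MFN_ERROR) ==
 *                  PRIVCMD_MMAPBATCH_PAGED_ERROR)
 *                      retry_later(i);         - frame paged out (-ENOENT)
 *              else if (gfn & PRIVCMD_MMAPBATCH_MFN_ERROR)
 *                      give_up(i);             - any other mapping failure
 *      }
 *
 * V2 (IOCTL_PRIVCMD_MMAPBATCH_V2) callers read plain errnos from m.err[]
 * instead.
 */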
static int mmap_return_errors(void *data, int nr, void *state)
{
        struct mmap_batch_state *st = state;
        int *errs = data;
        int i;
        int ret;

        for (i = 0; i < nr; i++) {
                ret = mmap_return_error(errs[i], st);
                if (ret < 0)
                        return ret;
        }
        return 0;
}
/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
        int rc;
        struct page **pages;

        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;

        rc = alloc_xenballooned_pages(numpgs, pages);
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
                kfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != NULL);
        vma->vm_private_data = pages;

        return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(
        struct file *file, void __user *udata, int version)
{
        struct privcmd_data *data = file->private_data;
        int ret;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        struct mmap_batch_state state;

        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
                        return -EFAULT;
                /* Returns per-frame error in m.arr. */
                m.err = NULL;
                if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
                        return -EFAULT;
                break;
        case 2:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
                        return -EFAULT;
                /* Returns per-frame error code in m.err. */
                if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
                        return -EFAULT;
                break;
        default:
                return -EINVAL;
        }

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != m.dom)
                return -EPERM;

        nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

        if (ret)
                goto out;
        if (list_empty(&pagelist)) {
                ret = -EINVAL;
                goto out;
        }

        if (version == 2) {
                /* Zero error array now to only copy back actual errors. */
                if (clear_user(m.err, sizeof(int) * m.num)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, m.addr);
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /*
         * Caller must either:
         *
         * Map the whole VMA range, which will also allocate all the
         * pages required for the auto_translated_physmap case.
         *
         * Or
         *
         * Map unmapped holes left from a previous map attempt (e.g.,
         * because those foreign frames were previously paged out).
         */
        if (vma->vm_private_data == NULL) {
                if (m.addr != vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (xen_feature(XENFEAT_auto_translated_physmap)) {
                        ret = alloc_empty_pages(vma, nr_pages);
                        if (ret < 0)
                                goto out_unlock;
                } else
                        vma->vm_private_data = PRIV_VMA_LOCKED;
        } else {
                if (m.addr < vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;
        state.index = 0;
        state.global_error = 0;
        state.version = version;

        BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                    &pagelist, mmap_batch_fn, &state));

        up_write(&mm->mmap_sem);

        if (state.global_error) {
                /* Write back errors in second pass. */
                state.user_gfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
                ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                           &pagelist, mmap_return_errors,
                                           &state);
        } else
                ret = 0;

        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
        if ((ret == 0) && (state.global_error == -ENOENT))
                ret = -ENOENT;

out:
        free_page_list(&pagelist);
        return ret;

out_unlock:
        up_write(&mm->mmap_sem);
        goto out;
}
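/*
 * Illustrative only: a minimal user-space sketch (not part of this file) of
 * the V2 batch-map flow handled above, assuming the privcmd_mmapbatch_v2
 * layout from the uapi header xen/privcmd.h, 4 KiB pages, and caller-owned
 * arrays gfns[]/errs[] of length num.  The destination range must come from
 * an mmap() of the same /dev/xen/privcmd descriptor so that privcmd_mmap()
 * below has installed privcmd_vm_ops on the VMA:
 *
 *      void *addr = mmap(NULL, num * 4096UL, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, 0);
 *      struct privcmd_mmapbatch_v2 m = {
 *              .num  = num,
 *              .dom  = domid,
 *              .addr = (unsigned long)addr,
 *              .arr  = gfns,
 *              .err  = errs,
 *      };
 *      int rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 *
 * An overall -ENOENT means some frames were paged out; the caller may retry
 * later against the still-unmapped holes in the same VMA (the
 * vm_private_data != NULL branch above), since resubmitting addresses that
 * are already populated fails privcmd_vma_range_is_mapped().
 */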
static int lock_pages(
        struct privcmd_dm_op_buf kbufs[], unsigned int num,
        struct page *pages[], unsigned int nr_pages)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                unsigned int requested;
                int pinned;

                requested = DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
                        PAGE_SIZE);
                if (requested > nr_pages)
                        return -ENOSPC;

                pinned = get_user_pages_fast(
                        (unsigned long) kbufs[i].uptr,
                        requested, FOLL_WRITE, pages);
                if (pinned < 0)
                        return pinned;

                nr_pages -= pinned;
                pages += pinned;
        }

        return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
        unsigned int i;

        if (!pages)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (pages[i])
                        put_page(pages[i]);
        }
}
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_dm_op kdata;
        struct privcmd_dm_op_buf *kbufs;
        unsigned int nr_pages = 0;
        struct page **pages = NULL;
        struct xen_dm_op_buf *xbufs = NULL;
        unsigned int i;
        long rc;

        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;

        if (kdata.num == 0)
                return 0;

        if (kdata.num > privcmd_dm_op_max_num)
                return -E2BIG;

        kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
        if (!kbufs)
                return -ENOMEM;

        if (copy_from_user(kbufs, kdata.ubufs,
                           sizeof(*kbufs) * kdata.num)) {
                rc = -EFAULT;
                goto out;
        }

        for (i = 0; i < kdata.num; i++) {
                if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
                        rc = -E2BIG;
                        goto out;
                }

                if (!access_ok(VERIFY_WRITE, kbufs[i].uptr,
                               kbufs[i].size)) {
                        rc = -EFAULT;
                        goto out;
                }

                nr_pages += DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
                        PAGE_SIZE);
        }

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
                rc = -ENOMEM;
                goto out;
        }

        xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
        if (!xbufs) {
                rc = -ENOMEM;
                goto out;
        }

        rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
        if (rc)
                goto out;

        for (i = 0; i < kdata.num; i++) {
                set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
                xbufs[i].size = kbufs[i].size;
        }

        xen_preemptible_hcall_begin();
        rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
        xen_preemptible_hcall_end();

out:
        unlock_pages(pages, nr_pages);
        kfree(xbufs);
        kfree(pages);
        kfree(kbufs);

        return rc;
}
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        domid_t dom;

        if (copy_from_user(&dom, udata, sizeof(dom)))
                return -EFAULT;

        /* Set restriction to the specified domain, or check it matches */
        if (data->domid == DOMID_INVALID)
                data->domid = dom;
        else if (data->domid != dom)
                return -EINVAL;

        return 0;
}
static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOSYS;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(file, udata, 1);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH_V2:
                ret = privcmd_ioctl_mmap_batch(file, udata, 2);
                break;

        case IOCTL_PRIVCMD_DM_OP:
                ret = privcmd_ioctl_dm_op(file, udata);
                break;

        case IOCTL_PRIVCMD_RESTRICT:
                ret = privcmd_ioctl_restrict(file, udata);
                break;

        default:
                break;
        }

        return ret;
}
static int privcmd_open(struct inode *ino, struct file *file)
{
        struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

        if (!data)
                return -ENOMEM;

        /* DOMID_INVALID implies no restriction */
        data->domid = DOMID_INVALID;

        file->private_data = data;
        return 0;
}
static int privcmd_release(struct inode *ino, struct file *file)
{
        struct privcmd_data *data = file->private_data;

        kfree(data);
        return 0;
}
static void privcmd_close(struct vm_area_struct *vma)
{
        struct page **pages = vma->vm_private_data;
        int numpgs = vma_pages(vma);
        int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
        int rc;

        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;

        rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
        if (rc == 0)
                free_xenballooned_pages(numpgs, pages);
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);
        kfree(pages);
}
static int privcmd_fault(struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
               vmf->pgoff, (void *)vmf->address);

        return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
        .close = privcmd_close,
        .fault = privcmd_fault
};
*file
, struct vm_area_struct
*vma
)
823 /* DONTCOPY is essential for Xen because copy_page_range doesn't know
824 * how to recreate these mappings */
825 vma
->vm_flags
|= VM_IO
| VM_PFNMAP
| VM_DONTCOPY
|
826 VM_DONTEXPAND
| VM_DONTDUMP
;
827 vma
->vm_ops
= &privcmd_vm_ops
;
828 vma
->vm_private_data
= NULL
;
/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
                        unsigned long addr, void *data)
{
        return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages)
{
        return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
                                   is_mapped_fn, NULL) != 0;
}
const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .open = privcmd_open,
        .release = privcmd_release,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};
static int __init privcmd_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        err = misc_register(&privcmd_dev);
        if (err != 0) {
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }

        return 0;
}

static void __exit privcmd_exit(void)
{
        misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);