/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);

static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}

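/*
 * Illustrative only (not part of this file): user space reaches the
 * hypercall path through /dev/xen/privcmd. The struct layout comes from
 * xen/privcmd.h; the hypercall chosen below is just an example.
 *
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version, 0, 0, 0, 0 },
 *	};
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */
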
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
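/*
 * Note: pageidx starts at PAGE_SIZE so the first loop iteration always
 * allocates a fresh page; thereafter a new page is allocated whenever
 * the next element would not fit in the remainder of the current one,
 * so elements never straddle a page boundary.
 */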
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	int ret;
	unsigned pageidx;
	void *pagedata;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
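/*
 * Each callback invocation therefore sees up to PAGE_SIZE/size elements
 * at once, which is what lets mmap_batch_fn below hand whole batches of
 * gfns to the hypervisor instead of remapping one frame at a time.
 */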
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;

		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}

struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

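/*
 * The address-space wrap check below relies on unsigned arithmetic:
 * -st->va is the distance from st->va to the top of the address space,
 * so any range whose byte length reaches it would wrap around.
 */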
static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
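/*
 * On return from xen_remap_domain_gfn_array the gfn array doubles, via
 * the (int *)gfnp cast, as a per-frame status array; mmap_return_errors
 * later walks the same pages to copy those statuses back to user space.
 */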
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the top nibble
			 * of the 32-bit gfn (with its known limitations
			 * vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

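/*
 * Example of the V1 encoding: a gfn of 0x1234 that failed to map comes
 * back as 0x1234 | PRIVCMD_MMAPBATCH_MFN_ERROR, i.e. the status lives
 * in the top bits of the frame number itself.
 */
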
static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.index = 0;
	state.global_error = 0;
	state.version = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors,
					   &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	up_write(&mm->mmap_sem);
	goto out;
}

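/*
 * Illustrative only (not part of this file): a V2 batch map from user
 * space. The caller mmap()s a window on /dev/xen/privcmd first, then
 * asks for foreign frames to be mapped into it; "dom" and the gfn
 * values are placeholders.
 *
 *	xen_pfn_t arr[2] = { gfn0, gfn1 };
 *	int err[2];
 *	void *addr = mmap(NULL, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num = 2, .dom = dom, .addr = (__u64)addr,
 *		.arr = arr, .err = err,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 */
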
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(udata, 2);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}

static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

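/*
 * A fault on a privcmd VMA is always a caller bug: mappings are only
 * ever established up front via the ioctls above, never on demand.
 */
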
static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
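/*
 * apply_to_page_range() visits every pte in the range and stops at the
 * first callback that returns non-zero, so one already-populated pte is
 * enough for privcmd_vma_range_is_mapped() to report the range as used.
 */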
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}
	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);