/******************************************************************************
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>
MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)
static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");
struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
	struct vm_area_struct *vma,
	unsigned long addr,
	unsigned long nr_pages);
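
/*
 * IOCTL_PRIVCMD_HYPERCALL: pass an arbitrary hypercall from user space
 * straight to the hypervisor, unless this file handle has been restricted
 * to a specific domain.
 */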
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}
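
/* Free every page on the list and reset the list to empty. */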
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	int ret;
	void *pagedata;
	unsigned pageidx;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;

		pageidx += size;
	}

	return ret;
}
/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;

		if (nr > nelem)
			nr = nelem;

		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;

		nelem -= nr;
	}

	return ret;
}
struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};
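
/*
 * traverse_pages() callback: map one contiguous chunk of foreign frames,
 * described by a privcmd_mmap_entry, at the next expected virtual address.
 */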
static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}
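
/*
 * IOCTL_PRIVCMD_MMAP: map an array of privcmd_mmap_entry ranges from a
 * foreign domain into the calling process (not supported on
 * auto-translated guests).
 */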
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}
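
/*
 * Report one per-frame error back to user space, using the in-array
 * encoding for V1 callers or the separate error array for V2.
 */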
static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
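
/* traverse_pages_block() callback for the error write-back pass. */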
static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}
static const struct vm_operations_struct privcmd_vm_ops;
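
/*
 * IOCTL_PRIVCMD_MMAPBATCH and IOCTL_PRIVCMD_MMAPBATCH_V2: map a batch of
 * foreign frames into a previously set up VMA and report per-frame errors
 * back to user space in a second pass.
 */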
static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	up_write(&mm->mmap_sem);
	goto out;
}
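
/* Pin the user-space buffers backing a dm_op call so they stay resident
 * while the hypervisor accesses them.
 */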
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		unsigned int requested;
		int pinned;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
		if (requested > nr_pages)
			return -ENOSPC;

		pinned = get_user_pages_fast(
			(unsigned long) kbufs[i].uptr,
			requested, FOLL_WRITE, pages);
		if (pinned < 0)
			return pinned;

		nr_pages -= pinned;
		pages += pinned;
	}

	return 0;
}
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unsigned int i;

	if (!pages)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			put_page(pages[i]);
	}
}
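
/*
 * IOCTL_PRIVCMD_DM_OP: forward a device model operation to the hypervisor.
 * The number and size of the buffers are bounded by the dm_op_max_nr_bufs
 * and dm_op_buf_max_size module parameters.
 */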
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(VERIFY_WRITE, kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
	if (rc)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, nr_pages);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}
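
/*
 * IOCTL_PRIVCMD_RESTRICT: restrict this file handle to a single domid;
 * an existing restriction cannot be changed afterwards.
 */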
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}
struct remap_pfn {
	struct mm_struct *mm;
	struct page **pages;
	pgprot_t prot;
	unsigned long i;
};
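
/* apply_to_page_range() callback: install the next ballooned page as a
 * special PTE (auto-translated mmap_resource path).
 */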
static int remap_pfn_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_pfn *r = data;
	struct page *page = r->pages[r->i];
	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));

	set_pte_at(r->mm, addr, ptep, pte);
	r->i++;

	return 0;
}
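
/*
 * IOCTL_PRIVCMD_MMAP_RESOURCE: acquire frames of a guest resource via
 * XENMEM_acquire_resource and map them into the caller's VMA.
 */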
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata;
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	memset(&xdata, 0, sizeof(xdata));
	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;
	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct remap_pfn r = {
			.mm = vma->vm_mm,
			.pages = vma->vm_private_data,
			.prot = vma->vm_page_prot,
		};

		rc = apply_to_page_range(r.mm, kdata.addr,
					 kdata.num << PAGE_SHIFT,
					 remap_pfn_fn, &r);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num;

		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, (int *)pfns,
						 vma->vm_page_prot,
						 domid,
						 vma->vm_private_data);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = pfns[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	up_write(&mm->mmap_sem);
	kfree(pfns);

	return rc;
}
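
/* Top-level ioctl dispatcher for the privcmd device. */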
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	default:
		break;
	}

	return ret;
}
static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}
static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}
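
/* VMA close hook: unmap and return ballooned pages when an
 * auto-translated mapping goes away.
 */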
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}
static int privcmd_vma_range_is_mapped(
	struct vm_area_struct *vma,
	unsigned long addr,
	unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}
const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};
static int __init privcmd_init(void)
{
	int err;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	return 0;
}
static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}
module_init(privcmd_init);
module_exit(privcmd_exit);