// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");
#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");
static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);
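/*
 * IOCTL_PRIVCMD_HYPERCALL: forward a raw hypercall from userspace to
 * the hypervisor. Refused once the fd has been restricted to a single
 * domain, since an arbitrary hypercall could act on any domain.
 */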
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}
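/* Free every page on @pages and reinitialise the list head. */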
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}
/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;

		if (nr > nelem)
			nr = nelem;

		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}
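/* Per-call state threaded through mmap_gfn_range() by traverse_pages(). */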
struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};
static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}
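/*
 * IOCTL_PRIVCMD_MMAP: map one or more contiguous ranges of foreign
 * frames, described by privcmd_mmap_entry records, into the caller's
 * address space. Not available on auto-translated (PVH/HVM) domains;
 * those callers must use IOCTL_PRIVCMD_MMAPBATCH* instead.
 */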
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}
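/* State shared between the two passes of an MMAPBATCH call. */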
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};
/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}
static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}
/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}

	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}
static const struct vm_operations_struct privcmd_vm_ops;
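/*
 * IOCTL_PRIVCMD_MMAPBATCH(_V2): map an array of foreign GFNs into a
 * previously mmap()ed VMA. V1 reports per-frame failures by or'ing
 * error bits into the top nibble of the failing entries in m.arr;
 * V2 writes errno values to the separate m.err array instead.
 */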
static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors,
					   &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}
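/*
 * Pin the user pages backing each dm_op buffer so they stay resident
 * while the (preemptible) hypercall is in flight. *pinned is advanced
 * as pages are pinned so the caller can unpin after a partial failure.
 */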
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long) kbufs[i].uptr,
			requested, FOLL_WRITE, pages);
		if (page_count < 0)
			return page_count;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;
	}

	return 0;
}
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}
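/*
 * IOCTL_PRIVCMD_DM_OP: issue a device-model operation on behalf of
 * userspace. Buffer count and per-buffer size are capped by the
 * dm_op_max_nr_bufs and dm_op_buf_max_size module parameters above.
 */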
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0) {
		nr_pages = pinned;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, nr_pages);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}
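/*
 * IOCTL_PRIVCMD_RESTRICT: restrict this file descriptor to a single
 * domid. Once set, the restriction can only be re-asserted with the
 * same domid; it is never widened or removed.
 */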
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}
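/*
 * IOCTL_PRIVCMD_MMAP_RESOURCE: map a guest resource (for example ioreq
 * server frames) into a previously mmap()ed VMA by way of the
 * XENMEM_acquire_resource memory op.
 */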
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata;
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	memset(&xdata, 0, sizeof(xdata));
	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;
	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num;

		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, (int *)pfns,
						 vma->vm_page_prot,
						 domid,
						 vma->vm_private_data);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = pfns[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}
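/* Top-level ioctl dispatcher for /dev/xen/privcmd. */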
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	default:
		break;
	}

	return ret;
}
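/*
 * Userspace reaches the handlers above via ioctl(2) on /dev/xen/privcmd.
 * A minimal, illustrative sketch of an unrestricted hypercall from a
 * privileged process (userspace side, not part of this file; error
 * handling elided):
 *
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */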
static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;

	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	   struct vm_area_struct *vma,
	   unsigned long addr,
	   unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}
const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};
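/*
 * Register the privcmd device, together with the companion
 * hypercall-buf device (xen_privcmdbuf_dev, declared in "privcmd.h"),
 * when running on Xen.
 */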
static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		misc_deregister(&privcmd_dev);
		return err;
	}

	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);