/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
MODULE_LICENSE("GPL");
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
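/*
 * Pass a hypercall through from userspace: the struct copied in
 * carries the hypercall number plus up to five arguments, which are
 * handed directly to privcmd_call() on the calling vcpu.
 */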
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}
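/*
 * Illustrative userspace call sequence (a hypothetical consumer, not
 * part of this module), assuming the usual /dev/xen/privcmd node:
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *
 * The ioctl's return value is the hypercall's return value.
 */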
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
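/*
 * Note that elements never straddle a page boundary: both
 * gather_array() and traverse_pages() below start a fresh page once
 * fewer than 'size' bytes remain, so the two walks agree on the
 * layout of the fragmented array.
 */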
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}
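/*
 * State threaded through traverse_pages() by privcmd_ioctl_mmap().
 * mmap_mfn_range() runs once per privcmd_mmap_entry; a nonzero
 * return value aborts the walk.
 */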
struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};
static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va + (msg->npages << PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}
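/*
 * Back the IOCTL_PRIVCMD_MMAP call: copy the array of mmap entries
 * in from userspace, then map each foreign-frame chunk into the
 * single vma that must already cover the whole requested range.
 */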
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);
	return rc;
}
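/*
 * State for the batched variant.  Frames that fail to map are
 * flagged in place (by ORing 0xf0000000 into the frame number) and
 * copied back to the caller's array afterwards, rather than failing
 * the whole ioctl.
 */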
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int err;

	xen_pfn_t __user *user;
};
static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
				       st->vma->vm_page_prot, st->domain) < 0) {
		*mfnp |= 0xf0000000U;
		st->err++;
	}
	st->va += PAGE_SIZE;

	return 0;
}
static int mmap_return_errors(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	return put_user(*mfnp, st->user++);
}
static struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(void __user *udata)
{
	int ret;
	struct privcmd_mmapbatch m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&m, udata, sizeof(m)))
		return -EFAULT;

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
			   m.arr);

	if (ret || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	ret = -EINVAL;
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		goto out;
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.err = 0;

	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
			     &pagelist, mmap_batch_fn, &state);

	up_write(&mm->mmap_sem);

	if (state.err > 0) {
		state.user = m.arr;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
				     &pagelist,
				     mmap_return_errors, &state);
	}

out:
	free_page_list(&pagelist);
	return ret;
}
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
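/*
 * Mappings are installed eagerly by the ioctls above, so a fault can
 * only mean the page was never mapped; there is nothing to do but
 * signal the process.
 */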
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct privcmd_vm_ops = {
	.fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Unsupported for auto-translated guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
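/*
 * vm_private_data doubles as a "mapped once" flag: it starts out
 * NULL in privcmd_mmap() and the atomic xchg() below ensures that
 * two racing ioctls cannot both claim the same vma.
 */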
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif
const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};
static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		printk(KERN_ERR "Could not register Xen privcmd device\n");
		return err;
	}

	return 0;
}
static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}
module_init(privcmd_init);
module_exit(privcmd_exit);