/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
/* Forward a hypercall request from userspace straight to the hypervisor. */
static long privcmd_ioctl_hypercall(void __user *udata)
{
        struct privcmd_hypercall hypercall;
        long ret;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);

        return ret;
}
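
/*
 * Illustrative only, not part of the original file: a minimal sketch of how
 * a userspace toolstack might drive IOCTL_PRIVCMD_HYPERCALL through the
 * misc device registered below.  The device path and the choice of
 * __HYPERVISOR_xen_version (with arg[0] = 0, i.e. XENVER_version) are
 * assumptions made for the example.
 *
 *      #include <fcntl.h>
 *      #include <sys/ioctl.h>
 *      #include <xen/privcmd.h>
 *
 *      struct privcmd_hypercall call = {
 *              .op  = __HYPERVISOR_xen_version,
 *              .arg = { 0, 0, 0, 0, 0 },       // arg[0] = XENVER_version
 *      };
 *      int fd = open("/dev/xen/privcmd", O_RDWR);
 *      long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */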
static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        /* Items larger than a page are not supported. */
        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                /* Start a fresh page when the next item won't fit. */
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                data += size;
                pageidx += size;
        }

        ret = 0;

fail:
        return ret;
}
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                /* Advance to the next page once this one is exhausted. */
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;
                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;
                pageidx += size;
        }

        return ret;
}
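
/*
 * Illustrative only, not part of the original file: how gather_array() and
 * traverse_pages() are meant to pair up.  "count_entry", "n", "nelem" and
 * "uarr" are made-up names for this sketch.
 *
 *      static int count_entry(void *data, void *state)
 *      {
 *              (*(unsigned long *)state)++;
 *              return 0;               // non-zero would stop the walk
 *      }
 *
 *      LIST_HEAD(pagelist);
 *      unsigned long n = 0;
 *      int rc = gather_array(&pagelist, nelem, sizeof(xen_pfn_t), uarr);
 *      if (!rc && !list_empty(&pagelist))
 *              rc = traverse_pages(nelem, sizeof(xen_pfn_t), &pagelist,
 *                                  count_entry, &n);
 *      free_page_list(&pagelist);
 */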
struct mmap_mfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};
static int mmap_mfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_mfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va + (msg->npages << PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_mfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}
static long privcmd_ioctl_mmap(void __user *udata)
{
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_mfn_state state;

        if (!xen_initial_domain())
                return -EPERM;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) ||
                    !privcmd_enforce_singleshot_mapping(vma))
                        goto out_up;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_mfn_range, &state);

out_up:
        up_write(&mm->mmap_sem);

out:
        free_page_list(&pagelist);

        return rc;
}
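
/*
 * Illustrative only, not part of the original file: a hedged userspace
 * sketch of IOCTL_PRIVCMD_MMAP.  "fd" is an open /dev/xen/privcmd
 * descriptor, and "some_mfn" and "target_domid" are placeholders; the VMA
 * must come from mmap() on the same fd so privcmd_mmap() has prepared it.
 *
 *      void *va = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, 0);
 *      struct privcmd_mmap_entry ent = {
 *              .va     = (unsigned long)va,
 *              .mfn    = some_mfn,             // placeholder frame number
 *              .npages = 1,
 *      };
 *      struct privcmd_mmap cmd = {
 *              .num   = 1,
 *              .dom   = target_domid,          // placeholder domain id
 *              .entry = &ent,
 *      };
 *      int rc = ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd);
 */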
struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int err;

        xen_pfn_t __user *user;
};
static int mmap_batch_fn(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;

        if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
                                       st->vma->vm_page_prot, st->domain) < 0) {
                /* Tag failed frames in place; they are copied back later. */
                *mfnp |= 0xf0000000U;
                st->err++;
        }
        st->va += PAGE_SIZE;

        return 0;
}
static int mmap_return_errors(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;

        return put_user(*mfnp, st->user++);
}
static struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(void __user *udata)
{
        int ret;
        struct privcmd_mmapbatch m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        struct mmap_batch_state state;

        if (!xen_initial_domain())
                return -EPERM;

        if (copy_from_user(&m, udata, sizeof(m)))
                return -EFAULT;

        nr_pages = m.num;
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
                           m.arr);

        if (ret || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, m.addr);
        ret = -EINVAL;
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops ||
            (m.addr != vma->vm_start) ||
            ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
            !privcmd_enforce_singleshot_mapping(vma)) {
                up_write(&mm->mmap_sem);
                goto out;
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;
        state.err = 0;

        ret = traverse_pages(m.num, sizeof(xen_pfn_t),
                             &pagelist, mmap_batch_fn, &state);

        up_write(&mm->mmap_sem);

        if (state.err > 0) {
                /* Copy the tagged array back so userspace can see which
                 * frames failed to map. */
                state.user = m.arr;
                ret = traverse_pages(m.num, sizeof(xen_pfn_t),
                                     &pagelist,
                                     mmap_return_errors, &state);
        }

out:
        free_page_list(&pagelist);

        return ret;
}
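
/*
 * Illustrative only, not part of the original file: a hedged sketch of the
 * userspace side of IOCTL_PRIVCMD_MMAPBATCH, including how failures are
 * reported.  "mfns", "n", "target_domid" and "handle_failure" are
 * placeholders for this example.
 *
 *      void *addr = mmap(NULL, n * 4096, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, 0);
 *      struct privcmd_mmapbatch m = {
 *              .num  = n,
 *              .dom  = target_domid,
 *              .addr = (unsigned long)addr,
 *              .arr  = mfns,
 *      };
 *      if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &m) == 0) {
 *              // The ioctl can succeed while individual frames fail;
 *              // failed entries come back with 0xf0000000 OR-ed in.
 *              for (i = 0; i < n; i++)
 *                      if (mfns[i] & 0xf0000000)
 *                              handle_failure(i);
 *      }
 */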
static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOSYS;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(udata);
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vma, vma->vm_start, vma->vm_end,
               vmf->pgoff, vmf->virtual_address);

        return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct privcmd_vm_ops = {
        .fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Unsupported for auto-translate guests. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}
/*
 * Atomically claim the VMA: the first caller swaps vm_private_data from
 * NULL to a sentinel and wins; any later caller sees non-NULL and fails,
 * so each VMA can be populated at most once.
 */
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
        return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};
static int __init privcmd_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        err = misc_register(&privcmd_dev);
        if (err != 0) {
                printk(KERN_ERR "Could not register Xen privcmd device\n");
                return err;
        }

        return 0;
}
static void __exit privcmd_exit(void)
{
        misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);