/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
		"the gntdev device");

static atomic_t pages_mapped = ATOMIC_INIT(0);

/* Set when running on PV: grants are mapped by rewriting the user ptes. */
static int use_ptemod;
#define populate_freeable_maps use_ptemod
struct gntdev_priv {
	/* maps with visible offsets in the file descriptor */
	struct list_head maps;
	/* maps that are not visible; will be freed on munmap.
	 * Only populated if populate_freeable_maps == 1 */
	struct list_head freeable_maps;
	/* lock protects maps and freeable_maps */
	spinlock_t lock;
	struct mm_struct *mm;
	struct mmu_notifier mn;
};
struct unmap_notify {
	int flags;
	/* Address relative to the start of the grant_map */
	int addr;
	int event;
};

struct grant_map {
	struct list_head next;
	struct vm_area_struct *vma;
	int index;
	int count;
	int flags;
	atomic_t users;
	struct unmap_notify notify;
	struct ioctl_gntdev_grant_ref *grants;
	struct gnttab_map_grant_ref   *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_map_grant_ref   *kmap_ops;
	struct page **pages;
};
static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
/* ------------------------------------------------------------------ */
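/* Dump the list of maps attached to a private instance (debug builds only). */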
static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
			 map->index, map->count,
			 map->index == text_index && text ? text : "");
#endif
}
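/* Release the ballooned pages and the op arrays backing a grant_map. */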
static void gntdev_free_map(struct grant_map *map)
{
	if (map == NULL)
		return;

	if (map->pages)
		free_xenballooned_pages(map->count, map->pages);
	kfree(map->pages);
	kfree(map->grants);
	kfree(map->map_ops);
	kfree(map->unmap_ops);
	kfree(map->kmap_ops);
	kfree(map);
}
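/*
 * Allocate a grant_map large enough for @count grants, including the
 * map/unmap/kmap op arrays and the backing ballooned pages. All handles
 * start out as -1 ("not mapped"). Returns NULL on allocation failure.
 */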
static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
	struct grant_map *add;
	int i;

	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
	add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
	add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
	add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
	if (NULL == add->grants    ||
	    NULL == add->map_ops   ||
	    NULL == add->unmap_ops ||
	    NULL == add->kmap_ops  ||
	    NULL == add->pages)
		goto err;

	if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
		goto err;

	for (i = 0; i < count; i++) {
		add->map_ops[i].handle = -1;
		add->unmap_ops[i].handle = -1;
		add->kmap_ops[i].handle = -1;
	}

	add->index = 0;
	add->count = count;
	atomic_set(&add->users, 1);

	return add;

err:
	gntdev_free_map(add);
	return NULL;
}
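/*
 * Insert @add into the private map list, keeping the list sorted by index
 * and placing the new map in the first index range large enough to hold it.
 */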
static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}
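/*
 * Look up a map by its page index; if @count is non-zero, the size must
 * match as well. Caller must hold priv->lock.
 */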
static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
					       int index, int count)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}
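/*
 * Drop a reference to a map. The final put sends the unmap notification
 * (if armed), removes the map from priv->freeable_maps and frees it.
 */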
static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
{
	if (!map)
		return;

	if (!atomic_dec_and_test(&map->users))
		return;

	atomic_sub(map->count, &pages_mapped);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		evtchn_put(map->notify.event);
	}

	if (populate_freeable_maps && priv) {
		spin_lock(&priv->lock);
		list_del(&map->next);
		spin_unlock(&priv->lock);
	}

	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);
	gntdev_free_map(map);
}
/* ------------------------------------------------------------------ */
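/*
 * apply_to_page_range() callback: record the machine address of each user
 * pte in the map/unmap ops, so the hypervisor can rewrite the pte when the
 * grant is mapped (GNTMAP_contains_pte).
 */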
static int find_grant_ptes(pte_t *pte, pgtable_t token,
			   unsigned long addr, void *data)
{
	struct grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}
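/*
 * Issue the batched grant-map hypercall for a map and propagate the
 * returned handles into the prepared unmap ops.
 */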
static int map_grant_pages(struct grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
					  map->grants[i].ref,
					  map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
					    map->flags, -1 /* handle */);
		}
	} else {
		/*
		 * Setup the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with by find_grant_ptes.
		 */
		for (i = 0; i < map->count; i++) {
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			BUG_ON(PageHighMem(map->pages[i]));

			gnttab_set_map_op(&map->kmap_ops[i], address,
					  map->flags | GNTMAP_host_map,
					  map->grants[i].ref,
					  map->grants[i].domid);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			      map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status)
			err = -EINVAL;
		else {
			BUG_ON(map->map_ops[i].handle == -1);
			map->unmap_ops[i].handle = map->map_ops[i].handle;
			pr_debug("map handle=%d\n", map->map_ops[i].handle);
		}
	}
	return err;
}
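/*
 * Unmap a contiguous range of grants whose handles are all valid, clearing
 * the notification byte first if it falls inside the range.
 */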
static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int i, err = 0;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
		if (pgno >= offset && pgno < offset + pages) {
			/* No need for kmap, pages are in lowmem */
			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	err = gnttab_unmap_refs(map->unmap_ops + offset,
				use_ptemod ? map->kmap_ops + offset : NULL,
				map->pages + offset, pages);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			 map->unmap_ops[offset+i].handle,
			 map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
}
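/*
 * Unmap an arbitrary range, skipping "holes" of already-unmapped grants;
 * each contiguous run of valid handles is passed to __unmap_grant_pages().
 */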
static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			/* Stop at the hole: entries [offset, offset+range)
			 * all carry valid handles. */
			if (map->unmap_ops[offset+range].handle == -1)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
}
/* ------------------------------------------------------------------ */
static void gntdev_vma_open(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	atomic_inc(&map->users);
}
static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;
	struct file *file = vma->vm_file;
	struct gntdev_priv *priv = file->private_data;

	pr_debug("gntdev_vma_close %p\n", vma);
	if (use_ptemod) {
		/* It is possible that an mmu notifier could be running
		 * concurrently, so take priv->lock to ensure that the vma won't
		 * vanish during the unmap_grant_pages call, since we will
		 * spin here until that completes. Such a concurrent call will
		 * not do any unmapping, since that has been done prior to
		 * closing the vma, but it may still iterate the unmap_ops list.
		 */
		spin_lock(&priv->lock);
		map->vma = NULL;
		spin_unlock(&priv->lock);
	}
	vma->vm_private_data = NULL;
	gntdev_put_map(priv, map);
}
static struct vm_operations_struct gntdev_vmops = {
	.open  = gntdev_vma_open,
	.close = gntdev_vma_close,
};
/* ------------------------------------------------------------------ */
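/* Unmap the part of a map that overlaps the invalidated [start, end) range. */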
static void unmap_if_in_range(struct grant_map *map,
			      unsigned long start, unsigned long end)
{
	unsigned long mstart, mend;
	int err;

	if (!map->vma)
		return;
	if (map->vma->vm_start >= end)
		return;
	if (map->vma->vm_end <= start)
		return;
	mstart = max(start, map->vma->vm_start);
	mend   = min(end,   map->vma->vm_end);
	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
		 map->index, map->count,
		 map->vma->vm_start, map->vma->vm_end,
		 start, end, mstart, mend);
	err = unmap_grant_pages(map,
				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
				(mend - mstart) >> PAGE_SHIFT);
	WARN_ON(err);
}
static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		unmap_if_in_range(map, start, end);
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		unmap_if_in_range(map, start, end);
	}
	spin_unlock(&priv->lock);
}
static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
{
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}
static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
			 map->index, map->count,
			 map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
			 map->index, map->count,
			 map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}
static struct mmu_notifier_ops gntdev_mmu_ops = {
	.release                = mn_release,
	.invalidate_page        = mn_invl_page,
	.invalidate_range_start = mn_invl_range_start,
};
/* ------------------------------------------------------------------ */
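/*
 * file_operations: each open gets its own gntdev_priv; on PV (use_ptemod)
 * an mmu notifier is registered so grants are unmapped before the mm
 * goes away.
 */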
static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	INIT_LIST_HEAD(&priv->freeable_maps);
	spin_lock_init(&priv->lock);

	if (use_ptemod) {
		priv->mm = get_task_mm(current);
		if (!priv->mm) {
			kfree(priv);
			return -ENOMEM;
		}
		priv->mn.ops = &gntdev_mmu_ops;
		ret = mmu_notifier_register(&priv->mn, priv->mm);
		mmput(priv->mm);
	}

	if (ret) {
		kfree(priv);
		return ret;
	}

	flip->private_data = priv;
	pr_debug("priv %p\n", priv);

	return 0;
}
static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct grant_map *map;

	pr_debug("priv %p\n", priv);

	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next, struct grant_map, next);
		list_del(&map->next);
		gntdev_put_map(NULL /* already removed */, map);
	}
	WARN_ON(!list_empty(&priv->freeable_maps));

	if (use_ptemod)
		mmu_notifier_unregister(&priv->mn, priv->mm);
	kfree(priv);
	return 0;
}
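/*
 * IOCTL_GNTDEV_MAP_GRANT_REF: allocate a map for op.count grants, copy in
 * the grant references and return the file offset to pass to mmap().
 */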
static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(op.count <= 0))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count);
	if (!map)
		return err;

	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(NULL, map);
		return err;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(NULL, map);
		return -EFAULT;
	}

	spin_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	spin_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		if (populate_freeable_maps)
			list_add_tail(&map->next, &priv->freeable_maps);
		err = 0;
	}
	spin_unlock(&priv->lock);
	if (map)
		gntdev_put_map(priv, map);
	return err;
}
static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct grant_map *map;
	int rv = -EINVAL;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		goto out_unlock;

	map = vma->vm_private_data;
	if (!map)
		goto out_unlock;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	rv = 0;

 out_unlock:
	up_read(&current->mm->mmap_sem);

	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return rv;
}
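/*
 * IOCTL_GNTDEV_SET_UNMAP_NOTIFY: arm an unmap notification (clear a byte
 * and/or signal an event channel) for the map containing op.index.
 */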
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct grant_map *map;
	int rc;
	int out_flags;
	unsigned int out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	spin_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
	    (map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

 unlock_out:
	spin_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
}
static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}
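/*
 * mmap() a previously set up map: on PV the user ptes are populated via
 * apply_to_page_range()/find_grant_ptes(); on HVM the ballooned pages are
 * simply inserted with vm_insert_page().
 */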
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct grant_map *map;
	int i, err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
		 index, count, vma->vm_start, vma->vm_pgoff);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		pr_warn("Huh? Other mm?\n");
		goto unlock_out;
	}

	atomic_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
		    (map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	spin_unlock(&priv->lock);

	if (use_ptemod) {
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			pr_warn("find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
					     map->pages[i]);
			if (err)
				goto out_put_map;
		}
	}

	return 0;

unlock_out:
	spin_unlock(&priv->lock);
	return err;

out_unlock_put:
	spin_unlock(&priv->lock);
out_put_map:
	if (use_ptemod)
		map->vma = NULL;
	gntdev_put_map(priv, map);
	return err;
}
static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};
static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};
/* ------------------------------------------------------------------ */
static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		pr_err("Could not register gntdev device\n");
		return err;
	}
	return 0;
}
static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}
module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */