/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
		"the gntdev device");

static atomic_t pages_mapped = ATOMIC_INIT(0);

static int use_ptemod;
#define populate_freeable_maps use_ptemod
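
/*
 * use_ptemod is set at init time when the domain is not auto-translated
 * (i.e. classic PV): there, grant mappings are installed by rewriting the
 * user PTEs via find_grant_ptes(), and an mmu notifier is needed to tear
 * the mappings down when the VMA goes away.  On auto-translated (HVM/PVH)
 * domains, the granted frames are simply inserted with vm_insert_page().
 */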

struct gntdev_priv {
	/* maps with visible offsets in the file descriptor */
	struct list_head maps;
	/* maps that are not visible; will be freed on munmap.
	 * Only populated if populate_freeable_maps == 1 */
	struct list_head freeable_maps;
	/* lock protects maps and freeable_maps */
	struct mutex lock;
	struct mm_struct *mm;
	struct mmu_notifier mn;
};

struct unmap_notify {
	int flags;
	/* Address relative to the start of the grant_map */
	int addr;
	int event;
};

struct grant_map {
	struct list_head next;
	struct vm_area_struct *vma;
	int index;
	int count;
	int flags;
	atomic_t users;
	struct unmap_notify notify;
	struct ioctl_gntdev_grant_ref *grants;
	struct gnttab_map_grant_ref   *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_map_grant_ref   *kmap_ops;
	struct gnttab_unmap_grant_ref *kunmap_ops;
	struct page **pages;
	unsigned long pages_vm_start;
};

static int unmap_grant_pages(struct grant_map *map, int offset, int pages);

/* ------------------------------------------------------------------ */

static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
		       map->index, map->count,
		       map->index == text_index && text ? text : "");
#endif
}

static void gntdev_free_map(struct grant_map *map)
{
	if (map == NULL)
		return;

	if (map->pages)
		gnttab_free_pages(map->count, map->pages);
	kfree(map->pages);
	kfree(map->grants);
	kfree(map->map_ops);
	kfree(map->unmap_ops);
	kfree(map->kmap_ops);
	kfree(map->kunmap_ops);
	kfree(map);
}

static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
	struct grant_map *add;
	int i;

	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
	add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
	add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
	add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
	add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
	if (NULL == add->grants    ||
	    NULL == add->map_ops   ||
	    NULL == add->unmap_ops ||
	    NULL == add->kmap_ops  ||
	    NULL == add->kunmap_ops ||
	    NULL == add->pages)
		goto err;

	if (gnttab_alloc_pages(count, add->pages))
		goto err;

	for (i = 0; i < count; i++) {
		add->map_ops[i].handle = -1;
		add->unmap_ops[i].handle = -1;
		add->kmap_ops[i].handle = -1;
		add->kunmap_ops[i].handle = -1;
	}

	add->index = 0;
	add->count = count;
	atomic_set(&add->users, 1);

	return add;

err:
	gntdev_free_map(add);
	return NULL;
}

static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}

static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
					       int index, int count)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}

static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
{
	if (!map)
		return;

	if (!atomic_dec_and_test(&map->users))
		return;

	atomic_sub(map->count, &pages_mapped);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		evtchn_put(map->notify.event);
	}

	if (populate_freeable_maps && priv) {
		mutex_lock(&priv->lock);
		list_del(&map->next);
		mutex_unlock(&priv->lock);
	}

	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);
	gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */

static int find_grant_ptes(pte_t *pte, pgtable_t token,
			   unsigned long addr, void *data)
{
	struct grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	/*
	 * Set the PTE as special to force get_user_pages_fast() fall
	 * back to the slow path.  If this is not supported as part of
	 * the grant map, it will be done afterwards.
	 */
	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
		flags |= (1 << _GNTMAP_guest_avail0);

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}

#ifdef CONFIG_X86
static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
				     unsigned long addr, void *data)
{
	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
	return 0;
}
#endif

static int map_grant_pages(struct grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
				map->flags, -1 /* handle */);
		}
	} else {
		/*
		 * Setup the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with by find_grant_ptes.
		 */
		for (i = 0; i < map->count; i++) {
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			BUG_ON(PageHighMem(map->pages[i]));

			gnttab_set_map_op(&map->kmap_ops[i], address,
				map->flags | GNTMAP_host_map,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
				map->flags | GNTMAP_host_map, -1);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status) {
			err = -EINVAL;
			continue;
		}

		map->unmap_ops[i].handle = map->map_ops[i].handle;
		if (use_ptemod)
			map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
	}
	return err;
}

struct unmap_grant_pages_callback_data
{
	struct completion completion;
	int result;
};

static void unmap_grant_callback(int result,
				 struct gntab_unmap_queue_data *data)
{
	struct unmap_grant_pages_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int i, err = 0;
	struct gntab_unmap_queue_data unmap_data;
	struct unmap_grant_pages_callback_data data;

	init_completion(&data.completion);
	unmap_data.data = &data;
	unmap_data.done = &unmap_grant_callback;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
		if (pgno >= offset && pgno < offset + pages) {
			/* No need for kmap, pages are in lowmem */
			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	unmap_data.unmap_ops = map->unmap_ops + offset;
	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
	unmap_data.pages = map->pages + offset;
	unmap_data.count = pages;

	gnttab_unmap_refs_async(&unmap_data);

	wait_for_completion(&data.completion);
	if (data.result)
		return data.result;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			map->unmap_ops[offset+i].handle,
			map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
}

static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset+range].handle == -1)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
}

/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	atomic_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;
	struct file *file = vma->vm_file;
	struct gntdev_priv *priv = file->private_data;

	pr_debug("gntdev_vma_close %p\n", vma);
	if (use_ptemod) {
		/* It is possible that an mmu notifier could be running
		 * concurrently, so take priv->lock to ensure that the vma
		 * won't vanish during the unmap_grant_pages call, since we
		 * will spin here until that completes. Such a concurrent call
		 * will not do any unmapping, since that has been done prior
		 * to closing the vma, but it may still iterate the unmap_ops
		 * list.
		 */
		mutex_lock(&priv->lock);
		map->vma = NULL;
		mutex_unlock(&priv->lock);
	}
	vma->vm_private_data = NULL;
	gntdev_put_map(priv, map);
}

static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
						 unsigned long addr)
{
	struct grant_map *map = vma->vm_private_data;

	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}

static struct vm_operations_struct gntdev_vmops = {
	.open = gntdev_vma_open,
	.close = gntdev_vma_close,
	.find_special_page = gntdev_vma_find_special_page,
};

/* ------------------------------------------------------------------ */

static void unmap_if_in_range(struct grant_map *map,
			      unsigned long start, unsigned long end)
{
	unsigned long mstart, mend;
	int err;

	if (!map->vma)
		return;
	if (map->vma->vm_start >= end)
		return;
	if (map->vma->vm_end <= start)
		return;
	mstart = max(start, map->vma->vm_start);
	mend   = min(end,   map->vma->vm_end);
	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
			map->index, map->count,
			map->vma->vm_start, map->vma->vm_end,
			start, end, mstart, mend);
	err = unmap_grant_pages(map,
				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
				(mend - mstart) >> PAGE_SHIFT);
	WARN_ON(err);
}

static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;

	mutex_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		unmap_if_in_range(map, start, end);
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		unmap_if_in_range(map, start, end);
	}
	mutex_unlock(&priv->lock);
}

static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
{
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}

static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	mutex_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	mutex_unlock(&priv->lock);
}

static struct mmu_notifier_ops gntdev_mmu_ops = {
	.release                = mn_release,
	.invalidate_page        = mn_invl_page,
	.invalidate_range_start = mn_invl_range_start,
};

/* ------------------------------------------------------------------ */
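
/*
 * Each open of the device gets its own gntdev_priv: a private list of
 * grant maps plus, in the use_ptemod case, an mmu notifier on the opening
 * process's mm, so that grants are unmapped from Xen's point of view when
 * the owning address space tears down the VMA (or exits) before the file
 * descriptor is closed.
 */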

static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	INIT_LIST_HEAD(&priv->freeable_maps);
	mutex_init(&priv->lock);

	if (use_ptemod) {
		priv->mm = get_task_mm(current);
		if (!priv->mm) {
			kfree(priv);
			return -ENOMEM;
		}
		priv->mn.ops = &gntdev_mmu_ops;
		ret = mmu_notifier_register(&priv->mn, priv->mm);
		mmput(priv->mm);
	}

	if (ret) {
		kfree(priv);
		return ret;
	}

	flip->private_data = priv;
	pr_debug("priv %p\n", priv);

	return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct grant_map *map;

	pr_debug("priv %p\n", priv);

	mutex_lock(&priv->lock);
	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next, struct grant_map, next);
		list_del(&map->next);
		gntdev_put_map(NULL /* already removed */, map);
	}
	WARN_ON(!list_empty(&priv->freeable_maps));
	mutex_unlock(&priv->lock);

	if (use_ptemod)
		mmu_notifier_unregister(&priv->mn, priv->mm);
	kfree(priv);
	return 0;
}

static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(op.count <= 0))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count);
	if (!map)
		return err;

	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(NULL, map);
		return err;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(NULL, map);
		return -EFAULT;
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	mutex_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}
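
/*
 * Illustrative userspace sketch (not part of the driver) of the intended
 * calling sequence; error handling is omitted and remote_domid/gref are
 * placeholder values:
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	struct ioctl_gntdev_map_grant_ref op = {
 *		.count = 1,
 *		.refs[0] = { .domid = remote_domid, .ref = gref },
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, op.index);
 */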

static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		if (populate_freeable_maps)
			list_add_tail(&map->next, &priv->freeable_maps);
		err = 0;
	}
	mutex_unlock(&priv->lock);
	if (map)
		gntdev_put_map(priv, map);
	return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct grant_map *map;
	int rv = -EINVAL;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		goto out_unlock;

	map = vma->vm_private_data;
	if (!map)
		goto out_unlock;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	rv = 0;

 out_unlock:
	up_read(&current->mm->mmap_sem);

	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return rv;
}

static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct grant_map *map;
	int rc;
	int out_flags;
	unsigned int out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	mutex_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
			(map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

 unlock_out:
	mutex_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
}

static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}

static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct grant_map *map;
	int i, err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		pr_warn("Huh? Other mm?\n");
		goto unlock_out;
	}

	atomic_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
				(map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	mutex_unlock(&priv->lock);

	if (use_ptemod) {
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			pr_warn("find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
				map->pages[i]);
			if (err)
				goto out_put_map;
		}
	} else {
#ifdef CONFIG_X86
		/*
		 * If the PTEs were not made special by the grant map
		 * hypercall, do so here.
		 *
		 * This is racy since the mapping is already visible
		 * to userspace but userspace should be well-behaved
		 * enough to not touch it until the mmap() call returns.
		 */
		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
			apply_to_page_range(vma->vm_mm, vma->vm_start,
					    vma->vm_end - vma->vm_start,
					    set_grant_ptes_as_special, NULL);
		}
#endif
		map->pages_vm_start = vma->vm_start;
	}

	return 0;

unlock_out:
	mutex_unlock(&priv->lock);
	return err;

out_unlock_put:
	mutex_unlock(&priv->lock);
out_put_map:
	if (use_ptemod)
		map->vma = NULL;
	gntdev_put_map(priv, map);
	return err;
}

static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		pr_err("Could not register gntdev device\n");
		return err;
	}
	return 0;
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */