// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS("DMA_BUF");

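/*
 * Tracks one buffer for the lifetime of its file descriptor: either a
 * dma-buf exported from grant references or a foreign dma-buf imported
 * and granted to another domain. The 'u' union below is discriminated
 * by which of the two code paths created the object.
 */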
struct gntdev_dmabuf {
        struct gntdev_dmabuf_priv *priv;
        struct dma_buf *dmabuf;
        struct list_head next;
        int fd;

        union {
                struct {
                        /* Exported buffers are reference counted. */
                        struct kref refcount;

                        struct gntdev_priv *priv;
                        struct gntdev_grant_map *map;
                } exp;
                struct {
                        /* Granted references of the imported buffer. */
                        grant_ref_t *refs;
                        /* Scatter-gather table of the imported buffer. */
                        struct sg_table *sgt;
                        /* dma-buf attachment of the imported buffer. */
                        struct dma_buf_attachment *attach;
                } imp;
        } u;

        /* Number of pages this buffer has. */
        int nr_pages;
        /* Pages of this buffer (only for dma-buf export). */
        struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
        struct list_head next;
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct completion completion;
};

struct gntdev_dmabuf_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
        /* List of exported DMA buffers. */
        struct list_head exp_list;
        /* List of wait objects. */
        struct list_head exp_wait_list;
        /* List of imported DMA buffers. */
        struct list_head imp_list;
        /* This is the lock which protects dma_buf_xxx lists. */
        struct mutex lock;
        /*
         * We reference this file while exporting dma-bufs, so
         * the grant device context is not destroyed while there are
         * external users alive.
         */
        struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

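/*
 * A wait object pairs the calling thread with the exported buffer it is
 * interested in: the caller drops its buffer reference when arming the
 * object, and dmabuf_exp_release() completes the object once the last
 * reference is gone.
 */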
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
                        struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        init_completion(&obj->completion);
        obj->gntdev_dmabuf = gntdev_dmabuf;

        mutex_lock(&priv->lock);
        list_add(&obj->next, &priv->exp_wait_list);
        /* Put our reference and wait for gntdev_dmabuf's release to fire. */
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
        return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
                                     struct gntdev_dmabuf_wait_obj *obj)
{
        mutex_lock(&priv->lock);
        list_del(&obj->next);
        mutex_unlock(&priv->lock);
        kfree(obj);
}

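/*
 * wait_for_completion_timeout() returns the remaining jiffies on success
 * and 0 on timeout, so a zero result here maps to -ETIMEDOUT.
 */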
static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
                                    u32 wait_to_ms)
{
        if (wait_for_completion_timeout(&obj->completion,
                        msecs_to_jiffies(wait_to_ms)) <= 0)
                return -ETIMEDOUT;

        return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
                                       struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        list_for_each_entry(obj, &priv->exp_wait_list, next)
                if (obj->gntdev_dmabuf == gntdev_dmabuf) {
                        pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
                        complete_all(&obj->completion);
                        break;
                }
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the export list\n");
                        kref_get(&gntdev_dmabuf->u.exp.refcount);
                        ret = gntdev_dmabuf;
                        break;
                }
        mutex_unlock(&priv->lock);
        return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
                                    int wait_to_ms)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct gntdev_dmabuf_wait_obj *obj;
        int ret;

        pr_debug("Will wait for dma-buf with fd %d\n", fd);
        /*
         * Try to find the DMA buffer: if not found, then either the buffer
         * has already been released or the file descriptor is wrong.
         */
        gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        /*
         * gntdev_dmabuf still exists and is reference count locked by us now,
         * so prepare to wait: allocate wait object and add it to the wait list,
         * so we can find it on release.
         */
        obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
        dmabuf_exp_wait_obj_free(priv, obj);
        return ret;
}

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
        struct sg_table *sgt;
        int ret;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                        nr_pages << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return ERR_PTR(ret);
}

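/*
 * The dma_buf_ops callbacks below implement the exporter side: an importer
 * that attaches gets a scatter-gather table built from the granted pages,
 * and the mapping it requests is cached on the attachment until detach.
 */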
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
                                 struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

        gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
                                       GFP_KERNEL);
        if (!gntdev_dmabuf_attach)
                return -ENOMEM;

        gntdev_dmabuf_attach->dir = DMA_NONE;
        attach->priv = gntdev_dmabuf_attach;
        return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

        if (gntdev_dmabuf_attach) {
                struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

                if (sgt) {
                        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                                dma_unmap_sgtable(attach->dev, sgt,
                                                  gntdev_dmabuf_attach->dir,
                                                  DMA_ATTR_SKIP_CPU_SYNC);
                        sg_free_table(sgt);
                }

                kfree(sgt);
                kfree(gntdev_dmabuf_attach);
                attach->priv = NULL;
        }
}

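/*
 * Map the granted pages for an importer. The result is cached on the
 * attachment and reused for repeated maps in the same direction; it is
 * only torn down when the importer detaches.
 */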
static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
                           enum dma_data_direction dir)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
        struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
        struct sg_table *sgt;

        pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
                 attach->dev);

        if (dir == DMA_NONE || !gntdev_dmabuf_attach)
                return ERR_PTR(-EINVAL);

        /* Return the cached mapping when possible. */
        if (gntdev_dmabuf_attach->dir == dir)
                return gntdev_dmabuf_attach->sgt;

        /*
         * Two mappings with different directions for the same attachment are
         * not allowed.
         */
        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                return ERR_PTR(-EBUSY);

        sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
                                  gntdev_dmabuf->nr_pages);
        if (!IS_ERR(sgt)) {
                if (dma_map_sgtable(attach->dev, sgt, dir,
                                    DMA_ATTR_SKIP_CPU_SYNC)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        gntdev_dmabuf_attach->sgt = sgt;
                        gntdev_dmabuf_attach->dir = dir;
                }
        }
        if (IS_ERR(sgt))
                pr_debug("Failed to map sg table for dev %p\n", attach->dev);
        return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dir)
{
        /* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

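/*
 * Final kref release: signal any waiters, unlink the buffer from the
 * export list and drop the file reference taken at export time. Both
 * callers invoke kref_put() with priv->lock held, which protects the
 * list walk in dmabuf_exp_wait_obj_signal().
 */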
static void dmabuf_exp_release(struct kref *kref)
{
        struct gntdev_dmabuf *gntdev_dmabuf =
                container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

        dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
        list_del(&gntdev_dmabuf->next);
        fput(gntdev_dmabuf->priv->filp);
        kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
                                  struct gntdev_grant_map *map)
{
        mutex_lock(&priv->lock);
        list_del(&map->next);
        gntdev_put_map(NULL /* already removed */, map);
        mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
        struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
        struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

        dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
                              gntdev_dmabuf->u.exp.map);
        mutex_lock(&priv->lock);
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
        .attach = dmabuf_exp_ops_attach,
        .detach = dmabuf_exp_ops_detach,
        .map_dma_buf = dmabuf_exp_ops_map_dma_buf,
        .unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
        .release = dmabuf_exp_ops_release,
};

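/*
 * Everything dmabuf_exp_from_pages() needs to build and publish the
 * dma-buf: the owning contexts, the backing pages and, on success,
 * the installed file descriptor in 'fd'.
 */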
struct gntdev_dmabuf_export_args {
        struct gntdev_priv *priv;
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_priv *dmabuf_priv;
        struct device *dev;
        int count;
        struct page **pages;
        u32 fd;
};

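/*
 * Wrap the already grant-mapped pages into a new dma-buf and install a
 * file descriptor for it. On success the buffer holds a reference to the
 * gntdev file so the device context outlives its importers.
 */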
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct gntdev_dmabuf *gntdev_dmabuf;
        int ret;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                return -ENOMEM;

        kref_init(&gntdev_dmabuf->u.exp.refcount);

        gntdev_dmabuf->priv = args->dmabuf_priv;
        gntdev_dmabuf->nr_pages = args->count;
        gntdev_dmabuf->pages = args->pages;
        gntdev_dmabuf->u.exp.priv = args->priv;
        gntdev_dmabuf->u.exp.map = args->map;

        exp_info.exp_name = KBUILD_MODNAME;
        if (args->dev->driver && args->dev->driver->owner)
                exp_info.owner = args->dev->driver->owner;
        else
                exp_info.owner = THIS_MODULE;
        exp_info.ops = &dmabuf_exp_ops;
        exp_info.size = args->count << PAGE_SHIFT;
        exp_info.flags = O_RDWR;
        exp_info.priv = gntdev_dmabuf;

        gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(gntdev_dmabuf->dmabuf)) {
                ret = PTR_ERR(gntdev_dmabuf->dmabuf);
                gntdev_dmabuf->dmabuf = NULL;
                goto fail;
        }

        ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
        if (ret < 0)
                goto fail;

        gntdev_dmabuf->fd = ret;
        args->fd = ret;

        pr_debug("Exporting DMA buffer with fd %d\n", ret);

        mutex_lock(&args->dmabuf_priv->lock);
        list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
        mutex_unlock(&args->dmabuf_priv->lock);
        get_file(gntdev_dmabuf->priv->filp);
        return 0;

fail:
        if (gntdev_dmabuf->dmabuf)
                dma_buf_put(gntdev_dmabuf->dmabuf);
        kfree(gntdev_dmabuf);
        return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
                                 int count)
{
        struct gntdev_grant_map *map;

        if (unlikely(gntdev_test_page_count(count)))
                return ERR_PTR(-EINVAL);

        if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
            (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
                pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
                return ERR_PTR(-EINVAL);
        }

        map = gntdev_alloc_map(priv, count, dmabuf_flags);
        if (!map)
                return ERR_PTR(-ENOMEM);

        return map;
}

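/*
 * Export path entry point: map the foreign grant references into local
 * pages first, then hand the mapped pages to dmabuf_exp_from_pages().
 * On any failure the grant map is removed again.
 */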
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
                                int count, u32 domid, u32 *refs, u32 *fd)
{
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_export_args args;
        int i, ret;

        map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
        if (IS_ERR(map))
                return PTR_ERR(map);

        for (i = 0; i < count; i++) {
                map->grants[i].domid = domid;
                map->grants[i].ref = refs[i];
        }

        mutex_lock(&priv->lock);
        gntdev_add_map(priv, map);
        mutex_unlock(&priv->lock);

        map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
        map->flags |= GNTMAP_device_map;
#endif

        ret = gntdev_map_grant_pages(map);
        if (ret < 0)
                goto out;

        args.priv = priv;
        args.map = map;
        args.dev = priv->dma_dev;
        args.dmabuf_priv = priv->dmabuf_priv;
        args.count = map->count;
        args.pages = map->pages;
        args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

        ret = dmabuf_exp_from_pages(&args);
        if (ret < 0)
                goto out;

        *fd = args.fd;
        return 0;

out:
        dmabuf_exp_remove_map(priv, map);
        return ret;
}

/* DMA buffer import support. */

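/*
 * Grant the foreign domain access to each frame of the imported buffer.
 * Successfully granted references are recorded in 'refs'; on failure the
 * caller ends them again via dmabuf_imp_end_foreign_access(), which skips
 * the entries still set to INVALID_GRANT_REF.
 */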
static int
dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
                                int count, int domid)
{
        grant_ref_t priv_gref_head;
        int i, ret;

        ret = gnttab_alloc_grant_references(count, &priv_gref_head);
        if (ret < 0) {
                pr_debug("Cannot allocate grant references, ret %d\n", ret);
                return ret;
        }

        for (i = 0; i < count; i++) {
                int cur_ref;

                cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
                if (cur_ref < 0) {
                        ret = cur_ref;
                        pr_debug("Cannot claim grant reference, ret %d\n", ret);
                        goto out;
                }

                gnttab_grant_foreign_access_ref(cur_ref, domid,
                                                gfns[i], 0);
                refs[i] = cur_ref;
        }

        return 0;

out:
        gnttab_free_grant_references(priv_gref_head);

        return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
        int i;

        for (i = 0; i < count; i++)
                if (refs[i] != INVALID_GRANT_REF)
                        gnttab_end_foreign_access(refs[i], NULL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
        kfree(gntdev_dmabuf->u.imp.refs);
        kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        int i;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                goto fail_no_free;

        gntdev_dmabuf->u.imp.refs = kcalloc(count,
                                            sizeof(gntdev_dmabuf->u.imp.refs[0]),
                                            GFP_KERNEL);
        if (!gntdev_dmabuf->u.imp.refs)
                goto fail;

        gntdev_dmabuf->nr_pages = count;

        for (i = 0; i < count; i++)
                gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

        return gntdev_dmabuf;

fail:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
        return ERR_PTR(-ENOMEM);
}

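/*
 * Import path entry point: attach to and DMA-map the foreign dma-buf,
 * derive a gfn for every page from its bus address, and grant 'domid'
 * access to those frames. The error paths unwind in strict reverse
 * order of the steps above.
 */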
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
                   int fd, int count, int domid)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret;
        struct dma_buf *dma_buf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct sg_dma_page_iter sg_iter;
        unsigned long *gfns;
        int i;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return ERR_CAST(dma_buf);

        gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
        if (IS_ERR(gntdev_dmabuf)) {
                ret = gntdev_dmabuf;
                goto fail_put;
        }

        gntdev_dmabuf->priv = priv;
        gntdev_dmabuf->fd = fd;

        attach = dma_buf_attach(dma_buf, dev);
        if (IS_ERR(attach)) {
                ret = ERR_CAST(attach);
                goto fail_free_obj;
        }

        gntdev_dmabuf->u.imp.attach = attach;

        sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = ERR_CAST(sgt);
                goto fail_detach;
        }

        /* Check that we have zero offset. */
        if (sgt->sgl->offset) {
                ret = ERR_PTR(-EINVAL);
                pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
                         sgt->sgl->offset);
                goto fail_unmap;
        }

        /* Check number of pages that imported buffer has. */
        if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
                ret = ERR_PTR(-EINVAL);
                pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
                         attach->dmabuf->size, gntdev_dmabuf->nr_pages);
                goto fail_unmap;
        }

        gntdev_dmabuf->u.imp.sgt = sgt;

        gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
        if (!gfns) {
                ret = ERR_PTR(-ENOMEM);
                goto fail_unmap;
        }

        /*
         * Now convert sgt to array of gfns without accessing underlying pages.
         * It is not allowed to access the underlying struct page of an sg table
         * exported by DMA-buf, but since we deal with special Xen dma device here
         * (not a normal physical one) look at the dma addresses in the sg table
         * and then calculate gfns directly from them.
         */
        i = 0;
        for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
                dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
                unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));

                gfns[i++] = pfn_to_gfn(pfn);
        }

        ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
                                                      gntdev_dmabuf->u.imp.refs,
                                                      count, domid));
        kfree(gfns);
        if (IS_ERR(ret))
                goto fail_end_access;

        pr_debug("Imported DMA buffer with fd %d\n", fd);

        mutex_lock(&priv->lock);
        list_add(&gntdev_dmabuf->next, &priv->imp_list);
        mutex_unlock(&priv->lock);

        return gntdev_dmabuf;

fail_end_access:
        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
        dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
fail_free_obj:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
        dma_buf_put(dma_buf);
        return ret;
}

/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the buffer's list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the import list\n");
                        ret = gntdev_dmabuf;
                        list_del(&gntdev_dmabuf->next);
                        break;
                }
        }
        mutex_unlock(&priv->lock);
        return ret;
}

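/*
 * Tear down a previously imported buffer: end the foreign access grants,
 * then undo the attachment and mapping in reverse order of the import.
 */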
static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        pr_debug("Releasing DMA buffer with fd %d\n", fd);

        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
                                      gntdev_dmabuf->nr_pages);

        attach = gntdev_dmabuf->u.imp.attach;

        if (gntdev_dmabuf->u.imp.sgt)
                dma_buf_unmap_attachment_unlocked(attach, gntdev_dmabuf->u.imp.sgt,
                                                  DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        dma_buf_put(dma_buf);

        dmabuf_imp_free_storage(gntdev_dmabuf);

        return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
        struct gntdev_dmabuf *q, *gntdev_dmabuf;

        list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
                dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

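/*
 * For reference, a minimal sketch of driving the export path from user
 * space (illustrative only: error handling is omitted, and 'gntdev_fd',
 * 'remote_domid', 'count' and the already-populated 'refs' array are
 * placeholders supplied by the application):
 *
 *      struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *
 *      op = calloc(1, sizeof(*op) + count * sizeof(op->refs[0]));
 *      op->flags = GNTDEV_DMA_FLAG_WC;
 *      op->count = count;
 *      op->domid = remote_domid;
 *      memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *      ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
 *      // op->fd now holds the file descriptor of the exported dma-buf.
 */
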
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
                                       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
        struct ioctl_gntdev_dmabuf_exp_from_refs op;
        u32 *refs;
        long ret;

        if (use_ptemod) {
                pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
                         use_ptemod);
                return -EINVAL;
        }

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        if (unlikely(gntdev_test_page_count(op.count)))
                return -EINVAL;

        refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
        if (!refs)
                return -ENOMEM;

        if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
                ret = -EFAULT;
                goto out;
        }

        ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
                                   op.domid, refs, &op.fd);
        if (ret)
                goto out;

        if (copy_to_user(u, &op, sizeof(op)) != 0)
                ret = -EFAULT;

out:
        kfree(refs);
        return ret;
}

*priv
,
773 struct ioctl_gntdev_dmabuf_exp_wait_released __user
*u
)
775 struct ioctl_gntdev_dmabuf_exp_wait_released op
;
777 if (copy_from_user(&op
, u
, sizeof(op
)) != 0)
780 return dmabuf_exp_wait_released(priv
->dmabuf_priv
, op
.fd
,
784 long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv
*priv
,
785 struct ioctl_gntdev_dmabuf_imp_to_refs __user
*u
)
787 struct ioctl_gntdev_dmabuf_imp_to_refs op
;
788 struct gntdev_dmabuf
*gntdev_dmabuf
;
791 if (copy_from_user(&op
, u
, sizeof(op
)) != 0)
794 if (unlikely(gntdev_test_page_count(op
.count
)))
797 gntdev_dmabuf
= dmabuf_imp_to_refs(priv
->dmabuf_priv
,
798 priv
->dma_dev
, op
.fd
,
800 if (IS_ERR(gntdev_dmabuf
))
801 return PTR_ERR(gntdev_dmabuf
);
803 if (copy_to_user(u
->refs
, gntdev_dmabuf
->u
.imp
.refs
,
804 sizeof(*u
->refs
) * op
.count
) != 0) {
811 dmabuf_imp_release(priv
->dmabuf_priv
, op
.fd
);
long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
                                     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
        struct ioctl_gntdev_dmabuf_imp_release op;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
        struct gntdev_dmabuf_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        mutex_init(&priv->lock);
        INIT_LIST_HEAD(&priv->exp_list);
        INIT_LIST_HEAD(&priv->exp_wait_list);
        INIT_LIST_HEAD(&priv->imp_list);

        priv->filp = filp;

        return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
        dmabuf_imp_release_all(priv);
        kfree(priv);
}