drivers/xen/gntdev-dmabuf.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif
struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};
struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
};
/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}
static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}
/*
 * Called from dmabuf_exp_release(), whose kref_put() callers hold
 * priv->lock, so the wait list can be walked here without taking the lock.
 */
static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the export list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}
static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if it is not found, the buffer has
	 * either already been released or the provided file descriptor
	 * is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and its reference count is held by us
	 * now, so prepare to wait: allocate a wait object and add it to the
	 * wait list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}
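/*
 * Illustrative sketch (editorial, not part of the driver): how the wait
 * protocol above is expected to be driven from the ioctl path further down
 * in this file. Only names visible in this file are used; the timeout value
 * is made up.
 *
 *	ret = dmabuf_exp_wait_released(priv->dmabuf_priv, fd, 1000);
 *	if (ret == -ENOENT)
 *		;	// buffer already released, nothing to wait for
 *	else if (ret == -ETIMEDOUT)
 *		;	// an importer still holds the dma-buf
 *
 * dmabuf_exp_wait_obj_new() drops the reference taken by
 * dmabuf_exp_wait_obj_get_dmabuf(), so the waiter never keeps the buffer
 * alive itself; it only waits for dmabuf_exp_release() to signal completion.
 */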
/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
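/*
 * Note (editorial): sg_alloc_table_from_pages() may coalesce physically
 * contiguous pages into fewer scatterlist entries, which is why the code
 * below iterates with sgt->nents rather than with the original page count.
 */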
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
						   sgt->nents,
						   gntdev_dmabuf_attach->dir,
						   DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}
static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
				      DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}
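/*
 * Summary of the attachment caching above: the first map with a real
 * direction builds and DMA-maps the sg table and caches it in the
 * per-attachment state; repeated maps with the same direction return the
 * cached table, while a map with a different direction fails with -EBUSY.
 * The actual unmap is deferred to dmabuf_exp_ops_detach().
 */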
static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}
static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	/* Not implemented. */
	return NULL;
}

static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{
	/* Not implemented. */
}

static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	/* Not implemented. */
	return 0;
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
	.map = dmabuf_exp_ops_kmap,
	.unmap = dmabuf_exp_ops_kunmap,
	.mmap = dmabuf_exp_ops_mmap,
};
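/*
 * Editorial note: the kmap/kunmap and mmap callbacks above are deliberate
 * stubs. CPU access to the exported pages through the dma-buf is not
 * supported here; importers are expected to work with the sg table returned
 * by dmabuf_exp_ops_map_dma_buf() instead.
 */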
struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}
static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(count <= 0))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	if (unlikely(gntdev_account_mapped_pages(count))) {
		pr_debug("can't map %d pages: over limit\n", count);
		gntdev_put_map(NULL, map);
		return ERR_PTR(-ENOMEM);
	}
	return map;
}
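/*
 * Editorial summary of the export path implemented by dmabuf_exp_from_refs()
 * below: allocate backing storage with dmabuf_exp_alloc_backing_storage(),
 * fill in the foreign grant references, register the map with gntdev, map
 * the granted pages and finally wrap the resulting pages into a dma-buf via
 * dmabuf_exp_from_pages(), returning the new file descriptor in @fd. On any
 * failure the map is torn down again through dmabuf_exp_remove_map().
 */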
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}
/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}
static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}
static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}
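/*
 * Editorial summary of the import path implemented by dmabuf_imp_to_refs()
 * below: take a reference to the foreign dma-buf (dma_buf_get()), attach and
 * map it to obtain an sg table, verify that the buffer size matches the page
 * count requested by user space, collect the backing struct pages and grant
 * the remote domain access to each of them. The resulting grant references
 * are later copied back to user space by the ioctl handler.
 */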
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that the imported buffer has the expected number of pages. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if the page is valid: an invalid page can happen if
		 * we are given a page from VRAM or another resource which is
		 * not backed by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}
/*
 * Find the imported dma-buf by its file descriptor and remove
 * it from the import list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}
static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}
/* DMA buffer IOCTL support. */
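/*
 * Rough user-space usage sketch (editorial, not part of the driver). It
 * assumes the ioctl command names and structures declared in
 * include/uapi/xen/gntdev.h; error handling and the exact sizing of the
 * variable-length refs array are simplified here.
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *
 *	op = calloc(1, sizeof(*op) + count * sizeof(__u32));
 *	op->flags = GNTDEV_DMA_FLAG_WC;
 *	op->count = count;
 *	op->domid = remote_domid;
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
 *	// op->fd now holds the exported dma-buf file descriptor.
 *
 *	// Later, after user space has closed its copy of the dma-buf fd,
 *	// wait for importers to drop their references:
 *	struct ioctl_gntdev_dmabuf_exp_wait_released wait = {
 *		.fd = dmabuf_fd, .wait_to_ms = 1000,
 *	};
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, &wait);
 */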
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(op.count <= 0))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}
long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}
long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(op.count <= 0))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}
long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	kfree(priv);
}
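/*
 * Editorial note: gntdev_dmabuf_init() and gntdev_dmabuf_fini() are the
 * entry points used by the gntdev core to set up and tear down this
 * per-instance state, presumably from gntdev.c's open and release paths
 * (see gntdev-common.h). Everything else in this file is reached through
 * the ioctl helpers above.
 */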