drivers/gpu/drm/etnaviv/etnaviv_gem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent:
         *
         * WARNING: The DMA API does not support concurrent CPU
         * and device access to the memory area. With BIDIRECTIONAL,
         * we will clean the cache lines which overlap the region,
         * and invalidate all cache lines (partially) contained in
         * the region.
         *
         * If you have dirty data in the overlapping cache lines,
         * that will corrupt the GPU-written data. If you have
         * written into the remainder of the region, this can
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

        if (IS_ERR(p)) {
                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        etnaviv_obj->pages = p;

        return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
                etnaviv_obj->sgt = NULL;
        }
        if (etnaviv_obj->pages) {
                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
                                  true, false);

                etnaviv_obj->pages = NULL;
        }
}
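
/*
 * Return the pages backing the object, populating them and building the
 * scatter/gather table on first use. Must be called with etnaviv_obj->lock
 * held.
 */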
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        if (!etnaviv_obj->pages) {
                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
                if (ret < 0)
                        return ERR_PTR(ret);
        }

        if (!etnaviv_obj->sgt) {
                struct drm_device *dev = etnaviv_obj->base.dev;
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;

                sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
                        return ERR_CAST(sgt);
                }

                etnaviv_obj->sgt = sgt;

                etnaviv_gem_scatter_map(etnaviv_obj);
        }

        return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);
        /* when we start tracking the pin count, then do something here */
}
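
/*
 * Set up a userspace mapping of the object: the page protection is chosen
 * from the BO's caching flags, and cached objects are redirected to the
 * backing shmem file.
 */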
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        pgprot_t vm_page_prot;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        vm_page_prot = vm_get_page_prot(vma->vm_flags);

        if (etnaviv_obj->flags & ETNA_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(etnaviv_obj->base.filp);
                vma->vm_pgoff = 0;
                vma->vm_file = etnaviv_obj->base.filp;

                vma->vm_page_prot = vm_page_prot;
        }

        return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        obj = to_etnaviv_bo(vma->vm_private_data);
        return obj->ops->mmap(obj, vma);
}
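
/* Page fault handler: insert the page backing the faulting address. */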
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages, *page;
        pgoff_t pgoff;
        int err;

        /*
         * Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet. Note that vmf_insert_page() is
         * specifically coded to take care of this, so we don't have to.
         */
        err = mutex_lock_interruptible(&etnaviv_obj->lock);
        if (err)
                return VM_FAULT_NOPAGE;
        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        if (IS_ERR(pages)) {
                err = PTR_ERR(pages);
                return vmf_error(err);
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        page = pages[pgoff];

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

        return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
        int ret;

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
        else
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        return ret;
}
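
/*
 * Find the mapping of the object belonging to the given MMU context, or NULL
 * if the object is not mapped there.
 */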
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu_context *context)
{
        struct etnaviv_vram_mapping *mapping;

        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                if (mapping->context == context)
                        return mapping;
        }

        return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use -= 1;
        mutex_unlock(&etnaviv_obj->lock);

        drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
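
/*
 * Get (or create) the GPU VA mapping of the object in the given MMU context,
 * bumping the mapping's use count and taking a reference on the object.
 */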
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
        struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
        u64 va)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
        struct page **pages;
        int ret = 0;

        mutex_lock(&etnaviv_obj->lock);
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
        if (mapping) {
                /*
                 * Holding the object lock prevents the use count changing
                 * beneath us. If the use count is zero, the MMU might be
                 * reaping this object, so take the lock and re-check that
                 * the MMU owns this mapping to close this race.
                 */
                if (mapping->use == 0) {
                        mutex_lock(&mmu_context->lock);
                        if (mapping->context == mmu_context)
                                mapping->use += 1;
                        else
                                mapping = NULL;
                        mutex_unlock(&mmu_context->lock);
                        if (mapping)
                                goto out;
                } else {
                        mapping->use += 1;
                        goto out;
                }
        }

        pages = etnaviv_gem_get_pages(etnaviv_obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /*
         * See if we have a reaped vram mapping we can re-use before
         * allocating a fresh mapping.
         */
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
        if (!mapping) {
                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
                if (!mapping) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_LIST_HEAD(&mapping->scan_node);
                mapping->object = etnaviv_obj;
        } else {
                list_del(&mapping->obj_node);
        }

        etnaviv_iommu_context_get(mmu_context);
        mapping->context = mmu_context;
        mapping->use = 1;

        ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
                                    mmu_context->global->memory_base,
                                    mapping, va);
        if (ret < 0) {
                etnaviv_iommu_context_put(mmu_context);
                kfree(mapping);
        } else {
                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
        }

out:
        mutex_unlock(&etnaviv_obj->lock);

        if (ret)
                return ERR_PTR(ret);

        /* Take a reference on the object */
        drm_gem_object_get(obj);
        return mapping;
}
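
/* Return a kernel virtual mapping of the object, creating it on first use. */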
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->vaddr)
                return etnaviv_obj->vaddr;

        mutex_lock(&etnaviv_obj->lock);
        /*
         * Need to check again, as we might have raced with another thread
         * while waiting for the mutex.
         */
        if (!etnaviv_obj->vaddr)
                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
        struct page **pages;

        lockdep_assert_held(&obj->lock);

        pages = etnaviv_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, obj->base.size >> PAGE_SHIFT,
                    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
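
/* Translate a cpu_prep/cpu_fini op mask into the matching DMA sync direction. */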
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
        if (op & ETNA_PREP_READ)
                return DMA_FROM_DEVICE;
        else if (op & ETNA_PREP_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_BIDIRECTIONAL;
}
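
/*
 * Prepare the object for CPU access: wait for outstanding GPU work (unless
 * ETNA_PREP_NOSYNC is set) and sync cached buffers back to the CPU.
 */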
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
        int ret;

        if (!etnaviv_obj->sgt) {
                void *ret;

                mutex_lock(&etnaviv_obj->lock);
                ret = etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);
        }

        if (op & ETNA_PREP_NOSYNC) {
                if (!dma_resv_test_signaled_rcu(obj->resv, write))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

                ret = dma_resv_wait_timeout_rcu(obj->resv, write, true, remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
        }

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
                                    etnaviv_obj->sgt->nents,
                                    etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }

        return 0;
}
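
/* Finish CPU access: hand cached buffers back to the device. */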
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
                        etnaviv_obj->sgt->nents,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }

        return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
        const char *type, struct seq_file *m)
{
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                seq_printf(m, "\t%9s: %s %s seq %llu\n",
                           type,
                           fence->ops->get_driver_name(fence),
                           fence->ops->get_timeline_name(fence),
                           fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct dma_resv *robj = obj->resv;
        struct dma_resv_list *fobj;
        struct dma_fence *fence;
        unsigned long off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                   etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                   obj->name, kref_read(&obj->refcount),
                   off, etnaviv_obj->vaddr, obj->size);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        etnaviv_gem_describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                etnaviv_gem_describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
        struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int count = 0;
        size_t size = 0;

        mutex_lock(&priv->gem_lock);
        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
                struct drm_gem_object *obj = &etnaviv_obj->base;

                seq_puts(m, "   ");
                etnaviv_gem_describe(obj, m);
                count++;
                size += obj->size;
        }
        mutex_unlock(&priv->gem_lock);

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
        vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_mmap_obj,
};
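
/* Final unref of the object: tear down all GPU VA mappings and free it. */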
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;

        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));

        mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
        mutex_unlock(&priv->gem_lock);

        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
                struct etnaviv_iommu_context *context = mapping->context;

                WARN_ON(mapping->use);

                if (context) {
                        etnaviv_iommu_unmap_gem(context, mapping);
                        etnaviv_iommu_context_put(context);
                }

                list_del(&mapping->obj_node);
                kfree(mapping);
        }

        drm_gem_free_mmap_offset(obj);
        etnaviv_obj->ops->release(etnaviv_obj);
        drm_gem_object_release(obj);

        kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        mutex_lock(&priv->gem_lock);
        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);
}
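
/* Common allocation and initialisation for shmem-backed and private BOs. */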
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
        struct etnaviv_gem_object *etnaviv_obj;
        unsigned sz = sizeof(*etnaviv_obj);
        bool valid = true;

        /* validate flags */
        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                valid = false;
        }

        if (!valid) {
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        (flags & ETNA_BO_CACHE_MASK));
                return -EINVAL;
        }

        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
        if (!etnaviv_obj)
                return -ENOMEM;

        etnaviv_obj->flags = flags;
        etnaviv_obj->ops = ops;

        mutex_init(&etnaviv_obj->lock);
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);

        *obj = &etnaviv_obj->base;

        return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        u32 size, u32 flags, u32 *handle)
{
        struct drm_gem_object *obj = NULL;
        int ret;

        size = PAGE_ALIGN(size);

        ret = etnaviv_gem_new_impl(dev, size, flags,
                                   &etnaviv_gem_shmem_ops, &obj);
        if (ret)
                goto fail;

        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto fail;

        /*
         * Our buffers are kept pinned, so allocating them from the MOVABLE
         * zone is a really bad idea, and conflicts with CMA. See comments
         * above new_inode() why this is required _and_ expected if you're
         * going to pin these pages.
         */
        mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
                             __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

        etnaviv_gem_obj_add(dev, obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
fail:
        drm_gem_object_put_unlocked(obj);

        return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
        struct drm_gem_object *obj;
        int ret;

        ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
        if (ret)
                return ret;

        drm_gem_private_object_init(dev, obj, size);

        *res = to_etnaviv_bo(obj);

        return 0;
}
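
/* Pin the userspace pages backing a userptr BO with get_user_pages_fast(). */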
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

        might_lock_read(&current->mm->mmap_sem);

        if (userptr->mm != current->mm)
                return -EPERM;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;

        do {
                unsigned num_pages = npages - pinned;
                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
                struct page **pages = pvec + pinned;

                ret = get_user_pages_fast(ptr, num_pages,
                                          !userptr->ro ? FOLL_WRITE : 0, pages);
                if (ret < 0) {
                        release_pages(pvec, pinned);
                        kvfree(pvec);
                        return ret;
                }

                pinned += ret;

        } while (pinned < npages);

        etnaviv_obj->pages = pvec;

        return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
        }
        if (etnaviv_obj->pages) {
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

                release_pages(etnaviv_obj->pages, npages);
                kvfree(etnaviv_obj->pages);
        }
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_userptr_mmap_obj,
};
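
/* Create a BO backed by anonymous userspace memory and return a handle for it. */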
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;

        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

        etnaviv_obj->userptr.ptr = ptr;
        etnaviv_obj->userptr.mm = current->mm;
        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(&etnaviv_obj->base);
        return ret;
}