// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
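/*
 * Illustrative note (editorial, not part of the driver): the two helpers
 * above bracket device ownership of a cacheable buffer under the DMA API,
 * roughly:
 *
 *	etnaviv_gem_scatter_map(obj);		// clean CPU caches, device owns buffer
 *	... GPU reads/writes the buffer ...
 *	etnaviv_gem_scatterlist_unmap(obj);	// return ownership to the CPU
 *
 * subject to the concurrent-access warning above.
 */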
/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}
void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);

	/* when we start tracking the pin count, then do something here */
}
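/*
 * Usage sketch (illustrative only, mirroring callers later in this file):
 * the page pin helpers above expect the object lock to be held, e.g.
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 */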
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}
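/*
 * Editorial summary of the mapping attributes chosen above:
 *
 *	ETNA_BO_WC       -> pgprot_writecombine()
 *	ETNA_BO_UNCACHED -> pgprot_noncached()
 *	ETNA_BO_CACHED   -> default (cacheable) protection, backed by the
 *	                    object's own shmem file mapping
 */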
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}
static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	etnaviv_iommu_context_get(mmu_context);
	mapping->context = mmu_context;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
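
	return mapping;
}

/*
 * Usage sketch (illustrative only): a successful etnaviv_gem_mapping_get()
 * takes a use count on the mapping and a reference on the object, so callers
 * balance it with etnaviv_gem_mapping_unreference(), e.g.
 *
 *	mapping = etnaviv_gem_mapping_get(obj, mmu_context, va);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	... use mapping->iova in command streams ...
 *	etnaviv_gem_mapping_unreference(mapping);
 */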
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}
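/*
 * Usage sketch (illustrative only): CPU access to a cached BO is bracketed by
 * a prep/fini pair, which is what makes the sync direction above meaningful:
 *
 *	etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	... CPU writes to the buffer ...
 *	etnaviv_gem_cpu_fini(obj);
 */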
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled_rcu(obj->resv, write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout_rcu(obj->resv, write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}
#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}
void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}
static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.vm_ops = &vm_ops,
};
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}
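/*
 * Editorial note: drm_gem_handle_create() takes its own reference for the
 * userspace handle, so the drm_gem_object_put() above only drops the
 * allocation-time reference on both the success and failure paths.
 */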
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);

	return ret;
}