/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}
/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}
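
/*
 * Pin the backing pages of the object and make sure a scatter/gather table
 * exists for them; the sg_table is mapped for the device on first use.
 * Must be called with etnaviv_obj->lock held.  Returns the page array or an
 * ERR_PTR() on failure.
 */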
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}
void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}
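
/*
 * mmap entry point: let the DRM core set up the VMA, then hand off to the
 * per-object mmap callback (shmem or userptr).
 */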
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}
int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
		 vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}
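
/*
 * Look up the mapping of this object in the given MMU context; passing a
 * NULL mmu finds a mapping that has been reaped and can be re-used.
 * Called with etnaviv_obj->lock held.
 */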
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}
void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_reference(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
}
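
/*
 * Get a GPU VM mapping for this object, creating one (or re-using a reaped
 * one) if necessary.  On success the mapping's use count is raised and a
 * reference is taken on the GEM object; an ERR_PTR() is returned on failure.
 */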
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_reference(obj);
	return mapping;
}
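
/*
 * Return a kernel virtual mapping of the object, creating it on first use.
 * The mapping is cached in etnaviv_obj->vaddr and stays in place until the
 * object is released.
 */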
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}
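
/*
 * CPU access window: cpu_prep waits for any GPU use recorded in the
 * reservation object (or merely tests it with ETNA_PREP_NOSYNC) and, for
 * cached BOs, syncs the pages for the CPU.  The requested op is remembered
 * so the matching cpu_fini can sync in the same direction for the device.
 */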
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}
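
/* debugfs support: dump each object's state and any unsignaled fences */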
#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}
*obj
, struct seq_file
*m
)
486 struct etnaviv_gem_object
*etnaviv_obj
= to_etnaviv_bo(obj
);
487 struct reservation_object
*robj
= etnaviv_obj
->resv
;
488 struct reservation_object_list
*fobj
;
490 unsigned long off
= drm_vma_node_start(&obj
->vma_node
);
492 seq_printf(m
, "%08x: %c %2d (%2d) %08lx %p %zd\n",
493 etnaviv_obj
->flags
, is_active(etnaviv_obj
) ? 'A' : 'I',
494 obj
->name
, obj
->refcount
.refcount
.counter
,
495 off
, etnaviv_obj
->vaddr
, obj
->size
);
498 fobj
= rcu_dereference(robj
->fence
);
500 unsigned int i
, shared_count
= fobj
->shared_count
;
502 for (i
= 0; i
< shared_count
; i
++) {
503 fence
= rcu_dereference(fobj
->shared
[i
]);
504 etnaviv_gem_describe_fence(fence
, "Shared", m
);
508 fence
= rcu_dereference(robj
->fence_excl
);
510 etnaviv_gem_describe_fence(fence
, "Exclusive", m
);
void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};
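
/*
 * Final GEM object teardown: drop the object from the global list, unmap it
 * from every MMU context it is still mapped in, release the backing store
 * via the per-object ops, and free the object itself.
 */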
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}
int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}
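
/*
 * Allocate a shmem-backed GEM object.  The size is page aligned, and the
 * backing pages are kept out of the movable zone since they will be pinned.
 */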
static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}
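
/*
 * Userptr support: when the pages cannot be pinned directly from the
 * caller's context, the pinning is handed off to a workqueue item that runs
 * against the target task's mm.
 */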
struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};
**etnaviv_gem_userptr_do_get_pages(
749 struct etnaviv_gem_object
*etnaviv_obj
, struct mm_struct
*mm
, struct task_struct
*task
)
751 int ret
= 0, pinned
, npages
= etnaviv_obj
->base
.size
>> PAGE_SHIFT
;
754 unsigned int flags
= 0;
756 pvec
= drm_malloc_ab(npages
, sizeof(struct page
*));
758 return ERR_PTR(-ENOMEM
);
760 if (!etnaviv_obj
->userptr
.ro
)
764 ptr
= etnaviv_obj
->userptr
.ptr
;
766 down_read(&mm
->mmap_sem
);
767 while (pinned
< npages
) {
768 ret
= get_user_pages_remote(task
, mm
, ptr
, npages
- pinned
,
769 flags
, pvec
+ pinned
, NULL
);
773 ptr
+= ret
* PAGE_SIZE
;
776 up_read(&mm
->mmap_sem
);
779 release_pages(pvec
, pinned
, 0);
780 drm_free_large(pvec
);
static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}
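
/*
 * Pin the pages backing a userptr object.  If the pages belong to the
 * current mm they are pinned directly via the fast path; otherwise a
 * get_pages_work item is queued and -EAGAIN is returned so the caller can
 * retry once the worker has populated etnaviv_obj->pages.
 */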
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};
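
/*
 * Wrap a range of user memory in a CACHED buffer object and return a handle
 * for it.  The pages are pinned read-only unless ETNA_USERPTR_WRITE was
 * requested.
 */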
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto unreference;

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}