drivers/gpu/drm/etnaviv/etnaviv_gem.c
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

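/*
 * DMA cache maintenance helpers: map/unmap the object's scatterlist so
 * that the non-coherent GPU and display controller see up-to-date pages.
 * See the comments inside the two functions for the BIDIRECTIONAL caveats.
 */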
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

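/*
 * Get the backing pages for this object and make sure an sg_table exists
 * for them.  Must be called with etnaviv_obj->lock held; the pages stay
 * around until the object is released.
 */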
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

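/*
 * Set up the page protection for a userspace mapping according to the
 * BO cache flags: write-combined, uncached, or (for cached objects)
 * redirected to the shmem file so the pages keep their own address_space.
 */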
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, vmf->address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

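/*
 * Look up the vram mapping for the given MMU context on this object.
 * Passing mmu == NULL finds a mapping that is no longer owned by any
 * MMU (e.g. one that has been reaped) and can be re-used.
 */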
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_reference(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
}

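/*
 * Get (or create) the GPU mapping for this object on the given GPU's MMU,
 * taking a use count on the mapping and a reference on the object.  Drop
 * both again with etnaviv_gem_mapping_unreference().
 */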
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_reference(obj);
	return mapping;
}

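/*
 * Return a kernel virtual address for the object, creating the vmap
 * lazily on first use.  The mapping is cached in etnaviv_obj->vaddr for
 * subsequent calls.
 */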
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

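/*
 * Prepare the object for CPU access: wait (with timeout) for any GPU
 * work still using the buffer, then, for cached BOs, sync the pages for
 * the CPU in the direction implied by the ETNA_PREP_* op.
 */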
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	unsigned long remain =
		op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
	long lret;

	lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
						   write, true, remain);
	if (lret < 0)
		return lret;
	else if (lret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
				       etnaviv_obj->sgt->nents,
				       etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

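/*
 * debugfs support: describe each GEM object, including any unsignalled
 * fences attached to its reservation object.
 */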
#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

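/*
 * Final GEM object teardown: the object must be idle, all vram mappings
 * are unmapped and freed, and the per-type release callback drops the
 * backing pages before the object itself is freed.
 */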
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

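/*
 * Userptr objects: buffer objects backed by pages pinned from a user
 * address range with get_user_pages.  Pinning may be deferred to a
 * workqueue when it cannot be completed in the caller's context.
 */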
struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;
	unsigned int flags = 0;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	if (!etnaviv_obj->userptr.ro)
		flags |= FOLL_WRITE;

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    flags, pvec + pinned, NULL, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

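/*
 * Try to pin the userptr pages directly with __get_user_pages_fast when
 * called from the owning process; otherwise hand the job to a worker and
 * return -EAGAIN so the caller can retry once the worker has finished.
 */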
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto unreference;

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
	return ret;
}