/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}
void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
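
/* A minimal usage sketch (not part of this file): callers that need the
 * backing pages are expected to bracket the access with a get/put pair,
 * e.g.:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... access pages ...
 *	msm_gem_put_pages(obj);
 *
 * The put side is currently a no-op, but per the comment above the
 * pairing matters once pin counting is added.
 */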
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);

		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}
static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (!priv->aspace[id])
			continue;
		msm_gem_unmap_vma(priv->aspace[id],
				&msm_obj->domain[id], msm_obj->sgt);
	}
}
/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
					msm_obj->sgt, obj->size >> PAGE_SHIFT);
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}
/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
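
/* A minimal usage sketch (not part of this file), assuming the common
 * pattern where the address space index comes from the GPU:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_iova(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 *	// 'iova' stays valid in that address space until the bo is deleted
 *
 * The matching msm_gem_put_iova() is currently a stub (see below), but
 * callers should still pair get/put so refcounting can be added later.
 */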
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}
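
/* A minimal usage sketch (not part of this file): CPU access to a bo goes
 * through a get/put pair so vmap_count stays balanced and msm_gem_vunmap()
 * can safely tear the kernel mapping down once it drops to zero:
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	msm_gem_put_vaddr(obj);
 */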
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}
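
/* Sketch of the intended semantics (not spelled out in this file):
 * userspace passes MSM_MADV_DONTNEED when it no longer needs the bo's
 * contents, making it eligible for msm_gem_purge(), and MSM_MADV_WILLNEED
 * to reclaim it; a false return tells the caller the backing store was
 * already purged, so the contents are gone.
 */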
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
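
/* In short: an exclusive (write) submit must wait on all shared fences
 * from other contexts, while a shared (read) submit only needs to wait
 * on the exclusive fence; fences from our own fctx are skipped because
 * the ring executes in FIFO order.
 */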
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct dma_fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;
	unsigned id;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr);

	for (id = 0; id < priv->num_aspaces; id++)
		seq_printf(m, " %08llx", msm_obj->domain[id].iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = &msm_obj->domain[0].node;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}