// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

/**
 * DOC: V3D GEM BO management support
 *
 * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
 * GPU and the bus, allowing us to use shmem objects for our storage
 * instead of CMA.
 *
 * Physically contiguous objects may still be imported to V3D, but the
 * driver doesn't allocate physically contiguous objects on its own.
 * Display engines requiring physically contiguous allocations should
 * look into Mesa's "renderonly" support (as used by the Mesa pl111
 * driver) for an example of how to integrate with V3D.
 *
 * Long term, we should support evicting pages from the MMU when under
 * memory pressure (thus the v3d_bo_get_pages() refcounting), but
 * that's not a high priority since our systems tend to not have swap.
 */
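
/* Lifecycle overview (editorial summary of the code below, not normative
 * documentation): a BO is created by v3d_bo_create() or, for dma-buf
 * imports, by v3d_prime_import_sg_table().  Either path reserves a region
 * of the V3D MMU's 4GB address space with drm_mm, pins the backing pages
 * via v3d_bo_get_pages(), and writes the MMU pagetable entries with
 * v3d_mmu_insert_ptes().  v3d_free_object() undoes all of this when the
 * last reference is dropped.
 */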
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"

/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
 * it for DMA.
 */
static int
v3d_bo_get_pages(struct v3d_bo *bo)
{
        struct drm_gem_object *obj = &bo->base;
        struct drm_device *dev = obj->dev;
        int npages = obj->size >> PAGE_SHIFT;
        int ret = 0;

        mutex_lock(&bo->lock);
        if (bo->pages_refcount++ != 0)
                goto unlock;

        if (!obj->import_attach) {
                bo->pages = drm_gem_get_pages(obj);
                if (IS_ERR(bo->pages)) {
                        ret = PTR_ERR(bo->pages);
                        goto unlock;
                }

                bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
                if (IS_ERR(bo->sgt)) {
                        ret = PTR_ERR(bo->sgt);
                        goto put_pages;
                }

                /* Map the pages for use by the GPU. */
                dma_map_sg(dev->dev, bo->sgt->sgl,
                           bo->sgt->nents, DMA_BIDIRECTIONAL);
        } else {
                bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
                if (!bo->pages) {
                        ret = -ENOMEM;
                        goto unlock;
                }

                drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
                                                 NULL, npages);

                /* Note that dma-bufs come in mapped. */
        }

        mutex_unlock(&bo->lock);

        return 0;

put_pages:
        drm_gem_put_pages(obj, bo->pages, true, true);
        bo->pages = NULL;
unlock:
        bo->pages_refcount--;
        mutex_unlock(&bo->lock);
        return ret;
}

static void
v3d_bo_put_pages(struct v3d_bo *bo)
{
        struct drm_gem_object *obj = &bo->base;

        mutex_lock(&bo->lock);
        if (--bo->pages_refcount == 0) {
                if (!obj->import_attach) {
                        dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
                                     bo->sgt->nents, DMA_BIDIRECTIONAL);
                        sg_free_table(bo->sgt);
                        kfree(bo->sgt);
                        drm_gem_put_pages(obj, bo->pages, true, true);
                } else {
                        kfree(bo->pages);
                }
        }
        mutex_unlock(&bo->lock);
}

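/* Usage sketch (illustrative): every successful v3d_bo_get_pages() must be
 * balanced by a v3d_bo_put_pages() once the pages are no longer needed:
 *
 *      ret = v3d_bo_get_pages(bo);
 *      if (ret)
 *              return ret;
 *      // ... access bo->pages / bo->sgt ...
 *      v3d_bo_put_pages(bo);
 *
 * Today the only callers are BO creation/import and destruction, so the
 * refcount is groundwork for the eviction support mentioned in the DOC
 * comment above.
 */
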
static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
                                           size_t unaligned_size)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct drm_gem_object *obj;
        struct v3d_bo *bo;
        size_t size = roundup(unaligned_size, PAGE_SIZE);
        int ret;

        if (size == 0)
                return ERR_PTR(-EINVAL);

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);
        obj = &bo->base;

        INIT_LIST_HEAD(&bo->vmas);
        INIT_LIST_HEAD(&bo->unref_head);
        mutex_init(&bo->lock);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto free_bo;

        /* Reserve the BO's fixed range of the V3D address space. */
        spin_lock(&v3d->mm_lock);
        ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
                                         obj->size >> PAGE_SHIFT,
                                         GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
        spin_unlock(&v3d->mm_lock);
        if (ret)
                goto free_obj;

        return bo;

free_obj:
        drm_gem_object_release(obj);
free_bo:
        kfree(bo);
        return ERR_PTR(ret);
}

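/* Worked example (illustrative, not from the original source): a 16KB BO
 * is already page-aligned, so the drm_mm node above spans 4 pages of the
 * V3D address space, aligned to GMP_GRANULARITY so allocations line up
 * with the GMP memory-protection granularity.  bo->node.start is in units
 * of pages; the GPU virtual address reported to userspace later is
 * bo->node.start << PAGE_SHIFT.
 */
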
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
                             size_t unaligned_size)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct drm_gem_object *obj;
        struct v3d_bo *bo;
        int ret;

        bo = v3d_bo_create_struct(dev, unaligned_size);
        if (IS_ERR(bo))
                return bo;
        obj = &bo->base;

        bo->resv = &bo->_resv;
        reservation_object_init(bo->resv);

        ret = v3d_bo_get_pages(bo);
        if (ret)
                goto free_mm;

        v3d_mmu_insert_ptes(bo);

        mutex_lock(&v3d->bo_lock);
        v3d->bo_stats.num_allocated++;
        v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
        mutex_unlock(&v3d->bo_lock);

        return bo;

free_mm:
        spin_lock(&v3d->mm_lock);
        drm_mm_remove_node(&bo->node);
        spin_unlock(&v3d->mm_lock);

        drm_gem_object_release(obj);
        kfree(bo);
        return ERR_PTR(ret);
}

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
void v3d_free_object(struct drm_gem_object *obj)
{
        struct v3d_dev *v3d = to_v3d_dev(obj->dev);
        struct v3d_bo *bo = to_v3d_bo(obj);

        mutex_lock(&v3d->bo_lock);
        v3d->bo_stats.num_allocated--;
        v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
        mutex_unlock(&v3d->bo_lock);

        reservation_object_fini(&bo->_resv);

        v3d_bo_put_pages(bo);

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, bo->sgt);

        v3d_mmu_remove_ptes(bo);
        spin_lock(&v3d->mm_lock);
        drm_mm_remove_node(&bo->node);
        spin_unlock(&v3d->mm_lock);

        mutex_destroy(&bo->lock);

        drm_gem_object_release(obj);
        kfree(bo);
}

struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
{
        struct v3d_bo *bo = to_v3d_bo(obj);

        return bo->resv;
}

static void
v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
{
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
}

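/* Rationale (editorial note, not from the original comments): the BO is
 * backed by discrete shmem pages rather than a single contiguous PFN
 * range, so the VMA is switched from VM_PFNMAP to VM_MIXEDMAP and faulted
 * in page by page via v3d_gem_fault() below.  The write-combining page
 * protection keeps CPU accesses to the BO uncached but write-batched,
 * which is how GPU buffers are normally mapped.
 */
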
vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct v3d_bo *bo = to_v3d_bo(obj);
        pfn_t pfn;
        pgoff_t pgoff;

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
        pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);

        return vmf_insert_mixed(vma, vmf->address, pfn);
}

int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        v3d_set_mmap_vma_flags(vma);

        return ret;
}

int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret < 0)
                return ret;

        v3d_set_mmap_vma_flags(vma);

        return 0;
}

struct sg_table *
v3d_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct v3d_bo *bo = to_v3d_bo(obj);
        int npages = obj->size >> PAGE_SHIFT;

        return drm_prime_pages_to_sg(bo->pages, npages);
}

struct drm_gem_object *
v3d_prime_import_sg_table(struct drm_device *dev,
                          struct dma_buf_attachment *attach,
                          struct sg_table *sgt)
{
        struct drm_gem_object *obj;
        struct v3d_bo *bo;

        bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
        if (IS_ERR(bo))
                return ERR_CAST(bo);
        obj = &bo->base;

        bo->resv = attach->dmabuf->resv;

        bo->sgt = sgt;
        obj->import_attach = attach;
        v3d_bo_get_pages(bo);

        v3d_mmu_insert_ptes(bo);

        return obj;
}

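/* Import flow sketch (illustrative, standard DRM PRIME flow rather than
 * anything specific to this file): userspace hands the kernel a dma-buf
 * fd, e.g. with libdrm:
 *
 *      uint32_t handle;
 *      drmPrimeFDToHandle(drm_fd, dmabuf_fd, &handle);
 *
 * The DRM core attaches to the dma-buf, maps the exporter's sg_table, and
 * passes both to v3d_prime_import_sg_table() above.
 */
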
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_v3d_create_bo *args = data;
        struct v3d_bo *bo = NULL;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown create_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        args->offset = bo->node.start << PAGE_SHIFT;

        ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
        drm_gem_object_put_unlocked(&bo->base);

        return ret;
}

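/* Userspace sketch (illustrative; the struct and ioctl number are from
 * uapi/drm/v3d_drm.h):
 *
 *      struct drm_v3d_create_bo create = {
 *              .size = 16 * 1024,
 *      };
 *      drmIoctl(drm_fd, DRM_IOCTL_V3D_CREATE_BO, &create);
 *      // create.handle: GEM handle; create.offset: address in the V3D MMU.
 */
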
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_v3d_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret == 0)
                args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
        drm_gem_object_put_unlocked(gem_obj);

        return ret;
}

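/* Continuing the sketch above, CPU access goes through the fake mmap
 * offset returned by this ioctl:
 *
 *      struct drm_v3d_mmap_bo map = { .handle = create.handle };
 *      drmIoctl(drm_fd, DRM_IOCTL_V3D_MMAP_BO, &map);
 *      void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, drm_fd, map.offset);
 */
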
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_v3d_get_bo_offset *args = data;
        struct drm_gem_object *gem_obj;
        struct v3d_bo *bo;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }
        bo = to_v3d_bo(gem_obj);

        args->offset = bo->node.start << PAGE_SHIFT;

        drm_gem_object_put_unlocked(gem_obj);
        return 0;
}
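
/* Note (editorial): unlike the fake mmap offset above, the offset
 * returned here is the BO's fixed address in the V3D MMU address space,
 * which userspace can bake directly into command lists.
 */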