drivers/gpu/drm/v3d/v3d_bo.c

// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

/**
 * DOC: V3D GEM BO management support
 *
 * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
 * GPU and the bus, allowing us to use shmem objects for our storage
 * instead of CMA.
 *
 * Physically contiguous objects may still be imported to V3D, but the
 * driver doesn't allocate physically contiguous objects on its own.
 * Display engines requiring physically contiguous allocations should
 * look into Mesa's "renderonly" support (as used by the Mesa pl111
 * driver) for an example of how to integrate with V3D.
 *
 * Long term, we should support evicting pages from the MMU when under
 * memory pressure (thus the v3d_bo_get_pages() refcounting), but
 * that's not a high priority since our systems tend to not have swap.
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"

/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
 * it for DMA.
 */
static int
v3d_bo_get_pages(struct v3d_bo *bo)
{
        struct drm_gem_object *obj = &bo->base;
        struct drm_device *dev = obj->dev;
        int npages = obj->size >> PAGE_SHIFT;
        int ret = 0;

        mutex_lock(&bo->lock);
        if (bo->pages_refcount++ != 0)
                goto unlock;

        if (!obj->import_attach) {
                bo->pages = drm_gem_get_pages(obj);
                if (IS_ERR(bo->pages)) {
                        ret = PTR_ERR(bo->pages);
                        goto unlock;
                }

                bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
                if (IS_ERR(bo->sgt)) {
                        ret = PTR_ERR(bo->sgt);
                        goto put_pages;
                }

                /* Map the pages for use by the GPU. */
                dma_map_sg(dev->dev, bo->sgt->sgl,
                           bo->sgt->nents, DMA_BIDIRECTIONAL);
        } else {
                bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
                if (!bo->pages) {
                        /* Don't take the put_pages path: there are no
                         * shmem pages to release when the page array
                         * allocation fails.
                         */
                        ret = -ENOMEM;
                        goto unlock;
                }

                drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
                                                 NULL, npages);

                /* Note that dma-bufs come in mapped. */
        }

        mutex_unlock(&bo->lock);

        return 0;

put_pages:
        drm_gem_put_pages(obj, bo->pages, true, true);
        bo->pages = NULL;
unlock:
        bo->pages_refcount--;
        mutex_unlock(&bo->lock);
        return ret;
}

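/* Unpins the BO's pages: on the last refcount, unmaps and frees the
 * driver's DMA mapping and page array (for imported BOs, only the
 * page array, since the dma-buf owns the sgt).
 */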
static void
v3d_bo_put_pages(struct v3d_bo *bo)
{
        struct drm_gem_object *obj = &bo->base;

        mutex_lock(&bo->lock);
        if (--bo->pages_refcount == 0) {
                if (!obj->import_attach) {
                        dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
                                     bo->sgt->nents, DMA_BIDIRECTIONAL);
                        sg_free_table(bo->sgt);
                        kfree(bo->sgt);
                        drm_gem_put_pages(obj, bo->pages, true, true);
                } else {
                        kfree(bo->pages);
                }
        }
        mutex_unlock(&bo->lock);
}

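/* Common allocation path: creates the BO struct and GEM object and
 * reserves the BO's range of GPU virtual address space in the
 * device's drm_mm allocator.
 */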
static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
                                           size_t unaligned_size)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct drm_gem_object *obj;
        struct v3d_bo *bo;
        size_t size = roundup(unaligned_size, PAGE_SIZE);
        int ret;

        if (size == 0)
                return ERR_PTR(-EINVAL);

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);
        obj = &bo->base;

        INIT_LIST_HEAD(&bo->vmas);
        INIT_LIST_HEAD(&bo->unref_head);
        mutex_init(&bo->lock);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto free_bo;

        spin_lock(&v3d->mm_lock);
        ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
                                         obj->size >> PAGE_SHIFT,
                                         GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
        spin_unlock(&v3d->mm_lock);
        if (ret)
                goto free_obj;

        return bo;

free_obj:
        drm_gem_object_release(obj);
free_bo:
        kfree(bo);
        return ERR_PTR(ret);
}

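/* Allocates a new BO for userspace, pinning its pages, mapping it
 * into the GPU's page tables, and accounting it in the BO stats.
 */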
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
                             size_t unaligned_size)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct drm_gem_object *obj;
        struct v3d_bo *bo;
        int ret;

        bo = v3d_bo_create_struct(dev, unaligned_size);
        if (IS_ERR(bo))
                return bo;
        obj = &bo->base;

        bo->resv = &bo->_resv;
        reservation_object_init(bo->resv);

        ret = v3d_bo_get_pages(bo);
        if (ret)
                goto free_mm;

        v3d_mmu_insert_ptes(bo);

        mutex_lock(&v3d->bo_lock);
        v3d->bo_stats.num_allocated++;
        v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
        mutex_unlock(&v3d->bo_lock);

        return bo;

free_mm:
        spin_lock(&v3d->mm_lock);
        drm_mm_remove_node(&bo->node);
        spin_unlock(&v3d->mm_lock);

        drm_gem_object_release(obj);
        kfree(bo);
        return ERR_PTR(ret);
}

/* Called by the DRM core on the last userspace/kernel unreference of
 * the BO.
 */
void v3d_free_object(struct drm_gem_object *obj)
{
        struct v3d_dev *v3d = to_v3d_dev(obj->dev);
        struct v3d_bo *bo = to_v3d_bo(obj);

        mutex_lock(&v3d->bo_lock);
        v3d->bo_stats.num_allocated--;
        v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
        mutex_unlock(&v3d->bo_lock);

        reservation_object_fini(&bo->_resv);

        v3d_bo_put_pages(bo);

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, bo->sgt);

        v3d_mmu_remove_ptes(bo);
        spin_lock(&v3d->mm_lock);
        drm_mm_remove_node(&bo->node);
        spin_unlock(&v3d->mm_lock);

        mutex_destroy(&bo->lock);

        drm_gem_object_release(obj);
        kfree(bo);
}

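/* Returns the reservation object to be used for fencing this BO,
 * which for imports is the one shared with the exporting driver.
 */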
struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
{
        struct v3d_bo *bo = to_v3d_bo(obj);

        return bo->resv;
}

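/* Shared VMA setup for both mmap paths: we insert individual shmem
 * pages from the fault handler, so use VM_MIXEDMAP instead of
 * VM_PFNMAP, with a write-combined mapping.
 */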
static void
v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
{
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
}

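/* Page fault handler for CPU mmaps: looks up the backing page for the
 * faulting offset and inserts it into the VMA.
 */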
vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct v3d_bo *bo = to_v3d_bo(obj);
        pfn_t pfn;
        pgoff_t pgoff;

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
        pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);

        return vmf_insert_mixed(vma, vmf->address, pfn);
}

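/* mmap through the DRM fd: the core validates the fake offset and
 * sets up the VMA, then we apply our VMA flags so faulting works.
 */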
int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        v3d_set_mmap_vma_flags(vma);

        return ret;
}

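/* mmap of an exported BO through its dma-buf fd, skipping the fake
 * offset lookup that the DRM-fd path does.
 */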
int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret < 0)
                return ret;

        v3d_set_mmap_vma_flags(vma);

        return 0;
}

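/* Builds an sg_table of the BO's pages for export through dma-buf. */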
struct sg_table *
v3d_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct v3d_bo *bo = to_v3d_bo(obj);
        int npages = obj->size >> PAGE_SHIFT;

        return drm_prime_pages_to_sg(bo->pages, npages);
}

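/* Wraps an imported dma-buf's scatterlist in a BO that shares the
 * exporter's reservation object and is mapped into the GPU's address
 * space like a native allocation.
 */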
struct drm_gem_object *
v3d_prime_import_sg_table(struct drm_device *dev,
                          struct dma_buf_attachment *attach,
                          struct sg_table *sgt)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct drm_gem_object *obj;
        struct v3d_bo *bo;
        int ret;

        bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
        if (IS_ERR(bo))
                return ERR_CAST(bo);
        obj = &bo->base;

        bo->resv = attach->dmabuf->resv;

        bo->sgt = sgt;
        obj->import_attach = attach;
        ret = v3d_bo_get_pages(bo);
        if (ret) {
                spin_lock(&v3d->mm_lock);
                drm_mm_remove_node(&bo->node);
                spin_unlock(&v3d->mm_lock);
                drm_gem_object_release(obj);
                kfree(bo);
                return ERR_PTR(ret);
        }

        v3d_mmu_insert_ptes(bo);

        return obj;
}

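/* ioctl for BO allocation. Since the BO stays mapped at a fixed GPU
 * virtual address for its whole lifetime, we can return that offset
 * to userspace at create time.
 */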
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_v3d_create_bo *args = data;
        struct v3d_bo *bo = NULL;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown create_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        args->offset = bo->node.start << PAGE_SHIFT;

        ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
        drm_gem_object_put_unlocked(&bo->base);

        return ret;
}

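/* ioctl returning the fake mmap offset that userspace passes to
 * mmap() on the DRM fd to map the BO.
 */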
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_v3d_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret == 0)
                args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
        drm_gem_object_put_unlocked(gem_obj);

        return ret;
}

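/* ioctl for looking up a BO's fixed offset in the GPU's virtual
 * address space from its GEM handle.
 */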
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_v3d_get_bo_offset *args = data;
        struct drm_gem_object *gem_obj;
        struct v3d_bo *bo;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }
        bo = to_v3d_bo(gem_obj);

        args->offset = bo->node.start << PAGE_SHIFT;

        drm_gem_object_put_unlocked(gem_obj);
        return 0;
}