treewide: remove redundant IS_ERR() before error code check
drivers/gpu/drm/mediatek/mtk_drm_gem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/dma-buf.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"

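/*
 * Allocate the mtk_drm_gem_obj wrapper and initialize the embedded GEM
 * object. The backing storage is left to the caller; on failure an
 * ERR_PTR() is returned rather than NULL.
 */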
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct mtk_drm_gem_obj *mtk_gem_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
	if (!mtk_gem_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(mtk_gem_obj);
		return ERR_PTR(ret);
	}

	return mtk_gem_obj;
}

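/*
 * Create a GEM object backed by DMA memory. The buffer is allocated
 * write-combined; when alloc_kmap is false, DMA_ATTR_NO_KERNEL_MAPPING
 * lets the DMA API skip creating a kernel virtual mapping, and the
 * returned cookie is then only usable with the dma_*_attrs() helpers.
 */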
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
					   size_t size, bool alloc_kmap)
{
	struct mtk_drm_private *priv = dev->dev_private;
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
					  &mtk_gem->dma_addr, GFP_KERNEL,
					  mtk_gem->dma_attrs);
	if (!mtk_gem->cookie) {
		DRM_ERROR("failed to allocate %zx byte dma buffer\n",
			  obj->size);
		ret = -ENOMEM;
		goto err_gem_free;
	}

	if (alloc_kmap)
		mtk_gem->kvaddr = mtk_gem->cookie;

	DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
			 mtk_gem->cookie, &mtk_gem->dma_addr, size);

	return mtk_gem;

err_gem_free:
	drm_gem_object_release(obj);
	kfree(mtk_gem);
	return ERR_PTR(ret);
}

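/*
 * Release a GEM object: buffers imported through PRIME are detached via
 * drm_prime_gem_destroy(), locally allocated buffers are returned to the
 * DMA API.
 */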
void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	if (mtk_gem->sg)
		drm_prime_gem_destroy(obj, mtk_gem->sg);
	else
		dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
			       mtk_gem->dma_addr, mtk_gem->dma_attrs);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(mtk_gem);
}

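/*
 * Backs the dumb-buffer ioctl: userspace supplies width/height/bpp and
 * the driver fills in pitch, size and a GEM handle. A minimal userspace
 * sketch (illustrative only, not part of this driver):
 *
 *	struct drm_mode_create_dumb args = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &args);
 *	(args.handle, args.pitch and args.size are filled in on return)
 */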
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	mtk_gem = mtk_drm_gem_create(dev, args->size, false);
	if (IS_ERR(mtk_gem))
		return PTR_ERR(mtk_gem);

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle userspace sees is that id.
	 */
	ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(&mtk_gem->base);

	return 0;

err_handle_create:
	mtk_drm_gem_free_object(&mtk_gem->base);
	return ret;
}

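/*
 * Common mmap tail shared by the file-backed and PRIME mmap paths below;
 * expects drm_gem_mmap()/drm_gem_mmap_obj() to have set up the VMA first.
 */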
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	int ret;
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	/*
	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

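/*
 * mmap entry point for imported/exported PRIME buffers (presumably wired
 * up as the driver's gem_prime_mmap hook); here the VMA comes straight
 * from the dma-buf layer, so the GEM side must be set up explicitly.
 */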
int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return mtk_drm_gem_object_mmap(obj, vma);
}

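/*
 * mmap entry point for the DRM file descriptor: drm_gem_mmap() looks the
 * object up by its fake offset. Userspace typically obtains that offset
 * first, e.g. (illustrative sketch, not part of this driver):
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, map.offset);
 */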
int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	obj = vma->vm_private_data;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	return mtk_drm_gem_object_mmap(obj, vma);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
				    mtk_gem->dma_addr, obj->size,
				    mtk_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

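/*
 * Import a PRIME buffer. The hardware is programmed with a single DMA
 * address, so the imported sg_table must cover one contiguous range of
 * device addresses; anything else is rejected with -EINVAL.
 */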
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;
	struct scatterlist *s;
	unsigned int i;
	dma_addr_t expected;

	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	expected = sg_dma_address(sg->sgl);
	for_each_sg(sg->sgl, s, sg->nents, i) {
		if (sg_dma_address(s) != expected) {
			DRM_ERROR("sg_table is not contiguous\n");
			ret = -EINVAL;
			goto err_gem_free;
		}
		expected = sg_dma_address(s) + sg_dma_len(s);
	}

	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
	mtk_gem->sg = sg;

	return &mtk_gem->base;

err_gem_free:
	kfree(mtk_gem);
	return ERR_PTR(ret);
}

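/*
 * Map the buffer into the kernel, rebuilding a page array from the
 * object's sg_table, since the DMA allocation may have no kernel mapping
 * of its own (see DMA_ATTR_NO_KERNEL_MAPPING above).
 */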
void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct sg_table *sgt;
	struct sg_page_iter iter;
	unsigned int npages;
	unsigned int i = 0;

	if (mtk_gem->kvaddr)
		return mtk_gem->kvaddr;

	sgt = mtk_gem_prime_get_sg_table(obj);
	if (IS_ERR(sgt))
		return NULL;

	npages = obj->size >> PAGE_SHIFT;
	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
	if (!mtk_gem->pages)
		goto out;

	for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
		mtk_gem->pages[i++] = sg_page_iter_page(&iter);
		/* stop once the pages array is full */
		if (i >= npages)
			break;
	}

	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
			       pgprot_writecombine(PAGE_KERNEL));

out:
	/* the temporary sg_table and its contents are no longer needed */
	sg_free_table(sgt);
	kfree(sgt);

	return mtk_gem->kvaddr;
}

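/* Undo mtk_drm_gem_prime_vmap(): drop the vmap and the page array. */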
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);

	if (!mtk_gem->pages)
		return;

	vunmap(vaddr);
	mtk_gem->kvaddr = NULL;
	kfree(mtk_gem->pages);
	/* clear the stale pointer so a repeated vunmap cannot double-free */
	mtk_gem->pages = NULL;
}