// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */
6 #include <linux/dma-buf.h>
9 #include <drm/drm_device.h>
10 #include <drm/drm_gem.h>
11 #include <drm/drm_prime.h>
13 #include "mtk_drm_drv.h"
14 #include "mtk_drm_gem.h"
16 static struct mtk_drm_gem_obj
*mtk_drm_gem_init(struct drm_device
*dev
,
19 struct mtk_drm_gem_obj
*mtk_gem_obj
;
22 size
= round_up(size
, PAGE_SIZE
);
24 mtk_gem_obj
= kzalloc(sizeof(*mtk_gem_obj
), GFP_KERNEL
);
26 return ERR_PTR(-ENOMEM
);
28 ret
= drm_gem_object_init(dev
, &mtk_gem_obj
->base
, size
);
30 DRM_ERROR("failed to initialize gem object\n");
38 struct mtk_drm_gem_obj
*mtk_drm_gem_create(struct drm_device
*dev
,
39 size_t size
, bool alloc_kmap
)
41 struct mtk_drm_private
*priv
= dev
->dev_private
;
42 struct mtk_drm_gem_obj
*mtk_gem
;
43 struct drm_gem_object
*obj
;
46 mtk_gem
= mtk_drm_gem_init(dev
, size
);
48 return ERR_CAST(mtk_gem
);
52 mtk_gem
->dma_attrs
= DMA_ATTR_WRITE_COMBINE
;
55 mtk_gem
->dma_attrs
|= DMA_ATTR_NO_KERNEL_MAPPING
;
57 mtk_gem
->cookie
= dma_alloc_attrs(priv
->dma_dev
, obj
->size
,
58 &mtk_gem
->dma_addr
, GFP_KERNEL
,
60 if (!mtk_gem
->cookie
) {
61 DRM_ERROR("failed to allocate %zx byte dma buffer", obj
->size
);
67 mtk_gem
->kvaddr
= mtk_gem
->cookie
;
69 DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
70 mtk_gem
->cookie
, &mtk_gem
->dma_addr
,
76 drm_gem_object_release(obj
);
81 void mtk_drm_gem_free_object(struct drm_gem_object
*obj
)
83 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
84 struct mtk_drm_private
*priv
= obj
->dev
->dev_private
;
87 drm_prime_gem_destroy(obj
, mtk_gem
->sg
);
89 dma_free_attrs(priv
->dma_dev
, obj
->size
, mtk_gem
->cookie
,
90 mtk_gem
->dma_addr
, mtk_gem
->dma_attrs
);
92 /* release file pointer to gem object. */
93 drm_gem_object_release(obj
);
98 int mtk_drm_gem_dumb_create(struct drm_file
*file_priv
, struct drm_device
*dev
,
99 struct drm_mode_create_dumb
*args
)
101 struct mtk_drm_gem_obj
*mtk_gem
;
104 args
->pitch
= DIV_ROUND_UP(args
->width
* args
->bpp
, 8);
105 args
->size
= args
->pitch
* args
->height
;
107 mtk_gem
= mtk_drm_gem_create(dev
, args
->size
, false);
109 return PTR_ERR(mtk_gem
);
112 * allocate a id of idr table where the obj is registered
113 * and handle has the id what user can see.
115 ret
= drm_gem_handle_create(file_priv
, &mtk_gem
->base
, &args
->handle
);
117 goto err_handle_create
;
119 /* drop reference from allocate - handle holds it now. */
120 drm_gem_object_put_unlocked(&mtk_gem
->base
);
125 mtk_drm_gem_free_object(&mtk_gem
->base
);
129 static int mtk_drm_gem_object_mmap(struct drm_gem_object
*obj
,
130 struct vm_area_struct
*vma
)
134 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
135 struct mtk_drm_private
*priv
= obj
->dev
->dev_private
;
138 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
139 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
141 vma
->vm_flags
&= ~VM_PFNMAP
;
143 ret
= dma_mmap_attrs(priv
->dma_dev
, vma
, mtk_gem
->cookie
,
144 mtk_gem
->dma_addr
, obj
->size
, mtk_gem
->dma_attrs
);
146 drm_gem_vm_close(vma
);
151 int mtk_drm_gem_mmap_buf(struct drm_gem_object
*obj
, struct vm_area_struct
*vma
)
155 ret
= drm_gem_mmap_obj(obj
, obj
->size
, vma
);
159 return mtk_drm_gem_object_mmap(obj
, vma
);
162 int mtk_drm_gem_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
164 struct drm_gem_object
*obj
;
167 ret
= drm_gem_mmap(filp
, vma
);
171 obj
= vma
->vm_private_data
;
174 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
175 * whole buffer from the start.
179 return mtk_drm_gem_object_mmap(obj
, vma
);
183 * Allocate a sg_table for this GEM object.
184 * Note: Both the table's contents, and the sg_table itself must be freed by
186 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
188 struct sg_table
*mtk_gem_prime_get_sg_table(struct drm_gem_object
*obj
)
190 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
191 struct mtk_drm_private
*priv
= obj
->dev
->dev_private
;
192 struct sg_table
*sgt
;
195 sgt
= kzalloc(sizeof(*sgt
), GFP_KERNEL
);
197 return ERR_PTR(-ENOMEM
);
199 ret
= dma_get_sgtable_attrs(priv
->dma_dev
, sgt
, mtk_gem
->cookie
,
200 mtk_gem
->dma_addr
, obj
->size
,
203 DRM_ERROR("failed to allocate sgt, %d\n", ret
);
211 struct drm_gem_object
*mtk_gem_prime_import_sg_table(struct drm_device
*dev
,
212 struct dma_buf_attachment
*attach
, struct sg_table
*sg
)
214 struct mtk_drm_gem_obj
*mtk_gem
;
216 struct scatterlist
*s
;
220 mtk_gem
= mtk_drm_gem_init(dev
, attach
->dmabuf
->size
);
223 return ERR_CAST(mtk_gem
);
225 expected
= sg_dma_address(sg
->sgl
);
226 for_each_sg(sg
->sgl
, s
, sg
->nents
, i
) {
227 if (sg_dma_address(s
) != expected
) {
228 DRM_ERROR("sg_table is not contiguous");
232 expected
= sg_dma_address(s
) + sg_dma_len(s
);
235 mtk_gem
->dma_addr
= sg_dma_address(sg
->sgl
);
238 return &mtk_gem
->base
;
245 void *mtk_drm_gem_prime_vmap(struct drm_gem_object
*obj
)
247 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
248 struct sg_table
*sgt
;
249 struct sg_page_iter iter
;
254 return mtk_gem
->kvaddr
;
256 sgt
= mtk_gem_prime_get_sg_table(obj
);
260 npages
= obj
->size
>> PAGE_SHIFT
;
261 mtk_gem
->pages
= kcalloc(npages
, sizeof(*mtk_gem
->pages
), GFP_KERNEL
);
265 for_each_sg_page(sgt
->sgl
, &iter
, sgt
->orig_nents
, 0) {
266 mtk_gem
->pages
[i
++] = sg_page_iter_page(&iter
);
270 mtk_gem
->kvaddr
= vmap(mtk_gem
->pages
, npages
, VM_MAP
,
271 pgprot_writecombine(PAGE_KERNEL
));
276 return mtk_gem
->kvaddr
;
279 void mtk_drm_gem_prime_vunmap(struct drm_gem_object
*obj
, void *vaddr
)
281 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
288 kfree(mtk_gem
->pages
);