/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
13 #include <drm/exynos_drm.h>
14 #include "exynos_drm_dmabuf.h"
15 #include "exynos_drm_drv.h"
16 #include "exynos_drm_gem.h"
18 #include <linux/dma-buf.h>
20 struct exynos_drm_dmabuf_attachment
{
22 enum dma_data_direction dir
;
26 static struct exynos_drm_gem_obj
*dma_buf_to_obj(struct dma_buf
*buf
)
28 return to_exynos_gem_obj(buf
->priv
);
31 static int exynos_gem_attach_dma_buf(struct dma_buf
*dmabuf
,
33 struct dma_buf_attachment
*attach
)
35 struct exynos_drm_dmabuf_attachment
*exynos_attach
;
37 exynos_attach
= kzalloc(sizeof(*exynos_attach
), GFP_KERNEL
);
41 exynos_attach
->dir
= DMA_NONE
;
42 attach
->priv
= exynos_attach
;
47 static void exynos_gem_detach_dma_buf(struct dma_buf
*dmabuf
,
48 struct dma_buf_attachment
*attach
)
50 struct exynos_drm_dmabuf_attachment
*exynos_attach
= attach
->priv
;
56 sgt
= &exynos_attach
->sgt
;
58 if (exynos_attach
->dir
!= DMA_NONE
)
59 dma_unmap_sg(attach
->dev
, sgt
->sgl
, sgt
->nents
,
67 static struct sg_table
*
68 exynos_gem_map_dma_buf(struct dma_buf_attachment
*attach
,
69 enum dma_data_direction dir
)
71 struct exynos_drm_dmabuf_attachment
*exynos_attach
= attach
->priv
;
72 struct exynos_drm_gem_obj
*gem_obj
= dma_buf_to_obj(attach
->dmabuf
);
73 struct drm_device
*dev
= gem_obj
->base
.dev
;
74 struct exynos_drm_gem_buf
*buf
;
75 struct scatterlist
*rd
, *wr
;
76 struct sg_table
*sgt
= NULL
;
80 /* just return current sgt if already requested. */
81 if (exynos_attach
->dir
== dir
&& exynos_attach
->is_mapped
)
82 return &exynos_attach
->sgt
;
84 buf
= gem_obj
->buffer
;
86 DRM_ERROR("buffer is null.\n");
87 return ERR_PTR(-ENOMEM
);
90 sgt
= &exynos_attach
->sgt
;
92 ret
= sg_alloc_table(sgt
, buf
->sgt
->orig_nents
, GFP_KERNEL
);
94 DRM_ERROR("failed to alloc sgt.\n");
95 return ERR_PTR(-ENOMEM
);
98 mutex_lock(&dev
->struct_mutex
);
102 for (i
= 0; i
< sgt
->orig_nents
; ++i
) {
103 sg_set_page(wr
, sg_page(rd
), rd
->length
, rd
->offset
);
108 if (dir
!= DMA_NONE
) {
109 nents
= dma_map_sg(attach
->dev
, sgt
->sgl
, sgt
->orig_nents
, dir
);
111 DRM_ERROR("failed to map sgl with iommu.\n");
118 exynos_attach
->is_mapped
= true;
119 exynos_attach
->dir
= dir
;
120 attach
->priv
= exynos_attach
;
122 DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf
->size
);
125 mutex_unlock(&dev
->struct_mutex
);
129 static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment
*attach
,
130 struct sg_table
*sgt
,
131 enum dma_data_direction dir
)
136 static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf
*dma_buf
,
137 unsigned long page_num
)
/* dma_buf_ops.kunmap_atomic: not implemented for this exporter. */
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}
151 static void *exynos_gem_dmabuf_kmap(struct dma_buf
*dma_buf
,
152 unsigned long page_num
)
/* dma_buf_ops.kunmap: not implemented for this exporter. */
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}
165 static int exynos_gem_dmabuf_mmap(struct dma_buf
*dma_buf
,
166 struct vm_area_struct
*vma
)
171 static struct dma_buf_ops exynos_dmabuf_ops
= {
172 .attach
= exynos_gem_attach_dma_buf
,
173 .detach
= exynos_gem_detach_dma_buf
,
174 .map_dma_buf
= exynos_gem_map_dma_buf
,
175 .unmap_dma_buf
= exynos_gem_unmap_dma_buf
,
176 .kmap
= exynos_gem_dmabuf_kmap
,
177 .kmap_atomic
= exynos_gem_dmabuf_kmap_atomic
,
178 .kunmap
= exynos_gem_dmabuf_kunmap
,
179 .kunmap_atomic
= exynos_gem_dmabuf_kunmap_atomic
,
180 .mmap
= exynos_gem_dmabuf_mmap
,
181 .release
= drm_gem_dmabuf_release
,
184 struct dma_buf
*exynos_dmabuf_prime_export(struct drm_device
*drm_dev
,
185 struct drm_gem_object
*obj
, int flags
)
187 struct exynos_drm_gem_obj
*exynos_gem_obj
= to_exynos_gem_obj(obj
);
188 DEFINE_DMA_BUF_EXPORT_INFO(exp_info
);
190 exp_info
.ops
= &exynos_dmabuf_ops
;
191 exp_info
.size
= exynos_gem_obj
->base
.size
;
192 exp_info
.flags
= flags
;
195 return dma_buf_export(&exp_info
);
198 struct drm_gem_object
*exynos_dmabuf_prime_import(struct drm_device
*drm_dev
,
199 struct dma_buf
*dma_buf
)
201 struct dma_buf_attachment
*attach
;
202 struct sg_table
*sgt
;
203 struct scatterlist
*sgl
;
204 struct exynos_drm_gem_obj
*exynos_gem_obj
;
205 struct exynos_drm_gem_buf
*buffer
;
208 /* is this one of own objects? */
209 if (dma_buf
->ops
== &exynos_dmabuf_ops
) {
210 struct drm_gem_object
*obj
;
214 /* is it from our device? */
215 if (obj
->dev
== drm_dev
) {
217 * Importing dmabuf exported from out own gem increases
218 * refcount on gem itself instead of f_count of dmabuf.
220 drm_gem_object_reference(obj
);
225 attach
= dma_buf_attach(dma_buf
, drm_dev
->dev
);
227 return ERR_PTR(-EINVAL
);
229 get_dma_buf(dma_buf
);
231 sgt
= dma_buf_map_attachment(attach
, DMA_BIDIRECTIONAL
);
237 buffer
= kzalloc(sizeof(*buffer
), GFP_KERNEL
);
240 goto err_unmap_attach
;
243 exynos_gem_obj
= exynos_drm_gem_init(drm_dev
, dma_buf
->size
);
244 if (!exynos_gem_obj
) {
246 goto err_free_buffer
;
251 buffer
->size
= dma_buf
->size
;
252 buffer
->dma_addr
= sg_dma_address(sgl
);
254 if (sgt
->nents
== 1) {
255 /* always physically continuous memory if sgt->nents is 1. */
256 exynos_gem_obj
->flags
|= EXYNOS_BO_CONTIG
;
259 * this case could be CONTIG or NONCONTIG type but for now
261 * TODO. we have to find a way that exporter can notify
262 * the type of its own buffer to importer.
264 exynos_gem_obj
->flags
|= EXYNOS_BO_NONCONTIG
;
267 exynos_gem_obj
->buffer
= buffer
;
269 exynos_gem_obj
->base
.import_attach
= attach
;
271 DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer
->dma_addr
,
274 return &exynos_gem_obj
->base
;
280 dma_buf_unmap_attachment(attach
, sgt
, DMA_BIDIRECTIONAL
);
282 dma_buf_detach(dma_buf
, attach
);
283 dma_buf_put(dma_buf
);