/*
 * udl_dmabuf.c
 *
 * Copyright (c) 2014 The Chromium OS Authors
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
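
/*
 * Bookkeeping kept in attach->priv: a private copy of the exporter's
 * scatter/gather table plus the direction it was last mapped for, so a
 * repeated map_dma_buf call in the same direction can be answered from
 * the cache.
 */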
struct udl_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};
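
/* dma_buf attach callback: allocate the per-attachment state. */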
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
			attach->dmabuf->size);

	udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
	if (!udl_attach)
		return -ENOMEM;

	udl_attach->dir = DMA_NONE;
	attach->priv = udl_attach;

	return 0;
}
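
/*
 * dma_buf detach callback: unmap the scatterlist if it is still mapped,
 * then release the table and the per-attachment state.
 */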
static void udl_detach_dma_buf(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct sg_table *sgt;

	if (!udl_attach)
		return;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
			attach->dmabuf->size);

	sgt = &udl_attach->sgt;

	if (udl_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
			     udl_attach->dir);

	sg_free_table(sgt);
	kfree(udl_attach);
	attach->priv = NULL;
}
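
/*
 * dma_buf map callback: pin the GEM object's backing pages, clone the
 * resulting scatterlist into the per-attachment table, and DMA-map that
 * copy for the importing device.  The mapping is cached in
 * udl_drm_dmabuf_attachment so a repeat request in the same direction
 * returns the existing table.
 */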
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
	struct drm_device *dev = obj->base.dev;
	struct udl_device *udl = dev->dev_private;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int page_count;
	int nents, ret;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);

	/* just return current sgt if already requested. */
	if (udl_attach->dir == dir && udl_attach->is_mapped)
		return &udl_attach->sgt;

	if (!obj->pages) {
		ret = udl_gem_get_pages(obj);
		if (ret) {
			DRM_ERROR("failed to map pages.\n");
			return ERR_PTR(ret);
		}
	}

	page_count = obj->base.size / PAGE_SIZE;
	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
	if (IS_ERR(obj->sg)) {
		DRM_ERROR("failed to allocate sgt.\n");
		return ERR_CAST(obj->sg);
	}

	sgt = &udl_attach->sgt;

	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&udl->gem_lock);

	rd = obj->sg->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents,
				   dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	udl_attach->is_mapped = true;
	udl_attach->dir = dir;
	attach->priv = udl_attach;

err_unlock:
	mutex_unlock(&udl->gem_lock);
	return sgt;
}
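
/* dma_buf unmap callback: a no-op here; teardown happens in detach. */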
static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
			      struct sg_table *sgt,
			      enum dma_data_direction dir)
{
	/* Nothing to do. */
	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);
}
static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	/* TODO */

	return NULL;
}
static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
			      unsigned long page_num, void *addr)
{
	/* TODO */
}
static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
			   struct vm_area_struct *vma)
{
	/* TODO */

	return -EINVAL;
}
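
/* kmap/kunmap/mmap are unimplemented stubs; release uses the DRM core helper. */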
static const struct dma_buf_ops udl_dmabuf_ops = {
	.attach			= udl_attach_dma_buf,
	.detach			= udl_detach_dma_buf,
	.map_dma_buf		= udl_map_dma_buf,
	.unmap_dma_buf		= udl_unmap_dma_buf,
	.map			= udl_dmabuf_kmap,
	.unmap			= udl_dmabuf_kunmap,
	.mmap			= udl_dmabuf_mmap,
	.release		= drm_gem_dmabuf_release,
};
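
/*
 * Export a UDL GEM object as a dma-buf.  The object is stashed in
 * exp_info.priv, which is where udl_map_dma_buf finds it again via
 * attach->dmabuf->priv.
 */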
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &udl_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}
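
/*
 * Wrap an imported scatterlist in a new UDL GEM object and build the
 * page array that the rest of the driver expects.
 */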
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}
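
/*
 * Import path: attach to the foreign dma-buf, map it bidirectionally,
 * and wrap the mapping in a write-combined UDL GEM object.  Each step
 * is unwound on failure.
 */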
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	get_device(dev->dev);
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		put_device(dev->dev);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;
	uobj->flags = UDL_BO_WC;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
	put_device(dev->dev);

	return ERR_PTR(ret);
}