/*
 * Copyright (c) 2014 The Chromium OS Authors
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

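/*
 * Per-attachment bookkeeping: a private copy of the exporter's
 * scatter/gather table plus the direction it is currently mapped in.
 */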
struct udl_drm_dmabuf_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
        bool is_mapped;
};

static int udl_attach_dma_buf(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
        if (!udl_attach)
                return -ENOMEM;

        udl_attach->dir = DMA_NONE;
        attach->priv = udl_attach;

        return 0;
}

static void udl_detach_dma_buf(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attach)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct sg_table *sgt;

        if (!udl_attach)
                return;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
                        attach->dmabuf->size);

        sgt = &udl_attach->sgt;

        /* Undo the device mapping set up in udl_map_dma_buf(). */
        if (udl_attach->dir != DMA_NONE)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                             udl_attach->dir);

        sg_free_table(sgt);
        kfree(udl_attach);
        attach->priv = NULL;
}

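/*
 * Hand the importer a DMA-mapped view of the buffer: pin the GEM object's
 * pages, duplicate its sg_table into the per-attachment copy and map that
 * copy for the attaching device. A repeat request in the same direction
 * returns the cached table.
 */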
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
{
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
        struct drm_device *dev = obj->base.dev;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
        unsigned int i;
        int page_count;
        int nents, ret;

        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);

        /* just return current sgt if already requested. */
        if (udl_attach->dir == dir && udl_attach->is_mapped)
                return &udl_attach->sgt;

        if (!obj->pages) {
                ret = udl_gem_get_pages(obj);
                if (ret) {
                        DRM_ERROR("failed to map pages.\n");
                        return ERR_PTR(ret);
                }
        }

        page_count = obj->base.size / PAGE_SIZE;
        obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
        if (IS_ERR(obj->sg)) {
                DRM_ERROR("failed to allocate sgt.\n");
                return ERR_CAST(obj->sg);
        }

        sgt = &udl_attach->sgt;

        ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
        if (ret) {
                DRM_ERROR("failed to alloc sgt.\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_lock(&dev->struct_mutex);

        /* Copy the object's scatterlist entries into the attachment's copy. */
        rd = obj->sg->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        if (dir != DMA_NONE) {
                nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
                if (!nents) {
                        DRM_ERROR("failed to map sgl with iommu.\n");
                        sg_free_table(sgt);
                        sgt = ERR_PTR(-EIO);
                        goto err_unlock;
                }
        }

        udl_attach->is_mapped = true;
        udl_attach->dir = dir;
        attach->priv = udl_attach;

err_unlock:
        mutex_unlock(&dev->struct_mutex);
        return sgt;
}

static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
                              struct sg_table *sgt,
                              enum dma_data_direction dir)
{
        /* Nothing to do; the mapping is torn down in udl_detach_dma_buf(). */
        DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
                        attach->dmabuf->size, dir);
}

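/* CPU access is not implemented: the kmap/kunmap and mmap callbacks are stubs. */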
static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                    unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
                              unsigned long page_num, void *addr)
{
        /* TODO */
}

static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                     unsigned long page_num,
                                     void *addr)
{
        /* TODO */
}

static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
                           struct vm_area_struct *vma)
{
        /* TODO */

        return -EINVAL;
}

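/* Exporter callbacks used when a udl GEM object is shared via PRIME. */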
static const struct dma_buf_ops udl_dmabuf_ops = {
        .attach         = udl_attach_dma_buf,
        .detach         = udl_detach_dma_buf,
        .map_dma_buf    = udl_map_dma_buf,
        .unmap_dma_buf  = udl_unmap_dma_buf,
        .map            = udl_dmabuf_kmap,
        .map_atomic     = udl_dmabuf_kmap_atomic,
        .unmap          = udl_dmabuf_kunmap,
        .unmap_atomic   = udl_dmabuf_kunmap_atomic,
        .mmap           = udl_dmabuf_mmap,
        .release        = drm_gem_dmabuf_release,
};

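/* Export @obj as a dma-buf backed by the ops above. */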
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &udl_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;

        return drm_gem_dmabuf_export(dev, &exp_info);
}

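/*
 * Wrap an imported sg_table in a freshly allocated udl GEM object and
 * build the page array the rest of the driver expects.
 */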
static int udl_prime_create(struct drm_device *dev,
                            size_t size,
                            struct sg_table *sg,
                            struct udl_gem_object **obj_p)
{
        struct udl_gem_object *obj;
        int npages;

        npages = size / PAGE_SIZE;

        *obj_p = NULL;
        obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
        if (!obj)
                return -ENOMEM;

        obj->sg = sg;
        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (obj->pages == NULL) {
                DRM_ERROR("obj pages is NULL %d\n", npages);
                return -ENOMEM;
        }

        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

        *obj_p = obj;
        return 0;
}

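/*
 * PRIME import: attach to the foreign dma-buf on behalf of the udl device,
 * map it for bidirectional DMA and wrap the resulting sg_table in a udl GEM
 * object. The error paths unwind the mapping, the attachment and the
 * references taken along the way.
 */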
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct udl_gem_object *uobj;
        int ret;

        /* need to attach */
        get_device(dev->dev);
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach)) {
                put_device(dev->dev);
                return ERR_CAST(attach);
        }

        get_dma_buf(dma_buf);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
        if (ret)
                goto fail_unmap;

        uobj->base.import_attach = attach;
        uobj->flags = UDL_BO_WC;

        return &uobj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);
        put_device(dev->dev);
        return ERR_PTR(ret);
}