/*
 * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/dma-buf.h>

#include "omap_drv.h"
/* -----------------------------------------------------------------------------
 * DMABUF Export
 */
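/*
 * The exporter side hands an omap GEM object to other devices through the
 * dma-buf interface.  Mapping pins the buffer as a single physically
 * contiguous region, so the resulting sg_table always has exactly one entry.
 */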
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t paddr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_get_paddr(obj, &paddr, true);
	if (ret)
		goto out;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out;

	/* describe the whole buffer as one contiguous sg entry */
	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
	sg_dma_address(sg->sgl) = paddr;

	/* this should be after _get_paddr() to ensure we have pages attached */
	omap_gem_dma_sync(obj, dir);

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	omap_gem_put_paddr(obj);
	sg_free_table(sg);
	kfree(sg);
}
static void omap_gem_dmabuf_release(struct dma_buf *buffer)
{
	struct drm_gem_object *obj = buffer->priv;
	/* release reference that was taken when dmabuf was exported
	 * in omap_gem_prime_set()..
	 */
	drm_gem_object_unreference_unlocked(obj);
}
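/*
 * CPU access hooks: begin_cpu_access pins the backing pages (and rejects
 * tiled buffers, which would need a de-tiled view), end_cpu_access drops
 * them again.  The kmap/kmap_atomic hooks map one page at a time, syncing
 * it for CPU access first.
 */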
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO we would need to pin at least part of the buffer to
		 * get de-tiled view.  For now just reject it.
		 */
		return -ENOMEM;
	}

	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}
static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	omap_gem_put_pages(obj);
	return 0;
}
static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap_atomic(pages[page_num]);
}
static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap(pages[page_num]);
}
static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	omap_gem_get_pages(obj, &pages, false);
	kunmap(pages[page_num]);
}
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret = 0;

	if (WARN_ON(!obj->filp))
		return -EINVAL;

	/* set up the VMA with the generic GEM helper first, then let the
	 * omap-specific mmap fix up the mapping for this buffer type */
	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}
static struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = omap_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
	.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
	.kmap = omap_gem_dmabuf_kmap,
	.kunmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};
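/*
 * A minimal sketch of how these hooks get wired up, assuming the usual
 * drm_driver fields of this kernel generation (the authoritative wiring
 * lives in omap_drv.c, not here):
 *
 *	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *	.gem_prime_export   = omap_gem_prime_export,
 *	.gem_prime_import   = omap_gem_prime_import,
 */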
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}
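/*
 * A minimal userspace sketch of reaching the export hook above ('handle' is
 * assumed to be a valid GEM handle; error handling elided):
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd now refers to the exported dma-buf
 */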
/* -----------------------------------------------------------------------------
 * DMABUF Import
 */
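/*
 * The importer side wraps a foreign dma-buf in a new GEM object.  Buffers
 * that were exported by this same device take a shortcut: we just grab a
 * reference on the underlying GEM object instead of attaching to the
 * dma-buf.
 */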
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}