[linux/fpc-iii.git] / drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
blob 274909271c36de62e18d10b81fe01a58b598fc61
/* exynos_drm_dmabuf.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

#include <linux/dma-buf.h>
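
/*
 * PRIME/dma-buf glue for Exynos GEM objects: the export path wraps a GEM
 * object in a dma_buf so other devices can attach to it, and the import
 * path turns a foreign dma_buf into an exynos_drm_gem_obj backed by the
 * exporter's pages.
 */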
static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
                unsigned int page_size)
{
        struct sg_table *sgt = NULL;
        struct scatterlist *sgl;
        int i, ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                goto out;

        ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
        if (ret)
                goto err_free_sgt;

        if (page_size < PAGE_SIZE)
                page_size = PAGE_SIZE;

        for_each_sg(sgt->sgl, sgl, nr_pages, i)
                sg_set_page(sgl, pages[i], page_size, 0);

        return sgt;

err_free_sgt:
        kfree(sgt);
        sgt = NULL;
out:
        return NULL;
}
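
/*
 * dma_buf_ops.map_dma_buf callback: builds an sg_table from the GEM
 * buffer's page array and maps it for DMA by the attaching device.
 */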
static struct sg_table *
                exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
{
        struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
        struct drm_device *dev = gem_obj->base.dev;
        struct exynos_drm_gem_buf *buf;
        struct sg_table *sgt = NULL;
        unsigned int npages;
        int nents;

        DRM_DEBUG_PRIME("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        buf = gem_obj->buffer;

        /* there should always be pages allocated. */
        if (!buf->pages) {
                DRM_ERROR("pages is null.\n");
                goto err_unlock;
        }

        npages = buf->size / buf->page_size;

        sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
        /* exynos_pages_to_sg() returns NULL on allocation failure. */
        if (!sgt)
                goto err_unlock;
        nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
                        npages, buf->size, buf->page_size);

err_unlock:
        mutex_unlock(&dev->struct_mutex);
        return sgt;
}
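
/*
 * dma_buf_ops.unmap_dma_buf callback: undoes the DMA mapping and frees
 * the sg_table created by exynos_gem_map_dma_buf().
 */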
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                                struct sg_table *sgt,
                                                enum dma_data_direction dir)
{
        dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
        sg_free_table(sgt);
        kfree(sgt);
        sgt = NULL;
}
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;

        DRM_DEBUG_PRIME("%s\n", __FILE__);

        /*
         * exynos_dmabuf_release() being called means the file object's
         * f_count has reached 0, so drop the reference that
         * drm_prime_handle_to_fd() took on this gem object.
         */
        if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
                exynos_gem_obj->base.export_dma_buf = NULL;

                /*
                 * drop this gem object refcount to release allocated buffer
                 * and resources.
                 */
                drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
        }
}
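
/*
 * The kmap/kunmap callbacks below are required dma_buf_ops members, but
 * CPU access to the buffer is not implemented yet; they are stubs.
 */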
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                                unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                                unsigned long page_num,
                                                void *addr)
{
        /* TODO */
}

static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
                                        unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
                                        unsigned long page_num, void *addr)
{
        /* TODO */
}
static struct dma_buf_ops exynos_dmabuf_ops = {
        .map_dma_buf = exynos_gem_map_dma_buf,
        .unmap_dma_buf = exynos_gem_unmap_dma_buf,
        .kmap = exynos_gem_dmabuf_kmap,
        .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
        .kunmap = exynos_gem_dmabuf_kunmap,
        .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
        .release = exynos_dmabuf_release,
};
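
/*
 * drm_driver.gem_prime_export callback: wraps the GEM object in a
 * dma_buf using the ops table above so other drivers can attach to it.
 */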
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
                                struct drm_gem_object *obj, int flags)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

        return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
                                exynos_gem_obj->base.size, 0600);
}
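
/*
 * drm_driver.gem_prime_import callback. Self-imports (a dma_buf this
 * driver exported, coming back to the same device) short-circuit to a
 * reference on the existing GEM object; anything else is attached,
 * mapped, and wrapped in a freshly allocated exynos_drm_gem_obj.
 */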
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct scatterlist *sgl;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buffer;
        struct page *page;
        int ret, i = 0;

        DRM_DEBUG_PRIME("%s\n", __FILE__);

        /* is this one of own objects? */
        if (dma_buf->ops == &exynos_dmabuf_ops) {
                struct drm_gem_object *obj;

                exynos_gem_obj = dma_buf->priv;
                obj = &exynos_gem_obj->base;

                /* is it from our device? */
                if (obj->dev == drm_dev) {
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, drm_dev->dev);
        if (IS_ERR(attach))
                return ERR_PTR(-EINVAL);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto err_buf_detach;
        }

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
                ret = -ENOMEM;
                goto err_unmap_attach;
        }

        buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
        if (!buffer->pages) {
                DRM_ERROR("failed to allocate pages.\n");
                ret = -ENOMEM;
                goto err_free_buffer;
        }

        exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_free_pages;
        }

        sgl = sgt->sgl;
        buffer->dma_addr = sg_dma_address(sgl);

        while (i < sgt->nents) {
                buffer->pages[i] = sg_page(sgl);
                buffer->size += sg_dma_len(sgl);
                sgl = sg_next(sgl);
                i++;
        }

        exynos_gem_obj->buffer = buffer;
        buffer->sgt = sgt;
        exynos_gem_obj->base.import_attach = attach;

        DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
                        buffer->size);

        return &exynos_gem_obj->base;

err_free_pages:
        kfree(buffer->pages);
        buffer->pages = NULL;
err_free_buffer:
        kfree(buffer);
        buffer = NULL;
err_unmap_attach:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}
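
/*
 * Illustrative only (the real hookup lives in exynos_drm_drv.c, not in
 * this file): these helpers are plugged into the driver's drm_driver
 * struct roughly as follows:
 *
 *      .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
 *      .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
 *      .gem_prime_export       = exynos_dmabuf_prime_export,
 *      .gem_prime_import       = exynos_dmabuf_prime_import,
 */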
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");