/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
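
/*
 * PRIME (dma-buf) support for nouveau: export nouveau buffer objects as
 * dma-bufs and import dma-bufs from other drivers as GEM objects.
 */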
#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#include <linux/dma-buf.h>
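
/*
 * Called when an importing device maps its attachment: build a scatterlist
 * from the TTM backing pages of the buffer object and DMA-map it for the
 * importer's device, serialised by the DRM struct_mutex.
 */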
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                                enum dma_data_direction dir)
{
        struct nouveau_bo *nvbo = attachment->dmabuf->priv;
        struct drm_device *dev = nvbo->gem->dev;
        int npages = nvbo->bo.num_pages;
        struct sg_table *sg;
        int nents;

        mutex_lock(&dev->struct_mutex);
        sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
        nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
        mutex_unlock(&dev->struct_mutex);
        return sg;
}
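
/*
 * Mirror of the map callback: undo the DMA mapping and free the
 * scatter/gather table that was allocated for this attachment.
 */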
static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                      struct sg_table *sg, enum dma_data_direction dir)
{
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
}
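
/*
 * dma-buf release: if this dma-buf is the one we exported for the GEM
 * object, drop the reference that was taken at export time.
 */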
static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct nouveau_bo *nvbo = dma_buf->priv;

        if (nvbo->gem->export_dma_buf == dma_buf) {
                nvbo->gem->export_dma_buf = NULL;
                drm_gem_object_unreference_unlocked(nvbo->gem);
        }
}
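
/*
 * CPU access through the dma-buf kmap/mmap interfaces is not implemented:
 * the kmap callbacks return NULL and mmap fails with -EINVAL.  Only the
 * vmap/vunmap path below provides CPU access.
 */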
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num,
                                      void *addr)
{

}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
                               void *addr)
{

}

static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}
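
/*
 * vmap: map the whole buffer into the kernel's address space with
 * ttm_bo_kmap().  The mapping is refcounted via vmapping_count so that
 * repeated vmap/vunmap calls share a single ttm_bo_kmap mapping.
 */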
static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
        struct nouveau_bo *nvbo = dma_buf->priv;
        struct drm_device *dev = nvbo->gem->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        if (nvbo->vmapping_count) {
                nvbo->vmapping_count++;
                goto out_unlock;
        }

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
                          &nvbo->dma_buf_vmap);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ERR_PTR(ret);
        }

        nvbo->vmapping_count = 1;
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return nvbo->dma_buf_vmap.virtual;
}
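
/*
 * vunmap: drop one reference on the kernel mapping and tear it down with
 * ttm_bo_kunmap() once the last user is gone.
 */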
static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct nouveau_bo *nvbo = dma_buf->priv;
        struct drm_device *dev = nvbo->gem->dev;

        mutex_lock(&dev->struct_mutex);
        nvbo->vmapping_count--;
        if (nvbo->vmapping_count == 0)
                ttm_bo_kunmap(&nvbo->dma_buf_vmap);
        mutex_unlock(&dev->struct_mutex);
}
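
/*
 * dma-buf operations handed to dma_buf_export() for every buffer this
 * driver shares; importers reach the callbacks above through this table.
 */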
static const struct dma_buf_ops nouveau_dmabuf_ops = {
        .map_dma_buf = nouveau_gem_map_dma_buf,
        .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
        .release = nouveau_gem_dmabuf_release,
        .kmap = nouveau_gem_kmap,
        .kmap_atomic = nouveau_gem_kmap_atomic,
        .kunmap = nouveau_gem_kunmap,
        .kunmap_atomic = nouveau_gem_kunmap_atomic,
        .mmap = nouveau_gem_prime_mmap,
        .vmap = nouveau_gem_prime_vmap,
        .vunmap = nouveau_gem_prime_vunmap,
};
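
/*
 * Create a nouveau_bo (and its GEM wrapper) around an imported sg_table.
 * The object is restricted to the GART domain, matching the valid_domains
 * set below.
 */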
static int
nouveau_prime_new(struct drm_device *dev,
                  size_t size,
                  struct sg_table *sg,
                  struct nouveau_bo **pnvbo)
{
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        flags = TTM_PL_FLAG_TT;

        ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
                             sg, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->gem->driver_private = nvbo;
        return 0;
}
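
/*
 * Export a GEM object as a dma-buf: pin the buffer into GTT so its pages
 * stay resident for the lifetime of the export, then hand it to
 * dma_buf_export() with the ops table above.
 */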
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
                                         struct drm_gem_object *obj, int flags)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(obj);
        int ret;

        /* pin buffer into GTT */
        ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
        if (ret)
                return ERR_PTR(-EINVAL);

        return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}
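
/*
 * Import a dma-buf as a GEM object.  A dma-buf that was exported by this
 * driver for the same device is resolved back to the original GEM object;
 * anything else is attached, mapped and wrapped in a new GART-only
 * nouveau_bo via nouveau_prime_new().
 */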
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
                                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct nouveau_bo *nvbo;
        int ret;

        if (dma_buf->ops == &nouveau_dmabuf_ops) {
                nvbo = dma_buf->priv;
                /* the dma-buf wraps one of our own GEM objects */
                if (nvbo->gem && nvbo->gem->dev == dev) {
                        drm_gem_object_reference(nvbo->gem);
                        return nvbo->gem;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_PTR(PTR_ERR(attach));

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
        if (ret)
                goto fail_unmap;

        nvbo->gem->import_attach = attach;

        return nvbo->gem;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}