/*
 * Copyright (c) 2015-2016, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"
static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->teedev;
	struct tee_shm_pool_mgr *poolm;

	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	if (shm->ctx)
		list_del(&shm->link);
	mutex_unlock(&teedev->mutex);

	if (shm->flags & TEE_SHM_DMA_BUF)
		poolm = &teedev->pool->dma_buf_mgr;
	else
		poolm = &teedev->pool->private_mgr;

	poolm->ops->free(poolm, shm);
	kfree(shm);

	tee_device_put(teedev);
}
static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
			*attach, enum dma_data_direction dir)
{
	return NULL;
}
static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *table,
				     enum dma_data_direction dir)
{
}
static void tee_shm_op_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;

	tee_shm_release(shm);
}
static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
{
	return NULL;
}
static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
{
	return NULL;
}
static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
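
/*
 * The dma_buf_ops below back the dma-buf exported for TEE_SHM_DMA_BUF
 * allocations in tee_shm_alloc(). Only release and mmap do real work;
 * the map/attach callbacks are stubs since the memory is only shared
 * with user space, never exported to other DMA devices.
 */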
static const struct dma_buf_ops tee_shm_dma_buf_ops = {
	.map_dma_buf = tee_shm_op_map_dma_buf,
	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
	.release = tee_shm_op_release,
	.map_atomic = tee_shm_op_map_atomic,
	.map = tee_shm_op_map,
	.mmap = tee_shm_op_mmap,
};
/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 * @flags:	Flags setting properties for the requested shared memory.
 *
 * Memory allocated as global shared memory is automatically freed when the
 * TEE file pointer is closed. The @flags field uses the bits defined by
 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
 * set. If TEE_SHM_DMA_BUF is set, global shared memory is allocated and
 * associated with a dma-buf handle, else driver private memory.
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags;
	shm->teedev = teedev;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = &teedev->pool->dma_buf_mgr;
	else
		poolm = &teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err_pool_free;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}
	mutex_lock(&teedev->mutex);
	list_add_tail(&shm->link, &ctx->list_shm);
	mutex_unlock(&teedev->mutex);

	return shm;
err_rem:
	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	mutex_unlock(&teedev->mutex);
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
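
/*
 * Illustrative sketch (not part of the driver): how a client driver could
 * allocate, map and release shared memory, assuming an already-opened
 * struct tee_context *ctx. The error handling mirrors the ERR_PTR
 * convention used by the functions in this file.
 *
 *	struct tee_shm *shm;
 *	void *va;
 *
 *	shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	va = tee_shm_get_va(shm, 0);
 *	if (IS_ERR(va)) {
 *		tee_shm_free(shm);
 *		return PTR_ERR(va);
 *	}
 *	... fill the buffer and hand shm to secure world ...
 *	tee_shm_free(shm);
 */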
/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
	int fd;

	if ((shm->flags & req_flags) != req_flags)
		return -EINVAL;

	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd >= 0)
		get_dma_buf(shm->dmabuf);
	return fd;
}
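
/*
 * Note (explanatory, based on dma-buf fd semantics): the returned
 * descriptor owns the extra reference taken with get_dma_buf() above,
 * so user space drops it with close(fd) while the kernel side keeps
 * its own reference until tee_shm_free() or tee_shm_put().
 */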
/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);
/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:	Shared memory handle
 * @va:		Virtual address to translate
 * @pa:		Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);
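
/*
 * Illustrative sketch (assumes shm was returned by tee_shm_alloc() so
 * kaddr and paddr are valid): translating a kernel virtual address
 * inside the buffer back to its physical address.
 *
 *	phys_addr_t pa;
 *	void *va = tee_shm_get_va(shm, 0);
 *
 *	if (!IS_ERR(va) && !tee_shm_va2pa(shm, va, &pa))
 *		pr_debug("shm starts at %pa\n", &pa);
 */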
/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:	Shared memory handle
 * @pa:		Physical address to translate
 * @va:		Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);
/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
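
/*
 * Illustrative sketch (offset 0x80 is an arbitrary example value): the
 * offset-based helpers are the usual way to address into a shm buffer,
 * for instance to obtain the physical address of a payload placed at a
 * fixed offset before handing it to secure world.
 *
 *	phys_addr_t pa;
 *
 *	if (!tee_shm_get_pa(shm, 0x80, &pa))
 *		... pass pa to secure world ...
 */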
/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
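
/*
 * Illustrative sketch (ctx and id come from the caller, e.g. an ioctl
 * argument): for TEE_SHM_DMA_BUF allocations the lookup takes a dma-buf
 * reference, so it must be balanced with tee_shm_put() once the buffer
 * is no longer used.
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	... use the shared memory ...
 *	tee_shm_put(shm);
 */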
/**
 * tee_shm_get_id() - Get id of a shared memory object
 * @shm:	Shared memory handle
 * @returns id
 */
int tee_shm_get_id(struct tee_shm *shm)
{
	return shm->id;
}
EXPORT_SYMBOL_GPL(tee_shm_get_id);
/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);