// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include "tee_private.h"

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		put_page(pages[n]);
}

static void shm_get_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		get_page(pages[n]);
}

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED)
			unpin_user_pages(shm->pages, shm->num_pages);
		else
			shm_put_kernel_pages(shm->pages, shm->num_pages);

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_POOL) {
		teedev->pool->ops->free(teedev->pool, shm);
	} else if (shm->flags & TEE_SHM_DYNAMIC) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
					size_t align, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->id = id;

	/*
	 * We're assigning this as it is needed if the shm is to be
	 * registered. If this function returns OK then the caller is
	 * expected to call teedev_ctx_get() or clear shm->ctx in case it's
	 * not needed any longer.
	 */
	shm->ctx = ctx;

	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	teedev_ctx_get(ctx);
	return shm;
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}
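
/*
 * Note on the pattern above: the id is reserved first with a NULL
 * placeholder and only published with idr_replace() once the shm is fully
 * set up, so tee_shm_get_from_id() can never observe a half-initialized
 * entry.
 */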

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in a parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
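
/*
 * Usage sketch (illustrative only; "ctx", "data" and "data_len" are
 * placeholders): a client driver typically fills the buffer through the
 * kernel mapping and passes the shm in a memref parameter to
 * tee_client_invoke_func():
 *
 *	struct tee_shm *shm = tee_shm_alloc_kernel_buf(ctx, data_len);
 *	void *va;
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	va = tee_shm_get_va(shm, 0);
 *	if (IS_ERR(va)) {
 *		tee_shm_free(shm);
 *		return PTR_ERR(va);
 *	}
 *	memcpy(va, data, data_len);
 *	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
 *	param.u.memref.shm = shm;
 *	param.u.memref.size = data_len;
 *	... tee_client_invoke_func(ctx, &arg, &param) ...
 *	tee_shm_free(shm);
 */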

/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * This function returns shared memory similar to
 * tee_shm_alloc_kernel_buf(), but with the difference that the memory
 * might not be registered in secure world in case the driver supports
 * passing memory not registered in advance.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);

int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
			     int (*shm_register)(struct tee_context *ctx,
						 struct tee_shm *shm,
						 struct page **pages,
						 size_t num_pages,
						 unsigned long start))
{
	size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	struct page **pages;
	unsigned int i;
	int rc = 0;

	/*
	 * Ignore alignment since this is already going to be page aligned
	 * and there's no need for any larger alignment.
	 */
	shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
				       GFP_KERNEL | __GFP_ZERO);
	if (!shm->kaddr)
		return -ENOMEM;

	shm->paddr = virt_to_phys(shm->kaddr);
	shm->size = nr_pages * PAGE_SIZE;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < nr_pages; i++)
		pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);

	shm->pages = pages;
	shm->num_pages = nr_pages;

	if (shm_register) {
		rc = shm_register(shm->ctx, shm, pages, nr_pages,
				  (unsigned long)shm->kaddr);
		if (rc)
			goto err;
	}

	return 0;
err:
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
	return rc;
}
EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper);

void tee_dyn_shm_free_helper(struct tee_shm *shm,
			     int (*shm_unregister)(struct tee_context *ctx,
						   struct tee_shm *shm))
{
	if (shm_unregister)
		shm_unregister(shm->ctx, shm);
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
	kfree(shm->pages);
}
EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper);
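
/*
 * Sketch of how a backend driver can wire the two helpers above into its
 * pool ops (hypothetical my_shm_register()/my_shm_unregister() callbacks;
 * the signatures match the function pointers taken above):
 *
 *	static int pool_op_alloc(struct tee_shm_pool *pool,
 *				 struct tee_shm *shm, size_t size,
 *				 size_t align)
 *	{
 *		return tee_dyn_shm_alloc_helper(shm, size, align,
 *						my_shm_register);
 *	}
 *
 *	static void pool_op_free(struct tee_shm_pool *pool,
 *				 struct tee_shm *shm)
 *	{
 *		tee_dyn_shm_free_helper(shm, my_shm_unregister);
 *	}
 */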

static struct tee_shm *
register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
		    int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	unsigned long start, addr;
	size_t num_pages, off;
	ssize_t len;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		ret = ERR_PTR(-ENOTSUPP);
		goto err_dev_put;
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_ctx_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->ctx = ctx;
	shm->id = id;
	addr = untagged_addr((unsigned long)iter_iov_addr(iter));
	start = rounddown(addr, PAGE_SIZE);
	num_pages = iov_iter_npages(iter, INT_MAX);
	if (!num_pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
				     &off);
	if (unlikely(len <= 0)) {
		ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
		goto err_free_shm_pages;
	}

	/*
	 * iov_iter_extract_kvec_pages() does not get a reference on the
	 * pages, get a reference on them.
	 */
	if (iov_iter_is_kvec(iter))
		shm_get_kernel_pages(shm->pages, num_pages);

	shm->offset = off;
	shm->size = len;
	shm->num_pages = num_pages;

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	return shm;
err_put_shm_pages:
	if (!iov_iter_is_kvec(iter))
		unpin_user_pages(shm->pages, shm->num_pages);
	else
		shm_put_kernel_pages(shm->pages, shm->num_pages);
err_free_shm_pages:
	kfree(shm->pages);
err_free_shm:
	kfree(shm);
err_ctx_put:
	teedev_ctx_put(ctx);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	The userspace address of the shared buffer
 * @length:	Length of the shared buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
					  unsigned long addr, size_t length)
{
	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	struct iov_iter iter;
	void *ret;
	int id;

	if (!access_ok((void __user *)addr, length))
		return ERR_PTR(-EFAULT);

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
	shm = register_shm_helper(ctx, &iter, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
 *				   secure world
 * @ctx:	Context that registers the shared memory
 * @addr:	The buffer
 * @length:	Length of the buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
					    void *addr, size_t length)
{
	u32 flags = TEE_SHM_DYNAMIC;
	struct kvec kvec;
	struct iov_iter iter;

	kvec.iov_base = addr;
	kvec.iov_len = length;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);

	return register_shm_helper(ctx, &iter, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
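
/*
 * Usage sketch (illustrative only; "ctx", "buf" and "buf_len" are
 * placeholders): registering an existing kernel buffer instead of
 * allocating one from the pool. The buffer must stay alive until the
 * matching tee_shm_free():
 *
 *	struct tee_shm *shm = tee_shm_register_kernel_buf(ctx, buf, buf_len);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	... pass shm in memref parameters ...
 *	tee_shm_free(shm);
 */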

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
	tee_shm_put(filp->private_data);
	return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct tee_shm *shm = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (shm->id < 0)
		return -EINVAL;

	/* matched by tee_shm_put() in tee_shm_fop_release() */
	refcount_inc(&shm->refcount);
	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
	if (fd < 0)
		tee_shm_put(shm);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!shm->kaddr)
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
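
/*
 * Note: both accessors address the same byte for a given offset; for a
 * pool-backed shm, tee_shm_get_va(shm, offs) maps to physical address
 * shm->paddr + offs. Buffers registered from user space have no
 * contiguous kernel mapping, so shm->kaddr is NULL and tee_shm_get_va()
 * returns an ERR_PTR for them.
 */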

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 *			   count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * If the tee_shm was found in the IDR it must have a refcount
	 * larger than 0 due to the guarantee in tee_shm_put() below. So
	 * it's safe to use refcount_inc().
	 */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else
		refcount_inc(&shm->refcount);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;
	bool do_release = false;

	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->id >= 0)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
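
/*
 * Reference counting summary: a tee_shm starts at refcount 1 from its
 * allocation or registration helper. tee_shm_get_from_id() and
 * tee_shm_get_fd() take extra references; tee_shm_put() drops one and, on
 * the final put, removes the id from the IDR under teedev->mutex before
 * calling tee_shm_release(), which frees pool memory or unregisters the
 * buffer and releases its pages.
 */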