// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, 2023 Linaro Limited
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"

#define MAX_ARG_PARAM_COUNT	6

/*
 * How much memory we allocate for each entry. This doesn't have to be a
 * single page, but it makes sense to keep it at least as a multiple of
 * the page size.
 */
#define SHM_ENTRY_SIZE		PAGE_SIZE

/*
 * We need to have a compile time constant to be able to determine the
 * maximum needed size of the bit field.
 */
#define MIN_ARG_SIZE		OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT)
#define MAX_ARG_COUNT_PER_ENTRY	(SHM_ENTRY_SIZE / MIN_ARG_SIZE)
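
/*
 * Illustrative arithmetic only (the real values follow from
 * OPTEE_MSG_GET_ARG_SIZE() in optee_msg.h): with 4 KiB pages and the
 * usual OPTEE_MSG layout of a 32-byte struct optee_msg_arg header
 * followed by 32 bytes per parameter, MIN_ARG_SIZE would be
 * 32 + 6 * 32 = 224 bytes, giving MAX_ARG_COUNT_PER_ENTRY =
 * 4096 / 224 = 18 argument structs per shared memory entry.
 */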
/*
 * Shared memory for argument structs is cached here. The number of
 * argument structs that can fit is determined at runtime depending on the
 * needed RPC parameter count reported by secure world
 * (optee->rpc_param_count).
 */
struct optee_shm_arg_entry {
        struct list_head list_node;
        struct tee_shm *shm;
        DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
};
void optee_cq_init(struct optee_call_queue *cq, int thread_count)
{
        mutex_init(&cq->mutex);
        INIT_LIST_HEAD(&cq->waiters);

        /*
         * If cq->total_thread_count is 0 then we're not trying to keep
         * track of how many free threads we have, instead we're relying on
         * the secure world to tell us when we're out of threads and have to
         * wait for another thread to become available.
         */
        cq->total_thread_count = thread_count;
        cq->free_thread_count = thread_count;
}
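
/*
 * A sketch of how a caller is expected to drive the call queue (the
 * transport backends do roughly this; invoke_fn() and BUSY_NO_FREE_THREAD
 * below are illustrative stand-ins for the transport-specific call and its
 * "out of threads" status, e.g. OPTEE_SMC_RETURN_ETHREAD_LIMIT for the
 * SMC ABI):
 *
 *	struct optee_call_waiter w;
 *
 *	optee_cq_wait_init(cq, &w, sys_thread);
 *	while (invoke_fn() == BUSY_NO_FREE_THREAD)
 *		optee_cq_wait_for_completion(cq, &w);
 *	optee_cq_wait_final(cq, &w);
 */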
void optee_cq_wait_init(struct optee_call_queue *cq,
                        struct optee_call_waiter *w, bool sys_thread)
{
        unsigned int free_thread_threshold;
        bool need_wait = false;

        memset(w, 0, sizeof(*w));

        /*
         * We're preparing to make a call to secure world. In case we can't
         * allocate a thread in secure world we'll end up waiting in
         * optee_cq_wait_for_completion().
         *
         * Normally if there's no contention in secure world the call will
         * complete and we can cleanup directly with optee_cq_wait_final().
         */
        mutex_lock(&cq->mutex);

        /*
         * We add ourselves to the queue, but we don't wait. This
         * guarantees that we don't lose a completion if secure world
         * returns busy and another thread just exited and tries to
         * complete someone.
         */
        init_completion(&w->c);
        list_add_tail(&w->list_node, &cq->waiters);
        w->sys_thread = sys_thread;

        if (cq->total_thread_count) {
                if (sys_thread || !cq->sys_thread_req_count)
                        free_thread_threshold = 0;
                else
                        free_thread_threshold = 1;

                if (cq->free_thread_count > free_thread_threshold)
                        cq->free_thread_count--;
                else
                        need_wait = true;
        }

        mutex_unlock(&cq->mutex);

        while (need_wait) {
                optee_cq_wait_for_completion(cq, w);
                mutex_lock(&cq->mutex);

                if (sys_thread || !cq->sys_thread_req_count)
                        free_thread_threshold = 0;
                else
                        free_thread_threshold = 1;

                if (cq->free_thread_count > free_thread_threshold) {
                        cq->free_thread_count--;
                        need_wait = false;
                }

                mutex_unlock(&cq->mutex);
        }
}
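
/*
 * The threshold above is what reserves a thread for system sessions: while
 * at least one system session is registered (cq->sys_thread_req_count is
 * non-zero), a normal caller only takes a thread when more than one is
 * free, so the last free thread is always left for a system-session
 * caller.
 */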
void optee_cq_wait_for_completion(struct optee_call_queue *cq,
                                  struct optee_call_waiter *w)
{
        wait_for_completion(&w->c);

        mutex_lock(&cq->mutex);

        /* Move to end of list to get out of the way for other waiters */
        list_del(&w->list_node);
        reinit_completion(&w->c);
        list_add_tail(&w->list_node, &cq->waiters);

        mutex_unlock(&cq->mutex);
}
static void optee_cq_complete_one(struct optee_call_queue *cq)
{
        struct optee_call_waiter *w;

        /* Wake a waiting system session if any, prior to a normal session */
        list_for_each_entry(w, &cq->waiters, list_node) {
                if (w->sys_thread && !completion_done(&w->c)) {
                        complete(&w->c);
                        return;
                }
        }

        list_for_each_entry(w, &cq->waiters, list_node) {
                if (!completion_done(&w->c)) {
                        complete(&w->c);
                        break;
                }
        }
}
void optee_cq_wait_final(struct optee_call_queue *cq,
                         struct optee_call_waiter *w)
{
        /*
         * We're done with the call to secure world. The thread in secure
         * world that was used for this call is now available for some
         * other task to use.
         */
        mutex_lock(&cq->mutex);

        /* Get out of the list */
        list_del(&w->list_node);

        cq->free_thread_count++;

        /* Wake up one eventual waiting task */
        optee_cq_complete_one(cq);

        /*
         * If we're completed we've got a completion from another task that
         * was just done with its call to secure world. Since yet another
         * thread now is available in secure world wake up another eventual
         * waiting task.
         */
        if (completion_done(&w->c))
                optee_cq_complete_one(cq);

        mutex_unlock(&cq->mutex);
}
/* Count registered system sessions to decide whether to reserve a thread */
static bool optee_cq_incr_sys_thread_count(struct optee_call_queue *cq)
{
        if (cq->total_thread_count <= 1)
                return false;

        mutex_lock(&cq->mutex);
        cq->sys_thread_req_count++;
        mutex_unlock(&cq->mutex);

        return true;
}
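
/*
 * Note that the reservation is refused when secure world has a single
 * thread (or an unknown thread count, i.e. 0): reserving the only thread
 * for system sessions would starve normal sessions entirely.
 */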
static void optee_cq_decr_sys_thread_count(struct optee_call_queue *cq)
{
        mutex_lock(&cq->mutex);
        cq->sys_thread_req_count--;
        /* If there's someone waiting, let it resume */
        optee_cq_complete_one(cq);
        mutex_unlock(&cq->mutex);
}
/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
                                          u32 session_id)
{
        struct optee_session *sess;

        list_for_each_entry(sess, &ctxdata->sess_list, list_node)
                if (sess->session_id == session_id)
                        return sess;

        return NULL;
}
void optee_shm_arg_cache_init(struct optee *optee, u32 flags)
{
        INIT_LIST_HEAD(&optee->shm_arg_cache.shm_args);
        mutex_init(&optee->shm_arg_cache.mutex);
        optee->shm_arg_cache.flags = flags;
}
void optee_shm_arg_cache_uninit(struct optee *optee)
{
        struct list_head *head = &optee->shm_arg_cache.shm_args;
        struct optee_shm_arg_entry *entry;

        mutex_destroy(&optee->shm_arg_cache.mutex);
        while (!list_empty(head)) {
                entry = list_first_entry(head, struct optee_shm_arg_entry,
                                         list_node);
                list_del(&entry->list_node);
                if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) !=
                    MAX_ARG_COUNT_PER_ENTRY)
                        pr_err("Freeing non-free entry\n");
                tee_shm_free(entry->shm);
                kfree(entry);
        }
}
size_t optee_msg_arg_size(size_t rpc_param_count)
{
        size_t sz = OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT);

        if (rpc_param_count)
                sz += OPTEE_MSG_GET_ARG_SIZE(rpc_param_count);

        return sz;
}
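
/*
 * For example, with the illustrative 32-byte header and 32 bytes per
 * parameter used in the comment near MIN_ARG_SIZE above, a secure world
 * reporting rpc_param_count == 2 would make each slot
 * 224 + (32 + 2 * 32) = 320 bytes: one argument struct for the call
 * itself, immediately followed by one for RPC.
 */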
/**
 * optee_get_msg_arg() - Provide shared memory for argument struct
 * @ctx:	Caller TEE context
 * @num_params:	Number of parameters to store
 * @entry_ret:	Entry pointer, needed when freeing the buffer
 * @shm_ret:	Shared memory buffer
 * @offs_ret:	Offset of argument struct in shared memory buffer
 *
 * @returns a pointer to the argument struct in memory, else an ERR_PTR
 */
struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
                                        size_t num_params,
                                        struct optee_shm_arg_entry **entry_ret,
                                        struct tee_shm **shm_ret,
                                        u_int *offs_ret)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        size_t sz = optee_msg_arg_size(optee->rpc_param_count);
        struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *ma;
        size_t args_per_entry;
        u_long bit;
        u_int offs;
        void *res;

        if (num_params > MAX_ARG_PARAM_COUNT)
                return ERR_PTR(-EINVAL);

        if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_SHARED)
                args_per_entry = SHM_ENTRY_SIZE / sz;
        else
                args_per_entry = 1;

        mutex_lock(&optee->shm_arg_cache.mutex);
        list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) {
                bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
                if (bit < args_per_entry)
                        goto have_entry;
        }

        /*
         * No entry was found, let's allocate a new one.
         */
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                res = ERR_PTR(-ENOMEM);
                goto out;
        }

        if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV)
                res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE);
        else
                res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE);
        if (IS_ERR(res)) {
                kfree(entry);
                goto out;
        }
        entry->shm = res;
        list_add(&entry->list_node, &optee->shm_arg_cache.shm_args);
        bit = 0;

have_entry:
        offs = bit * sz;
        res = tee_shm_get_va(entry->shm, offs);
        if (IS_ERR(res))
                goto out;
        ma = res;
        set_bit(bit, entry->map);
        memset(ma, 0, sz);
        ma->num_params = num_params;
        *entry_ret = entry;
        *shm_ret = entry->shm;
        *offs_ret = offs;
out:
        mutex_unlock(&optee->shm_arg_cache.mutex);
        return res;
}
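
/*
 * A minimal usage sketch; this is the pattern the callers below follow:
 *
 *	struct optee_shm_arg_entry *entry;
 *	struct optee_msg_arg *msg_arg;
 *	struct tee_shm *shm;
 *	u_int offs;
 *
 *	msg_arg = optee_get_msg_arg(ctx, num_params, &entry, &shm, &offs);
 *	if (IS_ERR(msg_arg))
 *		return PTR_ERR(msg_arg);
 *	// ...fill in msg_arg, pass shm/offs to ops->do_call_with_arg()...
 *	optee_free_msg_arg(ctx, entry, offs);
 */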
/**
 * optee_free_msg_arg() - Free previously obtained shared memory
 * @ctx:	Caller TEE context
 * @entry:	Pointer returned when the shared memory was obtained
 * @offs:	Offset of shared memory buffer to free
 *
 * This function frees the shared memory obtained with optee_get_msg_arg().
 */
void optee_free_msg_arg(struct tee_context *ctx,
                        struct optee_shm_arg_entry *entry, u_int offs)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        size_t sz = optee_msg_arg_size(optee->rpc_param_count);
        u_long bit;

        if (offs > SHM_ENTRY_SIZE || offs % sz) {
                pr_err("Invalid offs %u\n", offs);
                return;
        }
        bit = offs / sz;

        mutex_lock(&optee->shm_arg_cache.mutex);

        if (!test_bit(bit, entry->map))
                pr_err("Bit pos %lu is already free\n", bit);
        clear_bit(bit, entry->map);

        mutex_unlock(&optee->shm_arg_cache.mutex);
}
int optee_open_session(struct tee_context *ctx,
                       struct tee_ioctl_open_session_arg *arg,
                       struct tee_param *param)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_context_data *ctxdata = ctx->data;
        struct optee_shm_arg_entry *entry;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        struct optee_session *sess = NULL;
        uuid_t client_uuid;
        u_int offs;
        int rc;

        /* +2 for the meta parameters added below */
        msg_arg = optee_get_msg_arg(ctx, arg->num_params + 2,
                                    &entry, &shm, &offs);
        if (IS_ERR(msg_arg))
                return PTR_ERR(msg_arg);

        msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
        msg_arg->cancel_id = arg->cancel_id;

        /*
         * Initialize and add the meta parameters needed when opening a
         * session.
         */
        msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                                  OPTEE_MSG_ATTR_META;
        msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                                  OPTEE_MSG_ATTR_META;
        memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
        msg_arg->params[1].u.value.c = arg->clnt_login;

        rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
                                          arg->clnt_uuid);
        if (rc)
                goto out;
        export_uuid(msg_arg->params[1].u.octets, &client_uuid);

        rc = optee->ops->to_msg_param(optee, msg_arg->params + 2,
                                      arg->num_params, param);
        if (rc)
                goto out;

        sess = kzalloc(sizeof(*sess), GFP_KERNEL);
        if (!sess) {
                rc = -ENOMEM;
                goto out;
        }

        if (optee->ops->do_call_with_arg(ctx, shm, offs,
                                         sess->use_sys_thread)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        if (msg_arg->ret == TEEC_SUCCESS) {
                /* A new session has been created, add it to the list. */
                sess->session_id = msg_arg->session;
                mutex_lock(&ctxdata->mutex);
                list_add(&sess->list_node, &ctxdata->sess_list);
                mutex_unlock(&ctxdata->mutex);
        } else {
                kfree(sess);
        }

        if (optee->ops->from_msg_param(optee, param, arg->num_params,
                                       msg_arg->params + 2)) {
                arg->ret = TEEC_ERROR_COMMUNICATION;
                arg->ret_origin = TEEC_ORIGIN_COMMS;
                /* Close session again to avoid leakage */
                optee_close_session(ctx, msg_arg->session);
        } else {
                arg->session = msg_arg->session;
                arg->ret = msg_arg->ret;
                arg->ret_origin = msg_arg->ret_origin;
        }
out:
        optee_free_msg_arg(ctx, entry, offs);

        return rc;
}
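
/*
 * The two meta parameters above follow the OPTEE_MSG_CMD_OPEN_SESSION
 * convention: params[0] carries the UUID of the trusted application to
 * open a session to, and params[1] carries the client login class in
 * value.c, with the computed client UUID exported into the same
 * parameter's octets.
 */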
int optee_system_session(struct tee_context *ctx, u32 session)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_context_data *ctxdata = ctx->data;
        struct optee_session *sess;
        int rc = -EINVAL;

        mutex_lock(&ctxdata->mutex);

        sess = find_session(ctxdata, session);
        if (sess && (sess->use_sys_thread ||
                     optee_cq_incr_sys_thread_count(&optee->call_queue))) {
                sess->use_sys_thread = true;
                rc = 0;
        }

        mutex_unlock(&ctxdata->mutex);

        return rc;
}
int optee_close_session_helper(struct tee_context *ctx, u32 session,
                               bool system_thread)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *msg_arg;
        struct tee_shm *shm;
        u_int offs;

        msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
        if (IS_ERR(msg_arg))
                return PTR_ERR(msg_arg);

        msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
        msg_arg->session = session;
        optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);

        optee_free_msg_arg(ctx, entry, offs);

        if (system_thread)
                optee_cq_decr_sys_thread_count(&optee->call_queue);

        return 0;
}
int optee_close_session(struct tee_context *ctx, u32 session)
{
        struct optee_context_data *ctxdata = ctx->data;
        struct optee_session *sess;
        bool system_thread;

        /* Check that the session is valid and remove it from the list */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, session);
        if (sess)
                list_del(&sess->list_node);
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;
        system_thread = sess->use_sys_thread;
        kfree(sess);

        return optee_close_session_helper(ctx, session, system_thread);
}
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
                      struct tee_param *param)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_context_data *ctxdata = ctx->data;
        struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *msg_arg;
        struct optee_session *sess;
        struct tee_shm *shm;
        bool system_thread;
        u_int offs;
        int rc;

        /* Check that the session is valid */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, arg->session);
        if (sess)
                system_thread = sess->use_sys_thread;
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;

        msg_arg = optee_get_msg_arg(ctx, arg->num_params,
                                    &entry, &shm, &offs);
        if (IS_ERR(msg_arg))
                return PTR_ERR(msg_arg);
        msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
        msg_arg->func = arg->func;
        msg_arg->session = arg->session;
        msg_arg->cancel_id = arg->cancel_id;

        rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params,
                                      param);
        if (rc)
                goto out;

        if (optee->ops->do_call_with_arg(ctx, shm, offs, system_thread)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        if (optee->ops->from_msg_param(optee, param, arg->num_params,
                                       msg_arg->params)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }

        arg->ret = msg_arg->ret;
        arg->ret_origin = msg_arg->ret_origin;
out:
        optee_free_msg_arg(ctx, entry, offs);

        return rc;
}
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_context_data *ctxdata = ctx->data;
        struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *msg_arg;
        struct optee_session *sess;
        bool system_thread;
        struct tee_shm *shm;
        u_int offs;

        /* Check that the session is valid */
        mutex_lock(&ctxdata->mutex);
        sess = find_session(ctxdata, session);
        if (sess)
                system_thread = sess->use_sys_thread;
        mutex_unlock(&ctxdata->mutex);
        if (!sess)
                return -EINVAL;

        msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
        if (IS_ERR(msg_arg))
                return PTR_ERR(msg_arg);

        msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
        msg_arg->session = session;
        msg_arg->cancel_id = cancel_id;
        optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);

        optee_free_msg_arg(ctx, entry, offs);
        return 0;
}
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
        return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
                ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
        return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}
static int __check_mem_type(struct mm_struct *mm, unsigned long start,
                            unsigned long end)
{
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, start);

        for_each_vma_range(vmi, vma, end) {
                if (!is_normal_memory(vma->vm_page_prot))
                        return -EINVAL;
        }

        return 0;
}
int optee_check_mem_type(unsigned long start, size_t num_pages)
{
        struct mm_struct *mm = current->mm;
        int rc;

        /*
         * Allow kernel addresses to register with OP-TEE, as kernel
         * pages are configured as normal memory only.
         */
        if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
                return 0;

        mmap_read_lock(mm);
        rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE);
        mmap_read_unlock(mm);

        return rc;
}
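
/*
 * The normal-memory restriction above exists because memory shared with
 * OP-TEE is mapped as normal cached memory in the secure world; sharing
 * device or strongly-ordered user mappings could give mismatched memory
 * attributes between the two worlds. Kernel linear-map and vmalloc
 * addresses are accepted outright since the kernel only maps those as
 * normal memory.
 */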
static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *msg_arg;
        struct tee_shm *shm;
        u_int offs;

        msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
        if (IS_ERR(msg_arg))
                return PTR_ERR(msg_arg);

        msg_arg->cmd = cmd;
        optee->ops->do_call_with_arg(ctx, shm, offs, false);

        optee_free_msg_arg(ctx, entry, offs);
        return 0;
}
int optee_do_bottom_half(struct tee_context *ctx)
{
        return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}

int optee_stop_async_notif(struct tee_context *ctx)
{
        return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}