// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, 2023 Linaro Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm_ffa.h>
#include <linux/errno.h>
#include <linux/rpmb.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"
#include "optee_ffa.h"
#include "optee_rpc_cmd.h"

/*
 * This file implements the FF-A ABI used when communicating with secure world.
 *
 * This file is divided into the following sections:
 * 1. Maintain a hash table for lookup of a global FF-A memory handle
 * 2. Convert between struct tee_param and struct optee_msg_param
 * 3. Low level support functions to register shared memory in secure world
 * 4. Dynamic shared memory pool based on alloc_pages()
 * 5. Do a normal scheduled call into secure world
 * 6. Driver initialization
 */

/*
 * 1. Maintain a hash table for lookup of a global FF-A memory handle
 *
 * FF-A assigns a global memory handle for each piece of shared memory.
 * This handle is then used when communicating with secure world.
 *
 * Main functions are optee_shm_add_ffa_handle() and optee_shm_rem_ffa_handle()
 */
struct shm_rhash {
	struct tee_shm *shm;
	struct rhash_head linkage;
	u64 global_id;
};

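/*
 * Used as the free callback for rhashtable_free_and_destroy() when the
 * table is torn down; each remaining entry is simply kfree()d.
 */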
static void rh_free_fn(void *ptr, void *arg)
{
	kfree(ptr);
}

static const struct rhashtable_params shm_rhash_params = {
	.head_offset = offsetof(struct shm_rhash, linkage),
	.key_len = sizeof(u64),
	.key_offset = offsetof(struct shm_rhash, global_id),
	.automatic_shrinking = true,
};

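/*
 * Entries are keyed on the 64-bit FF-A global handle (global_id) so that
 * a handle received from secure world can be mapped back to its
 * struct tee_shm.
 */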
static struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee,
						 u64 global_id)
{
	struct tee_shm *shm = NULL;
	struct shm_rhash *r;

	mutex_lock(&optee->ffa.mutex);
	r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
				   shm_rhash_params);
	if (r)
		shm = r->shm;
	mutex_unlock(&optee->ffa.mutex);

	return shm;
}

static int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm,
				    u64 global_id)
{
	struct shm_rhash *r;
	int rc;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return -ENOMEM;
	r->shm = shm;
	r->global_id = global_id;

	mutex_lock(&optee->ffa.mutex);
	rc = rhashtable_lookup_insert_fast(&optee->ffa.global_ids, &r->linkage,
					   shm_rhash_params);
	mutex_unlock(&optee->ffa.mutex);
	if (rc)
		kfree(r);

	return rc;
}

static int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id)
{
	struct shm_rhash *r;
	int rc = -ENOENT;

	mutex_lock(&optee->ffa.mutex);
	r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
				   shm_rhash_params);
	if (r)
		rc = rhashtable_remove_fast(&optee->ffa.global_ids,
					    &r->linkage, shm_rhash_params);
	mutex_unlock(&optee->ffa.mutex);
	if (!rc)
		kfree(r);

	return rc;
}

/*
 * 2. Convert between struct tee_param and struct optee_msg_param
 *
 * optee_ffa_from_msg_param() and optee_ffa_to_msg_param() are the main
 * functions.
 */

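/*
 * FMEM parameters carry a 64-bit shared memory offset split into the
 * 32-bit offs_low and offs_high fields; the helpers below recombine and
 * split it when translating between the two representations.
 */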
static void from_msg_param_ffa_mem(struct optee *optee, struct tee_param *p,
				   u32 attr, const struct optee_msg_param *mp)
{
	struct tee_shm *shm = NULL;
	u64 offs_high = 0;
	u64 offs_low = 0;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
	p->u.memref.size = mp->u.fmem.size;

	if (mp->u.fmem.global_id != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
		shm = optee_shm_from_ffa_handle(optee, mp->u.fmem.global_id);
	p->u.memref.shm = shm;

	if (shm) {
		offs_low = mp->u.fmem.offs_low;
		offs_high = mp->u.fmem.offs_high;
	}
	p->u.memref.shm_offs = offs_low | offs_high << 32;
}

/**
 * optee_ffa_from_msg_param() - convert from OPTEE_MSG parameters to
 *				struct tee_param
 * @optee:	main service struct
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 *
 * Returns 0 on success or <0 on failure
 */
static int optee_ffa_from_msg_param(struct optee *optee,
				    struct tee_param *params, size_t num_params,
				    const struct optee_msg_param *msg_params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			optee_from_msg_param_value(p, attr, mp);
			break;
		case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT:
			from_msg_param_ffa_mem(optee, p, attr, mp);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int to_msg_param_ffa_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	struct tee_shm *shm = p->u.memref.shm;

	mp->attr = OPTEE_MSG_ATTR_TYPE_FMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	if (shm) {
		u64 shm_offs = p->u.memref.shm_offs;

		mp->u.fmem.internal_offs = shm->offset;

		mp->u.fmem.offs_low = shm_offs;
		mp->u.fmem.offs_high = shm_offs >> 32;
		/* Check that the entire offset could be stored. */
		if (mp->u.fmem.offs_high != shm_offs >> 32)
			return -EINVAL;

		mp->u.fmem.global_id = shm->sec_world_id;
	} else {
		memset(&mp->u, 0, sizeof(mp->u));
		mp->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	}
	mp->u.fmem.size = p->u.memref.size;

	return 0;
}

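/*
 * A NULL memory reference (p->u.memref.shm == NULL) is encoded with
 * OPTEE_MSG_FMEM_INVALID_GLOBAL_ID above, matching the
 * TEE_GEN_CAP_MEMREF_NULL capability advertised by the driver.
 */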
/**
 * optee_ffa_to_msg_param() - convert from struct tee_param to OPTEE_MSG
 *			      parameters
 * @optee:	main service struct
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 *
 * Returns 0 on success or <0 on failure
 */
static int optee_ffa_to_msg_param(struct optee *optee,
				  struct optee_msg_param *msg_params,
				  size_t num_params,
				  const struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			optee_to_msg_param_value(mp, p);
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (to_msg_param_ffa_mem(mp, p))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * 3. Low level support functions to register shared memory in secure world
 *
 * Functions to register and unregister shared memory both for normal
 * clients and for tee-supplicant.
 */

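/*
 * Registration shares the pages with secure world through the FF-A memory
 * sharing interface (mem_ops->memory_share()) and records the returned
 * global handle both in the hash table above and in shm->sec_world_id.
 */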
static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
				  struct page **pages, size_t num_pages,
				  unsigned long start)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
	const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
	struct ffa_mem_region_attributes mem_attr = {
		.receiver = ffa_dev->vm_id,
		.attrs = FFA_MEM_RW,
	};
	struct ffa_mem_ops_args args = {
		.use_txbuf = true,
		.attrs = &mem_attr,
		.nattrs = 1,
	};
	struct sg_table sgt;
	int rc;

	rc = optee_check_mem_type(start, num_pages);
	if (rc)
		return rc;

	rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
				       num_pages * PAGE_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	args.sg = sgt.sgl;
	rc = mem_ops->memory_share(&args);
	sg_free_table(&sgt);
	if (rc)
		return rc;

	rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle);
	if (rc) {
		mem_ops->memory_reclaim(args.g_handle, 0);
		return rc;
	}

	shm->sec_world_id = args.g_handle;

	return 0;
}

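/*
 * Unregistering first tells OP-TEE to retire the handle with
 * OPTEE_FFA_UNREGISTER_SHM and then reclaims the memory from the FF-A
 * layer with mem_ops->memory_reclaim().
 */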
static int optee_ffa_shm_unregister(struct tee_context *ctx,
				    struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
	const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
	const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
	u64 global_handle = shm->sec_world_id;
	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_UNREGISTER_SHM,
		.data1 = (u32)global_handle,
		.data2 = (u32)(global_handle >> 32),
	};
	int rc;

	optee_shm_rem_ffa_handle(optee, global_handle);
	shm->sec_world_id = 0;

	rc = msg_ops->sync_send_receive(ffa_dev, &data);
	if (rc)
		pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc);

	rc = mem_ops->memory_reclaim(global_handle, 0);
	if (rc)
		pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);

	return rc;
}

static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
					 struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	const struct ffa_mem_ops *mem_ops;
	u64 global_handle = shm->sec_world_id;
	int rc;

	/*
	 * We're skipping the OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM call
	 * since this is OP-TEE freeing via RPC so it has already retired
	 * this ID.
	 */
	optee_shm_rem_ffa_handle(optee, global_handle);
	mem_ops = optee->ffa.ffa_dev->ops->mem_ops;
	rc = mem_ops->memory_reclaim(global_handle, 0);
	if (rc)
		pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);

	shm->sec_world_id = 0;

	return rc;
}

/*
 * 4. Dynamic shared memory pool based on alloc_pages()
 *
 * Implements an OP-TEE specific shared memory pool.
 * The main function is optee_ffa_shm_pool_alloc_pages().
 */

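/*
 * Allocations are backed by alloc_pages() via tee_dyn_shm_alloc_helper()
 * and registered with secure world through optee_ffa_shm_register().
 */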
static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
			     struct tee_shm *shm, size_t size, size_t align)
{
	return tee_dyn_shm_alloc_helper(shm, size, align,
					optee_ffa_shm_register);
}

static void pool_ffa_op_free(struct tee_shm_pool *pool,
			     struct tee_shm *shm)
{
	tee_dyn_shm_free_helper(shm, optee_ffa_shm_unregister);
}

static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops pool_ffa_ops = {
	.alloc = pool_ffa_op_alloc,
	.free = pool_ffa_op_free,
	.destroy_pool = pool_ffa_op_destroy_pool,
};

/**
 * optee_ffa_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used with OP-TEE over FF-A. In this case command buffers
 * and such are allocated from kernel's own memory.
 */
static struct tee_shm_pool *optee_ffa_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->ops = &pool_ffa_ops;

	return pool;
}

/*
 * 5. Do a normal scheduled call into secure world
 *
 * The function optee_ffa_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call, secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes
 * delivery of non-secure interrupts to, for instance, allow rescheduling
 * of the current task.
 */

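/*
 * The RPC handlers below service requests from OP-TEE while a yielding
 * call is in progress: allocating and freeing shared memory on behalf of
 * secure world and dispatching other commands to optee_rpc_cmd().
 */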
static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					      struct optee *optee,
					      struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc_priv_buf(optee->ctx,
					     arg->params[0].u.value.b);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	arg->params[0] = (struct optee_msg_param){
		.attr = OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT,
		.u.fmem.size = tee_shm_get_size(shm),
		.u.fmem.global_id = shm->sec_world_id,
		.u.fmem.internal_offs = shm->offset,
	};

	arg->ret = TEEC_SUCCESS;
}

static void handle_ffa_rpc_func_cmd_shm_free(struct tee_context *ctx,
					     struct optee *optee,
					     struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
		goto err_bad_param;

	shm = optee_shm_from_ffa_handle(optee, arg->params[0].u.value.b);
	if (!shm)
		goto err_bad_param;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		optee_rpc_cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		goto err_bad_param;
	}
	arg->ret = TEEC_SUCCESS;
	return;

err_bad_param:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
				    struct optee *optee,
				    struct optee_msg_arg *arg)
{
	arg->ret_origin = TEEC_ORIGIN_COMMS;
	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
		break;
	default:
		optee_rpc_cmd(ctx, optee, arg);
	}
}

static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
				 u32 cmd, struct optee_msg_arg *arg)
{
	switch (cmd) {
	case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
		handle_ffa_rpc_func_cmd(ctx, optee, arg);
		break;
	case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
		/* Interrupt delivered by now */
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n", cmd);
		break;
	}
}

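/*
 * The initial register values (w4-w6) are saved below so the call can be
 * restarted unchanged after waiting in the call queue when secure world
 * reports TEEC_ERROR_BUSY, i.e. when it is out of threads.
 */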
static int optee_ffa_yielding_call(struct tee_context *ctx,
				   struct ffa_send_direct_data *data,
				   struct optee_msg_arg *rpc_arg,
				   bool system_thread)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
	const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
	struct optee_call_waiter w;
	u32 cmd = data->data0;
	u32 w4 = data->data1;
	u32 w5 = data->data2;
	u32 w6 = data->data3;
	int rc;

	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w, system_thread);
	while (true) {
		rc = msg_ops->sync_send_receive(ffa_dev, data);
		if (rc)
			goto done;

		switch ((int)data->data0) {
		case TEEC_SUCCESS:
			break;
		case TEEC_ERROR_BUSY:
			if (cmd == OPTEE_FFA_YIELDING_CALL_RESUME) {
				rc = -EIO;
				goto done;
			}
			/*
			 * Out of threads in secure world, wait for a thread
			 * to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
			data->data0 = cmd;
			data->data1 = w4;
			data->data2 = w5;
			data->data3 = w6;
			continue;
		default:
			rc = -EIO;
			goto done;
		}

		if (data->data1 == OPTEE_FFA_YIELDING_CALL_RETURN_DONE)
			goto done;

		/*
		 * OP-TEE has returned with an RPC request.
		 *
		 * Note that data->data4 (passed in register w7) is already
		 * filled in by msg_ops->sync_send_receive() returning
		 * above.
		 */
		cond_resched();
		optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
		cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
		data->data0 = cmd;
		data->data1 = 0;
		data->data2 = 0;
		data->data3 = 0;
	}
done:
	/*
	 * We're done with our thread in secure world; if there are any
	 * thread waiters, wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}

/**
 * optee_ffa_do_call_with_arg() - Do a FF-A call to enter OP-TEE in secure world
 * @ctx:	calling context
 * @shm:	shared memory holding the message to pass to secure world
 * @offs:	offset of the message in @shm
 * @system_thread: true if caller requests TEE system thread support
 *
 * Does a FF-A call to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from FF-A, 0 is OK
 */
static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *shm, u_int offs,
				      bool system_thread)
{
	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
		.data1 = (u32)shm->sec_world_id,
		.data2 = (u32)(shm->sec_world_id >> 32),
		.data3 = offs,
	};
	struct optee_msg_arg *arg;
	unsigned int rpc_arg_offs;
	struct optee_msg_arg *rpc_arg;

	/*
	 * The shared memory object has to start on a page when passed as
	 * an argument struct. This is also what the shm pool allocator
	 * returns, but check this before calling secure world to catch
	 * eventual errors early in case something changes.
	 */
	if (shm->offset)
		return -EINVAL;

	arg = tee_shm_get_va(shm, offs);
	if (IS_ERR(arg))
		return PTR_ERR(arg);

	rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
	rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
	if (IS_ERR(rpc_arg))
		return PTR_ERR(rpc_arg);

	return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread);
}

/*
 * 6. Driver initialization
 *
 * During driver initialization the OP-TEE Secure Partition is probed to
 * find out which features it supports so the driver can be initialized
 * with a matching configuration.
 */

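/*
 * Probing first checks the FF-A ABI version exported by OP-TEE, then
 * exchanges capabilities before the shared memory pool and the two TEE
 * devices (client and supplicant) are set up.
 */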
static bool optee_ffa_api_is_compatible(struct ffa_device *ffa_dev,
					const struct ffa_ops *ops)
{
	const struct ffa_msg_ops *msg_ops = ops->msg_ops;
	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_GET_API_VERSION,
	};
	int rc;

	msg_ops->mode_32bit_set(ffa_dev);

	rc = msg_ops->sync_send_receive(ffa_dev, &data);
	if (rc) {
		pr_err("Unexpected error %d\n", rc);
		return false;
	}
	if (data.data0 != OPTEE_FFA_VERSION_MAJOR ||
	    data.data1 < OPTEE_FFA_VERSION_MINOR) {
		pr_err("Incompatible OP-TEE API version %lu.%lu",
		       data.data0, data.data1);
		return false;
	}

	data = (struct ffa_send_direct_data){
		.data0 = OPTEE_FFA_GET_OS_VERSION,
	};
	rc = msg_ops->sync_send_receive(ffa_dev, &data);
	if (rc) {
		pr_err("Unexpected error %d\n", rc);
		return false;
	}
	if (data.data2)
		pr_info("revision %lu.%lu (%08lx)",
			data.data0, data.data1, data.data2);
	else
		pr_info("revision %lu.%lu", data.data0, data.data1);

	return true;
}

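/*
 * The capability exchange reports a capability bitmask (sec_caps), the
 * number of RPC parameters OP-TEE expects and, optionally, the maximum
 * asynchronous notification value.
 */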
static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
				    const struct ffa_ops *ops,
				    u32 *sec_caps,
				    unsigned int *rpc_param_count,
				    unsigned int *max_notif_value)
{
	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_EXCHANGE_CAPABILITIES,
	};
	int rc;

	rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
	if (rc) {
		pr_err("Unexpected error %d", rc);
		return false;
	}
	if (data.data0) {
		pr_err("Unexpected exchange error %lu", data.data0);
		return false;
	}

	*rpc_param_count = (u8)data.data1;
	*sec_caps = data.data2;
	if (data.data3)
		*max_notif_value = data.data3;
	else
		*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;

	return true;
}

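/*
 * bottom_half_value holds the notification ID reserved for driving the
 * bottom half; any other notification ID is forwarded to
 * optee_notif_send().
 */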
static void notif_callback(int notify_id, void *cb_data)
{
	struct optee *optee = cb_data;

	if (notify_id == optee->ffa.bottom_half_value)
		optee_do_bottom_half(optee->ctx);
	else
		optee_notif_send(optee, notify_id);
}

static int enable_async_notif(struct optee *optee)
{
	struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_ENABLE_ASYNC_NOTIF,
		.data1 = optee->ffa.bottom_half_value,
	};
	int rc;

	rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
	if (rc)
		return rc;
	return data.data0;
}

static void optee_ffa_get_version(struct tee_device *teedev,
				  struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM |
			    TEE_GEN_CAP_MEMREF_NULL,
	};

	*vers = v;
}

static int optee_ffa_open(struct tee_context *ctx)
{
	return optee_open(ctx, true);
}

static const struct tee_driver_ops optee_ffa_clnt_ops = {
	.get_version = optee_ffa_get_version,
	.open = optee_ffa_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_ffa_shm_register,
	.shm_unregister = optee_ffa_shm_unregister,
};

static const struct tee_desc optee_ffa_clnt_desc = {
	.name = DRIVER_NAME "-ffa-clnt",
	.ops = &optee_ffa_clnt_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_ffa_supp_ops = {
	.get_version = optee_ffa_get_version,
	.open = optee_ffa_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_ffa_shm_register, /* same as for clnt ops */
	.shm_unregister = optee_ffa_shm_unregister_supp,
};

static const struct tee_desc optee_ffa_supp_desc = {
	.name = DRIVER_NAME "-ffa-supp",
	.ops = &optee_ffa_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

static const struct optee_ops optee_ffa_ops = {
	.do_call_with_arg = optee_ffa_do_call_with_arg,
	.to_msg_param = optee_ffa_to_msg_param,
	.from_msg_param = optee_ffa_from_msg_param,
};

static void optee_ffa_remove(struct ffa_device *ffa_dev)
{
	struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
	u32 bottom_half_id = optee->ffa.bottom_half_value;

	if (bottom_half_id != U32_MAX)
		ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev,
							      bottom_half_id);
	optee_remove_common(optee);

	mutex_destroy(&optee->ffa.mutex);
	rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);

	kfree(optee);
}

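/*
 * Asynchronous notifications are set up by requesting a free notification
 * ID from the FF-A layer, retrying on -EACCES, and then asking OP-TEE to
 * enable them with OPTEE_FFA_ENABLE_ASYNC_NOTIF.
 */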
static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev,
				      struct optee *optee)
{
	bool is_per_vcpu = false;
	u32 notif_id = 0;
	int rc;

	while (true) {
		rc = ffa_dev->ops->notifier_ops->notify_request(ffa_dev,
				is_per_vcpu, notif_callback, optee, notif_id);
		if (!rc)
			break;
		/*
		 * -EACCES means that the notification ID was
		 * already bound, try the next one as long as we
		 * haven't reached the max. Any other error is a
		 * permanent error, so skip asynchronous
		 * notifications in that case.
		 */
		if (rc != -EACCES)
			return rc;
		notif_id++;
		if (notif_id >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE)
			return rc;
	}
	optee->ffa.bottom_half_value = notif_id;

	rc = enable_async_notif(optee);
	if (rc < 0) {
		ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev,
							      notif_id);
		optee->ffa.bottom_half_value = U32_MAX;
	}

	return rc;
}

static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
	const struct ffa_notifier_ops *notif_ops;
	const struct ffa_ops *ffa_ops;
	unsigned int max_notif_value;
	unsigned int rpc_param_count;
	struct tee_shm_pool *pool;
	struct tee_device *teedev;
	struct tee_context *ctx;
	u32 arg_cache_flags = 0;
	struct optee *optee;
	u32 sec_caps;
	int rc;

	ffa_ops = ffa_dev->ops;
	notif_ops = ffa_ops->notifier_ops;

	if (!optee_ffa_api_is_compatible(ffa_dev, ffa_ops))
		return -EINVAL;

	if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
				     &rpc_param_count, &max_notif_value))
		return -EINVAL;
	if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
		arg_cache_flags |= OPTEE_SHM_ARG_SHARED;

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee)
		return -ENOMEM;

	pool = optee_ffa_shm_pool_alloc_pages();
	if (IS_ERR(pool)) {
		rc = PTR_ERR(pool);
		goto err_free_optee;
	}
	optee->pool = pool;

	optee->ops = &optee_ffa_ops;
	optee->ffa.ffa_dev = ffa_dev;
	optee->ffa.bottom_half_value = U32_MAX;
	optee->rpc_param_count = rpc_param_count;

	if (IS_REACHABLE(CONFIG_RPMB) &&
	    (sec_caps & OPTEE_FFA_SEC_CAP_RPMB_PROBE))
		optee->in_kernel_rpmb_routing = true;

	teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
				  optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_free_pool;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_ffa_supp_desc, NULL, optee->pool,
				  optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_unreg_teedev;
	}
	optee->supp_teedev = teedev;

	optee_set_dev_group(optee);

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
	if (rc)
		goto err_unreg_supp_teedev;
	mutex_init(&optee->ffa.mutex);
	optee_cq_init(&optee->call_queue, 0);
	optee_supp_init(&optee->supp);
	optee_shm_arg_cache_init(optee, arg_cache_flags);
	mutex_init(&optee->rpmb_dev_mutex);
	ffa_dev_set_drvdata(ffa_dev, optee);
	ctx = teedev_open(optee->teedev);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_rhashtable_free;
	}
	optee->ctx = ctx;
	rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
	if (rc)
		goto err_close_ctx;
	if (sec_caps & OPTEE_FFA_SEC_CAP_ASYNC_NOTIF) {
		rc = optee_ffa_async_notif_init(ffa_dev, optee);
		if (rc < 0)
			pr_err("Failed to initialize async notifications: %d",
			       rc);
	}

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc)
		goto err_unregister_devices;

	INIT_WORK(&optee->rpmb_scan_bus_work, optee_bus_scan_rpmb);
	optee->rpmb_intf.notifier_call = optee_rpmb_intf_rdev;
	blocking_notifier_chain_register(&optee_rpmb_intf_added,
					 &optee->rpmb_intf);
	pr_info("initialized driver\n");
	return 0;

err_unregister_devices:
	optee_unregister_devices();
	if (optee->ffa.bottom_half_value != U32_MAX)
		notif_ops->notify_relinquish(ffa_dev,
					     optee->ffa.bottom_half_value);
	optee_notif_uninit(optee);
err_close_ctx:
	teedev_close_context(ctx);
err_rhashtable_free:
	rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
	rpmb_dev_put(optee->rpmb_dev);
	mutex_destroy(&optee->rpmb_dev_mutex);
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);
	mutex_destroy(&optee->ffa.mutex);
err_unreg_supp_teedev:
	tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
	tee_device_unregister(optee->teedev);
err_free_pool:
	tee_shm_pool_free(pool);
err_free_optee:
	kfree(optee);
	return rc;
}

static const struct ffa_device_id optee_ffa_device_id[] = {
	/* 486178e0-e7f8-11e3-bc5e0002a5d5c51b */
	{ UUID_INIT(0x486178e0, 0xe7f8, 0x11e3,
		    0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b) },
	{}
};

static struct ffa_driver optee_ffa_driver = {
	.name = "optee",
	.probe = optee_ffa_probe,
	.remove = optee_ffa_remove,
	.id_table = optee_ffa_device_id,
};

int optee_ffa_abi_register(void)
{
	if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
		return ffa_register(&optee_ffa_driver);
	else
		return -EOPNOTSUPP;
}

void optee_ffa_abi_unregister(void)
{
	if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
		ffa_unregister(&optee_ffa_driver);
}