drivers/tee/optee/rpc.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "optee_private.h"
#include "optee_smc.h"

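/*
 * A wq_entry pairs a key supplied by secure world with a completion.
 * One thread parks in wq_sleep() on the completion for a given key
 * until another thread signals the same key via wq_wakeup().
 */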
struct wq_entry {
	struct list_head link;
	struct completion c;
	u32 key;
};

void optee_wait_queue_init(struct optee_wait_queue *priv)
{
	mutex_init(&priv->mu);
	INIT_LIST_HEAD(&priv->db);
}

void optee_wait_queue_exit(struct optee_wait_queue *priv)
{
	mutex_destroy(&priv->mu);
}

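/*
 * OPTEE_MSG_RPC_CMD_GET_TIME: expects exactly one VALUE_OUTPUT parameter
 * and returns the normal-world wall-clock time as seconds in value.a and
 * nanoseconds in value.b.
 */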
static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
{
	struct timespec64 ts;

	if (arg->num_params != 1)
		goto bad;
	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
		goto bad;

	ktime_get_real_ts64(&ts);
	arg->params[0].u.value.a = ts.tv_sec;
	arg->params[0].u.value.b = ts.tv_nsec;

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

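/*
 * Find the wait queue entry matching @key, or allocate and enqueue a new
 * one if none exists. Takes wq->mu internally; returns NULL only if the
 * allocation fails.
 */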
static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w;

	mutex_lock(&wq->mu);

	list_for_each_entry(w, &wq->db, link)
		if (w->key == key)
			goto out;

	w = kmalloc(sizeof(*w), GFP_KERNEL);
	if (w) {
		init_completion(&w->c);
		w->key = key;
		list_add_tail(&w->link, &wq->db);
	}
out:
	mutex_unlock(&wq->mu);
	return w;
}

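/*
 * wq_sleep() blocks on the completion of the entry keyed by @key, then
 * unlinks and frees the entry; wq_wakeup() signals the completion from
 * the waker side.
 */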
static void wq_sleep(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (w) {
		wait_for_completion(&w->c);
		mutex_lock(&wq->mu);
		list_del(&w->link);
		mutex_unlock(&wq->mu);
		kfree(w);
	}
}

static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (w)
		complete(&w->c);
}

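/*
 * OPTEE_MSG_RPC_CMD_WAIT_QUEUE: a single VALUE_INPUT parameter selects
 * sleep or wakeup in value.a and carries the wait queue key in value.b.
 */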
static void handle_rpc_func_cmd_wq(struct optee *optee,
				   struct optee_msg_arg *arg)
{
	if (arg->num_params != 1)
		goto bad;

	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
		goto bad;

	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
		wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
		break;
	case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
		wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
		break;
	default:
		goto bad;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

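/*
 * OPTEE_MSG_RPC_CMD_SUSPEND: a single VALUE_INPUT parameter gives the
 * number of milliseconds to suspend the calling thread.
 */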
static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
{
	u32 msec_to_wait;

	if (arg->num_params != 1)
		goto bad;

	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
		goto bad;

	msec_to_wait = arg->params[0].u.value.a;

	/* Go to interruptible sleep */
	msleep_interruptible(msec_to_wait);

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

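/*
 * Commands not handled in the kernel are forwarded to tee-supplicant in
 * user space: the message parameters are converted to struct tee_param,
 * the request is queued for the supplicant, and the results are written
 * back into the message on completion.
 */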
static void handle_rpc_supp_cmd(struct tee_context *ctx,
				struct optee_msg_arg *arg)
{
	struct tee_param *params;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
			       GFP_KERNEL);
	if (!params) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (optee_from_msg_param(params, arg->num_params, arg->params)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto out;
	}

	arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);

	if (optee_to_msg_param(arg->params, arg->num_params, params))
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
out:
	kfree(params);
}

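/*
 * Ask tee-supplicant to allocate @sz bytes of shared memory on behalf of
 * secure world. The supplicant returns a shm id in value.c which is
 * resolved to a struct tee_shm, taking a reference that is dropped again
 * in cmd_free_suppl().
 */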
static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
{
	u32 ret;
	struct tee_param param;
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct tee_shm *shm;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = sz;
	param.u.value.c = 0;

	ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
	if (ret)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&optee->supp.mutex);
	/* Increases count as secure world doesn't have a reference */
	shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
	mutex_unlock(&optee->supp.mutex);
	return shm;
}

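/*
 * OPTEE_MSG_RPC_CMD_SHM_ALLOC: allocate shared memory for secure world,
 * either through tee-supplicant (SHM_TYPE_APPL) or directly in the kernel
 * (SHM_TYPE_KERNEL). Registered (non-contiguous) buffers are described to
 * secure world with a page list; contiguous buffers are returned by
 * physical address.
 */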
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
		shm = cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * The least significant bits of u.tmem.buf_ptr hold the
		 * buffer offset within a 4k page, as described in the
		 * OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}

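/*
 * Ask tee-supplicant to free a shared memory buffer it allocated earlier
 * through cmd_alloc_suppl().
 */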
static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_param param;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = tee_shm_get_id(shm);
	param.u.value.c = 0;

	/*
	 * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
	 * world has released its reference.
	 *
	 * It's better to do this before sending the request to the
	 * supplicant as we'd like the process that did the initial
	 * allocation to release the last reference too, in order to
	 * avoid stacking many pending fput() on the client process.
	 * This could otherwise happen if secure world does many
	 * allocations and frees in a single invocation.
	 */
	tee_shm_put(shm);

	optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
}

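/*
 * OPTEE_MSG_RPC_CMD_SHM_FREE: free a shared memory buffer previously
 * handed to secure world; value.b carries the shm_ref set up at
 * allocation time.
 */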
static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
		cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
	}
	arg->ret = TEEC_SUCCESS;
}

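/*
 * Drop the non-contiguous page list (if any) attached to the call context
 * by a previous SHM_ALLOC request.
 */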
static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}

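/*
 * Dispatch an OPTEE_SMC_RPC_FUNC_CMD request: the shared memory buffer
 * holds a struct optee_msg_arg whose cmd field selects the handler, and
 * anything the kernel doesn't handle itself is passed to tee-supplicant.
 */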
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_MSG_RPC_CMD_GET_TIME:
		handle_rpc_func_cmd_get_time(arg);
		break;
	case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
		handle_rpc_func_cmd_wq(optee, arg);
		break;
	case OPTEE_MSG_RPC_CMD_SUSPEND:
		handle_rpc_func_cmd_wait(arg);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		handle_rpc_supp_cmd(ctx, arg);
	}
}

/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
		      struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing. Since such interrupts are handled by Linux,
		 * a dummy RPC is performed so that Linux can take the
		 * interrupt through its normal vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}