drivers/tee/tee_core.c (Linux 4.19.133)

/*
 * Copyright (c) 2015-2016, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include "tee_private.h"
#define TEE_NUM_DEVICES	32

#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))

/*
 * Unprivileged devices in the lower half range and privileged devices in
 * the upper half range.
 */
static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
static DEFINE_SPINLOCK(driver_lock);

static struct class *tee_class;
static dev_t tee_devt;
static int tee_open(struct inode *inode, struct file *filp)
{
	int rc;
	struct tee_device *teedev;
	struct tee_context *ctx;

	teedev = container_of(inode->i_cdev, struct tee_device, cdev);
	if (!tee_device_get(teedev))
		return -EINVAL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto err;
	}

	kref_init(&ctx->refcount);
	ctx->teedev = teedev;
	INIT_LIST_HEAD(&ctx->list_shm);
	filp->private_data = ctx;
	rc = teedev->desc->ops->open(ctx);
	if (rc)
		goto err;

	return 0;
err:
	kfree(ctx);
	tee_device_put(teedev);
	return rc;
}
void teedev_ctx_get(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;

	kref_get(&ctx->refcount);
}

static void teedev_ctx_release(struct kref *ref)
{
	struct tee_context *ctx = container_of(ref, struct tee_context,
					       refcount);
	ctx->releasing = true;
	ctx->teedev->desc->ops->release(ctx);
	kfree(ctx);
}

void teedev_ctx_put(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;

	kref_put(&ctx->refcount, teedev_ctx_release);
}

static void teedev_close_context(struct tee_context *ctx)
{
	tee_device_put(ctx->teedev);
	teedev_ctx_put(ctx);
}

static int tee_release(struct inode *inode, struct file *filp)
{
	teedev_close_context(filp->private_data);
	return 0;
}
static int tee_ioctl_version(struct tee_context *ctx,
			     struct tee_ioctl_version_data __user *uvers)
{
	struct tee_ioctl_version_data vers;

	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);

	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;

	if (copy_to_user(uvers, &vers, sizeof(vers)))
		return -EFAULT;

	return 0;
}
static int tee_ioctl_shm_alloc(struct tee_context *ctx,
			       struct tee_ioctl_shm_alloc_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_alloc_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	data.id = shm->id;
	data.flags = shm->flags;
	data.size = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);

	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
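
/*
 * A minimal user-space sketch of how TEE_IOC_SHM_ALLOC is typically used
 * (assumes the uapi definitions from <linux/tee.h>; "fd" is an already
 * opened tee device and error handling is omitted). On success the ioctl
 * returns a file descriptor for the new shared memory object, which can
 * then be mapped:
 *
 *	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
 *	int shm_fd = ioctl(fd, TEE_IOC_SHM_ALLOC, &data);
 *	void *p = mmap(NULL, data.size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, shm_fd, 0);
 *
 * data.id is the identifier later used for memref parameters, and
 * munmap() plus close(shm_fd) releases the buffer as described above.
 */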
static int
tee_ioctl_shm_register(struct tee_context *ctx,
		       struct tee_ioctl_shm_register_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_register_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_register(ctx, data.addr, data.length,
			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	data.id = shm->id;
	data.flags = shm->flags;
	data.length = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);
	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
			    size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_shm *shm;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		params[n].attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			params[n].u.value.a = ip.a;
			params[n].u.value.b = ip.b;
			params[n].u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * If we fail to get a pointer to a shared memory
			 * object (and increase the ref count) from an
			 * identifier we return an error. All pointers that
			 * have been added to params have an increased ref
			 * count. It's the caller's responsibility to do
			 * tee_shm_put() on all resolved pointers.
			 */
			shm = tee_shm_get_from_id(ctx, ip.c);
			if (IS_ERR(shm))
				return PTR_ERR(shm);

			/*
			 * Ensure offset + size does not overflow offset
			 * and does not overflow the size of the referred
			 * shared memory object.
			 */
			if ((ip.a + ip.b) < ip.a ||
			    (ip.a + ip.b) > shm->size) {
				tee_shm_put(shm);
				return -EINVAL;
			}

			params[n].u.memref.shm_offs = ip.a;
			params[n].u.memref.size = ip.b;
			params[n].u.memref.shm = shm;
			break;
		default:
			/* Unknown attribute */
			return -EINVAL;
		}
	}
	return 0;
}
static int params_to_user(struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param __user *up = uparams + n;
		struct tee_param *p = params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			if (put_user(p->u.value.a, &up->a) ||
			    put_user(p->u.value.b, &up->b) ||
			    put_user(p->u.value.c, &up->c))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (put_user((u64)p->u.memref.size, &up->b))
				return -EFAULT;
		default:
			break;
		}
	}
	return 0;
}
static int tee_ioctl_open_session(struct tee_context *ctx,
				  struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_open_session_arg __user *uarg;
	struct tee_ioctl_open_session_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;
	bool have_session = false;

	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
	if (rc)
		goto out;
	have_session = true;

	if (put_user(arg.session, &uarg->session) ||
	    put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	/*
	 * If we've succeeded in opening the session but failed to communicate
	 * it back to user space, close the session again to avoid leakage.
	 */
	if (rc && have_session && ctx->teedev->desc->ops->close_session)
		ctx->teedev->desc->ops->close_session(ctx, arg.session);

	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}

	return rc;
}
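
/*
 * For reference, a rough user-space sketch of the buffer layout this ioctl
 * expects (uapi types from <linux/tee.h>; "fd" and "ta_uuid" are assumed to
 * exist in the caller and error handling is omitted). The argument struct
 * and its trailing params array are passed as one contiguous buffer
 * described by struct tee_ioctl_buf_data, matching the exact buf_len check
 * above:
 *
 *	size_t sz = sizeof(struct tee_ioctl_open_session_arg) +
 *		    2 * sizeof(struct tee_ioctl_param);
 *	struct tee_ioctl_open_session_arg *arg = calloc(1, sz);
 *	struct tee_ioctl_buf_data data = {
 *		.buf_ptr = (uintptr_t)arg,
 *		.buf_len = sz,
 *	};
 *
 *	memcpy(arg->uuid, ta_uuid, sizeof(arg->uuid));
 *	arg->clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
 *	arg->num_params = 2;
 *	... fill arg->params[] as needed; zeroed params mean TYPE_NONE ...
 *	rc = ioctl(fd, TEE_IOC_OPEN_SESSION, &data);
 *	... on success arg->session identifies the new session ...
 */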
static int tee_ioctl_invoke(struct tee_context *ctx,
			    struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_invoke_arg __user *uarg;
	struct tee_ioctl_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}
static int tee_ioctl_cancel(struct tee_context *ctx,
			    struct tee_ioctl_cancel_arg __user *uarg)
{
	struct tee_ioctl_cancel_arg arg;

	if (!ctx->teedev->desc->ops->cancel_req)
		return -EINVAL;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
						  arg.session);
}
static int
tee_ioctl_close_session(struct tee_context *ctx,
			struct tee_ioctl_close_session_arg __user *uarg)
{
	struct tee_ioctl_close_session_arg arg;

	if (!ctx->teedev->desc->ops->close_session)
		return -EINVAL;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
}
static int params_to_supp(struct tee_context *ctx,
			  struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		struct tee_param *p = params + n;

		ip.attr = p->attr;
		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			ip.a = p->u.value.a;
			ip.b = p->u.value.b;
			ip.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			ip.b = p->u.memref.size;
			if (!p->u.memref.shm) {
				ip.a = 0;
				ip.c = (u64)-1; /* invalid shm id */
				break;
			}
			ip.a = p->u.memref.shm_offs;
			ip.c = p->u.memref.shm->id;
			break;
		default:
			ip.a = 0;
			ip.b = 0;
			ip.c = 0;
			break;
		}

		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
			return -EFAULT;
	}

	return 0;
}
static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_user(ctx, params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}

	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}
static int params_from_supp(struct tee_param *params, size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		p->attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			/* Only out and in/out values can be updated */
			p->u.value.a = ip.a;
			p->u.value.b = ip.b;
			p->u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * Only the size of the memref can be updated.
			 * Since we don't have access to the original
			 * parameters here, only store the supplied size.
			 * The driver will copy the updated size into the
			 * original parameters.
			 */
			p->u.memref.shm = NULL;
			p->u.memref.shm_offs = 0;
			p->u.memref.size = ip.b;
			break;
		default:
			memset(&p->u, 0, sizeof(p->u));
			break;
		}
	}
	return 0;
}
static int tee_ioctl_supp_send(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	long rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_send_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 ret;

	/* Not valid for this driver */
	if (!ctx->teedev->desc->ops->supp_send)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(ret, &uarg->ret) ||
	    get_user(num_params, &uarg->num_params))
		return -EFAULT;

	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_supp(params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
out:
	kfree(params);
	return rc;
}
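
/*
 * TEE_IOC_SUPPL_RECV and TEE_IOC_SUPPL_SEND above are meant to be used as
 * a pair by a user-space supplicant process (tee-supplicant in the OP-TEE
 * case): it blocks in the receive ioctl until the driver hands it a
 * request via supp_recv(), services the request indicated by func, and
 * then returns the result and updated parameters through the send ioctl.
 * A rough sketch of that loop, with the recv_*/send_* names as
 * placeholders and buffer setup done as for the other buf_data based
 * ioctls, error handling omitted:
 *
 *	for (;;) {
 *		recv_arg->num_params = allocated_params;
 *		ioctl(fd, TEE_IOC_SUPPL_RECV, &recv_data);
 *		... serve recv_arg->func using recv_arg->params[] ...
 *		send_arg->ret = result;
 *		send_arg->num_params = recv_arg->num_params;
 *		ioctl(fd, TEE_IOC_SUPPL_SEND, &send_data);
 *	}
 */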
static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct tee_context *ctx = filp->private_data;
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case TEE_IOC_VERSION:
		return tee_ioctl_version(ctx, uarg);
	case TEE_IOC_SHM_ALLOC:
		return tee_ioctl_shm_alloc(ctx, uarg);
	case TEE_IOC_SHM_REGISTER:
		return tee_ioctl_shm_register(ctx, uarg);
	case TEE_IOC_OPEN_SESSION:
		return tee_ioctl_open_session(ctx, uarg);
	case TEE_IOC_INVOKE:
		return tee_ioctl_invoke(ctx, uarg);
	case TEE_IOC_CANCEL:
		return tee_ioctl_cancel(ctx, uarg);
	case TEE_IOC_CLOSE_SESSION:
		return tee_ioctl_close_session(ctx, uarg);
	case TEE_IOC_SUPPL_RECV:
		return tee_ioctl_supp_recv(ctx, uarg);
	case TEE_IOC_SUPPL_SEND:
		return tee_ioctl_supp_send(ctx, uarg);
	default:
		return -EINVAL;
	}
}
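
/*
 * The simplest of the commands dispatched above, TEE_IOC_VERSION, can be
 * exercised from user space roughly as follows (uapi definitions from
 * <linux/tee.h>, error handling omitted):
 *
 *	int fd = open("/dev/tee0", O_RDWR);
 *	struct tee_ioctl_version_data vers;
 *
 *	ioctl(fd, TEE_IOC_VERSION, &vers);
 *	printf("impl_id %u gen_caps %#x\n", vers.impl_id, vers.gen_caps);
 */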
static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = tee_ioctl,
};
static void tee_release_device(struct device *dev)
{
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);

	spin_lock(&driver_lock);
	clear_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);
	mutex_destroy(&teedev->mutex);
	idr_destroy(&teedev->idr);
	kfree(teedev);
}
/**
 * tee_device_alloc() - Allocate a new struct tee_device instance
 * @teedesc:	Descriptor for this driver
 * @dev:	Parent device for this device
 * @pool:	Shared memory pool, NULL if not used
 * @driver_data: Private driver data for this device
 *
 * Allocates a new struct tee_device instance. The device is
 * removed by tee_device_unregister().
 *
 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
 */
struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
				    struct device *dev,
				    struct tee_shm_pool *pool,
				    void *driver_data)
{
	struct tee_device *teedev;
	void *ret;
	int rc, max_id;
	int offs = 0;

	if (!teedesc || !teedesc->name || !teedesc->ops ||
	    !teedesc->ops->get_version || !teedesc->ops->open ||
	    !teedesc->ops->release || !pool)
		return ERR_PTR(-EINVAL);

	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
	if (!teedev) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	max_id = TEE_NUM_DEVICES / 2;

	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
		offs = TEE_NUM_DEVICES / 2;
		max_id = TEE_NUM_DEVICES;
	}

	spin_lock(&driver_lock);
	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
	if (teedev->id < max_id)
		set_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);

	if (teedev->id >= max_id) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
		 teedev->id - offs);

	teedev->dev.class = tee_class;
	teedev->dev.release = tee_release_device;
	teedev->dev.parent = dev;

	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);

	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_devt;
	}

	cdev_init(&teedev->cdev, &tee_fops);
	teedev->cdev.owner = teedesc->owner;
	teedev->cdev.kobj.parent = &teedev->dev.kobj;

	dev_set_drvdata(&teedev->dev, driver_data);
	device_initialize(&teedev->dev);

	/* 1 as tee_device_unregister() does one final tee_device_put() */
	teedev->num_users = 1;
	init_completion(&teedev->c_no_users);
	mutex_init(&teedev->mutex);
	idr_init(&teedev->idr);

	teedev->desc = teedesc;
	teedev->pool = pool;

	return teedev;
err_devt:
	unregister_chrdev_region(teedev->dev.devt, 1);
err:
	pr_err("could not register %s driver\n",
	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
	if (teedev && teedev->id < TEE_NUM_DEVICES) {
		spin_lock(&driver_lock);
		clear_bit(teedev->id, dev_mask);
		spin_unlock(&driver_lock);
	}
	kfree(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_device_alloc);
static ssize_t implementation_id_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
	struct tee_ioctl_version_data vers;

	teedev->desc->ops->get_version(teedev, &vers);
	return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
}
static DEVICE_ATTR_RO(implementation_id);

static struct attribute *tee_dev_attrs[] = {
	&dev_attr_implementation_id.attr,
	NULL
};

static const struct attribute_group tee_dev_group = {
	.attrs = tee_dev_attrs,
};
/**
 * tee_device_register() - Registers a TEE device
 * @teedev:	Device to register
 *
 * tee_device_unregister() needs to be called to remove the @teedev if
 * this function fails.
 *
 * @returns < 0 on failure
 */
int tee_device_register(struct tee_device *teedev)
{
	int rc;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
		dev_err(&teedev->dev, "attempt to register twice\n");
		return -EINVAL;
	}

	rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
	if (rc) {
		dev_err(&teedev->dev,
			"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
			teedev->name, MAJOR(teedev->dev.devt),
			MINOR(teedev->dev.devt), rc);
		return rc;
	}

	rc = device_add(&teedev->dev);
	if (rc) {
		dev_err(&teedev->dev,
			"unable to device_add() %s, major %d, minor %d, err=%d\n",
			teedev->name, MAJOR(teedev->dev.devt),
			MINOR(teedev->dev.devt), rc);
		goto err_device_add;
	}

	rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
	if (rc) {
		dev_err(&teedev->dev,
			"failed to create sysfs attributes, err=%d\n", rc);
		goto err_sysfs_create_group;
	}

	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
	return 0;

err_sysfs_create_group:
	device_del(&teedev->dev);
err_device_add:
	cdev_del(&teedev->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(tee_device_register);
void tee_device_put(struct tee_device *teedev)
{
	mutex_lock(&teedev->mutex);
	/* Shouldn't put in this state */
	if (!WARN_ON(!teedev->desc)) {
		teedev->num_users--;
		if (!teedev->num_users) {
			teedev->desc = NULL;
			complete(&teedev->c_no_users);
		}
	}
	mutex_unlock(&teedev->mutex);
}
bool tee_device_get(struct tee_device *teedev)
{
	mutex_lock(&teedev->mutex);
	if (!teedev->desc) {
		mutex_unlock(&teedev->mutex);
		return false;
	}
	teedev->num_users++;
	mutex_unlock(&teedev->mutex);
	return true;
}
/**
 * tee_device_unregister() - Removes a TEE device
 * @teedev:	Device to unregister
 *
 * This function should be called to remove the @teedev even if
 * tee_device_register() hasn't been called yet. Does nothing if
 * @teedev is NULL.
 */
void tee_device_unregister(struct tee_device *teedev)
{
	if (!teedev)
		return;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
		sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
		cdev_del(&teedev->cdev);
		device_del(&teedev->dev);
	}

	tee_device_put(teedev);
	wait_for_completion(&teedev->c_no_users);

	/*
	 * No need to take a mutex any longer now since teedev->desc was
	 * set to NULL before teedev->c_no_users was completed.
	 */

	teedev->pool = NULL;

	put_device(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_device_unregister);
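
/*
 * A rough sketch of how a TEE driver ties tee_device_alloc(),
 * tee_device_register() and tee_device_unregister() together; the my_*
 * names are made-up placeholders and error handling is trimmed:
 *
 *	static const struct tee_desc my_desc = {
 *		.name = "my-tee-clnt",
 *		.ops = &my_tee_ops,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	teedev = tee_device_alloc(&my_desc, NULL, my_pool, my_drv_data);
 *	if (IS_ERR(teedev))
 *		return PTR_ERR(teedev);
 *	rc = tee_device_register(teedev);
 *	if (rc) {
 *		tee_device_unregister(teedev);
 *		return rc;
 *	}
 *
 * As the kernel-doc above notes, tee_device_unregister() is the correct
 * cleanup even when tee_device_register() has not succeeded.
 */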
/**
 * tee_get_drvdata() - Return driver_data pointer
 * @teedev:	Device containing the driver_data pointer
 * @returns the driver_data pointer supplied to tee_device_alloc().
 */
void *tee_get_drvdata(struct tee_device *teedev)
{
	return dev_get_drvdata(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_get_drvdata);
static int __init tee_init(void)
{
	int rc;

	tee_class = class_create(THIS_MODULE, "tee");
	if (IS_ERR(tee_class)) {
		pr_err("couldn't create class\n");
		return PTR_ERR(tee_class);
	}

	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
	if (rc) {
		pr_err("failed to allocate char dev region\n");
		class_destroy(tee_class);
		tee_class = NULL;
	}

	return rc;
}
static void __exit tee_exit(void)
{
	class_destroy(tee_class);
	tee_class = NULL;
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
}
subsys_initcall(tee_init);
module_exit(tee_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");