// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Component Authentication Protocol (CAP) Driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 */

#include <linux/greybus.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#include "greybus_authentication.h"

#define CAP_TIMEOUT_MS		1000

/*
 * Number of minor devices this driver supports; exactly one is required per
 * Interface.
 */
#define NUM_MINORS		U8_MAX

struct gb_cap {
	struct device		*parent;
	struct gb_connection	*connection;
	struct kref		kref;

	struct list_head	node;
	bool			disabled; /* connection getting disabled */

	struct mutex		mutex;
	struct cdev		cdev;
	struct device		*class_device;
	dev_t			dev_num;
};

static const struct class cap_class = {
	.name = "gb_authenticate",
};

static dev_t cap_dev_num;
static DEFINE_IDA(cap_minors_map);
static LIST_HEAD(cap_list);
static DEFINE_MUTEX(list_mutex);

static void cap_kref_release(struct kref *kref)
{
	struct gb_cap *cap = container_of(kref, struct gb_cap, kref);

	kfree(cap);
}

/*
 * All users of cap take a reference (while holding list_mutex) before they
 * get a pointer to play with, and the structure is freed only after the last
 * user has put its reference back.
 */
static void put_cap(struct gb_cap *cap)
{
	kref_put(&cap->kref, cap_kref_release);
}

/* Caller must call put_cap() after using struct gb_cap */
static struct gb_cap *get_cap(struct cdev *cdev)
{
	struct gb_cap *cap;

	mutex_lock(&list_mutex);

	list_for_each_entry(cap, &cap_list, node) {
		if (&cap->cdev == cdev) {
			kref_get(&cap->kref);
			goto unlock;
		}
	}

	cap = NULL;

unlock:
	mutex_unlock(&list_mutex);

	return cap;
}

static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_endpoint_uid_response response;
	int ret;

	ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
		return ret;
	}

	memcpy(euid, response.uid, sizeof(response.uid));

	return 0;
}

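/*
 * Fetch the IMS certificate with the given class and id from the module.
 * The response may be shorter than the maximum payload, so the operation is
 * created with GB_OPERATION_FLAG_SHORT_RESPONSE and the actual certificate
 * length is returned through @size.
 */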
static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
				   u8 *certificate, u32 *size, u8 *result)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_ims_certificate_request *request;
	struct gb_cap_get_ims_certificate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection,
				       GB_CAP_TYPE_GET_IMS_CERTIFICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->certificate_class = cpu_to_le32(class);
	request->certificate_id = cpu_to_le32(id);

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*size = op->response->payload_size - sizeof(*response);
	memcpy(certificate, response->certificate, *size);

done:
	gb_operation_put(op);

	return ret;
}

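/*
 * Run one authentication exchange: send the endpoint UID and a challenge,
 * and return the module's result code, challenge response, and signature.
 * The signature is variable-length, so its real size is reported through
 * @signature_size.
 */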
static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
			    u8 *challenge, u8 *result, u8 *auth_response,
			    u32 *signature_size, u8 *signature)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_authenticate_request *request;
	struct gb_cap_authenticate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->auth_type = cpu_to_le32(auth_type);
	memcpy(request->uid, uid, sizeof(request->uid));
	memcpy(request->challenge, challenge, sizeof(request->challenge));

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*signature_size = op->response->payload_size - sizeof(*response);
	memcpy(auth_response, response->response, sizeof(response->response));
	memcpy(signature, response->signature, *signature_size);

done:
	gb_operation_put(op);

	return ret;
}

/* Char device fops */

static int cap_open(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = get_cap(inode->i_cdev);

	/* cap structure can't get freed until the file descriptor is closed */
	if (cap) {
		file->private_data = cap;
		return 0;
	}

	return -ENODEV;
}

static int cap_release(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = file->private_data;

	put_cap(cap);
	return 0;
}

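/*
 * Dispatch a single CAP command. CAP_IOC_GET_ENDPOINT_UID only copies data
 * back to userspace; the other two commands copy their request structure in
 * with memdup_user(), run the operation, and copy the updated structure back
 * on success.
 */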
static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
		     void __user *buf)
{
	struct cap_ioc_get_endpoint_uid endpoint_uid;
	struct cap_ioc_get_ims_certificate *ims_cert;
	struct cap_ioc_authenticate *authenticate;
	size_t size;
	int ret;

	switch (cmd) {
	case CAP_IOC_GET_ENDPOINT_UID:
		ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
		if (ret)
			return ret;

		if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
			return -EFAULT;

		return 0;
	case CAP_IOC_GET_IMS_CERTIFICATE:
		size = sizeof(*ims_cert);
		ims_cert = memdup_user(buf, size);
		if (IS_ERR(ims_cert))
			return PTR_ERR(ims_cert);

		ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
					      ims_cert->certificate_id,
					      ims_cert->certificate,
					      &ims_cert->cert_size,
					      &ims_cert->result_code);
		if (!ret && copy_to_user(buf, ims_cert, size))
			ret = -EFAULT;
		kfree(ims_cert);

		return ret;
	case CAP_IOC_AUTHENTICATE:
		size = sizeof(*authenticate);
		authenticate = memdup_user(buf, size);
		if (IS_ERR(authenticate))
			return PTR_ERR(authenticate);

		ret = cap_authenticate(cap, authenticate->auth_type,
				       authenticate->uid,
				       authenticate->challenge,
				       &authenticate->result_code,
				       authenticate->response,
				       &authenticate->signature_size,
				       authenticate->signature);
		if (!ret && copy_to_user(buf, authenticate, size))
			ret = -EFAULT;
		kfree(authenticate);

		return ret;
	default:
		return -ENOTTY;
	}
}

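/*
 * For reference, a minimal sketch of how userspace might drive this
 * interface. It is illustrative only and not part of the driver: the helper
 * and its name are hypothetical, and the device node path is an assumption
 * based on the "gb-authenticate-%d" name passed to device_create() below.
 * The ioctl command and structure come from greybus_authentication.h.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include "greybus_authentication.h"
 *
 *	static int read_endpoint_uid(__u8 euid[8])
 *	{
 *		struct cap_ioc_get_endpoint_uid uid_ioc = { };
 *		int fd, ret;
 *
 *		fd = open("/dev/gb-authenticate-0", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *
 *		ret = ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid_ioc);
 *		if (!ret)
 *			memcpy(euid, uid_ioc.uid, sizeof(uid_ioc.uid));
 *
 *		close(fd);
 *		return ret;
 *	}
 */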
static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct gb_cap *cap = file->private_data;
	struct gb_bundle *bundle = cap->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls: we don't want the user to do multiple
	 * authentication operations in parallel.
	 *
	 * The mutex is also used to protect ->disabled, which is checked to
	 * see if the connection is getting disconnected, so that we don't
	 * start any new operations.
	 */
	mutex_lock(&cap->mutex);
	if (!cap->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = cap_ioctl(cap, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&cap->mutex);

	return ret;
}

static const struct file_operations cap_fops = {
	.owner		= THIS_MODULE,
	.open		= cap_open,
	.release	= cap_release,
	.unlocked_ioctl	= cap_ioctl_unlocked,
};

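/*
 * Set up the userspace-facing side of a CAP connection: allocate the
 * per-connection gb_cap, enable the greybus connection, and register a
 * character device plus a device under the gb_authenticate class.
 */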
int gb_cap_connection_init(struct gb_connection *connection)
{
	struct gb_cap *cap;
	int ret, minor;

	if (!connection)
		return 0;

	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
	if (!cap)
		return -ENOMEM;

	cap->parent = &connection->bundle->dev;
	cap->connection = connection;
	mutex_init(&cap->mutex);
	gb_connection_set_data(connection, cap);
	kref_init(&cap->kref);

	mutex_lock(&list_mutex);
	list_add(&cap->node, &cap_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_alloc_max(&cap_minors_map, NUM_MINORS - 1, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with cap */
	cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
	cdev_init(&cap->cdev, &cap_fops);

	ret = cdev_add(&cap->cdev, cap->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	cap->class_device = device_create(&cap_class, cap->parent, cap->dev_num,
					  NULL, "gb-authenticate-%d", minor);
	if (IS_ERR(cap->class_device)) {
		ret = PTR_ERR(cap->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&cap->cdev);
err_remove_ida:
	ida_free(&cap_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	put_cap(cap);

	return ret;
}

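/*
 * Tear down in roughly the reverse order of gb_cap_connection_init(): remove
 * the userspace-facing devices first, block further ioctls, disable the
 * connection, and finally drop the initial reference so the structure is
 * freed once the last open file descriptor goes away.
 */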
void gb_cap_connection_exit(struct gb_connection *connection)
{
	struct gb_cap *cap;

	if (!connection)
		return;

	cap = gb_connection_get_data(connection);

	device_destroy(&cap_class, cap->dev_num);
	cdev_del(&cap->cdev);
	ida_free(&cap_minors_map, MINOR(cap->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&cap->mutex);
	cap->disabled = true;
	mutex_unlock(&cap->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(cap->connection);

	/* Disallow new users to get access to the cap structure */
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of cap have taken a reference to it by now, so we
	 * can drop ours; the structure is freed once the last user puts its
	 * reference back.
	 */
	put_cap(cap);
}

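/*
 * One-time setup for the driver: register the gb_authenticate class and
 * reserve the char device region. Per-connection character devices are
 * created later in gb_cap_connection_init().
 */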
int cap_init(void)
{
	int ret;

	ret = class_register(&cap_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
				  "gb_authenticate");
	if (ret)
		goto err_remove_class;

	return 0;

err_remove_class:
	class_unregister(&cap_class);
	return ret;
}

void cap_exit(void)
{
	unregister_chrdev_region(cap_dev_num, NUM_MINORS);
	class_unregister(&cap_class);
	ida_destroy(&cap_minors_map);
}