// SPDX-License-Identifier: GPL-2.0+
/*
 * Provides user-space access to the SSAM EC via the /dev/surface/aggregator
 * misc device. Intended for debugging and development.
 *
 * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
 */
10 #include <linux/ioctl.h>
11 #include <linux/kernel.h>
12 #include <linux/kfifo.h>
13 #include <linux/kref.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/poll.h>
18 #include <linux/rwsem.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/vmalloc.h>
23 #include <linux/surface_aggregator/cdev.h>
24 #include <linux/surface_aggregator/controller.h>
25 #include <linux/surface_aggregator/serial_hub.h>
27 #define SSAM_CDEV_DEVICE_NAME "surface_aggregator_cdev"
30 /* -- Main structures. ------------------------------------------------------ */
32 enum ssam_cdev_device_state
{
33 SSAM_CDEV_DEVICE_SHUTDOWN_BIT
= BIT(0),
38 struct rw_semaphore lock
;
41 struct ssam_controller
*ctrl
;
42 struct miscdevice mdev
;
45 struct rw_semaphore client_lock
; /* Guards client list. */
46 struct list_head client_list
;
49 struct ssam_cdev_client
;
51 struct ssam_cdev_notifier
{
52 struct ssam_cdev_client
*client
;
53 struct ssam_event_notifier nf
;
56 struct ssam_cdev_client
{
57 struct ssam_cdev
*cdev
;
58 struct list_head node
;
60 struct mutex notifier_lock
; /* Guards notifier access for registration */
61 struct ssam_cdev_notifier
*notifier
[SSH_NUM_EVENTS
];
63 struct mutex read_lock
; /* Guards FIFO buffer read access */
64 struct mutex write_lock
; /* Guards FIFO buffer write access */
65 DECLARE_KFIFO(buffer
, u8
, 4096);
67 wait_queue_head_t waitq
;
68 struct fasync_struct
*fasync
;
71 static void __ssam_cdev_release(struct kref
*kref
)
73 kfree(container_of(kref
, struct ssam_cdev
, kref
));
76 static struct ssam_cdev
*ssam_cdev_get(struct ssam_cdev
*cdev
)
79 kref_get(&cdev
->kref
);
84 static void ssam_cdev_put(struct ssam_cdev
*cdev
)
87 kref_put(&cdev
->kref
, __ssam_cdev_release
);
91 /* -- Notifier handling. ---------------------------------------------------- */
93 static u32
ssam_cdev_notifier(struct ssam_event_notifier
*nf
, const struct ssam_event
*in
)
95 struct ssam_cdev_notifier
*cdev_nf
= container_of(nf
, struct ssam_cdev_notifier
, nf
);
96 struct ssam_cdev_client
*client
= cdev_nf
->client
;
97 struct ssam_cdev_event event
;
98 size_t n
= struct_size(&event
, data
, in
->length
);
100 /* Translate event. */
101 event
.target_category
= in
->target_category
;
102 event
.target_id
= in
->target_id
;
103 event
.command_id
= in
->command_id
;
104 event
.instance_id
= in
->instance_id
;
105 event
.length
= in
->length
;
107 mutex_lock(&client
->write_lock
);
109 /* Make sure we have enough space. */
110 if (kfifo_avail(&client
->buffer
) < n
) {
111 dev_warn(client
->cdev
->dev
,
112 "buffer full, dropping event (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
113 in
->target_category
, in
->target_id
, in
->command_id
, in
->instance_id
);
114 mutex_unlock(&client
->write_lock
);
118 /* Copy event header and payload. */
119 kfifo_in(&client
->buffer
, (const u8
*)&event
, struct_size(&event
, data
, 0));
120 kfifo_in(&client
->buffer
, &in
->data
[0], in
->length
);
122 mutex_unlock(&client
->write_lock
);
124 /* Notify waiting readers. */
125 kill_fasync(&client
->fasync
, SIGIO
, POLL_IN
);
126 wake_up_interruptible(&client
->waitq
);
129 * Don't mark events as handled, this is the job of a proper driver and
130 * not the debugging interface.
135 static int ssam_cdev_notifier_register(struct ssam_cdev_client
*client
, u8 tc
, int priority
)
137 const u16 rqid
= ssh_tc_to_rqid(tc
);
138 const u16 event
= ssh_rqid_to_event(rqid
);
139 struct ssam_cdev_notifier
*nf
;
142 lockdep_assert_held_read(&client
->cdev
->lock
);
144 /* Validate notifier target category. */
145 if (!ssh_rqid_is_event(rqid
))
148 mutex_lock(&client
->notifier_lock
);
150 /* Check if the notifier has already been registered. */
151 if (client
->notifier
[event
]) {
152 mutex_unlock(&client
->notifier_lock
);
156 /* Allocate new notifier. */
157 nf
= kzalloc(sizeof(*nf
), GFP_KERNEL
);
159 mutex_unlock(&client
->notifier_lock
);
164 * Create a dummy notifier with the minimal required fields for
165 * observer registration. Note that we can skip fully specifying event
166 * and registry here as we do not need any matching and use silent
167 * registration, which does not enable the corresponding event.
170 nf
->nf
.base
.fn
= ssam_cdev_notifier
;
171 nf
->nf
.base
.priority
= priority
;
172 nf
->nf
.event
.id
.target_category
= tc
;
173 nf
->nf
.event
.mask
= 0; /* Do not do any matching. */
174 nf
->nf
.flags
= SSAM_EVENT_NOTIFIER_OBSERVER
;
176 /* Register notifier. */
177 status
= ssam_notifier_register(client
->cdev
->ctrl
, &nf
->nf
);
181 client
->notifier
[event
] = nf
;
183 mutex_unlock(&client
->notifier_lock
);
187 static int ssam_cdev_notifier_unregister(struct ssam_cdev_client
*client
, u8 tc
)
189 const u16 rqid
= ssh_tc_to_rqid(tc
);
190 const u16 event
= ssh_rqid_to_event(rqid
);
193 lockdep_assert_held_read(&client
->cdev
->lock
);
195 /* Validate notifier target category. */
196 if (!ssh_rqid_is_event(rqid
))
199 mutex_lock(&client
->notifier_lock
);
201 /* Check if the notifier is currently registered. */
202 if (!client
->notifier
[event
]) {
203 mutex_unlock(&client
->notifier_lock
);
207 /* Unregister and free notifier. */
208 status
= ssam_notifier_unregister(client
->cdev
->ctrl
, &client
->notifier
[event
]->nf
);
209 kfree(client
->notifier
[event
]);
210 client
->notifier
[event
] = NULL
;
212 mutex_unlock(&client
->notifier_lock
);
216 static void ssam_cdev_notifier_unregister_all(struct ssam_cdev_client
*client
)
220 down_read(&client
->cdev
->lock
);
223 * This function may be used during shutdown, thus we need to test for
224 * cdev->ctrl instead of the SSAM_CDEV_DEVICE_SHUTDOWN_BIT bit.
226 if (client
->cdev
->ctrl
) {
227 for (i
= 0; i
< SSH_NUM_EVENTS
; i
++)
228 ssam_cdev_notifier_unregister(client
, i
+ 1);
234 * Device has been shut down. Any notifier remaining is a bug,
235 * so warn about that as this would otherwise hardly be
236 * noticeable. Nevertheless, free them as well.
238 mutex_lock(&client
->notifier_lock
);
239 for (i
= 0; i
< SSH_NUM_EVENTS
; i
++) {
240 count
+= !!(client
->notifier
[i
]);
241 kfree(client
->notifier
[i
]);
242 client
->notifier
[i
] = NULL
;
244 mutex_unlock(&client
->notifier_lock
);
249 up_read(&client
->cdev
->lock
);
253 /* -- IOCTL functions. ------------------------------------------------------ */
255 static long ssam_cdev_request(struct ssam_cdev_client
*client
, struct ssam_cdev_request __user
*r
)
257 struct ssam_cdev_request rqst
;
258 struct ssam_request spec
= {};
259 struct ssam_response rsp
= {};
260 const void __user
*plddata
;
261 void __user
*rspdata
;
262 int status
= 0, ret
= 0, tmp
;
264 lockdep_assert_held_read(&client
->cdev
->lock
);
266 ret
= copy_struct_from_user(&rqst
, sizeof(rqst
), r
, sizeof(*r
));
270 plddata
= u64_to_user_ptr(rqst
.payload
.data
);
271 rspdata
= u64_to_user_ptr(rqst
.response
.data
);
273 /* Setup basic request fields. */
274 spec
.target_category
= rqst
.target_category
;
275 spec
.target_id
= rqst
.target_id
;
276 spec
.command_id
= rqst
.command_id
;
277 spec
.instance_id
= rqst
.instance_id
;
279 spec
.length
= rqst
.payload
.length
;
282 if (rqst
.flags
& SSAM_CDEV_REQUEST_HAS_RESPONSE
)
283 spec
.flags
|= SSAM_REQUEST_HAS_RESPONSE
;
285 if (rqst
.flags
& SSAM_CDEV_REQUEST_UNSEQUENCED
)
286 spec
.flags
|= SSAM_REQUEST_UNSEQUENCED
;
288 rsp
.capacity
= rqst
.response
.length
;
292 /* Get request payload from user-space. */
300 * Note: spec.length is limited to U16_MAX bytes via struct
301 * ssam_cdev_request. This is slightly larger than the
302 * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
303 * underlying protocol (note that nothing remotely this size
304 * should ever be allocated in any normal case). This size is
305 * validated later in ssam_request_do_sync(), for allocation
306 * the bound imposed by u16 should be enough.
308 spec
.payload
= kzalloc(spec
.length
, GFP_KERNEL
);
314 if (copy_from_user((void *)spec
.payload
, plddata
, spec
.length
)) {
320 /* Allocate response buffer. */
328 * Note: rsp.capacity is limited to U16_MAX bytes via struct
329 * ssam_cdev_request. This is slightly larger than the
330 * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
331 * underlying protocol (note that nothing remotely this size
332 * should ever be allocated in any normal case). In later use,
333 * this capacity does not have to be strictly bounded, as it
334 * is only used as an output buffer to be written to. For
335 * allocation the bound imposed by u16 should be enough.
337 rsp
.pointer
= kzalloc(rsp
.capacity
, GFP_KERNEL
);
344 /* Perform request. */
345 status
= ssam_request_do_sync(client
->cdev
->ctrl
, &spec
, &rsp
);
349 /* Copy response to user-space. */
350 if (rsp
.length
&& copy_to_user(rspdata
, rsp
.pointer
, rsp
.length
))
354 /* Always try to set response-length and status. */
355 tmp
= put_user(rsp
.length
, &r
->response
.length
);
359 tmp
= put_user(status
, &r
->status
);
370 static long ssam_cdev_notif_register(struct ssam_cdev_client
*client
,
371 const struct ssam_cdev_notifier_desc __user
*d
)
373 struct ssam_cdev_notifier_desc desc
;
376 lockdep_assert_held_read(&client
->cdev
->lock
);
378 ret
= copy_struct_from_user(&desc
, sizeof(desc
), d
, sizeof(*d
));
382 return ssam_cdev_notifier_register(client
, desc
.target_category
, desc
.priority
);
385 static long ssam_cdev_notif_unregister(struct ssam_cdev_client
*client
,
386 const struct ssam_cdev_notifier_desc __user
*d
)
388 struct ssam_cdev_notifier_desc desc
;
391 lockdep_assert_held_read(&client
->cdev
->lock
);
393 ret
= copy_struct_from_user(&desc
, sizeof(desc
), d
, sizeof(*d
));
397 return ssam_cdev_notifier_unregister(client
, desc
.target_category
);
400 static long ssam_cdev_event_enable(struct ssam_cdev_client
*client
,
401 const struct ssam_cdev_event_desc __user
*d
)
403 struct ssam_cdev_event_desc desc
;
404 struct ssam_event_registry reg
;
405 struct ssam_event_id id
;
408 lockdep_assert_held_read(&client
->cdev
->lock
);
410 /* Read descriptor from user-space. */
411 ret
= copy_struct_from_user(&desc
, sizeof(desc
), d
, sizeof(*d
));
415 /* Translate descriptor. */
416 reg
.target_category
= desc
.reg
.target_category
;
417 reg
.target_id
= desc
.reg
.target_id
;
418 reg
.cid_enable
= desc
.reg
.cid_enable
;
419 reg
.cid_disable
= desc
.reg
.cid_disable
;
421 id
.target_category
= desc
.id
.target_category
;
422 id
.instance
= desc
.id
.instance
;
425 return ssam_controller_event_enable(client
->cdev
->ctrl
, reg
, id
, desc
.flags
);
428 static long ssam_cdev_event_disable(struct ssam_cdev_client
*client
,
429 const struct ssam_cdev_event_desc __user
*d
)
431 struct ssam_cdev_event_desc desc
;
432 struct ssam_event_registry reg
;
433 struct ssam_event_id id
;
436 lockdep_assert_held_read(&client
->cdev
->lock
);
438 /* Read descriptor from user-space. */
439 ret
= copy_struct_from_user(&desc
, sizeof(desc
), d
, sizeof(*d
));
443 /* Translate descriptor. */
444 reg
.target_category
= desc
.reg
.target_category
;
445 reg
.target_id
= desc
.reg
.target_id
;
446 reg
.cid_enable
= desc
.reg
.cid_enable
;
447 reg
.cid_disable
= desc
.reg
.cid_disable
;
449 id
.target_category
= desc
.id
.target_category
;
450 id
.instance
= desc
.id
.instance
;
453 return ssam_controller_event_disable(client
->cdev
->ctrl
, reg
, id
, desc
.flags
);
457 /* -- File operations. ------------------------------------------------------ */
459 static int ssam_cdev_device_open(struct inode
*inode
, struct file
*filp
)
461 struct miscdevice
*mdev
= filp
->private_data
;
462 struct ssam_cdev_client
*client
;
463 struct ssam_cdev
*cdev
= container_of(mdev
, struct ssam_cdev
, mdev
);
465 /* Initialize client */
466 client
= vzalloc(sizeof(*client
));
470 client
->cdev
= ssam_cdev_get(cdev
);
472 INIT_LIST_HEAD(&client
->node
);
474 mutex_init(&client
->notifier_lock
);
476 mutex_init(&client
->read_lock
);
477 mutex_init(&client
->write_lock
);
478 INIT_KFIFO(client
->buffer
);
479 init_waitqueue_head(&client
->waitq
);
481 filp
->private_data
= client
;
484 down_write(&cdev
->client_lock
);
486 if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT
, &cdev
->flags
)) {
487 up_write(&cdev
->client_lock
);
488 mutex_destroy(&client
->write_lock
);
489 mutex_destroy(&client
->read_lock
);
490 mutex_destroy(&client
->notifier_lock
);
491 ssam_cdev_put(client
->cdev
);
495 list_add_tail(&client
->node
, &cdev
->client_list
);
497 up_write(&cdev
->client_lock
);
499 stream_open(inode
, filp
);
503 static int ssam_cdev_device_release(struct inode
*inode
, struct file
*filp
)
505 struct ssam_cdev_client
*client
= filp
->private_data
;
507 /* Force-unregister all remaining notifiers of this client. */
508 ssam_cdev_notifier_unregister_all(client
);
511 down_write(&client
->cdev
->client_lock
);
512 list_del(&client
->node
);
513 up_write(&client
->cdev
->client_lock
);
516 mutex_destroy(&client
->write_lock
);
517 mutex_destroy(&client
->read_lock
);
519 mutex_destroy(&client
->notifier_lock
);
521 ssam_cdev_put(client
->cdev
);
527 static long __ssam_cdev_device_ioctl(struct ssam_cdev_client
*client
, unsigned int cmd
,
530 lockdep_assert_held_read(&client
->cdev
->lock
);
533 case SSAM_CDEV_REQUEST
:
534 return ssam_cdev_request(client
, (struct ssam_cdev_request __user
*)arg
);
536 case SSAM_CDEV_NOTIF_REGISTER
:
537 return ssam_cdev_notif_register(client
,
538 (struct ssam_cdev_notifier_desc __user
*)arg
);
540 case SSAM_CDEV_NOTIF_UNREGISTER
:
541 return ssam_cdev_notif_unregister(client
,
542 (struct ssam_cdev_notifier_desc __user
*)arg
);
544 case SSAM_CDEV_EVENT_ENABLE
:
545 return ssam_cdev_event_enable(client
, (struct ssam_cdev_event_desc __user
*)arg
);
547 case SSAM_CDEV_EVENT_DISABLE
:
548 return ssam_cdev_event_disable(client
, (struct ssam_cdev_event_desc __user
*)arg
);
555 static long ssam_cdev_device_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
557 struct ssam_cdev_client
*client
= file
->private_data
;
560 /* Ensure that controller is valid for as long as we need it. */
561 if (down_read_killable(&client
->cdev
->lock
))
564 if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT
, &client
->cdev
->flags
)) {
565 up_read(&client
->cdev
->lock
);
569 status
= __ssam_cdev_device_ioctl(client
, cmd
, arg
);
571 up_read(&client
->cdev
->lock
);
575 static ssize_t
ssam_cdev_read(struct file
*file
, char __user
*buf
, size_t count
, loff_t
*offs
)
577 struct ssam_cdev_client
*client
= file
->private_data
;
578 struct ssam_cdev
*cdev
= client
->cdev
;
582 if (down_read_killable(&cdev
->lock
))
585 /* Make sure we're not shut down. */
586 if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT
, &cdev
->flags
)) {
587 up_read(&cdev
->lock
);
592 /* Check availability, wait if necessary. */
593 if (kfifo_is_empty(&client
->buffer
)) {
594 up_read(&cdev
->lock
);
596 if (file
->f_flags
& O_NONBLOCK
)
599 status
= wait_event_interruptible(client
->waitq
,
600 !kfifo_is_empty(&client
->buffer
) ||
601 test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT
,
606 if (down_read_killable(&cdev
->lock
))
609 /* Need to check that we're not shut down again. */
610 if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT
, &cdev
->flags
)) {
611 up_read(&cdev
->lock
);
616 /* Try to read from FIFO. */
617 if (mutex_lock_interruptible(&client
->read_lock
)) {
618 up_read(&cdev
->lock
);
622 status
= kfifo_to_user(&client
->buffer
, buf
, count
, &copied
);
623 mutex_unlock(&client
->read_lock
);
626 up_read(&cdev
->lock
);
630 /* We might not have gotten anything, check this here. */
631 if (copied
== 0 && (file
->f_flags
& O_NONBLOCK
)) {
632 up_read(&cdev
->lock
);
635 } while (copied
== 0);
637 up_read(&cdev
->lock
);
641 static __poll_t
ssam_cdev_poll(struct file
*file
, struct poll_table_struct
*pt
)
643 struct ssam_cdev_client
*client
= file
->private_data
;
646 if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT
, &client
->cdev
->flags
))
647 return EPOLLHUP
| EPOLLERR
;
649 poll_wait(file
, &client
->waitq
, pt
);
651 if (!kfifo_is_empty(&client
->buffer
))
652 events
|= EPOLLIN
| EPOLLRDNORM
;
657 static int ssam_cdev_fasync(int fd
, struct file
*file
, int on
)
659 struct ssam_cdev_client
*client
= file
->private_data
;
661 return fasync_helper(fd
, file
, on
, &client
->fasync
);
664 static const struct file_operations ssam_controller_fops
= {
665 .owner
= THIS_MODULE
,
666 .open
= ssam_cdev_device_open
,
667 .release
= ssam_cdev_device_release
,
668 .read
= ssam_cdev_read
,
669 .poll
= ssam_cdev_poll
,
670 .fasync
= ssam_cdev_fasync
,
671 .unlocked_ioctl
= ssam_cdev_device_ioctl
,
672 .compat_ioctl
= ssam_cdev_device_ioctl
,
676 /* -- Device and driver setup ----------------------------------------------- */
678 static int ssam_dbg_device_probe(struct platform_device
*pdev
)
680 struct ssam_controller
*ctrl
;
681 struct ssam_cdev
*cdev
;
684 ctrl
= ssam_client_bind(&pdev
->dev
);
686 return PTR_ERR(ctrl
) == -ENODEV
? -EPROBE_DEFER
: PTR_ERR(ctrl
);
688 cdev
= kzalloc(sizeof(*cdev
), GFP_KERNEL
);
692 kref_init(&cdev
->kref
);
693 init_rwsem(&cdev
->lock
);
695 cdev
->dev
= &pdev
->dev
;
697 cdev
->mdev
.parent
= &pdev
->dev
;
698 cdev
->mdev
.minor
= MISC_DYNAMIC_MINOR
;
699 cdev
->mdev
.name
= "surface_aggregator";
700 cdev
->mdev
.nodename
= "surface/aggregator";
701 cdev
->mdev
.fops
= &ssam_controller_fops
;
703 init_rwsem(&cdev
->client_lock
);
704 INIT_LIST_HEAD(&cdev
->client_list
);
706 status
= misc_register(&cdev
->mdev
);
712 platform_set_drvdata(pdev
, cdev
);
716 static void ssam_dbg_device_remove(struct platform_device
*pdev
)
718 struct ssam_cdev
*cdev
= platform_get_drvdata(pdev
);
719 struct ssam_cdev_client
*client
;
722 * Mark device as shut-down. Prevent new clients from being added and
723 * new operations from being executed.
725 set_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT
, &cdev
->flags
);
727 down_write(&cdev
->client_lock
);
729 /* Remove all notifiers registered by us. */
730 list_for_each_entry(client
, &cdev
->client_list
, node
) {
731 ssam_cdev_notifier_unregister_all(client
);
734 /* Wake up async clients. */
735 list_for_each_entry(client
, &cdev
->client_list
, node
) {
736 kill_fasync(&client
->fasync
, SIGIO
, POLL_HUP
);
739 /* Wake up blocking clients. */
740 list_for_each_entry(client
, &cdev
->client_list
, node
) {
741 wake_up_interruptible(&client
->waitq
);
744 up_write(&cdev
->client_lock
);
747 * The controller is only guaranteed to be valid for as long as the
748 * driver is bound. Remove controller so that any lingering open files
749 * cannot access it any more after we're gone.
751 down_write(&cdev
->lock
);
754 up_write(&cdev
->lock
);
756 misc_deregister(&cdev
->mdev
);
761 static struct platform_device
*ssam_cdev_device
;
763 static struct platform_driver ssam_cdev_driver
= {
764 .probe
= ssam_dbg_device_probe
,
765 .remove_new
= ssam_dbg_device_remove
,
767 .name
= SSAM_CDEV_DEVICE_NAME
,
768 .probe_type
= PROBE_PREFER_ASYNCHRONOUS
,
772 static int __init
ssam_debug_init(void)
776 ssam_cdev_device
= platform_device_alloc(SSAM_CDEV_DEVICE_NAME
,
777 PLATFORM_DEVID_NONE
);
778 if (!ssam_cdev_device
)
781 status
= platform_device_add(ssam_cdev_device
);
785 status
= platform_driver_register(&ssam_cdev_driver
);
792 platform_device_del(ssam_cdev_device
);
794 platform_device_put(ssam_cdev_device
);
797 module_init(ssam_debug_init
);
799 static void __exit
ssam_debug_exit(void)
801 platform_driver_unregister(&ssam_cdev_driver
);
802 platform_device_unregister(ssam_cdev_device
);
804 module_exit(ssam_debug_exit
);
806 MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
807 MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module");
808 MODULE_LICENSE("GPL");