// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, Linaro Ltd.
 * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2012, PetaLogix
 * Copyright (c) 2011, Texas Instruments, Inc.
 * Copyright (c) 2011, Google, Inc.
 *
 * Based on rpmsg performance statistics driver by Michal Simek, which in turn
 * was based on TI & Google OMX rpmsg driver.
 */

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/rpmsg.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/rpmsg.h>

#include "rpmsg_internal.h"

#define RPMSG_DEV_MAX	(MINORMASK + 1)

static dev_t rpmsg_major;
static struct class *rpmsg_class;

static DEFINE_IDA(rpmsg_ctrl_ida);
static DEFINE_IDA(rpmsg_ept_ida);
static DEFINE_IDA(rpmsg_minor_ida);

#define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev)
#define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev)

#define dev_to_ctrldev(dev) container_of(dev, struct rpmsg_ctrldev, dev)
#define cdev_to_ctrldev(i_cdev) container_of(i_cdev, struct rpmsg_ctrldev, cdev)

/**
 * struct rpmsg_ctrldev - control device for instantiating endpoint devices
 * @rpdev:	underlying rpmsg device
 * @cdev:	cdev for the ctrl device
 * @dev:	device for the ctrl device
 */
struct rpmsg_ctrldev {
	struct rpmsg_device *rpdev;
	struct cdev cdev;
	struct device dev;
};

/**
 * struct rpmsg_eptdev - endpoint device context
 * @dev:	endpoint device
 * @cdev:	cdev for the endpoint device
 * @rpdev:	underlying rpmsg device
 * @chinfo:	info used to open the endpoint
 * @ept_lock:	synchronization of @ept modifications
 * @ept:	rpmsg endpoint reference, when open
 * @queue_lock:	synchronization of @queue operations
 * @queue:	incoming message queue
 * @readq:	wait object for incoming queue
 */
struct rpmsg_eptdev {
	struct device dev;
	struct cdev cdev;

	struct rpmsg_device *rpdev;
	struct rpmsg_channel_info chinfo;

	struct mutex ept_lock;
	struct rpmsg_endpoint *ept;

	spinlock_t queue_lock;
	struct sk_buff_head queue;
	wait_queue_head_t readq;
};

static int rpmsg_eptdev_destroy(struct device *dev, void *data)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);

	/* wake up any blocked readers */
	wake_up_interruptible(&eptdev->readq);

	device_del(&eptdev->dev);
	put_device(&eptdev->dev);

	return 0;
}

static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
			void *priv, u32 addr)
{
	struct rpmsg_eptdev *eptdev = priv;
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, buf, len);

	spin_lock(&eptdev->queue_lock);
	skb_queue_tail(&eptdev->queue, skb);
	spin_unlock(&eptdev->queue_lock);

	/* wake up any blocking processes, waiting for new data */
	wake_up_interruptible(&eptdev->readq);

	return 0;
}

static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct rpmsg_endpoint *ept;
	struct rpmsg_device *rpdev = eptdev->rpdev;
	struct device *dev = &eptdev->dev;

	get_device(dev);

	ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);
	if (!ept) {
		dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
		put_device(dev);
		return -EINVAL;
	}

	eptdev->ept = ept;
	filp->private_data = eptdev;

	return 0;
}

static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct device *dev = &eptdev->dev;

	/* Close the endpoint, if it's not already destroyed by the parent */
	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);

	/* Discard all SKBs */
	skb_queue_purge(&eptdev->queue);

	put_device(dev);

	return 0;
}

static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	unsigned long flags;
	struct sk_buff *skb;
	int use;

	if (!eptdev->ept)
		return -EPIPE;

	spin_lock_irqsave(&eptdev->queue_lock, flags);

	/* Wait for data in the queue */
	if (skb_queue_empty(&eptdev->queue)) {
		spin_unlock_irqrestore(&eptdev->queue_lock, flags);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Wait until we get data or the endpoint goes away */
		if (wait_event_interruptible(eptdev->readq,
					     !skb_queue_empty(&eptdev->queue) ||
					     !eptdev->ept))
			return -ERESTARTSYS;

		/* We lost the endpoint while waiting */
		if (!eptdev->ept)
			return -EPIPE;

		spin_lock_irqsave(&eptdev->queue_lock, flags);
	}

	skb = skb_dequeue(&eptdev->queue);
	spin_unlock_irqrestore(&eptdev->queue_lock, flags);
	if (!skb)
		return -EFAULT;

	use = min_t(size_t, iov_iter_count(to), skb->len);
	if (copy_to_iter(skb->data, use, to) != use)
		use = -EFAULT;

	kfree_skb(skb);

	return use;
}

static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
				       struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	size_t len = iov_iter_count(from);
	void *kbuf;
	int ret;

	kbuf = kzalloc(len, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (!copy_from_iter_full(kbuf, len, from)) {
		ret = -EFAULT;
		goto free_kbuf;
	}

	if (mutex_lock_interruptible(&eptdev->ept_lock)) {
		ret = -ERESTARTSYS;
		goto free_kbuf;
	}

	if (!eptdev->ept) {
		ret = -EPIPE;
		goto unlock_eptdev;
	}

	if (filp->f_flags & O_NONBLOCK)
		ret = rpmsg_trysend(eptdev->ept, kbuf, len);
	else
		ret = rpmsg_send(eptdev->ept, kbuf, len);

unlock_eptdev:
	mutex_unlock(&eptdev->ept_lock);

free_kbuf:
	kfree(kbuf);
	return ret < 0 ? ret : len;
}

static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
	struct rpmsg_eptdev *eptdev = filp->private_data;
	__poll_t mask = 0;

	if (!eptdev->ept)
		return EPOLLERR;

	poll_wait(filp, &eptdev->readq, wait);

	if (!skb_queue_empty(&eptdev->queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	mask |= rpmsg_poll(eptdev->ept, filp, wait);

	return mask;
}

static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
			       unsigned long arg)
{
	struct rpmsg_eptdev *eptdev = fp->private_data;

	if (cmd != RPMSG_DESTROY_EPT_IOCTL)
		return -EINVAL;

	return rpmsg_eptdev_destroy(&eptdev->dev, NULL);
}

static const struct file_operations rpmsg_eptdev_fops = {
	.owner = THIS_MODULE,
	.open = rpmsg_eptdev_open,
	.release = rpmsg_eptdev_release,
	.read_iter = rpmsg_eptdev_read_iter,
	.write_iter = rpmsg_eptdev_write_iter,
	.poll = rpmsg_eptdev_poll,
	.unlocked_ioctl = rpmsg_eptdev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
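
/*
 * Illustrative userspace sketch (not part of this driver): with the above
 * file_operations in place, a process exchanges messages with the remote
 * endpoint through plain open()/write()/read()/poll() on the endpoint
 * character device. The "/dev/rpmsg0" path is an assumed example; the actual
 * index is assigned when the endpoint device is created.
 *
 *	int fd = open("/dev/rpmsg0", O_RDWR);
 *	char buf[512];
 *	ssize_t n;
 *
 *	write(fd, "ping", 4);			// rpmsg_eptdev_write_iter()
 *	n = read(fd, buf, sizeof(buf));		// blocks until rpmsg_ept_cb() queues data
 *	close(fd);				// rpmsg_eptdev_release() closes the endpoint
 */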

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", eptdev->chinfo.name);
}
static DEVICE_ATTR_RO(name);

static ssize_t src_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.src);
}
static DEVICE_ATTR_RO(src);

static ssize_t dst_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.dst);
}
static DEVICE_ATTR_RO(dst);

static struct attribute *rpmsg_eptdev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_src.attr,
	&dev_attr_dst.attr,
	NULL
};
ATTRIBUTE_GROUPS(rpmsg_eptdev);

static void rpmsg_eptdev_release_device(struct device *dev)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	ida_simple_remove(&rpmsg_ept_ida, dev->id);
	ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
	cdev_del(&eptdev->cdev);
	kfree(eptdev);
}

static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
			       struct rpmsg_channel_info chinfo)
{
	struct rpmsg_device *rpdev = ctrldev->rpdev;
	struct rpmsg_eptdev *eptdev;
	struct device *dev;
	int ret;

	eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL);
	if (!eptdev)
		return -ENOMEM;

	dev = &eptdev->dev;
	eptdev->rpdev = rpdev;
	eptdev->chinfo = chinfo;

	mutex_init(&eptdev->ept_lock);
	spin_lock_init(&eptdev->queue_lock);
	skb_queue_head_init(&eptdev->queue);
	init_waitqueue_head(&eptdev->readq);

	device_initialize(dev);
	dev->class = rpmsg_class;
	dev->parent = &ctrldev->dev;
	dev->groups = rpmsg_eptdev_groups;
	dev_set_drvdata(dev, eptdev);

	cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops);
	eptdev->cdev.owner = THIS_MODULE;

	ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
	if (ret < 0)
		goto free_eptdev;
	dev->devt = MKDEV(MAJOR(rpmsg_major), ret);

	ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto free_minor_ida;
	dev->id = ret;
	dev_set_name(dev, "rpmsg%d", ret);

	ret = cdev_add(&eptdev->cdev, dev->devt, 1);
	if (ret)
		goto free_ept_ida;

	/* We can now rely on the release function for cleanup */
	dev->release = rpmsg_eptdev_release_device;

	ret = device_add(dev);
	if (ret) {
		dev_err(dev, "device_add failed: %d\n", ret);
		put_device(dev);
	}

	return ret;

free_ept_ida:
	ida_simple_remove(&rpmsg_ept_ida, dev->id);
free_minor_ida:
	ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
free_eptdev:
	put_device(dev);
	kfree(eptdev);

	return ret;
}

static int rpmsg_ctrldev_open(struct inode *inode, struct file *filp)
{
	struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);

	get_device(&ctrldev->dev);
	filp->private_data = ctrldev;

	return 0;
}

static int rpmsg_ctrldev_release(struct inode *inode, struct file *filp)
{
	struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev);

	put_device(&ctrldev->dev);

	return 0;
}

static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd,
				unsigned long arg)
{
	struct rpmsg_ctrldev *ctrldev = fp->private_data;
	void __user *argp = (void __user *)arg;
	struct rpmsg_endpoint_info eptinfo;
	struct rpmsg_channel_info chinfo;

	if (cmd != RPMSG_CREATE_EPT_IOCTL)
		return -EINVAL;

	if (copy_from_user(&eptinfo, argp, sizeof(eptinfo)))
		return -EFAULT;

	memcpy(chinfo.name, eptinfo.name, RPMSG_NAME_SIZE);
	chinfo.name[RPMSG_NAME_SIZE - 1] = '\0';
	chinfo.src = eptinfo.src;
	chinfo.dst = eptinfo.dst;

	return rpmsg_eptdev_create(ctrldev, chinfo);
}
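
/*
 * Illustrative userspace sketch (not part of this driver): endpoint devices
 * are created by issuing RPMSG_CREATE_EPT_IOCTL on the control device. The
 * "/dev/rpmsg_ctrl0" path and the "rpmsg-client-sample" service name are
 * assumed examples; the name must match a service announced by the remote
 * processor, and 0xffffffff (RPMSG_ADDR_ANY) leaves address assignment to
 * the rpmsg core.
 *
 *	struct rpmsg_endpoint_info info = {
 *		.name = "rpmsg-client-sample",
 *		.src = 0xffffffff,	// RPMSG_ADDR_ANY
 *		.dst = 0xffffffff,
 *	};
 *	int ctrl = open("/dev/rpmsg_ctrl0", O_RDWR);
 *
 *	ioctl(ctrl, RPMSG_CREATE_EPT_IOCTL, &info);	// creates /dev/rpmsgN
 */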

static const struct file_operations rpmsg_ctrldev_fops = {
	.owner = THIS_MODULE,
	.open = rpmsg_ctrldev_open,
	.release = rpmsg_ctrldev_release,
	.unlocked_ioctl = rpmsg_ctrldev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static void rpmsg_ctrldev_release_device(struct device *dev)
{
	struct rpmsg_ctrldev *ctrldev = dev_to_ctrldev(dev);

	ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
	ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
	cdev_del(&ctrldev->cdev);
	kfree(ctrldev);
}

static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
{
	struct rpmsg_ctrldev *ctrldev;
	struct device *dev;
	int ret;

	ctrldev = kzalloc(sizeof(*ctrldev), GFP_KERNEL);
	if (!ctrldev)
		return -ENOMEM;

	ctrldev->rpdev = rpdev;

	dev = &ctrldev->dev;
	device_initialize(dev);
	dev->parent = &rpdev->dev;
	dev->class = rpmsg_class;

	cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops);
	ctrldev->cdev.owner = THIS_MODULE;

	ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
	if (ret < 0)
		goto free_ctrldev;
	dev->devt = MKDEV(MAJOR(rpmsg_major), ret);

	ret = ida_simple_get(&rpmsg_ctrl_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto free_minor_ida;
	dev->id = ret;
	dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);

	ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
	if (ret)
		goto free_ctrl_ida;

	/* We can now rely on the release function for cleanup */
	dev->release = rpmsg_ctrldev_release_device;

	ret = device_add(dev);
	if (ret) {
		dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
		put_device(dev);
	}

	dev_set_drvdata(&rpdev->dev, ctrldev);

	return ret;

free_ctrl_ida:
	ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
free_minor_ida:
	ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
free_ctrldev:
	put_device(dev);
	kfree(ctrldev);

	return ret;
}

static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
{
	struct rpmsg_ctrldev *ctrldev = dev_get_drvdata(&rpdev->dev);
	int ret;

	/* Destroy all endpoints */
	ret = device_for_each_child(&ctrldev->dev, NULL, rpmsg_eptdev_destroy);
	if (ret)
		dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);

	device_del(&ctrldev->dev);
	put_device(&ctrldev->dev);
}

static struct rpmsg_driver rpmsg_chrdev_driver = {
	.probe = rpmsg_chrdev_probe,
	.remove = rpmsg_chrdev_remove,
	.drv = {
		.name = "rpmsg_chrdev",
	},
};

static int rpmsg_char_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg");
	if (ret < 0) {
		pr_err("rpmsg: failed to allocate char dev region\n");
		return ret;
	}

	rpmsg_class = class_create(THIS_MODULE, "rpmsg");
	if (IS_ERR(rpmsg_class)) {
		pr_err("failed to create rpmsg class\n");
		unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
		return PTR_ERR(rpmsg_class);
	}

	ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
	if (ret < 0) {
		pr_err("rpmsgchr: failed to register rpmsg driver\n");
		class_destroy(rpmsg_class);
		unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
	}

	return ret;
}
postcore_initcall(rpmsg_char_init);

static void rpmsg_chrdev_exit(void)
{
	unregister_rpmsg_driver(&rpmsg_chrdev_driver);
	class_destroy(rpmsg_class);
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
module_exit(rpmsg_chrdev_exit);

MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_LICENSE("GPL v2");