// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022, STMicroelectronics
 * Copyright (c) 2016, Linaro Ltd.
 * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2012, PetaLogix
 * Copyright (c) 2011, Texas Instruments, Inc.
 * Copyright (c) 2011, Google, Inc.
 *
 * Based on rpmsg performance statistics driver by Michal Simek, which in turn
 * was based on TI & Google OMX rpmsg driver.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/rpmsg.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/rpmsg.h>

#include "rpmsg_char.h"
#include "rpmsg_internal.h"

#define RPMSG_DEV_MAX	(MINORMASK + 1)

static dev_t rpmsg_major;

static DEFINE_IDA(rpmsg_ept_ida);
static DEFINE_IDA(rpmsg_minor_ida);

#define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev)
#define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev)

/**
 * struct rpmsg_eptdev - endpoint device context
 * @dev: endpoint device
 * @cdev: cdev for the endpoint device
 * @rpdev: underlying rpmsg device
 * @chinfo: info used to open the endpoint
 * @ept_lock: synchronization of @ept modifications
 * @ept: rpmsg endpoint reference, when open
 * @queue_lock: synchronization of @queue operations
 * @queue: incoming message queue
 * @readq: wait object for incoming queue
 * @default_ept: set to channel default endpoint if the default endpoint should be re-used
 *               on device open to prevent endpoint address update.
 * @remote_flow_restricted: to indicate if the remote has requested for flow to be limited
 * @remote_flow_updated: to indicate if the flow control has been requested
 */
struct rpmsg_eptdev {
	struct device dev;
	struct cdev cdev;

	struct rpmsg_device *rpdev;
	struct rpmsg_channel_info chinfo;

	struct mutex ept_lock;
	struct rpmsg_endpoint *ept;
	struct rpmsg_endpoint *default_ept;

	spinlock_t queue_lock;
	struct sk_buff_head queue;
	wait_queue_head_t readq;

	bool remote_flow_restricted;
	bool remote_flow_updated;
};
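
/*
 * Lifetime note: the context embeds both the struct device and the cdev, so
 * dev_to_eptdev()/cdev_to_eptdev() recover it with container_of(). The device
 * reference count owns the allocation; it is freed from the device release
 * callback (rpmsg_eptdev_release_device()) once the last reference is dropped.
 */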

int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	mutex_lock(&eptdev->ept_lock);
	eptdev->rpdev = NULL;
	if (eptdev->ept) {
		/* The default endpoint is released by the rpmsg core */
		if (!eptdev->default_ept)
			rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);

	/* wake up any blocked readers */
	wake_up_interruptible(&eptdev->readq);

	cdev_device_del(&eptdev->cdev, &eptdev->dev);
	put_device(&eptdev->dev);

	return 0;
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_destroy);

static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
			void *priv, u32 addr)
{
	struct rpmsg_eptdev *eptdev = priv;
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, buf, len);

	spin_lock(&eptdev->queue_lock);
	skb_queue_tail(&eptdev->queue, skb);
	spin_unlock(&eptdev->queue_lock);

	/* wake up any blocking processes, waiting for new data */
	wake_up_interruptible(&eptdev->readq);

	return 0;
}

static int rpmsg_ept_flow_cb(struct rpmsg_device *rpdev, void *priv, bool enable)
{
	struct rpmsg_eptdev *eptdev = priv;

	eptdev->remote_flow_restricted = enable;
	eptdev->remote_flow_updated = true;

	wake_up_interruptible(&eptdev->readq);

	return 0;
}
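
/*
 * A remote flow-control notification is latched in @remote_flow_updated and
 * reported to user space as EPOLLPRI by rpmsg_eptdev_poll(); the current state
 * is then read (and the latch cleared) through the
 * RPMSG_GET_OUTGOING_FLOWCONTROL ioctl below.
 */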

static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct rpmsg_endpoint *ept;
	struct rpmsg_device *rpdev = eptdev->rpdev;
	struct device *dev = &eptdev->dev;

	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		mutex_unlock(&eptdev->ept_lock);
		return -EBUSY;
	}

	if (!eptdev->rpdev) {
		mutex_unlock(&eptdev->ept_lock);
		return -ENETRESET;
	}

	get_device(dev);

	/*
	 * If the default_ept is set, the rpmsg device default endpoint is used.
	 * Else a new endpoint is created on open that will be destroyed on release.
	 */
	if (eptdev->default_ept)
		ept = eptdev->default_ept;
	else
		ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);

	if (!ept) {
		dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
		put_device(dev);
		mutex_unlock(&eptdev->ept_lock);
		return -EINVAL;
	}

	ept->flow_cb = rpmsg_ept_flow_cb;
	eptdev->ept = ept;
	filp->private_data = eptdev;
	mutex_unlock(&eptdev->ept_lock);

	return 0;
}

static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct device *dev = &eptdev->dev;

	/* Close the endpoint, if it's not already destroyed by the parent */
	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		if (!eptdev->default_ept)
			rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);
	eptdev->remote_flow_updated = false;

	/* Discard all SKBs */
	skb_queue_purge(&eptdev->queue);

	put_device(dev);

	return 0;
}

static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	unsigned long flags;
	struct sk_buff *skb;
	int use;

	if (!eptdev->ept)
		return -EPIPE;

	spin_lock_irqsave(&eptdev->queue_lock, flags);

	/* Wait for data in the queue */
	if (skb_queue_empty(&eptdev->queue)) {
		spin_unlock_irqrestore(&eptdev->queue_lock, flags);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Wait until we get data or the endpoint goes away */
		if (wait_event_interruptible(eptdev->readq,
					     !skb_queue_empty(&eptdev->queue) ||
					     !eptdev->ept))
			return -ERESTARTSYS;

		/* We lost the endpoint while waiting */
		if (!eptdev->ept)
			return -EPIPE;

		spin_lock_irqsave(&eptdev->queue_lock, flags);
	}

	skb = skb_dequeue(&eptdev->queue);
	spin_unlock_irqrestore(&eptdev->queue_lock, flags);
	if (!skb)
		return -EFAULT;

	use = min_t(size_t, iov_iter_count(to), skb->len);
	if (copy_to_iter(skb->data, use, to) != use)
		use = -EFAULT;

	kfree_skb(skb);

	return use;
}

static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
				       struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	size_t len = iov_iter_count(from);
	void *kbuf;
	int ret;

	kbuf = kzalloc(len, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (!copy_from_iter_full(kbuf, len, from)) {
		ret = -EFAULT;
		goto free_kbuf;
	}

	if (mutex_lock_interruptible(&eptdev->ept_lock)) {
		ret = -ERESTARTSYS;
		goto free_kbuf;
	}

	if (!eptdev->ept) {
		ret = -EPIPE;
		goto unlock_eptdev;
	}

	if (filp->f_flags & O_NONBLOCK) {
		ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
		if (ret == -ENOMEM)
			ret = -EAGAIN;
	} else {
		ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
	}

unlock_eptdev:
	mutex_unlock(&eptdev->ept_lock);

free_kbuf:
	kfree(kbuf);
	return ret < 0 ? ret : len;
}

static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
	struct rpmsg_eptdev *eptdev = filp->private_data;
	__poll_t mask = 0;

	if (!eptdev->ept)
		return EPOLLERR;

	poll_wait(filp, &eptdev->readq, wait);

	if (!skb_queue_empty(&eptdev->queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (eptdev->remote_flow_updated)
		mask |= EPOLLPRI;

	mutex_lock(&eptdev->ept_lock);
	mask |= rpmsg_poll(eptdev->ept, filp, wait);
	mutex_unlock(&eptdev->ept_lock);

	return mask;
}

static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
			       unsigned long arg)
{
	struct rpmsg_eptdev *eptdev = fp->private_data;
	bool set;
	int ret;

	switch (cmd) {
	case RPMSG_GET_OUTGOING_FLOWCONTROL:
		eptdev->remote_flow_updated = false;
		ret = put_user(eptdev->remote_flow_restricted, (int __user *)arg);
		break;
	case RPMSG_SET_INCOMING_FLOWCONTROL:
		if (arg > 1) {
			ret = -EINVAL;
			break;
		}
		set = !!arg;
		ret = rpmsg_set_flow_control(eptdev->ept, set, eptdev->chinfo.dst);
		break;
	case RPMSG_DESTROY_EPT_IOCTL:
		/* Don't allow to destroy a default endpoint. */
		if (eptdev->default_ept) {
			ret = -EINVAL;
			break;
		}
		ret = rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
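
/*
 * Hedged usage sketch (user-space side, not part of this driver): the flow
 * control ioctls above are expected to be driven roughly as below; the fd and
 * device path are illustrative only.
 *
 *	int fd = open("/dev/rpmsg0", O_RDWR);
 *	int restricted;
 *
 *	// ask the remote side to throttle/resume its transmissions
 *	ioctl(fd, RPMSG_SET_INCOMING_FLOWCONTROL, 1);
 *	ioctl(fd, RPMSG_SET_INCOMING_FLOWCONTROL, 0);
 *
 *	// after poll() reports EPOLLPRI, read the remote's latest request
 *	ioctl(fd, RPMSG_GET_OUTGOING_FLOWCONTROL, &restricted);
 */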

static const struct file_operations rpmsg_eptdev_fops = {
	.owner = THIS_MODULE,
	.open = rpmsg_eptdev_open,
	.release = rpmsg_eptdev_release,
	.read_iter = rpmsg_eptdev_read_iter,
	.write_iter = rpmsg_eptdev_write_iter,
	.poll = rpmsg_eptdev_poll,
	.unlocked_ioctl = rpmsg_eptdev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
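
/*
 * Hedged usage sketch (user-space side, assuming a /dev/rpmsgN node created by
 * this driver and a remote service that answers; names are illustrative only):
 *
 *	char buf[256];
 *	struct pollfd pfd;
 *	int fd = open("/dev/rpmsg0", O_RDWR);
 *
 *	write(fd, "ping", 4);		// rpmsg_eptdev_write_iter()
 *	pfd.fd = fd;
 *	pfd.events = POLLIN;
 *	poll(&pfd, 1, -1);		// rpmsg_eptdev_poll()
 *	read(fd, buf, sizeof(buf));	// one rpmsg message per read
 *	close(fd);			// rpmsg_eptdev_release()
 */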

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", eptdev->chinfo.name);
}
static DEVICE_ATTR_RO(name);

static ssize_t src_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.src);
}
static DEVICE_ATTR_RO(src);

static ssize_t dst_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.dst);
}
static DEVICE_ATTR_RO(dst);

static struct attribute *rpmsg_eptdev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_src.attr,
	&dev_attr_dst.attr,
	NULL
};
ATTRIBUTE_GROUPS(rpmsg_eptdev);

static void rpmsg_eptdev_release_device(struct device *dev)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	ida_free(&rpmsg_ept_ida, dev->id);
	ida_free(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
	kfree(eptdev);
}

static struct rpmsg_eptdev *rpmsg_chrdev_eptdev_alloc(struct rpmsg_device *rpdev,
						      struct device *parent)
{
	struct rpmsg_eptdev *eptdev;
	struct device *dev;

	eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL);
	if (!eptdev)
		return ERR_PTR(-ENOMEM);

	dev = &eptdev->dev;
	eptdev->rpdev = rpdev;

	mutex_init(&eptdev->ept_lock);
	spin_lock_init(&eptdev->queue_lock);
	skb_queue_head_init(&eptdev->queue);
	init_waitqueue_head(&eptdev->readq);

	device_initialize(dev);
	dev->class = &rpmsg_class;
	dev->parent = parent;
	dev->groups = rpmsg_eptdev_groups;
	dev_set_drvdata(dev, eptdev);

	cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops);
	eptdev->cdev.owner = THIS_MODULE;

	return eptdev;
}

static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_channel_info chinfo)
{
	struct device *dev = &eptdev->dev;
	int ret;

	eptdev->chinfo = chinfo;

	ret = ida_alloc_max(&rpmsg_minor_ida, RPMSG_DEV_MAX - 1, GFP_KERNEL);
	if (ret < 0)
		goto free_eptdev;
	dev->devt = MKDEV(MAJOR(rpmsg_major), ret);

	ret = ida_alloc(&rpmsg_ept_ida, GFP_KERNEL);
	if (ret < 0)
		goto free_minor_ida;
	dev->id = ret;
	dev_set_name(dev, "rpmsg%d", ret);

	ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
	if (ret)
		goto free_ept_ida;

	/* We can now rely on the release function for cleanup */
	dev->release = rpmsg_eptdev_release_device;

	return ret;

free_ept_ida:
	ida_free(&rpmsg_ept_ida, dev->id);
free_minor_ida:
	ida_free(&rpmsg_minor_ida, MINOR(dev->devt));
free_eptdev:
	put_device(dev);
	kfree(eptdev);

	return ret;
}

int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
			       struct rpmsg_channel_info chinfo)
{
	struct rpmsg_eptdev *eptdev;

	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
	if (IS_ERR(eptdev))
		return PTR_ERR(eptdev);

	return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);
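
/*
 * Hedged usage sketch (another kernel driver exposing an endpoint as a char
 * device through this helper; the service name and addresses are illustrative
 * only):
 *
 *	struct rpmsg_channel_info chinfo = {
 *		.name = "my-service",
 *		.src = RPMSG_ADDR_ANY,
 *		.dst = RPMSG_ADDR_ANY,
 *	};
 *
 *	ret = rpmsg_chrdev_eptdev_create(rpdev, &rpdev->dev, chinfo);
 */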

static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
{
	struct rpmsg_channel_info chinfo;
	struct rpmsg_eptdev *eptdev;
	struct device *dev = &rpdev->dev;

	memcpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
	chinfo.src = rpdev->src;
	chinfo.dst = rpdev->dst;

	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, dev);
	if (IS_ERR(eptdev))
		return PTR_ERR(eptdev);

	/* Set the default_ept to the rpmsg device endpoint */
	eptdev->default_ept = rpdev->ept;

	/*
	 * The rpmsg_ept_cb uses the *priv parameter to get its rpmsg_eptdev context.
	 * Store it in the default_ept *priv field.
	 */
	eptdev->default_ept->priv = eptdev;

	return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}

static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
{
	int ret;

	ret = device_for_each_child(&rpdev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
	if (ret)
		dev_warn(&rpdev->dev, "failed to destroy endpoints: %d\n", ret);
}

static struct rpmsg_device_id rpmsg_chrdev_id_table[] = {
	{ .name = "rpmsg-raw" },
	{ },
};

static struct rpmsg_driver rpmsg_chrdev_driver = {
	.probe = rpmsg_chrdev_probe,
	.remove = rpmsg_chrdev_remove,
	.callback = rpmsg_ept_cb,
	.id_table = rpmsg_chrdev_id_table,
	.drv.name = "rpmsg_chrdev",
};

static int rpmsg_chrdev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_char");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		return ret;
	}

	ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
	if (ret < 0) {
		pr_err("rpmsg: failed to register rpmsg raw driver\n");
		goto free_region;
	}

	return 0;

free_region:
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);

	return ret;
}
postcore_initcall(rpmsg_chrdev_init);

static void rpmsg_chrdev_exit(void)
{
	unregister_rpmsg_driver(&rpmsg_chrdev_driver);
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
module_exit(rpmsg_chrdev_exit);

MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_DESCRIPTION("RPMSG device interface");
MODULE_LICENSE("GPL v2");