// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2017, Linaro Ltd
 */

#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rpmsg.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/mailbox_client.h>

#include "rpmsg_internal.h"
#include "qcom_glink_native.h"

#define CREATE_TRACE_POINTS
#include "qcom_glink_trace.h"
#define GLINK_NAME_SIZE		32
#define GLINK_VERSION_1		1

#define RPM_GLINK_CID_MIN	1
#define RPM_GLINK_CID_MAX	65536

struct glink_msg {
	/* New members MUST be added within the __struct_group() macro below. */
	__struct_group(glink_msg_hdr, hdr, __packed,
		__le16 cmd;
		__le16 param1;
		__le32 param2;
	);
	u8 data[];
} __packed;
static_assert(offsetof(struct glink_msg, data) == sizeof(struct glink_msg_hdr),
	      "struct member likely outside of __struct_group()");
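/*
 * Every G-Link command starts with this 8-byte header (cmd/param1/param2,
 * all little-endian); command-specific payload such as a channel name or a
 * data chunk follows in data[].
 */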
/**
 * struct glink_defer_cmd - deferred incoming control message
 * @node: list node
 * @msg: message header
 * @data: payload of the message
 *
 * Copy of a received control message, to be added to @rx_queue and processed
 * by @rx_work of @qcom_glink.
 */
struct glink_defer_cmd {
	struct list_head node;

	struct glink_msg_hdr msg;
	u8 data[];
};
/**
 * struct glink_core_rx_intent - RX intent
 * @data: pointer to the data (may be NULL for zero-copy)
 * @id: remote or local intent ID
 * @size: size of the original intent (do not modify)
 * @reuse: To mark if the intent can be reused after first use
 * @in_use: To mark if intent is already in use for the channel
 * @offset: next write offset (initially 0)
 * @node: list node
 */
struct glink_core_rx_intent {
	void *data;
	u32 id;
	size_t size;
	bool reuse;
	bool in_use;
	u32 offset;

	struct list_head node;
};
/**
 * struct qcom_glink - driver context, relates to one remote subsystem
 * @dev: reference to the associated struct device
 * @label: identifier of the glink edge
 * @rx_pipe: pipe object for receive FIFO
 * @tx_pipe: pipe object for transmit FIFO
 * @rx_work: worker for handling received control messages
 * @rx_lock: protects the @rx_queue
 * @rx_queue: queue of received control messages to be processed in @rx_work
 * @tx_lock: synchronizes operations on the tx fifo
 * @idr_lock: synchronizes @lcids and @rcids modifications
 * @lcids: idr of all channels with a known local channel id
 * @rcids: idr of all channels with a known remote channel id
 * @features: remote features
 * @intentless: flag to indicate that there is no intent
 * @tx_avail_notify: Waitqueue for pending tx tasks
 * @sent_read_notify: flag to check cmd sent or not
 * @abort_tx: flag indicating that all tx attempts should fail
 */
struct qcom_glink {
	struct device *dev;

	const char *label;

	struct qcom_glink_pipe *rx_pipe;
	struct qcom_glink_pipe *tx_pipe;

	struct work_struct rx_work;
	spinlock_t rx_lock;
	struct list_head rx_queue;

	spinlock_t tx_lock;

	spinlock_t idr_lock;
	struct idr lcids;
	struct idr rcids;
	unsigned long features;

	bool intentless;
	wait_queue_head_t tx_avail_notify;
	bool sent_read_notify;

	bool abort_tx;
};
/**
 * struct glink_channel - internal representation of a channel
 * @rpdev: rpdev reference, only used for primary endpoints
 * @ept: rpmsg endpoint this channel is associated with
 * @glink: qcom_glink context handle
 * @refcount: refcount for the channel object
 * @recv_lock: guard for @ept.cb
 * @name: unique channel name/identifier
 * @lcid: channel id, in local space
 * @rcid: channel id, in remote space
 * @intent_lock: lock for protection of @liids, @riids
 * @liids: idr of all local intents
 * @riids: idr of all remote intents
 * @intent_work: worker responsible for transmitting rx_done packets
 * @done_intents: list of intents that needs to be announced rx_done
 * @buf: receive buffer, for gathering fragments
 * @buf_offset: write offset in @buf
 * @buf_size: size of current @buf
 * @open_ack: completed once remote has acked the open-request
 * @open_req: completed once open-request has been received
 * @intent_req_lock: Synchronises multiple intent requests
 * @intent_req_result: Result of intent request
 * @intent_received: flag indicating that an intent has been received
 * @intent_req_wq: wait queue for intent_req signalling
 */
struct glink_channel {
	struct rpmsg_endpoint ept;

	struct rpmsg_device *rpdev;
	struct qcom_glink *glink;

	struct kref refcount;

	spinlock_t recv_lock;

	char *name;
	unsigned int lcid;
	unsigned int rcid;

	spinlock_t intent_lock;
	struct idr liids;
	struct idr riids;

	struct work_struct intent_work;
	struct list_head done_intents;

	struct glink_core_rx_intent *buf;
	int buf_offset;
	int buf_size;

	struct completion open_ack;
	struct completion open_req;

	struct mutex intent_req_lock;
	int intent_req_result;
	bool intent_received;
	wait_queue_head_t intent_req_wq;
};
#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)

static const struct rpmsg_endpoint_ops glink_endpoint_ops;

#define GLINK_CMD_VERSION		0
#define GLINK_CMD_VERSION_ACK		1
#define GLINK_CMD_OPEN			2
#define GLINK_CMD_CLOSE			3
#define GLINK_CMD_OPEN_ACK		4
#define GLINK_CMD_INTENT		5
#define GLINK_CMD_RX_DONE		6
#define GLINK_CMD_RX_INTENT_REQ		7
#define GLINK_CMD_RX_INTENT_REQ_ACK	8
#define GLINK_CMD_TX_DATA		9
#define GLINK_CMD_CLOSE_ACK		11
#define GLINK_CMD_TX_DATA_CONT		12
#define GLINK_CMD_READ_NOTIF		13
#define GLINK_CMD_RX_DONE_W_REUSE	14
#define GLINK_CMD_SIGNALS		15

#define GLINK_FEATURE_INTENTLESS	BIT(1)

#define NATIVE_DTR_SIG			NATIVE_DSR_SIG
#define NATIVE_DSR_SIG			BIT(31)
#define NATIVE_RTS_SIG			NATIVE_CTS_SIG
#define NATIVE_CTS_SIG			BIT(30)
static void qcom_glink_rx_done_work(struct work_struct *work);
static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
						      const char *name)
{
	struct glink_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	/* Setup glink internal glink_channel data */
	spin_lock_init(&channel->recv_lock);
	spin_lock_init(&channel->intent_lock);
	mutex_init(&channel->intent_req_lock);

	channel->glink = glink;
	channel->name = kstrdup(name, GFP_KERNEL);
	if (!channel->name) {
		kfree(channel);
		return ERR_PTR(-ENOMEM);
	}

	init_completion(&channel->open_req);
	init_completion(&channel->open_ack);
	init_waitqueue_head(&channel->intent_req_wq);

	INIT_LIST_HEAD(&channel->done_intents);
	INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);

	idr_init(&channel->liids);
	idr_init(&channel->riids);
	kref_init(&channel->refcount);

	return channel;
}
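/*
 * The reference installed by kref_init() above is the channel's allocation
 * reference and is eventually dropped via qcom_glink_channel_release();
 * additional holders (e.g. qcom_glink_send_open_req()) take their own
 * kref_get()/kref_put() pair.
 */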
static void qcom_glink_channel_release(struct kref *ref)
{
	struct glink_channel *channel = container_of(ref, struct glink_channel,
						     refcount);
	struct glink_core_rx_intent *intent;
	struct glink_core_rx_intent *tmp;
	unsigned long flags;
	int iid;

	/* cancel pending rx_done work */
	cancel_work_sync(&channel->intent_work);

	spin_lock_irqsave(&channel->intent_lock, flags);
	/* Free all non-reuse intents pending rx_done work */
	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
		if (!intent->reuse) {
			kfree(intent->data);
			kfree(intent);
		}
	}

	idr_for_each_entry(&channel->liids, tmp, iid) {
		kfree(tmp->data);
		kfree(tmp);
	}
	idr_destroy(&channel->liids);

	idr_for_each_entry(&channel->riids, tmp, iid)
		kfree(tmp);
	idr_destroy(&channel->riids);
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	kfree(channel->name);
	kfree(channel);
}
static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
{
	return glink->rx_pipe->avail(glink->rx_pipe);
}

static void qcom_glink_rx_peek(struct qcom_glink *glink,
			       void *data, unsigned int offset, size_t count)
{
	glink->rx_pipe->peek(glink->rx_pipe, data, offset, count);
}

static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
{
	glink->rx_pipe->advance(glink->rx_pipe, count);
}

static size_t qcom_glink_tx_avail(struct qcom_glink *glink)
{
	return glink->tx_pipe->avail(glink->tx_pipe);
}

static void qcom_glink_tx_write(struct qcom_glink *glink,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
}

static void qcom_glink_tx_kick(struct qcom_glink *glink)
{
	glink->tx_pipe->kick(glink->tx_pipe);
}
static void qcom_glink_send_read_notify(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(GLINK_CMD_READ_NOTIF);
	msg.param1 = 0;
	msg.param2 = 0;

	qcom_glink_tx_write(glink, &msg, sizeof(msg), NULL, 0);

	qcom_glink_tx_kick(glink);
}
static int qcom_glink_tx(struct qcom_glink *glink,
			 const void *hdr, size_t hlen,
			 const void *data, size_t dlen, bool wait)
{
	unsigned int tlen = hlen + dlen;
	unsigned long flags;
	int ret = 0;

	/* Reject packets that are too big */
	if (tlen >= glink->tx_pipe->length)
		return -EINVAL;

	spin_lock_irqsave(&glink->tx_lock, flags);

	if (glink->abort_tx) {
		ret = -EIO;
		goto out;
	}

	while (qcom_glink_tx_avail(glink) < tlen) {
		if (!wait) {
			ret = -EAGAIN;
			goto out;
		}

		if (glink->abort_tx) {
			ret = -EIO;
			goto out;
		}

		if (!glink->sent_read_notify) {
			glink->sent_read_notify = true;
			qcom_glink_send_read_notify(glink);
		}

		/* Wait without holding the tx_lock */
		spin_unlock_irqrestore(&glink->tx_lock, flags);

		wait_event_timeout(glink->tx_avail_notify,
				   qcom_glink_tx_avail(glink) >= tlen, 10 * HZ);

		spin_lock_irqsave(&glink->tx_lock, flags);

		if (qcom_glink_tx_avail(glink) >= tlen)
			glink->sent_read_notify = false;
	}

	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
	qcom_glink_tx_kick(glink);

out:
	spin_unlock_irqrestore(&glink->tx_lock, flags);

	return ret;
}
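/*
 * TX flow control: when the TX FIFO is full, a single READ_NOTIF command is
 * sent and the writer sleeps on tx_avail_notify until the remote reports
 * that it has consumed data (or the 10 second timeout expires), then
 * re-checks the available space.
 */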
static int qcom_glink_send_version(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(GLINK_CMD_VERSION);
	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
	msg.param2 = cpu_to_le32(glink->features);

	trace_qcom_glink_cmd_version_tx(glink->label, GLINK_VERSION_1, glink->features);

	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
static void qcom_glink_send_version_ack(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(GLINK_CMD_VERSION_ACK);
	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
	msg.param2 = cpu_to_le32(glink->features);

	trace_qcom_glink_cmd_version_ack_tx(glink->label, msg.param1, msg.param2);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
static void qcom_glink_send_open_ack(struct qcom_glink *glink,
				     struct glink_channel *channel)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(GLINK_CMD_OPEN_ACK);
	msg.param1 = cpu_to_le16(channel->rcid);
	msg.param2 = cpu_to_le32(0);

	trace_qcom_glink_cmd_open_ack_tx(glink->label, channel->name,
					 channel->lcid, channel->rcid);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
					     unsigned int cid, bool granted)
{
	struct glink_channel *channel;
	unsigned long flags;

	qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	trace_qcom_glink_cmd_rx_intent_req_ack_rx(glink->label,
						  channel ? channel->name : NULL,
						  channel ? channel->lcid : 0,
						  cid, granted);
	if (!channel) {
		dev_err(glink->dev, "unable to find channel\n");
		return;
	}

	WRITE_ONCE(channel->intent_req_result, granted);
	wake_up_all(&channel->intent_req_wq);
}
static void qcom_glink_intent_req_abort(struct glink_channel *channel)
{
	WRITE_ONCE(channel->intent_req_result, 0);
	wake_up_all(&channel->intent_req_wq);
}
/**
 * qcom_glink_send_open_req() - send a GLINK_CMD_OPEN request to the remote
 * @glink: Ptr to the glink edge
 * @channel: Ptr to the channel that the open req is sent
 *
 * Allocates a local channel id and sends a GLINK_CMD_OPEN message to the remote.
 * Will return with refcount held, regardless of outcome.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qcom_glink_send_open_req(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	DEFINE_RAW_FLEX(struct glink_msg, req, data, GLINK_NAME_SIZE);
	int name_len = strlen(channel->name) + 1;
	int req_len = ALIGN(sizeof(*req) + name_len, 8);
	unsigned long flags;
	int ret;

	kref_get(&channel->refcount);

	spin_lock_irqsave(&glink->idr_lock, flags);
	ret = idr_alloc_cyclic(&glink->lcids, channel,
			       RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX,
			       GFP_ATOMIC);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (ret < 0)
		return ret;

	channel->lcid = ret;

	req->cmd = cpu_to_le16(GLINK_CMD_OPEN);
	req->param1 = cpu_to_le16(channel->lcid);
	req->param2 = cpu_to_le32(name_len);
	strcpy(req->data, channel->name);

	trace_qcom_glink_cmd_open_tx(glink->label, channel->name,
				     channel->lcid, channel->rcid);

	ret = qcom_glink_tx(glink, req, req_len, NULL, 0, true);
	if (ret)
		goto remove_idr;

	return 0;

remove_idr:
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	return ret;
}
static void qcom_glink_send_close_req(struct qcom_glink *glink,
				      struct glink_channel *channel)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(GLINK_CMD_CLOSE);
	req.param1 = cpu_to_le16(channel->lcid);
	req.param2 = 0;

	trace_qcom_glink_cmd_close_tx(glink->label, channel->name,
				      channel->lcid, channel->rcid);

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}
static void qcom_glink_send_close_ack(struct qcom_glink *glink,
				      struct glink_channel *channel)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK);
	req.param1 = cpu_to_le16(channel->rcid);
	req.param2 = 0;

	trace_qcom_glink_cmd_close_ack_tx(glink->label, channel->name,
					  channel->lcid, channel->rcid);

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}
static void qcom_glink_rx_done_work(struct work_struct *work)
{
	struct glink_channel *channel = container_of(work, struct glink_channel,
						     intent_work);
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent, *tmp;
	struct {
		u16 id;
		u16 lcid;
		u32 liid;
	} __packed cmd;

	unsigned int cid = channel->lcid;
	unsigned int iid;
	bool reuse;
	unsigned long flags;

	spin_lock_irqsave(&channel->intent_lock, flags);
	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
		list_del(&intent->node);
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		iid = intent->id;
		reuse = intent->reuse;

		cmd.id = reuse ? GLINK_CMD_RX_DONE_W_REUSE : GLINK_CMD_RX_DONE;
		cmd.lcid = cid;
		cmd.liid = iid;

		trace_qcom_glink_cmd_rx_done_tx(glink->label, channel->name,
						channel->lcid, channel->rcid, cmd.liid, reuse);

		qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
		if (!reuse) {
			kfree(intent->data);
			kfree(intent);
		}
		spin_lock_irqsave(&channel->intent_lock, flags);
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);
}
static void qcom_glink_rx_done(struct qcom_glink *glink,
			       struct glink_channel *channel,
			       struct glink_core_rx_intent *intent)
{
	/* We don't send RX_DONE to intentless systems */
	if (glink->intentless) {
		kfree(intent->data);
		kfree(intent);
		return;
	}

	/* Take it off the tree of receive intents */
	if (!intent->reuse) {
		spin_lock(&channel->intent_lock);
		idr_remove(&channel->liids, intent->id);
		spin_unlock(&channel->intent_lock);
	}

	/* Schedule the sending of a rx_done indication */
	spin_lock(&channel->intent_lock);
	list_add_tail(&intent->node, &channel->done_intents);
	spin_unlock(&channel->intent_lock);

	schedule_work(&channel->intent_work);
}
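/*
 * Receive-intent lifecycle: an intent advertised by this side is filled by
 * incoming TX_DATA chunks, and once the payload has been delivered the
 * intent is queued on done_intents so intent_work can report RX_DONE (or
 * RX_DONE_W_REUSE for reusable intents) back to the remote.
 */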
/**
 * qcom_glink_receive_version() - receive version/features from remote system
 *
 * @glink:	pointer to transport interface
 * @version:	remote version
 * @features:	remote features
 *
 * This function is called in response to a remote-initiated version/feature
 * negotiation sequence.
 */
static void qcom_glink_receive_version(struct qcom_glink *glink,
				       u32 version,
				       u32 features)
{
	trace_qcom_glink_cmd_version_rx(glink->label, version, features);

	switch (version) {
	case 0:
		break;
	case GLINK_VERSION_1:
		glink->features &= features;
		fallthrough;
	default:
		qcom_glink_send_version_ack(glink);
		break;
	}
}
/**
 * qcom_glink_receive_version_ack() - receive negotiation ack from remote system
 *
 * @glink:	pointer to transport interface
 * @version:	remote version response
 * @features:	remote features response
 *
 * This function is called in response to a local-initiated version/feature
 * negotiation sequence and is the counter-offer from the remote side based
 * upon the initial version and feature set requested.
 */
static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
					   u32 version,
					   u32 features)
{
	trace_qcom_glink_cmd_version_ack_rx(glink->label, version, features);

	switch (version) {
	case 0:
		/* Version negotiation failed */
		break;
	case GLINK_VERSION_1:
		if (features == glink->features)
			break;

		glink->features &= features;
		fallthrough;
	default:
		qcom_glink_send_version(glink);
		break;
	}
}
/**
 * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to
 *				      wire format and transmit
 * @glink:	The transport to transmit on.
 * @channel:	The glink channel
 * @granted:	The request response to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
					  struct glink_channel *channel,
					  bool granted)
{
	struct glink_msg msg;

	trace_qcom_glink_cmd_rx_intent_req_ack_tx(glink->label, channel->name,
						  channel->lcid, channel->rcid,
						  granted);

	msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK);
	msg.param1 = cpu_to_le16(channel->lcid);
	msg.param2 = cpu_to_le32(granted);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);

	return 0;
}
/**
 * qcom_glink_advertise_intent - convert an rx intent cmd to wire format and
 *				 transmit
 * @glink:	The transport to transmit on.
 * @channel:	The local channel
 * @intent:	The intent to pass on to remote.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qcom_glink_advertise_intent(struct qcom_glink *glink,
				       struct glink_channel *channel,
				       struct glink_core_rx_intent *intent)
{
	struct command {
		__le16 id;
		__le16 lcid;
		__le32 count;
		__le32 size;
		__le32 liid;
	} __packed;
	struct command cmd;

	cmd.id = cpu_to_le16(GLINK_CMD_INTENT);
	cmd.lcid = cpu_to_le16(channel->lcid);
	cmd.count = cpu_to_le32(1);
	cmd.size = cpu_to_le32(intent->size);
	cmd.liid = cpu_to_le32(intent->id);

	trace_qcom_glink_cmd_intent_tx(glink->label, channel->name,
				       channel->lcid, channel->rcid,
				       cmd.count, cmd.size, cmd.liid);

	qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);

	return 0;
}
static struct glink_core_rx_intent *
qcom_glink_alloc_intent(struct qcom_glink *glink,
			struct glink_channel *channel,
			size_t size,
			bool reuseable)
{
	struct glink_core_rx_intent *intent;
	int ret;
	unsigned long flags;

	intent = kzalloc(sizeof(*intent), GFP_KERNEL);
	if (!intent)
		return NULL;

	intent->data = kzalloc(size, GFP_KERNEL);
	if (!intent->data)
		goto free_intent;

	spin_lock_irqsave(&channel->intent_lock, flags);
	ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		goto free_data;
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	intent->id = ret;
	intent->size = size;
	intent->reuse = reuseable;

	return intent;

free_data:
	kfree(intent->data);
free_intent:
	kfree(intent);
	return NULL;
}
static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
				      u32 cid, uint32_t iid,
				      bool reuse)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	unsigned long flags;

	qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	trace_qcom_glink_cmd_rx_done_rx(glink->label, channel ? channel->name : NULL,
					channel ? channel->lcid : 0, cid, iid, reuse);
	if (!channel) {
		dev_err(glink->dev, "invalid channel id received\n");
		return;
	}

	spin_lock_irqsave(&channel->intent_lock, flags);
	intent = idr_find(&channel->riids, iid);

	if (!intent) {
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		dev_err(glink->dev, "invalid intent id received\n");
		return;
	}

	intent->in_use = false;

	if (!reuse) {
		idr_remove(&channel->riids, intent->id);
		kfree(intent);
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	if (reuse) {
		WRITE_ONCE(channel->intent_received, true);
		wake_up_all(&channel->intent_req_wq);
	}
}
/**
 * qcom_glink_handle_intent_req() - Receive a request for rx_intent
 *				    from remote side
 * @glink:	Pointer to the transport interface
 * @cid:	Remote channel ID
 * @size:	size of the intent
 *
 * The function searches for the local channel to which the request for
 * rx_intent has arrived and allocates and notifies the remote back
 */
static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
					 u32 cid, size_t size)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	trace_qcom_glink_cmd_rx_intent_req_rx(glink->label,
					      channel ? channel->name : NULL,
					      channel ? channel->lcid : 0,
					      cid, size);
	if (!channel) {
		pr_err("%s channel not found for cid %d\n", __func__, cid);
		return;
	}

	intent = qcom_glink_alloc_intent(glink, channel, size, false);
	if (intent)
		qcom_glink_advertise_intent(glink, channel, intent);

	qcom_glink_send_intent_req_ack(glink, channel, !!intent);
}
static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
{
	struct glink_defer_cmd *dcmd;

	extra = ALIGN(extra, 8);

	if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
		dev_dbg(glink->dev, "Insufficient data in rx fifo");
		return -ENXIO;
	}

	dcmd = kzalloc(struct_size(dcmd, data, extra), GFP_ATOMIC);
	if (!dcmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&dcmd->node);

	qcom_glink_rx_peek(glink,
			   container_of(&dcmd->msg, struct glink_msg, hdr), 0,
			   sizeof(dcmd->msg) + extra);

	spin_lock(&glink->rx_lock);
	list_add_tail(&dcmd->node, &glink->rx_queue);
	spin_unlock(&glink->rx_lock);

	schedule_work(&glink->rx_work);
	qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra);

	return 0;
}
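/*
 * Control commands whose handlers may sleep (version negotiation,
 * open/close, intent requests) are copied off the RX FIFO here and handled
 * later in qcom_glink_work(), since qcom_glink_native_rx() is typically
 * called from the transport's interrupt handler.
 */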
static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	struct {
		struct glink_msg_hdr msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed hdr;
	unsigned int chunk_size;
	unsigned int left_size;
	unsigned int rcid;
	unsigned int liid;
	int ret = 0;
	unsigned long flags;

	if (avail < sizeof(hdr)) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return -EAGAIN;
	}

	qcom_glink_rx_peek(glink, &hdr, 0, sizeof(hdr));
	chunk_size = le32_to_cpu(hdr.chunk_size);
	left_size = le32_to_cpu(hdr.left_size);

	if (avail < sizeof(hdr) + chunk_size) {
		dev_dbg(glink->dev, "Payload not yet in fifo\n");
		return -EAGAIN;
	}

	rcid = le16_to_cpu(hdr.msg.param1);
	liid = le32_to_cpu(hdr.msg.param2);
	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	trace_qcom_glink_cmd_tx_data_rx(glink->label, channel ? channel->name : NULL,
					channel ? channel->lcid : 0, rcid,
					liid, chunk_size, left_size,
					hdr.msg.cmd == GLINK_CMD_TX_DATA_CONT);
	if (!channel) {
		dev_dbg(glink->dev, "Data on non-existing channel\n");

		/* Drop the message */
		goto advance_rx;
	}

	if (glink->intentless) {
		/* Might have an ongoing, fragmented, message to append */
		if (!channel->buf) {
			intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
			if (!intent)
				return -ENOMEM;

			intent->data = kmalloc(chunk_size + left_size,
					       GFP_ATOMIC);
			if (!intent->data) {
				kfree(intent);
				return -ENOMEM;
			}

			intent->id = 0xbabababa;
			intent->size = chunk_size + left_size;
			intent->offset = 0;

			channel->buf = intent;
		} else {
			intent = channel->buf;
		}
	} else {
		spin_lock_irqsave(&channel->intent_lock, flags);
		intent = idr_find(&channel->liids, liid);
		spin_unlock_irqrestore(&channel->intent_lock, flags);

		if (!intent) {
			dev_err(glink->dev,
				"no intent found for channel %s intent %d",
				channel->name, liid);
			ret = -ENOENT;
			goto advance_rx;
		}
	}

	if (intent->size - intent->offset < chunk_size) {
		dev_err(glink->dev, "Insufficient space in intent\n");

		/* The packet header lied, drop payload */
		goto advance_rx;
	}

	qcom_glink_rx_peek(glink, intent->data + intent->offset,
			   sizeof(hdr), chunk_size);
	intent->offset += chunk_size;

	/* Handle message when no fragments remain to be received */
	if (!left_size) {
		spin_lock(&channel->recv_lock);
		if (channel->ept.cb) {
			channel->ept.cb(channel->ept.rpdev,
					intent->data,
					intent->offset,
					channel->ept.priv,
					RPMSG_ADDR_ANY);
		}
		spin_unlock(&channel->recv_lock);

		intent->offset = 0;
		channel->buf = NULL;

		qcom_glink_rx_done(glink, channel, intent);
	}

advance_rx:
	qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));

	return ret;
}
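/*
 * TX_DATA/TX_DATA_CONT reassembly: each chunk is appended at intent->offset
 * and the message is only delivered to the endpoint callback once left_size
 * reaches zero, i.e. when the final fragment has arrived.
 */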
static void qcom_glink_rx_read_notif(struct qcom_glink *glink)
{
	trace_qcom_glink_cmd_read_notif_rx(glink->label);

	qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
	qcom_glink_tx_kick(glink);
}
static void qcom_glink_handle_intent(struct qcom_glink *glink,
				     unsigned int cid,
				     unsigned int count,
				     size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	struct intent_pair {
		__le32 size;
		__le32 iid;
	};

	struct {
		struct glink_msg_hdr msg;
		struct intent_pair intents[];
	} __packed * msg;

	const size_t msglen = struct_size(msg, intents, count);
	int ret;
	int i;
	unsigned long flags;

	if (avail < msglen) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return;
	}

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, cid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		trace_qcom_glink_cmd_intent_rx(glink->label, NULL, 0, cid, count, 0, 0);
		dev_err(glink->dev, "intents for non-existing channel\n");
		qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
		return;
	}

	msg = kmalloc(msglen, GFP_ATOMIC);
	if (!msg)
		return;

	qcom_glink_rx_peek(glink, msg, 0, msglen);

	trace_qcom_glink_cmd_intent_rx(glink->label, channel->name,
				       channel->lcid, cid, count,
				       count > 0 ? msg->intents[0].size : 0,
				       count > 0 ? msg->intents[0].iid : 0);

	for (i = 0; i < count; ++i) {
		intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
		if (!intent)
			break;

		intent->id = le32_to_cpu(msg->intents[i].iid);
		intent->size = le32_to_cpu(msg->intents[i].size);

		spin_lock_irqsave(&channel->intent_lock, flags);
		ret = idr_alloc(&channel->riids, intent,
				intent->id, intent->id + 1, GFP_ATOMIC);
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		if (ret < 0)
			dev_err(glink->dev, "failed to store remote intent\n");
	}

	WRITE_ONCE(channel->intent_received, true);
	wake_up_all(&channel->intent_req_wq);

	kfree(msg);
	qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
}
static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;

	qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));

	spin_lock(&glink->idr_lock);
	channel = idr_find(&glink->lcids, lcid);
	spin_unlock(&glink->idr_lock);

	trace_qcom_glink_cmd_open_ack_rx(glink->label, channel ? channel->name : NULL,
					 lcid, channel ? channel->rcid : 0);
	if (!channel) {
		dev_err(glink->dev, "Invalid open ack packet\n");
		return -EINVAL;
	}

	complete_all(&channel->open_ack);

	return 0;
}
/**
 * qcom_glink_set_flow_control() - convert a signal cmd to wire format and transmit
 * @ept:	Rpmsg endpoint for channel.
 * @pause:	Pause transmission
 * @dst:	destination address of the endpoint
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qcom_glink_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
{
	struct glink_channel *channel = to_glink_channel(ept);
	struct qcom_glink *glink = channel->glink;
	struct glink_msg msg;
	u32 sigs = 0;

	if (pause)
		sigs |= NATIVE_DTR_SIG | NATIVE_RTS_SIG;

	msg.cmd = cpu_to_le16(GLINK_CMD_SIGNALS);
	msg.param1 = cpu_to_le16(channel->lcid);
	msg.param2 = cpu_to_le32(sigs);

	trace_qcom_glink_cmd_signal_tx(glink->label, channel->name,
				       channel->lcid, channel->rcid, sigs);

	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
static void qcom_glink_handle_signals(struct qcom_glink *glink,
				      unsigned int rcid, unsigned int sigs)
{
	struct glink_channel *channel;
	unsigned long flags;
	bool enable;

	qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	trace_qcom_glink_cmd_signal_rx(glink->label, channel ? channel->name : NULL,
				       channel ? channel->lcid : 0, rcid, sigs);
	if (!channel) {
		dev_err(glink->dev, "signal for non-existing channel\n");
		return;
	}

	enable = sigs & NATIVE_DSR_SIG || sigs & NATIVE_CTS_SIG;

	if (channel->ept.flow_cb)
		channel->ept.flow_cb(channel->ept.rpdev, channel->ept.priv, enable);
}
void qcom_glink_native_rx(struct qcom_glink *glink)
{
	struct glink_msg msg;
	unsigned int param1;
	unsigned int param2;
	unsigned int avail;
	unsigned int cmd;
	int ret = 0;

	/* To wakeup any blocking writers */
	wake_up_all(&glink->tx_avail_notify);

	for (;;) {
		avail = qcom_glink_rx_avail(glink);
		if (avail < sizeof(msg))
			break;

		qcom_glink_rx_peek(glink, &msg, 0, sizeof(msg));

		cmd = le16_to_cpu(msg.cmd);
		param1 = le16_to_cpu(msg.param1);
		param2 = le32_to_cpu(msg.param2);

		switch (cmd) {
		case GLINK_CMD_VERSION:
		case GLINK_CMD_VERSION_ACK:
		case GLINK_CMD_CLOSE:
		case GLINK_CMD_CLOSE_ACK:
		case GLINK_CMD_RX_INTENT_REQ:
			ret = qcom_glink_rx_defer(glink, 0);
			break;
		case GLINK_CMD_OPEN_ACK:
			ret = qcom_glink_rx_open_ack(glink, param1);
			break;
		case GLINK_CMD_OPEN:
			/* upper 16 bits of param2 are the "prio" field */
			ret = qcom_glink_rx_defer(glink, param2 & 0xffff);
			break;
		case GLINK_CMD_TX_DATA:
		case GLINK_CMD_TX_DATA_CONT:
			ret = qcom_glink_rx_data(glink, avail);
			break;
		case GLINK_CMD_READ_NOTIF:
			qcom_glink_rx_read_notif(glink);
			break;
		case GLINK_CMD_INTENT:
			qcom_glink_handle_intent(glink, param1, param2, avail);
			break;
		case GLINK_CMD_RX_DONE:
			qcom_glink_handle_rx_done(glink, param1, param2, false);
			break;
		case GLINK_CMD_RX_DONE_W_REUSE:
			qcom_glink_handle_rx_done(glink, param1, param2, true);
			break;
		case GLINK_CMD_RX_INTENT_REQ_ACK:
			qcom_glink_handle_intent_req_ack(glink, param1, param2);
			break;
		case GLINK_CMD_SIGNALS:
			qcom_glink_handle_signals(glink, param1, param2);
			break;
		default:
			dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
			ret = -EINVAL;
			break;
		}

		if (ret)
			break;
	}
}
EXPORT_SYMBOL(qcom_glink_native_rx);
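/*
 * Illustrative sketch (not part of this file): a transport glue driver is
 * expected to call qcom_glink_native_rx() whenever the remote kicks the RX
 * FIFO, e.g. from its interrupt handler:
 *
 *	static irqreturn_t my_glink_intr(int irq, void *data)
 *	{
 *		struct qcom_glink *glink = data;
 *
 *		qcom_glink_native_rx(glink);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * "my_glink_intr" is a hypothetical name used only for this example.
 */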
/* Locally initiated rpmsg_create_ept */
static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
						     const char *name)
{
	struct glink_channel *channel;
	int ret;
	unsigned long flags;

	channel = qcom_glink_alloc_channel(glink, name);
	if (IS_ERR(channel))
		return ERR_CAST(channel);

	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto release_channel;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret)
		goto err_timeout;

	ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
	if (!ret)
		goto err_timeout;

	qcom_glink_send_open_ack(glink, channel);

	return channel;

err_timeout:
	/* qcom_glink_send_open_req() did register the channel in lcids*/
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->lcids, channel->lcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

release_channel:
	/* Release qcom_glink_send_open_req() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);

	return ERR_PTR(-ETIMEDOUT);
}
/* Remote initiated rpmsg_create_ept */
static int qcom_glink_create_remote(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	int ret;

	qcom_glink_send_open_ack(glink, channel);

	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto close_link;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto close_link;
	}

	return 0;

close_link:
	/*
	 * Send a close request to "undo" our open-ack. The close-ack will
	 * release the qcom_glink_send_open_req() reference and the last
	 * reference will be released after receiving remote_close or
	 * transport unregister by calling qcom_glink_native_remove().
	 */
	qcom_glink_send_close_req(glink, channel);

	return ret;
}
static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
						    rpmsg_rx_cb_t cb,
						    void *priv,
						    struct rpmsg_channel_info
						    chinfo)
{
	struct glink_channel *parent = to_glink_channel(rpdev->ept);
	struct glink_channel *channel;
	struct qcom_glink *glink = parent->glink;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int cid;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->rcids, channel, cid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_create_local(glink, name);
		if (IS_ERR(channel))
			return NULL;
	} else {
		ret = qcom_glink_create_remote(glink, channel);
		if (ret)
			return NULL;
	}

	ept = &channel->ept;
	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &glink_endpoint_ops;

	return ept;
}
static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
{
	struct glink_channel *channel = to_glink_channel(rpdev->ept);
	struct device_node *np = rpdev->dev.of_node;
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent;
	const struct property *prop = NULL;
	__be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) };
	int num_intents;
	int num_groups = 1;
	__be32 *val = defaults;
	int size;

	if (glink->intentless || !completion_done(&channel->open_ack))
		return 0;

	prop = of_find_property(np, "qcom,intents", NULL);
	if (prop) {
		val = prop->value;
		num_groups = prop->length / sizeof(u32) / 2;
	}

	/* Channel is now open, advertise base set of intents */
	while (num_groups--) {
		size = be32_to_cpup(val++);
		num_intents = be32_to_cpup(val++);
		while (num_intents--) {
			intent = qcom_glink_alloc_intent(glink, channel, size,
							 true);
			if (!intent)
				break;

			qcom_glink_advertise_intent(glink, channel, intent);
		}
	}
	return 0;
}
static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct glink_channel *channel = to_glink_channel(ept);
	struct qcom_glink *glink = channel->glink;
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->ept.cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Decouple the potential rpdev from the channel */
	channel->rpdev = NULL;

	qcom_glink_send_close_req(glink, channel);
}
static int qcom_glink_request_intent(struct qcom_glink *glink,
				     struct glink_channel *channel,
				     size_t size)
{
	struct {
		u16 id;
		u16 cid;
		u32 size;
	} __packed cmd;
	int ret = 0;

	mutex_lock(&channel->intent_req_lock);

	WRITE_ONCE(channel->intent_req_result, -1);
	WRITE_ONCE(channel->intent_received, false);

	cmd.id = GLINK_CMD_RX_INTENT_REQ;
	cmd.cid = channel->lcid;
	cmd.size = size;

	trace_qcom_glink_cmd_rx_intent_req_tx(glink->label, channel->name,
					      channel->lcid, channel->rcid,
					      cmd.size);

	ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
	if (ret)
		goto unlock;

	ret = wait_event_timeout(channel->intent_req_wq,
				 READ_ONCE(channel->intent_req_result) == 0 ||
				 (READ_ONCE(channel->intent_req_result) > 0 &&
				  READ_ONCE(channel->intent_received)) ||
				 glink->abort_tx, 10 * HZ);
	if (!ret) {
		dev_err(glink->dev, "intent request timed out\n");
		ret = -ETIMEDOUT;
	} else if (glink->abort_tx) {
		ret = -ECANCELED;
	} else {
		ret = READ_ONCE(channel->intent_req_result) ? 0 : -EAGAIN;
	}

unlock:
	mutex_unlock(&channel->intent_req_lock);
	return ret;
}
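/*
 * A sender with no suitable remote intent blocks here until the remote
 * either grants the request (RX_INTENT_REQ_ACK followed by a new INTENT
 * command), rejects it, or the edge is being torn down (abort_tx).
 */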
static int __qcom_glink_send(struct glink_channel *channel,
			     void *data, int len, bool wait)
{
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent = NULL;
	struct glink_core_rx_intent *tmp;
	int iid = 0;
	struct {
		struct glink_msg_hdr msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed req;
	int ret;
	unsigned long flags;
	int chunk_size = len;
	size_t offset = 0;

	if (!glink->intentless) {
		while (!intent) {
			spin_lock_irqsave(&channel->intent_lock, flags);
			idr_for_each_entry(&channel->riids, tmp, iid) {
				if (tmp->size >= len && !tmp->in_use) {
					if (!intent)
						intent = tmp;
					else if (intent->size > tmp->size)
						intent = tmp;
					if (intent->size == len)
						break;
				}
			}
			if (intent)
				intent->in_use = true;
			spin_unlock_irqrestore(&channel->intent_lock, flags);

			/* We found an available intent */
			if (intent)
				break;

			if (!wait)
				return -EBUSY;

			ret = qcom_glink_request_intent(glink, channel, len);
			if (ret < 0)
				return ret;
		}

		iid = intent->id;
	}

	while (offset < len) {
		chunk_size = len - offset;
		if (chunk_size > SZ_8K && wait)
			chunk_size = SZ_8K;

		req.msg.cmd = cpu_to_le16(offset == 0 ? GLINK_CMD_TX_DATA : GLINK_CMD_TX_DATA_CONT);
		req.msg.param1 = cpu_to_le16(channel->lcid);
		req.msg.param2 = cpu_to_le32(iid);
		req.chunk_size = cpu_to_le32(chunk_size);
		req.left_size = cpu_to_le32(len - offset - chunk_size);

		trace_qcom_glink_cmd_tx_data_tx(glink->label, channel->name,
						channel->lcid, channel->rcid,
						iid, chunk_size,
						len - offset - chunk_size,
						offset > 0);

		ret = qcom_glink_tx(glink, &req, sizeof(req), data + offset, chunk_size, wait);
		if (ret) {
			/* Mark intent available if we failed */
			if (intent)
				intent->in_use = false;
			return ret;
		}

		offset += chunk_size;
	}

	return 0;
}
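/*
 * Large messages are split into chunks of at most SZ_8K when the caller can
 * wait; each chunk carries the remaining left_size so the receiver knows
 * when reassembly is complete.
 */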
static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, true);
}

static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, false);
}

static int qcom_glink_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, true);
}

static int qcom_glink_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, false);
}
/*
 * Finds the device_node for the glink child interested in this channel.
 */
static struct device_node *qcom_glink_match_channel(struct device_node *node,
						    const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(node, child) {
		key = "qcom,glink-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}
static const struct rpmsg_device_ops glink_device_ops = {
	.create_ept = qcom_glink_create_ept,
	.announce_create = qcom_glink_announce_create,
};

static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
	.destroy_ept = qcom_glink_destroy_ept,
	.send = qcom_glink_send,
	.sendto = qcom_glink_sendto,
	.trysend = qcom_glink_trysend,
	.trysendto = qcom_glink_trysendto,
	.set_flow_control = qcom_glink_set_flow_control,
};
static void qcom_glink_rpdev_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);

	kfree(rpdev->driver_override);
	kfree(rpdev);
}
static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
			      char *name)
{
	struct glink_channel *channel;
	struct rpmsg_device *rpdev;
	bool create_device = false;
	struct device_node *node;
	int lcid;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->lcids, channel, lcid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_alloc_channel(glink, name);
		if (IS_ERR(channel))
			return PTR_ERR(channel);

		/* The opening dance was initiated by the remote */
		create_device = true;
	}

	trace_qcom_glink_cmd_open_rx(glink->label, name, channel->lcid, rcid);

	spin_lock_irqsave(&glink->idr_lock, flags);
	ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(glink->dev, "Unable to insert channel into rcid list\n");
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		goto free_channel;
	}
	channel->rcid = ret;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	complete_all(&channel->open_req);

	if (create_device) {
		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
		if (!rpdev) {
			ret = -ENOMEM;
			goto rcid_remove;
		}

		rpdev->ept = &channel->ept;
		strscpy_pad(rpdev->id.name, name, RPMSG_NAME_SIZE);
		rpdev->src = RPMSG_ADDR_ANY;
		rpdev->dst = RPMSG_ADDR_ANY;
		rpdev->ops = &glink_device_ops;

		node = qcom_glink_match_channel(glink->dev->of_node, name);
		rpdev->dev.of_node = node;
		rpdev->dev.parent = glink->dev;
		rpdev->dev.release = qcom_glink_rpdev_release;

		ret = rpmsg_register_device(rpdev);
		if (ret)
			goto rcid_remove;

		channel->rpdev = rpdev;
	}

	return 0;

rcid_remove:
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);
free_channel:
	/* Release the reference, iff we took it */
	if (create_device)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	return ret;
}
static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
{
	struct rpmsg_channel_info chinfo;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	trace_qcom_glink_cmd_close_rx(glink->label, channel ? channel->name : NULL,
				      channel ? channel->lcid : 0, rcid);
	if (WARN(!channel, "close request on unknown channel\n"))
		return;

	/* cancel pending rx_done work */
	cancel_work_sync(&channel->intent_work);

	if (channel->rpdev) {
		strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;

		rpmsg_unregister_device(glink->dev, &chinfo);
	}
	channel->rpdev = NULL;

	qcom_glink_send_close_ack(glink, channel);

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	kref_put(&channel->refcount, qcom_glink_channel_release);
}
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct rpmsg_channel_info chinfo;
	struct glink_channel *channel;
	unsigned long flags;

	/* To wakeup any blocking writers */
	wake_up_all(&glink->tx_avail_notify);

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->lcids, lcid);

	trace_qcom_glink_cmd_close_ack_rx(glink->label, channel ? channel->name : NULL,
					  lcid, channel ? channel->rcid : 0);
	if (WARN(!channel, "close ack on unknown channel\n")) {
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		return;
	}

	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	/* Decouple the potential rpdev from the channel */
	if (channel->rpdev) {
		strscpy(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;

		rpmsg_unregister_device(glink->dev, &chinfo);
	}
	channel->rpdev = NULL;

	kref_put(&channel->refcount, qcom_glink_channel_release);
}
static void qcom_glink_work(struct work_struct *work)
{
	struct qcom_glink *glink = container_of(work, struct qcom_glink,
						rx_work);
	struct glink_defer_cmd *dcmd;
	struct glink_msg *msg;
	unsigned long flags;
	unsigned int param1;
	unsigned int param2;
	unsigned int cmd;

	for (;;) {
		spin_lock_irqsave(&glink->rx_lock, flags);
		if (list_empty(&glink->rx_queue)) {
			spin_unlock_irqrestore(&glink->rx_lock, flags);
			break;
		}
		dcmd = list_first_entry(&glink->rx_queue,
					struct glink_defer_cmd, node);
		list_del(&dcmd->node);
		spin_unlock_irqrestore(&glink->rx_lock, flags);

		msg = container_of(&dcmd->msg, struct glink_msg, hdr);
		cmd = le16_to_cpu(msg->cmd);
		param1 = le16_to_cpu(msg->param1);
		param2 = le32_to_cpu(msg->param2);

		switch (cmd) {
		case GLINK_CMD_VERSION:
			qcom_glink_receive_version(glink, param1, param2);
			break;
		case GLINK_CMD_VERSION_ACK:
			qcom_glink_receive_version_ack(glink, param1, param2);
			break;
		case GLINK_CMD_OPEN:
			qcom_glink_rx_open(glink, param1, msg->data);
			break;
		case GLINK_CMD_CLOSE:
			qcom_glink_rx_close(glink, param1);
			break;
		case GLINK_CMD_CLOSE_ACK:
			qcom_glink_rx_close_ack(glink, param1);
			break;
		case GLINK_CMD_RX_INTENT_REQ:
			qcom_glink_handle_intent_req(glink, param1, param2);
			break;
		default:
			WARN(1, "Unknown defer object %d\n", cmd);
			break;
		}

		kfree(dcmd);
	}
}
static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
{
	struct glink_defer_cmd *dcmd;
	struct glink_defer_cmd *tmp;

	/* cancel any pending deferred rx_work */
	cancel_work_sync(&glink->rx_work);

	list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
		kfree(dcmd);
}
static ssize_t rpmsg_name_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int ret = 0;
	const char *name;

	ret = of_property_read_string(dev->of_node, "label", &name);
	if (ret < 0)
		name = dev->of_node->name;

	return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(rpmsg_name);

static struct attribute *qcom_glink_attrs[] = {
	&dev_attr_rpmsg_name.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_glink);
static void qcom_glink_device_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct glink_channel *channel = to_glink_channel(rpdev->ept);

	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	kfree(rpdev->driver_override);
	kfree(rpdev);
}
static int qcom_glink_create_chrdev(struct qcom_glink *glink)
{
	struct rpmsg_device *rpdev;
	struct glink_channel *channel;

	rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
	if (!rpdev)
		return -ENOMEM;

	channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev");
	if (IS_ERR(channel)) {
		kfree(rpdev);
		return PTR_ERR(channel);
	}
	channel->rpdev = rpdev;

	rpdev->ept = &channel->ept;
	rpdev->ops = &glink_device_ops;
	rpdev->dev.parent = glink->dev;
	rpdev->dev.release = qcom_glink_device_release;

	return rpmsg_ctrldev_register_device(rpdev);
}
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
					   unsigned long features,
					   struct qcom_glink_pipe *rx,
					   struct qcom_glink_pipe *tx,
					   bool intentless)
{
	int ret;
	struct qcom_glink *glink;

	glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
	if (!glink)
		return ERR_PTR(-ENOMEM);

	glink->dev = dev;
	glink->tx_pipe = tx;
	glink->rx_pipe = rx;

	glink->features = features;
	glink->intentless = intentless;

	spin_lock_init(&glink->tx_lock);
	spin_lock_init(&glink->rx_lock);
	INIT_LIST_HEAD(&glink->rx_queue);
	INIT_WORK(&glink->rx_work, qcom_glink_work);
	init_waitqueue_head(&glink->tx_avail_notify);

	spin_lock_init(&glink->idr_lock);
	idr_init(&glink->lcids);
	idr_init(&glink->rcids);

	ret = of_property_read_string(dev->of_node, "label", &glink->label);
	if (ret < 0)
		glink->label = dev->of_node->name;

	glink->dev->groups = qcom_glink_groups;

	ret = device_add_groups(dev, qcom_glink_groups);
	if (ret)
		dev_err(dev, "failed to add groups\n");

	ret = qcom_glink_send_version(glink);
	if (ret)
		return ERR_PTR(ret);

	ret = qcom_glink_create_chrdev(glink);
	if (ret)
		dev_err(glink->dev, "failed to register chrdev\n");

	return glink;
}
EXPORT_SYMBOL_GPL(qcom_glink_native_probe);
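/*
 * Illustrative sketch (not part of this file): a transport glue driver
 * fills in a struct qcom_glink_pipe pair with its FIFO accessors and hands
 * them to this core, roughly:
 *
 *	glink = qcom_glink_native_probe(&pdev->dev, 0, &rx_pipe->native,
 *					&tx_pipe->native, false);
 *	if (IS_ERR(glink))
 *		return PTR_ERR(glink);
 *
 * "rx_pipe"/"tx_pipe" are hypothetical wrapper objects owned by that glue
 * driver, not defined in this file.
 */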
static int qcom_glink_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}
void qcom_glink_native_remove(struct qcom_glink *glink)
{
	struct glink_channel *channel;
	unsigned long flags;
	int cid;
	int ret;

	qcom_glink_cancel_rx_work(glink);

	/* Fail all attempts at sending messages */
	spin_lock_irqsave(&glink->tx_lock, flags);
	glink->abort_tx = true;
	wake_up_all(&glink->tx_avail_notify);
	spin_unlock_irqrestore(&glink->tx_lock, flags);

	/* Abort any senders waiting for intent requests */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->lcids, channel, cid)
		qcom_glink_intent_req_abort(channel);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
	if (ret)
		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);

	/* Release any defunct local channels, waiting for close-ack */
	idr_for_each_entry(&glink->lcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	/* Release any defunct local channels, waiting for close-req */
	idr_for_each_entry(&glink->rcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	idr_destroy(&glink->lcids);
	idr_destroy(&glink->rcids);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
MODULE_DESCRIPTION("Qualcomm GLINK driver");
MODULE_LICENSE("GPL v2");