// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 * Author: Jassi Brar <jassisinghbrar@gmail.com>
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>

#include "mailbox.h"
static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	/* See if there is any space left */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -ENOBUFS;
	}

	idx = chan->msg_free;
	chan->msg_data[idx] = mssg;
	chan->msg_count++;

	if (idx == MBOX_TX_QUEUE_LEN - 1)
		chan->msg_free = 0;
	else
		chan->msg_free++;

	spin_unlock_irqrestore(&chan->lock, flags);

	return idx;
}
static void msg_submit(struct mbox_chan *chan)
{
	unsigned count, idx;
	unsigned long flags;
	void *data;
	int err = -EBUSY;

	spin_lock_irqsave(&chan->lock, flags);

	if (!chan->msg_count || chan->active_req)
		goto exit;

	count = chan->msg_count;
	idx = chan->msg_free;
	if (idx >= count)
		idx -= count;
	else
		idx += MBOX_TX_QUEUE_LEN - count;

	data = chan->msg_data[idx];

	if (chan->cl->tx_prepare)
		chan->cl->tx_prepare(chan->cl, data);
	/* Try to submit a message to the MBOX controller */
	err = chan->mbox->ops->send_data(chan, data);
	if (!err) {
		chan->active_req = data;
		chan->msg_count--;
	}
exit:
	spin_unlock_irqrestore(&chan->lock, flags);

	/* kick start the timer immediately to avoid delays */
	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
		/* but only if not already active */
		if (!hrtimer_active(&chan->mbox->poll_hrt))
			hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
	}
}
static void tx_tick(struct mbox_chan *chan, int r)
{
	unsigned long flags;
	void *mssg;

	spin_lock_irqsave(&chan->lock, flags);
	mssg = chan->active_req;
	chan->active_req = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Submit next message */
	msg_submit(chan);

	if (!mssg)
		return;

	/* Notify the client */
	if (chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, mssg, r);

	if (r != -ETIME && chan->cl->tx_block)
		complete(&chan->tx_complete);
}
static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
	struct mbox_controller *mbox =
		container_of(hrtimer, struct mbox_controller, poll_hrt);
	bool txdone, resched = false;
	int i;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (txdone)
				tx_tick(chan, 0);
			else
				resched = true;
		}
	}

	if (resched) {
		hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}
/**
 * mbox_chan_received_data - A way for controller driver to push data
 *				received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* No buffering the received data */
	if (chan->cl->rx_callback)
		chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);
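
/*
 * Illustrative sketch (not part of this file): a controller driver's RX
 * interrupt handler might push an incoming message to the client as below,
 * ACKing the RX only once the call has returned. 'struct foo_con', its
 * fields and the FOO_* registers are hypothetical.
 *
 *	static irqreturn_t foo_rx_irq(int irq, void *data)
 *	{
 *		struct foo_con *con = data;
 *		u32 msg = readl(con->base + FOO_RX_REG);
 *
 *		mbox_chan_received_data(&con->chans[0], &msg);
 *		writel(FOO_RX_ACK, con->base + FOO_CTRL_REG);
 *
 *		return IRQ_HANDLED;
 *	}
 */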
/**
 * mbox_chan_txdone - A way for controller driver to notify the
 *			framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);
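
/*
 * Illustrative sketch (not part of this file): a controller that set
 * 'txdone_irq' would tick the TX state machine from its ACK interrupt
 * handler, reusing the hypothetical foo_* names from the sketch above:
 *
 *	static irqreturn_t foo_tx_ack_irq(int irq, void *data)
 *	{
 *		struct foo_con *con = data;
 *
 *		mbox_chan_txdone(&con->chans[0], 0);
 *
 *		return IRQ_HANDLED;
 *	}
 */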
/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol had received some 'ACK' packet and it notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);
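
/*
 * Illustrative sketch (not part of this file): a client that set
 * 'knows_txdone' might detect a protocol-level ACK in its rx_callback and
 * report completion itself. foo_is_ack(), 'struct foo_client' and its
 * layout are hypothetical.
 *
 *	static void foo_rx_callback(struct mbox_client *cl, void *mssg)
 *	{
 *		struct foo_client *fc = container_of(cl, struct foo_client, cl);
 *
 *		if (foo_is_ack(mssg))
 *			mbox_client_txdone(fc->chan, 0);
 *	}
 */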
/**
 * mbox_client_peek_data - A way for client driver to pull data
 *			received from remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * A poke to controller driver for any received data.
 * The data is actually passed onto client via the
 * mbox_chan_received_data()
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if controller has, and is going to push after this,
 *	    some data.
 *	   False, if controller doesn't have any data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
	if (chan->mbox->ops->peek_data)
		return chan->mbox->ops->peek_data(chan);

	return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);
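
/*
 * Illustrative sketch (not part of this file): a client without an RX
 * interrupt might poll for data under a deadline; any message found still
 * arrives through rx_callback, not as a return value:
 *
 *	unsigned long deadline = jiffies + msecs_to_jiffies(10);
 *
 *	while (!mbox_client_peek_data(chan) && time_before(jiffies, deadline))
 *		cpu_relax();
 */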
/**
 * mbox_send_message -	For client to submit a message to be
 *				sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted.
 *
 * For client to submit data to the controller destined for a remote
 * processor. If the client had set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to message should be preserved until it is sent
 * over the chan, i.e, tx_done() is made.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			t = -ETIME;
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
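
/*
 * Illustrative sketch (not part of this file): a minimal blocking send
 * with a 500ms timeout. 'struct foo_msg' and FOO_CMD_START are
 * hypothetical; the message must stay valid until tx_done, which the
 * blocking mode guarantees before returning.
 *
 *	struct foo_msg msg = { .cmd = FOO_CMD_START };
 *	int ret;
 *
 *	cl->tx_block = true;
 *	cl->tx_tout = 500;
 *	ret = mbox_send_message(chan, &msg);
 *	if (ret < 0)
 *		dev_err(cl->dev, "message transmission failed: %d\n", ret);
 */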
/**
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * The implementation must call mbox_chan_txdone() upon success. Clients can
 * call the mbox_flush() function at any time after mbox_send_message() to
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	int ret;

	if (!chan->mbox->ops->flush)
		return -ENOTSUPP;

	ret = chan->mbox->ops->flush(chan, timeout);
	if (ret < 0)
		tx_tick(chan, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);
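
/*
 * Illustrative sketch (not part of this file): a client that must get a
 * message out while it cannot sleep, e.g. on a shutdown path, could pair a
 * non-blocking send with a bounded flush (here 100ms). This only works if
 * the controller implements the ->flush() callback.
 *
 *	ret = mbox_send_message(chan, &msg);
 *	if (ret >= 0)
 *		ret = mbox_flush(chan, 100);
 */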
/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel.
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds reference to the client, so the mbox_client
 * structure shouldn't be modified until the mbox_free_channel returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *		ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	unsigned long flags;
	int ret;

	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&con_mutex);

	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
				       "#mbox-cells", index, &spec)) {
		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-ENODEV);
	}

	chan = ERR_PTR(-EPROBE_DEFER);
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
			chan = mbox->of_xlate(mbox, &spec);
			if (!IS_ERR(chan))
				break;
		}

	of_node_put(spec.np);

	if (IS_ERR(chan)) {
		mutex_unlock(&con_mutex);
		return chan;
	}

	if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-EBUSY);
	}

	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method = TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);

		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			chan = ERR_PTR(ret);
		}
	}

	mutex_unlock(&con_mutex);
	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
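
/*
 * Illustrative sketch (not part of this file): typical client setup in a
 * probe() routine, reusing the hypothetical foo_* callbacks from the
 * sketches above. A failed lookup may return -EPROBE_DEFER if the
 * controller has not registered yet, so the error must be propagated.
 *
 *	cl->dev = &pdev->dev;
 *	cl->rx_callback = foo_rx_callback;
 *	cl->tx_done = foo_tx_done;
 *	cl->tx_block = false;
 *	cl->knows_txdone = false;
 *
 *	chan = mbox_request_channel(cl, 0);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */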
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
					      const char *name)
{
	struct device_node *np = cl->dev->of_node;
	struct property *prop;
	const char *mbox_name;
	int index = 0;

	if (!np) {
		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!of_get_property(np, "mbox-names", NULL)) {
		dev_err(cl->dev,
			"%s() requires an \"mbox-names\" property\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
		if (!strncmp(name, mbox_name, strlen(name)))
			return mbox_request_channel(cl, index);
		index++;
	}

	dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
		__func__, name);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
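
/*
 * Illustrative sketch (not part of this file): given a client device node
 * with
 *
 *	mboxes = <&mbox 0>, <&mbox 1>;
 *	mbox-names = "tx", "rx";
 *
 * the channels can be requested by name instead of index:
 *
 *	chan = mbox_request_channel_byname(cl, "tx");
 */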
/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	unsigned long flags;

	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;
	if (chan->txdone_method == TXDONE_BY_ACK)
		chan->txdone_method = TXDONE_BY_POLL;

	module_put(chan->mbox->dev->driver->owner);
	spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox,
		    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[ind];
}
/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	if (txdone == TXDONE_BY_POLL) {

		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mbox->poll_hrt.function = txdone_hrtimer;
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	mutex_lock(&con_mutex);
	list_add_tail(&mbox->node, &mbox_cons);
	mutex_unlock(&con_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);
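
/*
 * Illustrative sketch (not part of this file): a polling-mode controller
 * fills in its channels, ops and poll period (in milliseconds) before
 * registering. The foo_* names are hypothetical; 'last_tx_done' must be
 * provided in the ops because 'txdone_poll' is set.
 *
 *	mbox->dev = &pdev->dev;
 *	mbox->ops = &foo_chan_ops;
 *	mbox->chans = foo_chans;
 *	mbox->num_chans = ARRAY_SIZE(foo_chans);
 *	mbox->txdone_poll = true;
 *	mbox->txpoll_period = 5;
 *
 *	ret = mbox_controller_register(mbox);
 */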
/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
	int i;

	if (!mbox)
		return;

	mutex_lock(&con_mutex);

	list_del(&mbox->node);

	for (i = 0; i < mbox->num_chans; i++)
		mbox_free_channel(&mbox->chans[i]);

	if (mbox->txdone_poll)
		hrtimer_cancel(&mbox->poll_hrt);

	mutex_unlock(&con_mutex);
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);
static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	struct mbox_controller **mbox = res;

	mbox_controller_unregister(*mbox);
}
static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
{
	struct mbox_controller **mbox = res;

	if (WARN_ON(!mbox || !*mbox))
		return 0;

	return *mbox == data;
}
/**
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, will be unregistered along with the rest of
 * device-managed resources upon driver probe failure or driver removal.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_mbox_controller_register(struct device *dev,
				  struct mbox_controller *mbox)
{
	struct mbox_controller **ptr;
	int err;

	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	err = mbox_controller_register(mbox);
	if (err < 0) {
		devres_free(ptr);
		return err;
	}

	devres_add(dev, ptr);
	*ptr = mbox;

	return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
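
/*
 * Illustrative sketch (not part of this file): with the devres variant a
 * controller driver's probe() needs no explicit unregister in its error
 * and remove paths. 'struct foo_con' is hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_con *con;
 *
 *		con = devm_kzalloc(&pdev->dev, sizeof(*con), GFP_KERNEL);
 *		if (!con)
 *			return -ENOMEM;
 *
 *		... set up con->mbox (dev, ops, chans, num_chans) ...
 *
 *		return devm_mbox_controller_register(&pdev->dev, &con->mbox);
 *	}
 */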
/**
 * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
 * @dev: device owning the mailbox controller being unregistered
 * @mbox: mailbox controller being unregistered
 *
 * This function unregisters the mailbox controller and removes the device-
 * managed resource that was set up to automatically unregister the mailbox
 * controller on driver probe failure or driver removal. It's typically not
 * necessary to call this function.
 */
void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
{
	WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
			       devm_mbox_controller_match, mbox));
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);