/*
 * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
#include <linux/sched/clock.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>

#define MSG_ACK		BIT(0)
#define MSG_RING	BIT(1)
static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_bpmp *bpmp;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		bpmp = ERR_PTR(-ENODEV);
		goto put;
	}

	bpmp = platform_get_drvdata(pdev);
	if (!bpmp) {
		bpmp = ERR_PTR(-EPROBE_DEFER);
		put_device(&pdev->dev);
		goto put;
	}

put:
	of_node_put(np);
	return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);
void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
	if (bpmp)
		put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);
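
/*
 * Illustrative usage sketch (not part of this driver): a hypothetical
 * consumer would typically resolve the BPMP from the "nvidia,bpmp"
 * phandle in its own device node at probe time and drop the reference
 * again on remove; tegra_bpmp_get() may return ERR_PTR(-EPROBE_DEFER)
 * if this driver has not finished probing yet. The example_* names
 * below are made up for illustration only.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct tegra_bpmp *bpmp;
 *
 *		bpmp = tegra_bpmp_get(&pdev->dev);
 *		if (IS_ERR(bpmp))
 *			return PTR_ERR(bpmp);
 *
 *		platform_set_drvdata(pdev, bpmp);
 *
 *		return 0;
 *	}
 *
 *	static int example_remove(struct platform_device *pdev)
 *	{
 *		tegra_bpmp_put(platform_get_drvdata(pdev));
 *
 *		return 0;
 *	}
 */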
static int
tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned int count;
	int index;

	count = bpmp->soc->channels.thread.count;

	index = channel - channel->bpmp->threaded_channels;
	if (index < 0 || index >= count)
		return -EINVAL;

	return index;
}
static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
{
	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->tx.size == 0 || msg->tx.data) &&
	       (msg->rx.size == 0 || msg->rx.data);
}
static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_read_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ib = NULL;
		return false;
	}

	channel->ib = frame;

	return true;
}

static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t end;

	end = ktime_add_us(ktime_get(), timeout);

	do {
		if (tegra_bpmp_master_acked(channel))
			return 0;
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}

static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ob = NULL;
		return false;
	}

	channel->ob = frame;

	return true;
}

static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t start, now;

	start = ns_to_ktime(local_clock());

	do {
		if (tegra_bpmp_master_free(channel))
			return 0;

		now = ns_to_ktime(local_clock());
	} while (ktime_us_delta(now, start) < timeout);

	return -ETIMEDOUT;
}
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		memcpy(data, channel->ib->data, size);

	err = tegra_ivc_read_advance(channel->ivc);
	if (err < 0)
		return err;

	*ret = channel->ib->code;

	return 0;
}
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}
static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					  unsigned int mrq, unsigned long flags,
					  const void *data, size_t size)
{
	channel->ob->code = mrq;
	channel->ob->flags = flags;

	if (data && size > 0)
		memcpy(channel->ob->data, data, size);

	return tegra_ivc_write_advance(channel->ivc);
}
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	if (!tegra_bpmp_master_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}
static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					unsigned int mrq, unsigned long flags,
					const void *data, size_t size)
{
	int err;

	err = tegra_bpmp_wait_master_free(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
}
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = bpmp->tx_channel;

	spin_lock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0) {
		spin_unlock(&bpmp->atomic_tx_lock);
		return err;
	}

	spin_unlock(&bpmp->atomic_tx_lock);

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	err = tegra_bpmp_wait_ack(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
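
/*
 * Illustrative usage sketch (not part of this driver): a request/response
 * exchange from process context fills in a struct tegra_bpmp_message with
 * the MRQ number and the tx/rx buffers, then hands it to
 * tegra_bpmp_transfer(). This mirrors what tegra_bpmp_ping() below does
 * with tegra_bpmp_transfer_atomic(), but without disabling interrupts.
 * The example_ping() name is made up for illustration only.
 *
 *	static int example_ping(struct tegra_bpmp *bpmp)
 *	{
 *		struct mrq_ping_response response;
 *		struct mrq_ping_request request;
 *		struct tegra_bpmp_message msg;
 *
 *		memset(&request, 0, sizeof(request));
 *		request.challenge = 42;
 *
 *		memset(&response, 0, sizeof(response));
 *
 *		memset(&msg, 0, sizeof(msg));
 *		msg.mrq = MRQ_PING;
 *		msg.tx.data = &request;
 *		msg.tx.size = sizeof(request);
 *		msg.rx.data = &response;
 *		msg.rx.size = sizeof(response);
 *
 *		return tegra_bpmp_transfer(bpmp, &msg);
 *	}
 */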
static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
						  unsigned int mrq)
{
	struct tegra_bpmp_mrq *entry;

	list_for_each_entry(entry, &bpmp->mrqs, list)
		if (entry->mrq == mrq)
			return entry;

	return NULL;
}
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	struct tegra_bpmp_mb_data *frame;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_ivc_read_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if ((flags & MSG_ACK) == 0)
		return;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (WARN_ON(IS_ERR(frame)))
		return;

	frame->code = code;

	if (data && size > 0)
		memcpy(frame->data, data, size);

	err = tegra_ivc_write_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = mbox_send_message(bpmp->mbox.channel, NULL);
		if (WARN_ON(err < 0))
			return;

		mbox_client_txdone(bpmp->mbox.channel, 0);
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}
int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
			   tegra_bpmp_mrq_handler_t handler, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	if (!handler)
		return -EINVAL;

	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry->mrq = mrq;
	entry->handler = handler;
	entry->data = data;
	list_add(&entry->list, &bpmp->mrqs);

	spin_unlock_irqrestore(&bpmp->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
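
/*
 * Illustrative usage sketch (not part of this driver): a handler registered
 * with tegra_bpmp_request_mrq() receives unsolicited requests from the BPMP
 * and answers them with tegra_bpmp_mrq_return(), exactly as
 * tegra_bpmp_mrq_handle_ping() below does for MRQ_PING. The example_* names
 * and the reuse of MRQ_PING here are for illustration only.
 *
 *	static void example_mrq_handler(unsigned int mrq,
 *					struct tegra_bpmp_channel *channel,
 *					void *data)
 *	{
 *		u32 reply = 0;
 *
 *		tegra_bpmp_mrq_return(channel, 0, &reply, sizeof(reply));
 *	}
 *
 *	static int example_register(struct tegra_bpmp *bpmp)
 *	{
 *		return tegra_bpmp_request_mrq(bpmp, MRQ_PING,
 *					      example_mrq_handler, bpmp);
 *	}
 */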
void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry)
		goto unlock;

	list_del(&entry->list);
	devm_kfree(bpmp->dev, entry);

unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
				       struct tegra_bpmp_channel *channel,
				       void *data)
{
	struct mrq_ping_request *request;
	struct mrq_ping_response response;

	request = (struct mrq_ping_request *)channel->ib->data;

	memset(&response, 0, sizeof(response));
	response.reply = request->challenge << 1;

	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
}
static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
	struct mrq_ping_response response;
	struct mrq_ping_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	ktime_t start, end;
	int err;

	memset(&request, 0, sizeof(request));
	request.challenge = 1;

	memset(&response, 0, sizeof(response));

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	local_irq_save(flags);
	start = ktime_get();
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	end = ktime_get();
	local_irq_restore(flags);

	if (!err)
		dev_dbg(bpmp->dev,
			"ping ok: challenge: %u, response: %u, time: %lld\n",
			request.challenge, response.reply,
			ktime_to_us(ktime_sub(end, start)));

	return err;
}
static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		strlcpy(tag, virt, size);

	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);

	return err;
}
static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
{
	unsigned long flags = channel->ob->flags;

	if ((flags & MSG_RING) == 0)
		return;

	complete(&channel->completion);
}
static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
{
	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
	struct tegra_bpmp_channel *channel;
	unsigned int i, count;
	unsigned long *busy;

	channel = bpmp->rx_channel;
	count = bpmp->soc->channels.thread.count;
	busy = bpmp->threaded.busy;

	if (tegra_bpmp_master_acked(channel))
		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);

	spin_lock(&bpmp->lock);

	for_each_set_bit(i, busy, count) {
		struct tegra_bpmp_channel *channel;

		channel = &bpmp->threaded_channels[i];

		if (tegra_bpmp_master_acked(channel)) {
			tegra_bpmp_channel_signal(channel);
			clear_bit(i, busy);
		}
	}

	spin_unlock(&bpmp->lock);
}
static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
{
	struct tegra_bpmp *bpmp = data;
	int err;

	if (WARN_ON(bpmp->mbox.channel == NULL))
		return;

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return;

	mbox_client_txdone(bpmp->mbox.channel, 0);
}
static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				   struct tegra_bpmp *bpmp,
				   unsigned int index)
{
	size_t message_size, queue_size;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	err = tegra_ivc_init(channel->ivc, NULL,
			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
			     1, message_size, tegra_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}
static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
	/* reset the channel state */
	tegra_ivc_reset(channel->ivc);

	/* sync the channel state with BPMP */
	while (tegra_ivc_notified(channel->ivc))
		;
}

static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	unsigned int i;
	char tag[32];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
	if (!bpmp->tx.pool) {
		dev_err(&pdev->dev, "TX shmem pool not found\n");
		return -ENOMEM;
	}

	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
	if (!bpmp->tx.virt) {
		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
	if (!bpmp->rx.pool) {
		dev_err(&pdev->dev, "RX shmem pool not found\n");
		err = -ENOMEM;
		goto free_tx;
	}

	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
	if (!bpmp->rx.virt) {
		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy) {
		err = -ENOMEM;
		goto free_rx;
	}

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
					GFP_KERNEL);
	if (!bpmp->rx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels) {
		err = -ENOMEM;
		goto free_rx;
	}

	err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
				      bpmp->soc->channels.cpu_tx.offset);
	if (err < 0)
		goto free_rx;

	err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
				      bpmp->soc->channels.cpu_rx.offset);
	if (err < 0)
		goto cleanup_tx_channel;

	for (i = 0; i < bpmp->threaded.count; i++) {
		err = tegra_bpmp_channel_init(
			&bpmp->threaded_channels[i], bpmp,
			bpmp->soc->channels.thread.offset + i);
		if (err < 0)
			goto cleanup_threaded_channels;
	}

	/* mbox registration */
	bpmp->mbox.client.dev = &pdev->dev;
	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
	bpmp->mbox.client.tx_block = false;
	bpmp->mbox.client.knows_txdone = false;

	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
	if (IS_ERR(bpmp->mbox.channel)) {
		err = PTR_ERR(bpmp->mbox.channel);
		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
		goto cleanup_threaded_channels;
	}

	/* reset message channels */
	tegra_bpmp_channel_reset(bpmp->tx_channel);
	tegra_bpmp_channel_reset(bpmp->rx_channel);
	for (i = 0; i < bpmp->threaded.count; i++)
		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto free_mbox;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %s\n", tag);

	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_clocks(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_resets(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_powergates(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
free_mbox:
	mbox_free_channel(bpmp->mbox.channel);
cleanup_threaded_channels:
	for (i = 0; i < bpmp->threaded.count; i++) {
		if (bpmp->threaded_channels[i].bpmp)
			tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
	}

	tegra_bpmp_channel_cleanup(bpmp->rx_channel);
cleanup_tx_channel:
	tegra_bpmp_channel_cleanup(bpmp->tx_channel);
free_rx:
	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
free_tx:
	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);

	return err;
}
static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
	unsigned int i;

	/* reset message channels */
	tegra_bpmp_channel_reset(bpmp->tx_channel);
	tegra_bpmp_channel_reset(bpmp->rx_channel);

	for (i = 0; i < bpmp->threaded.count; i++)
		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);

	return 0;
}

static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume);
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,
		},
	},
	.num_resets = 193,
};

static const struct of_device_id tegra_bpmp_match[] = {
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
	{ }
};
static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
	},
	.probe = tegra_bpmp_probe,
};

static int __init tegra_bpmp_init(void)
{
	return platform_driver_register(&tegra_bpmp_driver);
}
core_initcall(tegra_bpmp_init);