/*
 * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
#include <linux/sched/clock.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>
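
/*
 * Flags carried in the message header: MSG_ACK indicates that the sender
 * expects a response frame to be written back, MSG_RING that it wants the
 * mailbox doorbell rung once that response has been queued. Threaded
 * transfers set both; atomic transfers poll and set only MSG_ACK (see
 * tegra_bpmp_write_threaded() and tegra_bpmp_transfer_atomic() below).
 */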

#define MSG_ACK		BIT(0)
#define MSG_RING	BIT(1)

static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}

struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_bpmp *bpmp;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		bpmp = ERR_PTR(-ENODEV);
		goto put;
	}

	bpmp = platform_get_drvdata(pdev);
	if (!bpmp) {
		bpmp = ERR_PTR(-EPROBE_DEFER);
		put_device(&pdev->dev);
		goto put;
	}

put:
	of_node_put(np);
	return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);

void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
	if (bpmp)
		put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);

static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel)
{
	return channel - channel->bpmp->channels;
}

static int
tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned int offset, count;
	int index;

	offset = bpmp->soc->channels.thread.offset;
	count = bpmp->soc->channels.thread.count;

	index = tegra_bpmp_channel_get_index(channel);
	if (index < 0)
		return index;

	if (index < offset || index >= offset + count)
		return -EINVAL;

	return index - offset;
}

static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index)
{
	unsigned int offset = bpmp->soc->channels.thread.offset;
	unsigned int count = bpmp->soc->channels.thread.count;

	if (index >= count)
		return NULL;

	return &bpmp->channels[offset + index];
}

static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp)
{
	unsigned int offset = bpmp->soc->channels.cpu_tx.offset;

	return &bpmp->channels[offset + smp_processor_id()];
}

static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp)
{
	unsigned int offset = bpmp->soc->channels.cpu_rx.offset;

	return &bpmp->channels[offset];
}

static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
{
	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->tx.size == 0 || msg->tx.data) &&
	       (msg->rx.size == 0 || msg->rx.data);
}
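
/*
 * Payloads in both directions are bounded by MSG_DATA_MIN_SZ, the size of
 * the data area of a single message frame as defined by the BPMP ABI.
 */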

static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_read_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ib = NULL;
		return false;
	}

	channel->ib = frame;

	return true;
}

static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t end;

	end = ktime_add_us(ktime_get(), timeout);

	do {
		if (tegra_bpmp_master_acked(channel))
			return 0;
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}

static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ob = NULL;
		return false;
	}

	channel->ob = frame;

	return true;
}

static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t start, now;

	start = ns_to_ktime(local_clock());

	do {
		if (tegra_bpmp_master_free(channel))
			return 0;

		now = ns_to_ktime(local_clock());
	} while (ktime_us_delta(now, start) < timeout);

	return -ETIMEDOUT;
}

static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		memcpy(data, channel->ib->data, size);

	err = tegra_ivc_read_advance(channel->ivc);
	if (err < 0)
		return err;

	*ret = channel->ib->code;

	return 0;
}

static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}

static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					  unsigned int mrq, unsigned long flags,
					  const void *data, size_t size)
{
	channel->ob->code = mrq;
	channel->ob->flags = flags;

	if (data && size > 0)
		memcpy(channel->ob->data, data, size);

	return tegra_ivc_write_advance(channel->ivc);
}
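
/*
 * Threaded (sleeping) transfers claim one of the thread channels: the
 * semaphore caps the number of outstanding requests at the channel count,
 * the "allocated" bitmap records which channels are claimed (protected by
 * bpmp->lock) and the "busy" bitmap records requests in flight so that
 * tegra_bpmp_handle_rx() knows which completions to signal.
 */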

static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = tegra_bpmp_channel_get_thread(bpmp, index);
	if (!channel) {
		err = -EINVAL;
		goto unlock;
	}

	if (!tegra_bpmp_master_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}

static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					unsigned int mrq, unsigned long flags,
					const void *data, size_t size)
{
	int err;

	err = tegra_bpmp_wait_master_free(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
}

int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_channel_get_tx(bpmp);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0)
		return err;

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	err = tegra_bpmp_wait_ack(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);

int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);

static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
						  unsigned int mrq)
{
	struct tegra_bpmp_mrq *entry;

	list_for_each_entry(entry, &bpmp->mrqs, list)
		if (entry->mrq == mrq)
			return entry;

	return NULL;
}

void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	struct tegra_bpmp_mb_data *frame;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_ivc_read_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if ((flags & MSG_ACK) == 0)
		return;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (WARN_ON(IS_ERR(frame)))
		return;

	frame->code = code;

	if (data && size > 0)
		memcpy(frame->data, data, size);

	err = tegra_ivc_write_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = mbox_send_message(bpmp->mbox.channel, NULL);
		if (WARN_ON(err < 0))
			return;

		mbox_client_txdone(bpmp->mbox.channel, 0);
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);

static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}

int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
			   tegra_bpmp_mrq_handler_t handler, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	if (!handler)
		return -EINVAL;

	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry->mrq = mrq;
	entry->handler = handler;
	entry->data = data;
	list_add(&entry->list, &bpmp->mrqs);

	spin_unlock_irqrestore(&bpmp->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);

void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry)
		goto unlock;

	list_del(&entry->list);
	devm_kfree(bpmp->dev, entry);

unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);

static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
				       struct tegra_bpmp_channel *channel,
				       void *data)
{
	struct mrq_ping_request *request;
	struct mrq_ping_response response;

	request = (struct mrq_ping_request *)channel->ib->data;

	memset(&response, 0, sizeof(response));
	response.reply = request->challenge << 1;

	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
}

static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
	struct mrq_ping_response response;
	struct mrq_ping_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	ktime_t start, end;
	int err;

	memset(&request, 0, sizeof(request));
	request.challenge = 1;

	memset(&response, 0, sizeof(response));

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	local_irq_save(flags);
	start = ktime_get();
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	end = ktime_get();
	local_irq_restore(flags);

	if (!err)
		dev_dbg(bpmp->dev,
			"ping ok: challenge: %u, response: %u, time: %lld\n",
			request.challenge, response.reply,
			ktime_to_us(ktime_sub(end, start)));

	return err;
}

static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		strlcpy(tag, virt, size);

	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);

	return err;
}

static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
{
	unsigned long flags = channel->ob->flags;

	if ((flags & MSG_RING) == 0)
		return;

	complete(&channel->completion);
}

static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
{
	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
	struct tegra_bpmp_channel *channel;
	unsigned int i, count;
	unsigned long *busy;

	channel = tegra_bpmp_channel_get_rx(bpmp);
	count = bpmp->soc->channels.thread.count;
	busy = bpmp->threaded.busy;

	if (tegra_bpmp_master_acked(channel))
		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);

	spin_lock(&bpmp->lock);

	for_each_set_bit(i, busy, count) {
		struct tegra_bpmp_channel *channel;

		channel = tegra_bpmp_channel_get_thread(bpmp, i);
		if (!channel)
			continue;

		if (tegra_bpmp_master_acked(channel)) {
			tegra_bpmp_channel_signal(channel);
			clear_bit(i, busy);
		}
	}

	spin_unlock(&bpmp->lock);
}

static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
{
	struct tegra_bpmp *bpmp = data;
	int err;

	if (WARN_ON(bpmp->mbox.channel == NULL))
		return;

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return;

	mbox_client_txdone(bpmp->mbox.channel, 0);
}
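
/*
 * Each channel owns one frame-sized slice of the shared memory regions:
 * slice "index" lives at byte offset queue_size * index in both the RX
 * and TX areas allocated from the "shmem" pools in tegra_bpmp_probe().
 */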

static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				   struct tegra_bpmp *bpmp,
				   unsigned int index)
{
	size_t message_size, queue_size;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	err = tegra_ivc_init(channel->ivc, NULL,
			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
			     1, message_size, tegra_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}

static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
	/* reset the channel state */
	tegra_ivc_reset(channel->ivc);

	/* sync the channel state with BPMP */
	while (tegra_ivc_notified(channel->ivc))
		;
}

static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}

static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp_channel *channel;
	struct tegra_bpmp *bpmp;
	unsigned int i;
	char tag[32];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
	if (!bpmp->tx.pool) {
		dev_err(&pdev->dev, "TX shmem pool not found\n");
		return -ENOMEM;
	}

	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
	if (!bpmp->tx.virt) {
		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
	if (!bpmp->rx.pool) {
		dev_err(&pdev->dev, "RX shmem pool not found\n");
		err = -ENOMEM;
		goto free_tx;
	}

	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
	if (!bpmp->rx.virt) {
		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->num_channels = bpmp->soc->channels.cpu_tx.count +
			     bpmp->soc->channels.thread.count +
			     bpmp->soc->channels.cpu_rx.count;

	bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels,
				      sizeof(*channel), GFP_KERNEL);
	if (!bpmp->channels) {
		err = -ENOMEM;
		goto free_rx;
	}

	/* message channel initialization */
	for (i = 0; i < bpmp->num_channels; i++) {
		struct tegra_bpmp_channel *channel = &bpmp->channels[i];

		err = tegra_bpmp_channel_init(channel, bpmp, i);
		if (err < 0)
			goto cleanup_channels;
	}

	/* mbox registration */
	bpmp->mbox.client.dev = &pdev->dev;
	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
	bpmp->mbox.client.tx_block = false;
	bpmp->mbox.client.knows_txdone = false;

	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
	if (IS_ERR(bpmp->mbox.channel)) {
		err = PTR_ERR(bpmp->mbox.channel);
		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
		goto cleanup_channels;
	}

	/* reset message channels */
	for (i = 0; i < bpmp->num_channels; i++) {
		struct tegra_bpmp_channel *channel = &bpmp->channels[i];

		tegra_bpmp_channel_reset(channel);
	}

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto free_mbox;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %s\n", tag);

	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_clocks(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_resets(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_powergates(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
free_mbox:
	mbox_free_channel(bpmp->mbox.channel);
cleanup_channels:
	while (i--)
		tegra_bpmp_channel_cleanup(&bpmp->channels[i]);
free_rx:
	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
free_tx:
	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);

	return err;
}
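
/*
 * Illustrative sketch, not part of the original file, of a device tree
 * node this driver binds against; the exact labels and specifier values
 * are assumptions that depend on the board and the nvidia,tegra186-bpmp
 * binding:
 *
 *	bpmp: bpmp {
 *		compatible = "nvidia,tegra186-bpmp";
 *		mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB
 *			  TEGRA_HSP_DB_MASTER_BPMP>;
 *		shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>;
 *		#clock-cells = <1>;
 *		#reset-cells = <1>;
 *	};
 */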

static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.count = 6,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.count = 1,
			.timeout = 0,
		},
	},
	.num_resets = 193,
};

static const struct of_device_id tegra_bpmp_match[] = {
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
	{ }
};

static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
	},
	.probe = tegra_bpmp_probe,
};

static int __init tegra_bpmp_init(void)
{
	return platform_driver_register(&tegra_bpmp_driver);
}
core_initcall(tegra_bpmp_init);