// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
#include <linux/sched/clock.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>

#include "bpmp-private.h"

#define MSG_ACK		BIT(0)
#define MSG_RING	BIT(1)
#define TAG_SZ		32

static inline const struct tegra_bpmp_ops *
channel_to_ops(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;

	return bpmp->soc->ops;
}

struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_bpmp *bpmp;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		bpmp = ERR_PTR(-ENODEV);
		goto put;
	}

	bpmp = platform_get_drvdata(pdev);
	if (!bpmp) {
		bpmp = ERR_PTR(-EPROBE_DEFER);
		put_device(&pdev->dev);
		goto put;
	}

put:
	of_node_put(np);
	return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);

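/*
 * Illustrative sketch (not part of the original file): a consumer driver
 * whose device tree node carries a "nvidia,bpmp" phandle would typically
 * obtain and release the firmware handle as follows; the surrounding probe
 * code is hypothetical.
 *
 *	struct tegra_bpmp *bpmp;
 *
 *	bpmp = tegra_bpmp_get(&pdev->dev);
 *	if (IS_ERR(bpmp))
 *		return PTR_ERR(bpmp);
 *
 *	(issue MRQs via tegra_bpmp_transfer() here)
 *
 *	tegra_bpmp_put(bpmp);
 */
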
void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
	if (bpmp)
		put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);

static int
tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned int count;
	int index;

	count = bpmp->soc->channels.thread.count;

	index = channel - channel->bpmp->threaded_channels;
	if (index < 0 || index >= count)
		return -EINVAL;

	return index;
}

static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
{
	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->tx.size == 0 || msg->tx.data) &&
	       (msg->rx.size == 0 || msg->rx.data);
}

static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_response_ready(channel);
}

static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_request_ready(channel);
}

static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t end;

	end = ktime_add_us(ktime_get(), timeout);

	do {
		if (tegra_bpmp_is_response_ready(channel))
			return 0;
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}

static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->ack_response(channel);
}

static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->ack_request(channel);
}

static bool
tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_request_channel_free(channel);
}

static bool
tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_response_channel_free(channel);
}

static int
tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t start, now;

	start = ns_to_ktime(local_clock());

	do {
		if (tegra_bpmp_is_request_channel_free(channel))
			return 0;

		now = ns_to_ktime(local_clock());
	} while (ktime_us_delta(now, start) < timeout);

	return -ETIMEDOUT;
}

static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->post_request(channel);
}

static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->post_response(channel);
}

static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
{
	return bpmp->soc->ops->ring_doorbell(bpmp);
}

static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		tegra_bpmp_mb_read(data, &channel->ib, size);

	err = tegra_bpmp_ack_response(channel);
	if (err < 0)
		return err;

	*ret = tegra_bpmp_mb_read_field(&channel->ib, code);

	return 0;
}

static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}

static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					  unsigned int mrq, unsigned long flags,
					  const void *data, size_t size)
{
	tegra_bpmp_mb_write_field(&channel->ob, code, mrq);
	tegra_bpmp_mb_write_field(&channel->ob, flags, flags);

	if (data && size > 0)
		tegra_bpmp_mb_write(&channel->ob, data, size);

	return tegra_bpmp_post_request(channel);
}

static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	if (!tegra_bpmp_is_request_channel_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}

static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					unsigned int mrq, unsigned long flags,
					const void *data, size_t size)
{
	int err;

	err = tegra_bpmp_wait_request_channel_free(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
}

static int __maybe_unused tegra_bpmp_resume(struct device *dev);

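/*
 * The atomic variant below requires interrupts to be disabled: it sends on
 * the dedicated tx_channel under atomic_tx_lock and busy-waits for the
 * response instead of sleeping on a completion, unlike tegra_bpmp_transfer()
 * further down.
 */
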
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	if (bpmp->suspended) {
		/* Reset BPMP IPC channels during resume based on flags passed */
		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
			tegra_bpmp_resume(bpmp->dev);

		return -EAGAIN;
	}

	channel = bpmp->tx_channel;

	spin_lock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0) {
		spin_unlock(&bpmp->atomic_tx_lock);
		return err;
	}

	spin_unlock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_wait_response(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);

int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	if (bpmp->suspended) {
		/* Reset BPMP IPC channels during resume based on flags passed */
		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
			tegra_bpmp_resume(bpmp->dev);

		return -EAGAIN;
	}

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);

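/*
 * Illustrative sketch (not taken from this file): a typical caller builds a
 * tegra_bpmp_message around an MRQ request/response pair and checks both the
 * transfer error and the firmware return code. The MRQ_PING structures from
 * <soc/tegra/bpmp-abi.h> are used here purely as an example payload.
 *
 *	struct mrq_ping_request req = { .challenge = 7 };
 *	struct mrq_ping_response resp;
 *	struct tegra_bpmp_message msg = {
 *		.mrq = MRQ_PING,
 *		.tx = { .data = &req, .size = sizeof(req) },
 *		.rx = { .data = &resp, .size = sizeof(resp) },
 *	};
 *	int err;
 *
 *	err = tegra_bpmp_transfer(bpmp, &msg);
 *	if (err < 0 || msg.rx.ret < 0)
 *		return err ?: -EINVAL;
 */
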
static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
						  unsigned int mrq)
{
	struct tegra_bpmp_mrq *entry;

	list_for_each_entry(entry, &bpmp->mrqs, list)
		if (entry->mrq == mrq)
			return entry;

	return NULL;
}

void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	unsigned long flags = tegra_bpmp_mb_read_field(&channel->ib, flags);
	struct tegra_bpmp *bpmp = channel->bpmp;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_bpmp_ack_request(channel);
	if (WARN_ON(err < 0))
		return;

	if ((flags & MSG_ACK) == 0)
		return;

	if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
		return;

	tegra_bpmp_mb_write_field(&channel->ob, code, code);

	if (data && size > 0)
		tegra_bpmp_mb_write(&channel->ob, data, size);

	err = tegra_bpmp_post_response(channel);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = tegra_bpmp_ring_doorbell(bpmp);
		if (WARN_ON(err < 0))
			return;
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);

static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}

int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
			   tegra_bpmp_mrq_handler_t handler, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	if (!handler)
		return -EINVAL;

	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry->mrq = mrq;
	entry->handler = handler;
	entry->data = data;
	list_add(&entry->list, &bpmp->mrqs);

	spin_unlock_irqrestore(&bpmp->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);

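/*
 * Illustrative sketch (assumption, not from this file): a client that wants
 * to service requests coming from the BPMP firmware registers a handler for
 * its MRQ and answers each request with tegra_bpmp_mrq_return(). The handler
 * name and the use of MRQ_PING below are purely illustrative.
 *
 *	static void example_mrq_handler(unsigned int mrq,
 *					struct tegra_bpmp_channel *channel,
 *					void *data)
 *	{
 *		u32 reply = 0;
 *
 *		tegra_bpmp_mrq_return(channel, 0, &reply, sizeof(reply));
 *	}
 *
 *	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING, example_mrq_handler, bpmp);
 *	...
 *	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
 */
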
void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry)
		goto unlock;

	list_del(&entry->list);
	devm_kfree(bpmp->dev, entry);

unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);

bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
{
	struct mrq_query_abi_request req = { .mrq = mrq };
	struct mrq_query_abi_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_QUERY_ABI,
		.tx = {
			.data = &req,
			.size = sizeof(req),
		},
		.rx = {
			.data = &resp,
			.size = sizeof(resp),
		},
	};
	int err;

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err || msg.rx.ret)
		return false;

	return resp.status == 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);

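/*
 * Illustrative sketch: callers can probe for optional firmware features
 * before relying on them, as the firmware-tag query further down in this
 * file does:
 *
 *	if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG))
 *		(use the newer MRQ_QUERY_FW_TAG request)
 *	else
 *		(fall back to the deprecated MRQ_QUERY_TAG request)
 */
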
static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
				       struct tegra_bpmp_channel *channel,
				       void *data)
{
	struct mrq_ping_request request;
	struct mrq_ping_response response;

	tegra_bpmp_mb_read(&request, &channel->ib, sizeof(request));

	memset(&response, 0, sizeof(response));
	response.reply = request.challenge << 1;

	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
}

static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
	struct mrq_ping_response response;
	struct mrq_ping_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	ktime_t start, end;
	int err;

	memset(&request, 0, sizeof(request));
	request.challenge = 1;

	memset(&response, 0, sizeof(response));

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	local_irq_save(flags);
	start = ktime_get();
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	end = ktime_get();
	local_irq_restore(flags);

	if (!err)
		dev_dbg(bpmp->dev,
			"ping ok: challenge: %u, response: %u, time: %lld\n",
			request.challenge, response.reply,
			ktime_to_us(ktime_sub(end, start)));

	return err;
}

/* deprecated version of tag query */
static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
					   size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	if (size != TAG_SZ)
		return -EINVAL;

	virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		memcpy(tag, virt, TAG_SZ);

	dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);

	return err;
}

static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
		struct mrq_query_fw_tag_response resp;
		struct tegra_bpmp_message msg = {
			.mrq = MRQ_QUERY_FW_TAG,
			.rx = {
				.data = &resp,
				.size = sizeof(resp),
			},
		};
		int err;

		if (size != sizeof(resp.tag))
			return -EINVAL;

		err = tegra_bpmp_transfer(bpmp, &msg);
		if (err)
			return err;
		if (msg.rx.ret < 0)
			return -EINVAL;

		memcpy(tag, resp.tag, sizeof(resp.tag));

		return 0;
	}

	return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
}

static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
{
	unsigned long flags = tegra_bpmp_mb_read_field(&channel->ob, flags);

	if ((flags & MSG_RING) == 0)
		return;

	complete(&channel->completion);
}

void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
{
	struct tegra_bpmp_channel *channel;
	unsigned int i, count;
	unsigned long *busy;

	channel = bpmp->rx_channel;
	count = bpmp->soc->channels.thread.count;
	busy = bpmp->threaded.busy;

	if (tegra_bpmp_is_request_ready(channel)) {
		unsigned int mrq = tegra_bpmp_mb_read_field(&channel->ib, code);

		tegra_bpmp_handle_mrq(bpmp, mrq, channel);
	}

	spin_lock(&bpmp->lock);

	for_each_set_bit(i, busy, count) {
		struct tegra_bpmp_channel *channel;

		channel = &bpmp->threaded_channels[i];

		if (tegra_bpmp_is_response_ready(channel)) {
			tegra_bpmp_channel_signal(channel);
			clear_bit(i, busy);
		}
	}

	spin_unlock(&bpmp->lock);
}

static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	char tag[TAG_SZ];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated)
		return -ENOMEM;

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy)
		return -ENOMEM;

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel)
		return -ENOMEM;

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
					GFP_KERNEL);
	if (!bpmp->rx_channel)
		return -ENOMEM;

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels)
		return -ENOMEM;

	platform_set_drvdata(pdev, bpmp);

	err = bpmp->soc->ops->init(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto deinit;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	if (of_property_present(pdev->dev.of_node, "#clock-cells")) {
		err = tegra_bpmp_init_clocks(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_property_present(pdev->dev.of_node, "#reset-cells")) {
		err = tegra_bpmp_init_resets(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_property_present(pdev->dev.of_node, "#power-domain-cells")) {
		err = tegra_bpmp_init_powergates(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
deinit:
	if (bpmp->soc->ops->deinit)
		bpmp->soc->ops->deinit(bpmp);

	return err;
}

static int __maybe_unused tegra_bpmp_suspend(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);

	bpmp->suspended = true;

	return 0;
}

static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);

	bpmp->suspended = false;

	if (bpmp->soc->ops->resume)
		return bpmp->soc->ops->resume(bpmp);

	return 0;
}

static const struct dev_pm_ops tegra_bpmp_pm_ops = {
	.suspend_noirq = tegra_bpmp_suspend,
	.resume_noirq = tegra_bpmp_resume,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.timeout = 600 * USEC_PER_SEC,
		},
	},
	.ops = &tegra186_bpmp_ops,
};
#endif

#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
static const struct tegra_bpmp_soc tegra210_soc = {
	.channels = {
		.cpu_tx = {
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.timeout = 600 * USEC_PER_SEC,
		},
	},
	.ops = &tegra210_bpmp_ops,
};
#endif

static const struct of_device_id tegra_bpmp_match[] = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
	{ .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
#endif
	{ }
};

static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_bpmp_probe,
};
builtin_platform_driver(tegra_bpmp_driver);