drivers/firmware/tegra/bpmp.c
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>

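/*
 * Message header flags as used by this driver: MSG_ACK indicates that the
 * sender expects a response frame, MSG_RING that the receiver should be
 * notified (doorbell or completion) instead of being polled.
 */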
#define MSG_ACK		BIT(0)
#define MSG_RING	BIT(1)

static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}

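/*
 * tegra_bpmp_get() - look up the BPMP instance that @dev references via its
 * "nvidia,bpmp" phandle. Returns -ENOENT if the phandle is missing, -ENODEV
 * if no device is bound to it, and -EPROBE_DEFER if the BPMP driver has not
 * finished probing yet. The reference is released with tegra_bpmp_put().
 */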
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_bpmp *bpmp;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		bpmp = ERR_PTR(-ENODEV);
		goto put;
	}

	bpmp = platform_get_drvdata(pdev);
	if (!bpmp) {
		bpmp = ERR_PTR(-EPROBE_DEFER);
		put_device(&pdev->dev);
		goto put;
	}

put:
	of_node_put(np);
	return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);

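/*
 * tegra_bpmp_put() - release the device reference taken by tegra_bpmp_get().
 */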
void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
	if (bpmp)
		put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);

static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel)
{
	return channel - channel->bpmp->channels;
}

static int
tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned int offset, count;
	int index;

	offset = bpmp->soc->channels.thread.offset;
	count = bpmp->soc->channels.thread.count;

	index = tegra_bpmp_channel_get_index(channel);
	if (index < 0)
		return index;

	if (index < offset || index >= offset + count)
		return -EINVAL;

	return index - offset;
}

static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index)
{
	unsigned int offset = bpmp->soc->channels.thread.offset;
	unsigned int count = bpmp->soc->channels.thread.count;

	if (index >= count)
		return NULL;

	return &bpmp->channels[offset + index];
}

static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp)
{
	unsigned int offset = bpmp->soc->channels.cpu_tx.offset;

	return &bpmp->channels[offset + smp_processor_id()];
}

static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp)
{
	unsigned int offset = bpmp->soc->channels.cpu_rx.offset;

	return &bpmp->channels[offset];
}

static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
{
	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->tx.size == 0 || msg->tx.data) &&
	       (msg->rx.size == 0 || msg->rx.data);
}

static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_read_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ib = NULL;
		return false;
	}

	channel->ib = frame;

	return true;
}

static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t end;

	end = ktime_add_us(ktime_get(), timeout);

	do {
		if (tegra_bpmp_master_acked(channel))
			return 0;
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}

static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ob = NULL;
		return false;
	}

	channel->ob = frame;

	return true;
}

static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t start, now;

	start = ns_to_ktime(local_clock());

	do {
		if (tegra_bpmp_master_free(channel))
			return 0;

		now = ns_to_ktime(local_clock());
	} while (ktime_us_delta(now, start) < timeout);

	return -ETIMEDOUT;
}

static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size)
{
	if (data && size > 0)
		memcpy(data, channel->ib->data, size);

	return tegra_ivc_read_advance(channel->ivc);
}

static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0)
		return index;

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

	up(&bpmp->threaded.lock);

	return err;
}

static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					  unsigned int mrq, unsigned long flags,
					  const void *data, size_t size)
{
	channel->ob->code = mrq;
	channel->ob->flags = flags;

	if (data && size > 0)
		memcpy(channel->ob->data, data, size);

	return tegra_ivc_write_advance(channel->ivc);
}

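/*
 * tegra_bpmp_write_threaded() - claim one of the threaded channels (bounded
 * by the threaded.lock semaphore and the allocated bitmap), write the request
 * with MSG_ACK | MSG_RING, and mark the channel busy so that the mailbox RX
 * callback can complete it once the BPMP has answered.
 */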
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		channel = ERR_PTR(-EBUSY);
		goto unlock;
	}

	channel = tegra_bpmp_channel_get_thread(bpmp, index);
	if (!channel) {
		channel = ERR_PTR(-EINVAL);
		goto unlock;
	}

	if (!tegra_bpmp_master_free(channel)) {
		channel = ERR_PTR(-EBUSY);
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0) {
		clear_bit(index, bpmp->threaded.allocated);
		goto unlock;
	}

	set_bit(index, bpmp->threaded.busy);

unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;
}

static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					unsigned int mrq, unsigned long flags,
					const void *data, size_t size)
{
	int err;

	err = tegra_bpmp_wait_master_free(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
}

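/*
 * tegra_bpmp_transfer_atomic() - send a message on the per-CPU TX channel and
 * busy-wait for the acknowledgement. Must be called with interrupts disabled.
 */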
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_channel_get_tx(bpmp);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0)
		return err;

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	err = tegra_bpmp_wait_ack(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);

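/*
 * tegra_bpmp_transfer() - sleeping variant: send the message on a threaded
 * channel, ring the doorbell and wait for the completion that is signalled
 * from tegra_bpmp_handle_rx(). Must not be called from atomic context.
 */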
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);

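/*
 * Illustrative sketch (not part of this file) of how a client driver might
 * use this interface; the surrounding client code is hypothetical, only
 * tegra_bpmp_get()/tegra_bpmp_transfer()/tegra_bpmp_put() and the MRQ_PING
 * request/response layout come from the code above:
 *
 *	struct tegra_bpmp *bpmp = tegra_bpmp_get(&pdev->dev);
 *	struct mrq_ping_request request = { .challenge = 42 };
 *	struct mrq_ping_response response;
 *	struct tegra_bpmp_message msg = {
 *		.mrq = MRQ_PING,
 *		.tx = { .data = &request, .size = sizeof(request) },
 *		.rx = { .data = &response, .size = sizeof(response) },
 *	};
 *	int err;
 *
 *	if (IS_ERR(bpmp))
 *		return PTR_ERR(bpmp);
 *
 *	err = tegra_bpmp_transfer(bpmp, &msg);
 *	if (!err)
 *		dev_info(&pdev->dev, "reply: %u\n", response.reply);
 *
 *	tegra_bpmp_put(bpmp);
 */
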
static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
						  unsigned int mrq)
{
	struct tegra_bpmp_mrq *entry;

	list_for_each_entry(entry, &bpmp->mrqs, list)
		if (entry->mrq == mrq)
			return entry;

	return NULL;
}

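/*
 * tegra_bpmp_mrq_return() - consume the inbound request frame and, if the
 * sender asked for an acknowledgement (MSG_ACK), write a response frame with
 * @code and @data; if MSG_RING is also set, ring the mailbox so the BPMP
 * picks the response up.
 */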
static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
				  int code, const void *data, size_t size)
{
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	struct tegra_bpmp_mb_data *frame;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_ivc_read_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if ((flags & MSG_ACK) == 0)
		return;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (WARN_ON(IS_ERR(frame)))
		return;

	frame->code = code;

	if (data && size > 0)
		memcpy(frame->data, data, size);

	err = tegra_ivc_write_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = mbox_send_message(bpmp->mbox.channel, NULL);
		if (WARN_ON(err < 0))
			return;

		mbox_client_txdone(bpmp->mbox.channel, 0);
	}
}

static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}

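/*
 * tegra_bpmp_request_mrq() - register @handler for inbound requests carrying
 * the given MRQ code. The handler runs under bpmp->lock from the mailbox RX
 * path and typically replies via tegra_bpmp_mrq_return().
 */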
int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
			   tegra_bpmp_mrq_handler_t handler, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	if (!handler)
		return -EINVAL;

	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry->mrq = mrq;
	entry->handler = handler;
	entry->data = data;
	list_add(&entry->list, &bpmp->mrqs);

	spin_unlock_irqrestore(&bpmp->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);

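/*
 * tegra_bpmp_free_mrq() - unregister a handler previously registered with
 * tegra_bpmp_request_mrq().
 */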
void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry)
		goto unlock;

	list_del(&entry->list);
	devm_kfree(bpmp->dev, entry);

unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);

static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
				       struct tegra_bpmp_channel *channel,
				       void *data)
{
	struct mrq_ping_request *request;
	struct mrq_ping_response response;

	request = (struct mrq_ping_request *)channel->ib->data;

	memset(&response, 0, sizeof(response));
	response.reply = request->challenge << 1;

	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
}

static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
	struct mrq_ping_response response;
	struct mrq_ping_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	ktime_t start, end;
	int err;

	memset(&request, 0, sizeof(request));
	request.challenge = 1;

	memset(&response, 0, sizeof(response));

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	local_irq_save(flags);
	start = ktime_get();
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	end = ktime_get();
	local_irq_restore(flags);

	if (!err)
		dev_dbg(bpmp->dev,
			"ping ok: challenge: %u, response: %u, time: %lld\n",
			request.challenge, response.reply,
			ktime_to_us(ktime_sub(end, start)));

	return err;
}

static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		strlcpy(tag, virt, size);

	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);

	return err;
}

static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
{
	unsigned long flags = channel->ob->flags;

	if ((flags & MSG_RING) == 0)
		return;

	complete(&channel->completion);
}

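/*
 * tegra_bpmp_handle_rx() - mailbox RX callback: dispatch any inbound request
 * pending on the CPU RX channel and complete the threaded channels that the
 * BPMP has acknowledged since the last doorbell.
 */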
static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
{
	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
	struct tegra_bpmp_channel *channel;
	unsigned int i, count;
	unsigned long *busy;

	channel = tegra_bpmp_channel_get_rx(bpmp);
	count = bpmp->soc->channels.thread.count;
	busy = bpmp->threaded.busy;

	if (tegra_bpmp_master_acked(channel))
		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);

	spin_lock(&bpmp->lock);

	for_each_set_bit(i, busy, count) {
		struct tegra_bpmp_channel *channel;

		channel = tegra_bpmp_channel_get_thread(bpmp, i);
		if (!channel)
			continue;

		if (tegra_bpmp_master_acked(channel)) {
			tegra_bpmp_channel_signal(channel);
			clear_bit(i, busy);
		}
	}

	spin_unlock(&bpmp->lock);
}

static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
{
	struct tegra_bpmp *bpmp = data;
	int err;

	if (WARN_ON(bpmp->mbox.channel == NULL))
		return;

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return;

	mbox_client_txdone(bpmp->mbox.channel, 0);
}

static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				   struct tegra_bpmp *bpmp,
				   unsigned int index)
{
	size_t message_size, queue_size;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	err = tegra_ivc_init(channel->ivc, NULL,
			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
			     1, message_size, tegra_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}

static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
	/* reset the channel state */
	tegra_ivc_reset(channel->ivc);

	/* sync the channel state with BPMP */
	while (tegra_ivc_notified(channel->ivc))
		;
}

static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}

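/*
 * tegra_bpmp_probe() - set up the shared-memory IVC channels and the HSP
 * mailbox, verify the firmware with MRQ_PING, report its tag, populate the
 * child devices described under the device tree node and register the clock
 * and reset providers exposed by the BPMP.
 */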
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp_channel *channel;
	struct tegra_bpmp *bpmp;
	unsigned int i;
	char tag[32];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
	if (!bpmp->tx.pool) {
		dev_err(&pdev->dev, "TX shmem pool not found\n");
		return -ENOMEM;
	}

	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
	if (!bpmp->tx.virt) {
		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
	if (!bpmp->rx.pool) {
		dev_err(&pdev->dev, "RX shmem pool not found\n");
		err = -ENOMEM;
		goto free_tx;
	}

	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
	if (!bpmp->rx.virt) {
		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->num_channels = bpmp->soc->channels.cpu_tx.count +
			     bpmp->soc->channels.thread.count +
			     bpmp->soc->channels.cpu_rx.count;

	bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels,
				      sizeof(*channel), GFP_KERNEL);
	if (!bpmp->channels) {
		err = -ENOMEM;
		goto free_rx;
	}

	/* message channel initialization */
	for (i = 0; i < bpmp->num_channels; i++) {
		struct tegra_bpmp_channel *channel = &bpmp->channels[i];

		err = tegra_bpmp_channel_init(channel, bpmp, i);
		if (err < 0)
			goto cleanup_channels;
	}

	/* mbox registration */
	bpmp->mbox.client.dev = &pdev->dev;
	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
	bpmp->mbox.client.tx_block = false;
	bpmp->mbox.client.knows_txdone = false;

	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
	if (IS_ERR(bpmp->mbox.channel)) {
		err = PTR_ERR(bpmp->mbox.channel);
		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
		goto cleanup_channels;
	}

	/* reset message channels */
	for (i = 0; i < bpmp->num_channels; i++) {
		struct tegra_bpmp_channel *channel = &bpmp->channels[i];

		tegra_bpmp_channel_reset(channel);
	}

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto free_mbox;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %s\n", tag);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_clocks(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_resets(bpmp);
	if (err < 0)
		goto free_mrq;

	platform_set_drvdata(pdev, bpmp);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
free_mbox:
	mbox_free_channel(bpmp->mbox.channel);
cleanup_channels:
	while (i--)
		tegra_bpmp_channel_cleanup(&bpmp->channels[i]);
free_rx:
	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
free_tx:
	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
	return err;
}

static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 0,
			.count = 6,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 6,
			.count = 7,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.count = 1,
			.timeout = 0,
		},
	},
	.num_resets = 193,
};

static const struct of_device_id tegra_bpmp_match[] = {
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
	{ }
};

static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
	},
	.probe = tegra_bpmp_probe,
};

static int __init tegra_bpmp_init(void)
{
	return platform_driver_register(&tegra_bpmp_driver);
}
core_initcall(tegra_bpmp_init);