mmc: core: Reset HPI enabled state during re-init and in case of errors
[linux/fpc-iii.git] / drivers / firmware / tegra / bpmp.c
bloba3d5b518c10e4a13deeba7c6e9309c13c33cdded
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
14 #include <linux/clk/tegra.h>
15 #include <linux/genalloc.h>
16 #include <linux/mailbox_client.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/of_device.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm.h>
22 #include <linux/semaphore.h>
23 #include <linux/sched/clock.h>
25 #include <soc/tegra/bpmp.h>
26 #include <soc/tegra/bpmp-abi.h>
27 #include <soc/tegra/ivc.h>
/*
 * Flags carried in a message's header. MSG_ACK is checked by
 * tegra_bpmp_mrq_return() to decide whether a response frame must be
 * written back; MSG_RING is checked (there and in
 * tegra_bpmp_channel_signal()) to decide whether the mailbox doorbell
 * is rung / the waiter is completed.
 */
#define MSG_ACK		BIT(0)
#define MSG_RING	BIT(1)
/* Recover the tegra_bpmp that embeds this mailbox client. */
static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}
/*
 * tegra_bpmp_get() - resolve the BPMP instance referenced by @dev
 *
 * Looks up the "nvidia,bpmp" phandle in @dev's device-tree node and
 * returns the driver data of the platform device bound to it.
 *
 * Return: the tegra_bpmp on success, ERR_PTR(-ENOENT) if the phandle is
 * absent, ERR_PTR(-ENODEV) if no platform device matches the node, or
 * ERR_PTR(-EPROBE_DEFER) if the BPMP driver has not set its drvdata yet.
 * On success the caller holds the device reference taken by
 * of_find_device_by_node() and must release it with tegra_bpmp_put().
 */
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_bpmp *bpmp;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		bpmp = ERR_PTR(-ENODEV);
		goto put;
	}

	bpmp = platform_get_drvdata(pdev);
	if (!bpmp) {
		bpmp = ERR_PTR(-EPROBE_DEFER);
		/* not probed yet: drop the reference we will not hand out */
		put_device(&pdev->dev);
		goto put;
	}

put:
	/* the node reference is only needed for the lookup itself */
	of_node_put(np);
	return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);
/*
 * tegra_bpmp_put() - release the device reference obtained from
 * tegra_bpmp_get(). Passing NULL is a no-op.
 */
void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
	if (bpmp)
		put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);
74 static int
75 tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
77 struct tegra_bpmp *bpmp = channel->bpmp;
78 unsigned int count;
79 int index;
81 count = bpmp->soc->channels.thread.count;
83 index = channel - channel->bpmp->threaded_channels;
84 if (index < 0 || index >= count)
85 return -EINVAL;
87 return index;
90 static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
92 return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
93 (msg->rx.size <= MSG_DATA_MIN_SZ) &&
94 (msg->tx.size == 0 || msg->tx.data) &&
95 (msg->rx.size == 0 || msg->rx.data);
/*
 * Check whether the BPMP has made a new inbound frame available on
 * @channel. On success the frame pointer is cached in channel->ib;
 * otherwise channel->ib is cleared.
 */
static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_read_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ib = NULL;
		return false;
	}

	channel->ib = frame;

	return true;
}
/*
 * Busy-wait, up to the cpu_tx channel timeout (microseconds), for the
 * BPMP to acknowledge the last message written to @channel.
 *
 * Return: 0 once acknowledged, -ETIMEDOUT otherwise.
 */
static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t end;

	end = ktime_add_us(ktime_get(), timeout);

	do {
		if (tegra_bpmp_master_acked(channel))
			return 0;
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}
/*
 * Check whether an outbound frame slot is free on @channel. On success
 * the frame pointer is cached in channel->ob; otherwise channel->ob is
 * cleared.
 */
static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ob = NULL;
		return false;
	}

	channel->ob = frame;

	return true;
}
/*
 * Busy-wait, up to the cpu_tx channel timeout (microseconds), for a free
 * outbound frame on @channel.
 *
 * NOTE(review): this loop times itself with local_clock() rather than
 * ktime_get() as tegra_bpmp_wait_ack() does — presumably so it is safe in
 * very early/atomic contexts; confirm why the two helpers differ.
 *
 * Return: 0 once a frame is free, -ETIMEDOUT otherwise.
 */
static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t start, now;

	start = ns_to_ktime(local_clock());

	do {
		if (tegra_bpmp_master_free(channel))
			return 0;

		now = ns_to_ktime(local_clock());
	} while (ktime_us_delta(now, start) < timeout);

	return -ETIMEDOUT;
}
/*
 * Copy up to @size bytes of the current inbound frame's payload into
 * @data, advance past the frame, and report the frame's response code
 * through @ret.
 *
 * NOTE(review): channel->ib->code is read *after* tegra_ivc_read_advance()
 * has released the frame; this relies on the frame memory staying intact
 * until the remote end reuses it — confirm against the IVC protocol.
 *
 * Return: 0 on success or a negative error from the IVC layer.
 */
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		memcpy(data, channel->ib->data, size);

	err = tegra_ivc_read_advance(channel->ivc);
	if (err < 0)
		return err;

	*ret = channel->ib->code;

	return 0;
}
/*
 * Read the response on a threaded channel and release the channel slot.
 *
 * Under bpmp->lock the response is consumed and the channel's
 * "allocated" bit is cleared; the threaded semaphore slot taken by
 * tegra_bpmp_write_threaded() is released unconditionally — even when
 * the thread-index lookup fails — so the write/read pair always balances
 * the semaphore.
 */
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}
/*
 * Fill the current outbound frame (cached in channel->ob) with the MRQ
 * code, flags and payload, then hand it to the IVC layer.
 *
 * Return: 0 on success or a negative error from tegra_ivc_write_advance().
 */
static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					  unsigned int mrq, unsigned long flags,
					  const void *data, size_t size)
{
	channel->ob->code = mrq;
	channel->ob->flags = flags;

	if (data && size > 0)
		memcpy(channel->ob->data, data, size);

	return tegra_ivc_write_advance(channel->ivc);
}
/*
 * Claim a free threaded channel and write a request to it.
 *
 * Acquires one slot of the threaded semaphore (bounded by the channel
 * count), then, under bpmp->lock, picks the first unallocated channel,
 * writes the message with MSG_ACK | MSG_RING, and marks the channel
 * busy. On any failure the allocated bit (if set) and the semaphore
 * slot are released again; on success both are held until
 * tegra_bpmp_channel_read() completes the exchange.
 *
 * Return: the claimed channel, or an ERR_PTR on failure.
 */
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	if (!tegra_bpmp_master_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}
266 static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
267 unsigned int mrq, unsigned long flags,
268 const void *data, size_t size)
270 int err;
272 err = tegra_bpmp_wait_master_free(channel);
273 if (err < 0)
274 return err;
276 return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
/*
 * tegra_bpmp_transfer_atomic() - perform a synchronous BPMP exchange in
 * atomic context (interrupts must already be disabled; enforced by the
 * WARN_ON).
 *
 * Uses the dedicated tx_channel: writes the request under the atomic TX
 * lock, rings the mailbox doorbell, busy-waits for the acknowledgment
 * and then reads the response into msg->rx.
 *
 * Return: 0 on success, -EPERM if called with IRQs enabled, -EINVAL for
 * a malformed message, or a negative error from the transport.
 */
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = bpmp->tx_channel;

	spin_lock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0) {
		spin_unlock(&bpmp->atomic_tx_lock);
		return err;
	}

	spin_unlock(&bpmp->atomic_tx_lock);

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	/* tx_block is false, so report completion manually */
	mbox_client_txdone(bpmp->mbox.channel, 0);

	err = tegra_bpmp_wait_ack(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
/*
 * tegra_bpmp_transfer() - perform a synchronous BPMP exchange from
 * sleeping context (IRQs must be enabled; enforced by the WARN_ON).
 *
 * Claims a threaded channel via tegra_bpmp_write_threaded(), rings the
 * mailbox doorbell and sleeps on the channel's completion (signalled
 * from tegra_bpmp_handle_rx()) before reading the response.
 *
 * NOTE(review): if mbox_send_message() fails or the completion times
 * out, the channel slot claimed by tegra_bpmp_write_threaded() (the
 * "allocated" bit and the semaphore count) is never released here —
 * possibly deliberate, since a late BPMP response could still target
 * the slot; confirm whether this leak is intentional.
 *
 * Return: 0 on success or a negative error code.
 */
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
354 static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
355 unsigned int mrq)
357 struct tegra_bpmp_mrq *entry;
359 list_for_each_entry(entry, &bpmp->mrqs, list)
360 if (entry->mrq == mrq)
361 return entry;
363 return NULL;
/*
 * tegra_bpmp_mrq_return() - complete an inbound MRQ request.
 *
 * Consumes the current inbound frame, then, if the request carried
 * MSG_ACK, writes a response frame with @code and up to
 * MSG_DATA_MIN_SZ bytes of @data. If the request also carried
 * MSG_RING, the mailbox doorbell is rung to notify the BPMP.
 *
 * All failure cases are WARN_ON'd and abort silently — callers cannot
 * observe the error.
 */
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	/* capture flags before the inbound frame is released */
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	struct tegra_bpmp_mb_data *frame;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_ivc_read_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if ((flags & MSG_ACK) == 0)
		return;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (WARN_ON(IS_ERR(frame)))
		return;

	frame->code = code;

	if (data && size > 0)
		memcpy(frame->data, data, size);

	err = tegra_ivc_write_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = mbox_send_message(bpmp->mbox.channel, NULL);
		if (WARN_ON(err < 0))
			return;

		mbox_client_txdone(bpmp->mbox.channel, 0);
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
/*
 * Dispatch an inbound MRQ request to its registered handler.
 *
 * If no handler is registered, the request is answered with -EINVAL and
 * a zero payload. Note that the handler itself runs with bpmp->lock
 * held (the lock is only dropped early on the no-handler path).
 */
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}
/*
 * tegra_bpmp_request_mrq() - register @handler for inbound requests with
 * code @mrq; @data is passed through to the handler.
 *
 * The entry is devm-allocated against the BPMP device. No check is made
 * for an already-registered code; the most recent registration is found
 * first by tegra_bpmp_find_mrq() (list_add prepends).
 *
 * Return: 0 on success, -EINVAL if @handler is NULL, -ENOMEM on
 * allocation failure.
 */
int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
			   tegra_bpmp_mrq_handler_t handler, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	if (!handler)
		return -EINVAL;

	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry->mrq = mrq;
	entry->handler = handler;
	entry->data = data;
	list_add(&entry->list, &bpmp->mrqs);

	spin_unlock_irqrestore(&bpmp->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
/*
 * tegra_bpmp_free_mrq() - unregister the handler for @mrq.
 *
 * Silently does nothing if no handler is registered. The @data argument
 * is accepted for API symmetry but is not used for matching here.
 */
void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry)
		goto unlock;

	list_del(&entry->list);
	devm_kfree(bpmp->dev, entry);

unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
473 static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
474 struct tegra_bpmp_channel *channel,
475 void *data)
477 struct mrq_ping_request *request;
478 struct mrq_ping_response response;
480 request = (struct mrq_ping_request *)channel->ib->data;
482 memset(&response, 0, sizeof(response));
483 response.reply = request->challenge << 1;
485 tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
/*
 * Send an MRQ_PING to the BPMP and log the round-trip time.
 *
 * The transfer is done atomically with local IRQs disabled so the
 * measured latency is not skewed by interrupts. Used as a liveness
 * check during probe.
 *
 * Return: 0 on success or a negative error from the transfer.
 */
static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
	struct mrq_ping_response response;
	struct mrq_ping_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	ktime_t start, end;
	int err;

	memset(&request, 0, sizeof(request));
	request.challenge = 1;

	memset(&response, 0, sizeof(response));

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	local_irq_save(flags);
	start = ktime_get();
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	end = ktime_get();
	local_irq_restore(flags);

	if (!err)
		dev_dbg(bpmp->dev,
			"ping ok: challenge: %u, response: %u, time: %lld\n",
			request.challenge, response.reply,
			ktime_to_us(ktime_sub(end, start)));

	return err;
}
/*
 * Query the BPMP firmware tag string via MRQ_QUERY_TAG.
 *
 * The BPMP writes the tag into a DMA-coherent bounce buffer whose bus
 * address is passed in the request; on success the tag is copied into
 * @tag (truncated and NUL-terminated to @size by strlcpy).
 *
 * NOTE(review): strlcpy assumes the firmware NUL-terminates the tag
 * within MSG_DATA_MIN_SZ bytes — confirm against the MRQ_QUERY_TAG ABI.
 *
 * Return: 0 on success, -ENOMEM if the bounce buffer cannot be
 * allocated, or a negative error from the transfer.
 */
static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		strlcpy(tag, virt, size);

	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);

	return err;
}
559 static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
561 unsigned long flags = channel->ob->flags;
563 if ((flags & MSG_RING) == 0)
564 return;
566 complete(&channel->completion);
569 static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
571 struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
572 struct tegra_bpmp_channel *channel;
573 unsigned int i, count;
574 unsigned long *busy;
576 channel = bpmp->rx_channel;
577 count = bpmp->soc->channels.thread.count;
578 busy = bpmp->threaded.busy;
580 if (tegra_bpmp_master_acked(channel))
581 tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
583 spin_lock(&bpmp->lock);
585 for_each_set_bit(i, busy, count) {
586 struct tegra_bpmp_channel *channel;
588 channel = &bpmp->threaded_channels[i];
590 if (tegra_bpmp_master_acked(channel)) {
591 tegra_bpmp_channel_signal(channel);
592 clear_bit(i, busy);
596 spin_unlock(&bpmp->lock);
/*
 * IVC notification callback: ring the HSP mailbox doorbell so the BPMP
 * processes the IVC state change. A missing mailbox channel (called
 * before mbox_request_channel() in probe) is WARN_ON'd and ignored, as
 * is a send failure.
 */
static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
{
	struct tegra_bpmp *bpmp = data;
	int err;

	if (WARN_ON(bpmp->mbox.channel == NULL))
		return;

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return;

	mbox_client_txdone(bpmp->mbox.channel, 0);
}
/*
 * Initialize one BPMP channel: allocate its IVC instance and bind it to
 * the @index'th frame-sized slice of the shared TX/RX memory regions.
 *
 * On success channel->bpmp is set, which tegra_bpmp_probe() uses as the
 * "was initialized" marker during error unwinding.
 *
 * Return: 0 on success, -ENOMEM or a negative error from
 * tegra_ivc_init().
 */
static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				   struct tegra_bpmp *bpmp,
				   unsigned int index)
{
	size_t message_size, queue_size;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	/* single-frame queue per channel, notifications via the mailbox */
	err = tegra_ivc_init(channel->ivc, NULL,
			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
			     1, message_size, tegra_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}
/*
 * Reset a channel's IVC state and spin until the notified handshake
 * with the BPMP completes (tegra_ivc_notified() returns 0).
 */
static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
	/* reset the channel state */
	tegra_ivc_reset(channel->ivc);

	/* sync the channel state with BPMP */
	while (tegra_ivc_notified(channel->ivc))
		;
}
/* Tear down a channel's IVC state (inverse of tegra_bpmp_channel_init). */
static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}
/*
 * Probe: set up shared memory, channels and the HSP mailbox, verify the
 * firmware with a ping, then register the BPMP sub-services (clocks,
 * resets, powergates, debugfs) and populate child devices.
 *
 * Error unwinding runs in reverse order of acquisition via the goto
 * chain at the bottom; threaded channels use channel->bpmp as the
 * "initialized" marker so a partially-initialized array unwinds safely.
 */
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	unsigned int i;
	char tag[32];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	/* shared-memory pools: index 0 is TX (CPU->BPMP), index 1 is RX */
	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
	if (!bpmp->tx.pool) {
		dev_err(&pdev->dev, "TX shmem pool not found\n");
		return -ENOMEM;
	}

	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
	if (!bpmp->tx.virt) {
		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
	if (!bpmp->rx.pool) {
		dev_err(&pdev->dev, "RX shmem pool not found\n");
		err = -ENOMEM;
		goto free_tx;
	}

	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
	if (!bpmp->rx.virt) {
		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	/* the semaphore count bounds concurrent threaded transfers */
	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy) {
		err = -ENOMEM;
		goto free_rx;
	}

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
					GFP_KERNEL);
	if (!bpmp->rx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels) {
		err = -ENOMEM;
		goto free_rx;
	}

	err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
				      bpmp->soc->channels.cpu_tx.offset);
	if (err < 0)
		goto free_rx;

	err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
				      bpmp->soc->channels.cpu_rx.offset);
	if (err < 0)
		goto cleanup_tx_channel;

	for (i = 0; i < bpmp->threaded.count; i++) {
		err = tegra_bpmp_channel_init(
			&bpmp->threaded_channels[i], bpmp,
			bpmp->soc->channels.thread.offset + i);
		if (err < 0)
			goto cleanup_threaded_channels;
	}

	/* mbox registration */
	bpmp->mbox.client.dev = &pdev->dev;
	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
	bpmp->mbox.client.tx_block = false;
	bpmp->mbox.client.knows_txdone = false;

	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
	if (IS_ERR(bpmp->mbox.channel)) {
		err = PTR_ERR(bpmp->mbox.channel);
		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
		goto cleanup_threaded_channels;
	}

	/* reset message channels */
	tegra_bpmp_channel_reset(bpmp->tx_channel);
	tegra_bpmp_channel_reset(bpmp->rx_channel);
	for (i = 0; i < bpmp->threaded.count; i++)
		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto free_mbox;

	/* liveness check against the firmware */
	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %s\n", tag);

	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_clocks(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_resets(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_powergates(bpmp);
	if (err < 0)
		goto free_mrq;

	/* debugfs failure is non-fatal */
	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
free_mbox:
	mbox_free_channel(bpmp->mbox.channel);
cleanup_threaded_channels:
	for (i = 0; i < bpmp->threaded.count; i++) {
		/* channel->bpmp is only set once init succeeded */
		if (bpmp->threaded_channels[i].bpmp)
			tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
	}

	tegra_bpmp_channel_cleanup(bpmp->rx_channel);
cleanup_tx_channel:
	tegra_bpmp_channel_cleanup(bpmp->tx_channel);
free_rx:
	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
free_tx:
	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
	return err;
}
/*
 * System resume: re-reset all IVC channels so their state is
 * re-synchronized with the BPMP after suspend.
 */
static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
	unsigned int i;

	/* reset message channels */
	tegra_bpmp_channel_reset(bpmp->tx_channel);
	tegra_bpmp_channel_reset(bpmp->rx_channel);

	for (i = 0; i < bpmp->threaded.count; i++)
		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);

	return 0;
}
/* No suspend hook is needed; only resume re-synchronizes the channels. */
static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume);
/*
 * Tegra186 channel layout: threaded channels occupy slots 0..2, the
 * atomic CPU TX channel slot 3 and the CPU RX channel slot 13.
 * Timeouts are in microseconds; the RX channel never waits.
 */
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,
		},
	},
	.num_resets = 193,
};
/* Device-tree match table (NULL-terminated). */
static const struct of_device_id tegra_bpmp_match[] = {
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
	{ }
};
/* No .remove: the BPMP is a core firmware service and is never unbound. */
static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
	},
	.probe = tegra_bpmp_probe,
};
/*
 * Registered at core_initcall level so the BPMP services are available
 * before dependent drivers (clocks, resets, powergates) probe.
 */
static int __init tegra_bpmp_init(void)
{
	return platform_driver_register(&tegra_bpmp_driver);
}
core_initcall(tegra_bpmp_init);