/*
 * Remote Processor Framework
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Mark Grosen <mgrosen@ti.com>
 * Brian Swetland <swetland@google.com>
 * Fernando Guzman Lugo <fernando.lugo@ti.com>
 * Robert Tivy <rtivy@ti.com>
 * Armando Uribe De Leon <x0095078@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/remoteproc.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <plat/remoteproc.h>

/* list of available remote processors on this board */
static LIST_HEAD(rprocs);
static DEFINE_SPINLOCK(rprocs_lock);

/* debugfs parent dir */
static struct dentry *rproc_dbg;
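
/*
 * Note on the trace-buffer layout assumed by rproc_format_trace_buf() below
 * (inferred from the code, not from a published spec): the remote side
 * treats the buffer as a ring of printable characters and keeps its write
 * index in the last two 32-bit words of the trace resource, e.g.:
 *
 *	[0 .. size-9]	NUL-padded log text (ring)
 *	[size-8 ..]	u32 write index (plus one reserved u32)
 *
 * A read therefore returns the text from the write index to the end of the
 * ring first, then wraps around and returns the text from the beginning.
 */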
static ssize_t rproc_format_trace_buf(struct rproc *rproc, char __user *userbuf,
				      size_t count, loff_t *ppos,
				      const void *src, int size)
{
	const char *buf = (const char *) src;
	ssize_t num_copied = 0;
	static int from_beg;	/* wrap-around state; note: shared across rprocs */
	loff_t pos = *ppos;
	int *w_idx;
	int i, w_pos, ret = 0;

	if (mutex_lock_interruptible(&rproc->tlock))
		return -EINTR;

	/* When src is NULL, the remoteproc is offline. */
	if (!src) {
		ret = -EIO;
		goto unlock;
	}

	if (size < 2 * sizeof(u32)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* The write index lives in the last two words of the trace buffer */
	size = size - (sizeof(u32) * 2);
	w_idx = (int *)(buf + size);
	w_pos = *w_idx;

	if (from_beg)
		goto print_beg;

	if (pos == 0)
		*ppos = w_pos;

	for (i = w_pos; i < size && buf[i]; i++)
		;

	if (i > w_pos)
		num_copied =
			simple_read_from_buffer(userbuf, count, ppos, src, i);

	if (!num_copied) {
		from_beg = 1;
		*ppos = 0;
	} else {
		ret = num_copied;
		goto unlock;
	}

print_beg:
	for (i = 0; i < w_pos && buf[i]; i++)
		;

	if (i) {
		num_copied =
			simple_read_from_buffer(userbuf, count, ppos, src, i);
		if (!num_copied)
			from_beg = 0;
		ret = num_copied;
	}
unlock:
	mutex_unlock(&rproc->tlock);
	return ret;
}
static ssize_t rproc_name_read(struct file *filp, char __user *userbuf,
			       size_t count, loff_t *ppos)
{
	struct rproc *rproc = filp->private_data;
	/* need room for the name, a newline and a terminating null */
	char buf[RPROC_MAX_NAME + 2];
	int i;

	i = snprintf(buf, RPROC_MAX_NAME + 2, "%s\n", rproc->name);

	return simple_read_from_buffer(userbuf, count, ppos, buf, i);
}

static ssize_t rproc_version_read(struct file *filp, char __user *userbuf,
				  size_t count, loff_t *ppos)
{
	struct rproc *rproc = filp->private_data;
	char *pch;
	int len;

	/* the header is only available once firmware has been loaded */
	if (!rproc->header)
		return 0;

	pch = strstr(rproc->header, "version:");
	if (!pch)
		return 0;

	pch += strlen("version:") + 1;
	len = rproc->header_len - (pch - rproc->header);

	return simple_read_from_buffer(userbuf, count, ppos, pch, len);
}

static int rproc_open_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define DEBUGFS_READONLY_FILE(name, v, l)				\
static ssize_t name## _rproc_read(struct file *filp,			\
		char __user *ubuf, size_t count, loff_t *ppos)		\
{									\
	struct rproc *rproc = filp->private_data;			\
	return rproc_format_trace_buf(rproc, ubuf, count, ppos, v, l);	\
}									\
									\
static const struct file_operations name ##_rproc_ops = {		\
	.read = name ##_rproc_read,					\
	.open = rproc_open_generic,					\
	.llseek = generic_file_llseek,					\
};
#ifdef CONFIG_REMOTEPROC_CORE_DUMP

/* + 1 for the notes segment */
#define NUM_PHDR (RPROC_MAX_MEM_ENTRIES + 1)

#define CORE_STR "CORE"

/* Intermediate core-dump-file format */
struct core_rproc {
	struct rproc *rproc;
	/* ELF state */
	Elf_Half e_phnum;

	struct core {
		struct elfhdr elf;
		struct elf_phdr phdr[NUM_PHDR];
		struct {
			struct elf_note note_prstatus;
			char name[sizeof(CORE_STR)];
			struct elf_prstatus prstatus __aligned(4);
		} core_note __packed __aligned(4);
	} core __packed;

	loff_t offset;
};
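
/*
 * Resulting core-file layout, sketched from the structures above and from
 * setup_rproc_elf_core_dump() below:
 *
 *	+---------------------------+ offset 0
 *	| struct elfhdr             |
 *	| struct elf_phdr[NUM_PHDR] | one PT_LOAD per dumped region + PT_NOTE
 *	| core_note (NT_PRSTATUS)   |
 *	+---------------------------+ roundup(sizeof(struct core), PAGE_SIZE)
 *	| region 0 contents         |
 *	| region 1 contents         |
 *	| ...                       |
 *	+---------------------------+
 */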
/* Return the number of segments to be written to the core file */
static int rproc_core_map_count(const struct rproc *rproc)
{
	int i = 0;
	int count = 0;

	for (;; i++) {
		if (!rproc->memory_maps[i].size)
			break;
		if (!rproc->memory_maps[i].core)
			continue;
		count++;
	}

	/* The Ducati has a low number of segments */
	if (count > PN_XNUM)
		return -1;

	return count;
}

/* Copied from fs/binfmt_elf.c */
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELFCLASS32;
	elf->e_ident[EI_DATA] = ELFDATA2LSB;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELFOSABI_NONE;

	elf->e_type = ET_CORE;
	elf->e_machine = EM_ARM;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = EF_ARM_EABI_VER5;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;

	return;
}

static void fill_elf_segment_headers(struct core_rproc *d)
{
	int i = 0;
	int hi = 0;
	loff_t offset = d->offset;

	for (;; i++) {
		u32 size;

		size = d->rproc->memory_maps[i].size;
		if (!size)
			break;
		if (!d->rproc->memory_maps[i].core)
			continue;

		BUG_ON(hi >= d->e_phnum - 1);

		d->core.phdr[hi].p_type = PT_LOAD;
		d->core.phdr[hi].p_offset = offset;
		d->core.phdr[hi].p_vaddr = d->rproc->memory_maps[i].da;
		d->core.phdr[hi].p_paddr = d->rproc->memory_maps[i].pa;
		d->core.phdr[hi].p_filesz = size;
		d->core.phdr[hi].p_memsz = size;
		/* FIXME: get these from the Ducati */
		d->core.phdr[hi].p_flags = PF_R | PF_W | PF_X;

		pr_debug("%s: phdr type %d f_off %08x va %08x pa %08x fl %x\n",
			__func__,
			d->core.phdr[hi].p_type,
			d->core.phdr[hi].p_offset,
			d->core.phdr[hi].p_vaddr,
			d->core.phdr[hi].p_paddr,
			d->core.phdr[hi].p_flags);

		offset += size;
		hi++;
	}
}

static int setup_rproc_elf_core_dump(struct core_rproc *d)
{
	short __phnum;
	struct elf_phdr *nphdr;
	struct exc_regs *xregs = d->rproc->cdump_buf1;
	struct pt_regs *regs =
		(struct pt_regs *)&d->core.core_note.prstatus.pr_reg;

	memset(&d->core.elf, 0, sizeof(d->core.elf));

	__phnum = rproc_core_map_count(d->rproc);
	if (__phnum < 0 || __phnum > ARRAY_SIZE(d->core.phdr))
		return -EIO;
	d->e_phnum = __phnum + 1; /* + 1 for notes */

	pr_info("number of segments: %d\n", d->e_phnum);

	fill_elf_header(&d->core.elf, d->e_phnum);

	nphdr = d->core.phdr + __phnum;
	nphdr->p_type = PT_NOTE;
	nphdr->p_offset = 0;
	nphdr->p_vaddr = 0;
	nphdr->p_paddr = 0;
	nphdr->p_filesz = 0;
	nphdr->p_memsz = 0;
	nphdr->p_flags = 0;
	nphdr->p_align = 0;

	/* The notes start right after the phdr array.  Adjust p_filesz
	 * accordingly if you add more notes.
	 */
	nphdr->p_filesz = sizeof(d->core.core_note);
	nphdr->p_offset = offsetof(struct core, core_note);

	d->core.core_note.note_prstatus.n_namesz = sizeof(CORE_STR);
	d->core.core_note.note_prstatus.n_descsz =
		sizeof(struct elf_prstatus);
	d->core.core_note.note_prstatus.n_type = NT_PRSTATUS;
	memcpy(d->core.core_note.name, CORE_STR, sizeof(CORE_STR));

	remoteproc_fill_pt_regs(regs, xregs);

	/* We ignore the NVIC registers for now */

	d->offset = sizeof(struct core);
	d->offset = roundup(d->offset, PAGE_SIZE);
	fill_elf_segment_headers(d);
	return 0;
}
static int core_rproc_open(struct inode *inode, struct file *filp)
{
	int i;
	int ret;
	struct core_rproc *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->rproc = inode->i_private;
	filp->private_data = d;

	ret = setup_rproc_elf_core_dump(d);
	if (ret) {
		kfree(d);
		return ret;
	}

	if (0) { /* debugging aid: flip to 1 to dump the region table on open */
		const struct rproc *rproc;
		rproc = d->rproc;
		for (i = 0; rproc->memory_maps[i].size; i++) {
			pr_info("%s: memory_map[%d] pa %08x sz %d core %d\n",
				__func__, i,
				rproc->memory_maps[i].pa,
				rproc->memory_maps[i].size,
				rproc->memory_maps[i].core);
		}
	}

	return 0;
}
static int core_rproc_release(struct inode *inode, struct file *filp)
{
	pr_info("%s\n", __func__);
	kfree(filp->private_data);
	return 0;
}

/* Given an offset to read from, return the index of the memory-map region to
 * read from.
 */
static int rproc_memory_map_index(const struct rproc *rproc, loff_t *off)
{
	int i = 0;

	for (;; i++) {
		int size = rproc->memory_maps[i].size;

		if (!size)
			break;
		if (!rproc->memory_maps[i].core)
			continue;
		if (*off < size)
			return i;

		*off -= size;
	}

	return -1;
}
static ssize_t core_rproc_write(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off)
{
	char cmd[100];
	int cmdlen;
	struct core_rproc *d = filp->private_data;
	struct rproc *rproc = d->rproc;

	cmdlen = min(sizeof(cmd) - 1, count);
	if (copy_from_user(cmd, buffer, cmdlen))
		return -EFAULT;
	cmd[cmdlen] = 0;

	if (!strncmp(cmd, "enable", 6)) {
		pr_info("remoteproc %s halt on crash ENABLED\n", rproc->name);
		rproc->halt_on_crash = true;
		goto done;
	} else if (!strncmp(cmd, "disable", 7)) {
		pr_info("remoteproc %s halt on crash DISABLED\n", rproc->name);
		rproc->halt_on_crash = false;
		/* If you disable halt-on-crash after the remote processor
		 * has already crashed, we will let it continue crashing (so it
		 * can get handled otherwise) as well.
		 */
		if (rproc->state != RPROC_CRASHED)
			goto done;
	} else if (strncmp(cmd, "continue", 8)) {
		pr_err("%s: invalid command: expecting \"enable\", "
			"\"disable\", or \"continue\"\n", __func__);
		return -EINVAL;
	}

	if (rproc->state == RPROC_CRASHED) {
		pr_info("remoteproc %s: resuming crash recovery\n",
			rproc->name);
		blocking_notifier_call_chain(&rproc->nbh, RPROC_ERROR, NULL);
	}

done:
	*off += count;
	return count;
}
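
/*
 * Illustrative shell usage of the "core" debugfs file (the exact path
 * depends on the debugfs mount point and the device name used for this
 * rproc's debugfs directory):
 *
 *	echo enable   > /sys/kernel/debug/remoteproc/<rproc>/core
 *	echo disable  > /sys/kernel/debug/remoteproc/<rproc>/core
 *	echo continue > /sys/kernel/debug/remoteproc/<rproc>/core
 *	cat /sys/kernel/debug/remoteproc/<rproc>/core > rproc.core
 *
 * "enable"/"disable" toggle halt-on-crash, "continue" resumes deferred crash
 * recovery, and reading the file streams out the ELF core dump.
 */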
static ssize_t core_rproc_read(struct file *filp,
		char __user *userbuf, size_t count, loff_t *ppos)
{
	const struct core_rproc *d = filp->private_data;
	const struct rproc *rproc = d->rproc;
	int index;
	loff_t pos;
	size_t remaining = count;
	ssize_t copied = 0;

	pr_debug("%s count %d off %lld\n", __func__, count, *ppos);

	/* copy the ELF and segment header first */
	if (*ppos < d->offset) {
		copied = simple_read_from_buffer(userbuf, count,
					ppos, &d->core, d->offset);
		if (copied < 0) {
			pr_err("%s: could not copy ELF header\n", __func__);
			return -EIO;
		}

		pr_debug("%s: copied %d/%lld from ELF header\n", __func__,
			copied, d->offset);
		remaining -= copied;
	}

	/* copy the data */
	while (remaining) {
		size_t remaining_in_region;
		const struct rproc_mem_entry *r;
		void __iomem *kvaddr;

		pos = *ppos - d->offset;
		index = rproc_memory_map_index(rproc, &pos);
		if (index < 0) {
			pr_info("%s: EOF at off %lld\n", __func__, *ppos);
			break;
		}
		r = &rproc->memory_maps[index];

		remaining_in_region = r->size - pos;
		if (remaining_in_region > remaining)
			remaining_in_region = remaining;

		pr_debug("%s: iomap 0x%x size %d\n", __func__, r->pa, r->size);
		kvaddr = ioremap(r->pa, r->size);
		if (!kvaddr) {
			pr_err("%s: iomap error: region %d (phys 0x%08x size %d)\n",
				__func__, index, r->pa, r->size);
			return -EIO;
		}

		pr_debug("%s: off %lld -> [%d](pa 0x%08x off %lld sz %d)\n",
			__func__,
			*ppos, index, r->pa, pos, r->size);

		if (copy_to_user(userbuf + copied, kvaddr + pos,
				remaining_in_region)) {
			pr_err("%s: copy_to_user error\n", __func__);
			/* don't leak the mapping on the error path */
			iounmap(kvaddr);
			return -EFAULT;
		}

		iounmap(kvaddr);

		copied += remaining_in_region;
		*ppos += remaining_in_region;
		BUG_ON(remaining < remaining_in_region);
		remaining -= remaining_in_region;
	}

	return copied;
}
static const struct file_operations core_rproc_ops = {
	.read = core_rproc_read,
	.write = core_rproc_write,
	.open = core_rproc_open,
	.release = core_rproc_release,
	.llseek = generic_file_llseek,
};
#endif /* CONFIG_REMOTEPROC_CORE_DUMP */

static const struct file_operations rproc_name_ops = {
	.read = rproc_name_read,
	.open = rproc_open_generic,
	.llseek = generic_file_llseek,
};

static const struct file_operations rproc_version_ops = {
	.read = rproc_version_read,
	.open = rproc_open_generic,
	.llseek = generic_file_llseek,
};

DEBUGFS_READONLY_FILE(trace0, rproc->trace_buf0, rproc->trace_len0);
DEBUGFS_READONLY_FILE(trace1, rproc->trace_buf1, rproc->trace_len1);
DEBUGFS_READONLY_FILE(trace0_last, rproc->last_trace_buf0,
		rproc->last_trace_len0);
DEBUGFS_READONLY_FILE(trace1_last, rproc->last_trace_buf1,
		rproc->last_trace_len1);
DEBUGFS_READONLY_FILE(cdump0, rproc->cdump_buf0, rproc->cdump_len0);
DEBUGFS_READONLY_FILE(cdump1, rproc->cdump_buf1, rproc->cdump_len1);

#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, 0444, rproc->dbg_dir,		\
			rproc, &name## _rproc_ops)
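
/*
 * For reference: DEBUGFS_READONLY_FILE(trace0, ...) above expands to a
 * trace0_rproc_read() wrapper around rproc_format_trace_buf() plus a
 * matching trace0_rproc_ops file_operations, and DEBUGFS_ADD(trace0) later
 * creates the read-only "trace0" entry in this rproc's debugfs directory.
 */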
/**
 * __find_rproc_by_name - find a registered remote processor by name
 * @name: name of the remote processor
 *
 * Internal function that returns the rproc @name, or NULL if @name does
 * not exist.
 */
static struct rproc *__find_rproc_by_name(const char *name)
{
	struct rproc *rproc;
	struct list_head *tmp;

	spin_lock(&rprocs_lock);

	list_for_each(tmp, &rprocs) {
		rproc = list_entry(tmp, struct rproc, next);
		if (!strcmp(rproc->name, name))
			break;
		rproc = NULL;
	}

	spin_unlock(&rprocs_lock);

	return rproc;
}

/**
 * rproc_da_to_pa - convert a device (virtual) address to its physical address
 * @maps: the remote processor's memory mappings array
 * @da: a device address (as seen by the remote processor)
 * @pa: pointer to the physical address result
 *
 * This function converts @da to its physical address (pa) by going through
 * @maps, looking for a mapping that contains @da, and then calculating the
 * appropriate pa.
 *
 * On success 0 is returned, and the @pa is updated with the result.
 * Otherwise, -EINVAL is returned.
 */
static int
rproc_da_to_pa(const struct rproc_mem_entry *maps, u64 da, phys_addr_t *pa)
{
	int i;
	u64 offset;

	for (i = 0; maps[i].size; i++) {
		const struct rproc_mem_entry *me = &maps[i];

		if (da >= me->da && da < (me->da + me->size)) {
			offset = da - me->da;
			pr_debug("%s: matched mem entry no. %d\n",
				__func__, i);
			*pa = me->pa + offset;
			return 0;
		}
	}

	return -EINVAL;
}
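
/*
 * Worked example with hypothetical numbers: given a map entry of
 * { .da = 0x20000000, .pa = 0x9c000000, .size = 0x100000 }, a device address
 * da = 0x20001000 falls inside that entry, so offset = 0x1000 and the
 * function returns 0 with *pa = 0x9c001000.
 */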
static int rproc_mmu_fault_isr(struct rproc *rproc, u64 da, u32 flags)
{
	dev_err(rproc->dev, "%s\n", __func__);
	schedule_work(&rproc->error_work);
	return -EIO;
}

static int rproc_watchdog_isr(struct rproc *rproc)
{
	dev_err(rproc->dev, "%s\n", __func__);
	schedule_work(&rproc->error_work);
	return 0;
}

static int rproc_crash(struct rproc *rproc)
{
	init_completion(&rproc->error_comp);
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	pm_runtime_dont_use_autosuspend(rproc->dev);
#endif
	if (rproc->ops->dump_registers)
		rproc->ops->dump_registers(rproc);

	if (rproc->trace_buf0 && rproc->last_trace_buf0)
		memcpy(rproc->last_trace_buf0, rproc->trace_buf0,
				rproc->last_trace_len0);
	if (rproc->trace_buf1 && rproc->last_trace_buf1)
		memcpy(rproc->last_trace_buf1, rproc->trace_buf1,
				rproc->last_trace_len1);
	rproc->state = RPROC_CRASHED;

	return 0;
}
static int _event_notify(struct rproc *rproc, int type, void *data)
{
	if (type == RPROC_ERROR) {
		mutex_lock(&rproc->lock);
		/* only notify first crash */
		if (rproc->state == RPROC_CRASHED) {
			mutex_unlock(&rproc->lock);
			return 0;
		}
		rproc_crash(rproc);
		mutex_unlock(&rproc->lock);
		/* If halt_on_crash is set, do not notify the error */
		pr_info("remoteproc: %s has crashed\n", rproc->name);
		if (rproc->halt_on_crash) {
			/* FIXME: send uevent here */
			pr_info("remoteproc: %s: halt-on-crash enabled: "
				"deferring crash recovery\n", rproc->name);
			return 0;
		}
	}

	return blocking_notifier_call_chain(&rproc->nbh, type, data);
}
/**
 * rproc_start - power on the remote processor and let it start running
 * @rproc: the remote processor
 * @bootaddr: address of first instruction to execute (optional)
 *
 * Start a remote processor (i.e. power it on, take it out of reset, etc..)
 */
static void rproc_start(struct rproc *rproc, u64 bootaddr)
{
	struct device *dev = rproc->dev;
	int err;

	err = mutex_lock_interruptible(&rproc->lock);
	if (err) {
		dev_err(dev, "can't lock remote processor %d\n", err);
		return;
	}

	if (rproc->ops->iommu_init) {
		err = rproc->ops->iommu_init(rproc, rproc_mmu_fault_isr);
		if (err) {
			dev_err(dev, "can't configure iommu %d\n", err);
			goto unlock_mutex;
		}
	}

	if (rproc->ops->watchdog_init) {
		err = rproc->ops->watchdog_init(rproc, rproc_watchdog_isr);
		if (err) {
			dev_err(dev, "can't configure watchdog timer %d\n",
				err);
			goto wdt_error;
		}
	}

#ifdef CONFIG_REMOTEPROC_CORE_DUMP
	debugfs_create_file("core", 0400, rproc->dbg_dir,
			rproc, &core_rproc_ops);
#endif

	err = rproc->ops->start(rproc, bootaddr);
	if (err) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, err);
		goto start_error;
	}

#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, rproc->sus_timeout);
	pm_runtime_get_noresume(rproc->dev);
	pm_runtime_set_active(rproc->dev);
	if (!rproc->secure_mode)
		pm_runtime_enable(rproc->dev);
	pm_runtime_mark_last_busy(rproc->dev);
	pm_runtime_put_autosuspend(rproc->dev);
#endif

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);
	rproc->secure_ok = true;
	complete_all(&rproc->secure_restart);
	mutex_unlock(&rproc->lock);

	return;

	/*
	 * signal always, as we would need a notification in both the
	 * normal->secure & secure->normal mode transitions, otherwise
	 * we would have to introduce one more variable.
	 */
start_error:
	if (rproc->ops->watchdog_exit)
		rproc->ops->watchdog_exit(rproc);
wdt_error:
	if (rproc->ops->iommu_exit)
		rproc->ops->iommu_exit(rproc);
unlock_mutex:
	rproc->secure_ok = false;
	complete_all(&rproc->secure_restart);
	mutex_unlock(&rproc->lock);
}

static void rproc_reset_poolmem(struct rproc *rproc)
{
	struct rproc_mem_pool *pool = rproc->memory_pool;

	if (!pool || !pool->mem_base || !pool->mem_size) {
		pr_warn("invalid pool\n");
		return;
	}

	pool->cur_base = pool->mem_base;
	pool->cur_size = pool->mem_size;
}

static int rproc_add_mem_entry(struct rproc *rproc, struct fw_resource *rsc)
{
	struct rproc_mem_entry *me = rproc->memory_maps;
	int i = 0;
	int ret = 0;

	while (me->da || me->pa || me->size) {
		me += 1;
		i++;
		if (i == RPROC_MAX_MEM_ENTRIES) {
			ret = -ENOSPC;
			break;
		}
	}

	if (!ret) {
		me->da = rsc->da;
		me->pa = (phys_addr_t)rsc->pa;
		me->size = rsc->len;
#ifdef CONFIG_REMOTEPROC_CORE_DUMP
		/* FIXME: ION heaps are reported as RSC_CARVEOUT.  We need a
		 * better way to understand which sections are for
		 * code/stack/heap/static data, and which belong to the
		 * carveouts we don't care about in a core dump.
		 * Perhaps the ION carveout should be reported as RSC_DEVMEM.
		 */
		me->core = (rsc->type == RSC_CARVEOUT && rsc->pa != 0xba300000);
#endif
	}

	return ret;
}

static int rproc_alloc_poolmem(struct rproc *rproc, u32 size, phys_addr_t *pa)
{
	struct rproc_mem_pool *pool = rproc->memory_pool;

	*pa = 0;
	if (!pool || !pool->mem_base || !pool->mem_size) {
		pr_warn("invalid pool\n");
		return -EINVAL;
	}
	if (pool->cur_size < size) {
		pr_warn("out of carveout memory\n");
		return -ENOMEM;
	}

	*pa = pool->cur_base;
	pool->cur_base += size;
	pool->cur_size -= size;
	return 0;
}

static int rproc_check_poolmem(struct rproc *rproc, u32 size, phys_addr_t pa)
{
	struct rproc_mem_pool *pool = rproc->memory_pool;

	if (!pool || !pool->st_base || !pool->st_size) {
		pr_warn("invalid pool\n");
		return -EINVAL;
	}

	if (pa < pool->st_base || pa + size > pool->st_base + pool->st_size) {
		pr_warn("section size does not fit within carveout memory\n");
		return -ENOSPC;
	}

	return 0;
}
static int rproc_handle_resources(struct rproc *rproc, struct fw_resource *rsc,
		int len, u64 *bootaddr)
{
	struct device *dev = rproc->dev;
	phys_addr_t pa;
	u64 da;
	u64 trace_da0 = 0;
	u64 trace_da1 = 0;
	u64 cdump_da0 = 0;
	u64 cdump_da1 = 0;
	int ret = 0;

	while (len >= sizeof(*rsc) && !ret) {
		da = rsc->da;
		pa = rsc->pa;
		dev_dbg(dev, "resource: type %d, da 0x%llx, pa 0x%llx, "
			"mapped pa: 0x%x, len 0x%x, reserved 0x%x, "
			"name %s\n", rsc->type, rsc->da, rsc->pa, pa,
			rsc->len, rsc->reserved, rsc->name);

		if (rsc->reserved)
			dev_warn(dev, "nonzero reserved\n");

		switch (rsc->type) {
		case RSC_TRACE:
			if (trace_da0 && trace_da1) {
				dev_warn(dev, "skipping extra trace rsc %s\n",
						rsc->name);
				break;
			}

			/* store the da for processing at the end */
			if (!trace_da0) {
				rproc->trace_len0 = rsc->len;
				rproc->last_trace_len0 = rsc->len;
				trace_da0 = da;
			} else {
				rproc->trace_len1 = rsc->len;
				rproc->last_trace_len1 = rsc->len;
				trace_da1 = da;
			}
			break;
		case RSC_CRASHDUMP:
			if (rproc->cdump_buf0 && rproc->cdump_buf1) {
				dev_warn(dev, "skipping extra cdump rsc %s\n",
						rsc->name);
				break;
			}
			/* store the da for processing at the end */
			if (!cdump_da0) {
				rproc->cdump_len0 = rsc->len;
				cdump_da0 = da;
			} else {
				rproc->cdump_len1 = rsc->len;
				cdump_da1 = da;
			}
			break;
		case RSC_BOOTADDR:
			*bootaddr = da;
			break;
		case RSC_DEVMEM:
			ret = rproc_add_mem_entry(rproc, rsc);
			if (ret) {
				dev_err(dev, "can't add mem_entry %s\n",
						rsc->name);
				break;
			}
			break;
		case RSC_CARVEOUT:
			if (!pa) {
				ret = rproc_alloc_poolmem(rproc, rsc->len, &pa);
				if (ret) {
					dev_err(dev, "can't alloc poolmem %s\n",
							rsc->name);
					break;
				}
				rsc->pa = pa;
			} else {
				ret = rproc_check_poolmem(rproc, rsc->len, pa);
				if (ret) {
					dev_err(dev, "static memory for %s "
						"doesn't belong to poolmem\n",
						rsc->name);
					break;
				}
			}
			ret = rproc_add_mem_entry(rproc, rsc);
			if (ret) {
				dev_err(dev, "can't add mem_entry %s\n",
						rsc->name);
				break;
			}
			break;
		default:
			/* we don't support much right now, so use dbg lvl */
			dev_dbg(dev, "unsupported resource type %d\n",
					rsc->type);
			break;
		}

		rsc++;
		len -= sizeof(*rsc);
	}

	if (ret)
		goto error;

	/*
	 * post-process trace buffers, as we cannot rely on the order of the
	 * trace section and the carveout sections.
	 *
	 * trace buffer memory _is_ normal memory, so we cast away the
	 * __iomem to make sparse happy
	 */
	if (mutex_lock_interruptible(&rproc->tlock)) {
		ret = -EINTR;
		goto error;
	}

	if (trace_da0) {
		ret = rproc_da_to_pa(rproc->memory_maps, trace_da0, &pa);
		if (ret)
			goto unlock;
		rproc->trace_buf0 = (__force void *)
				ioremap_nocache(pa, rproc->trace_len0);
		if (rproc->trace_buf0) {
			DEBUGFS_ADD(trace0);
			if (!rproc->last_trace_buf0) {
				rproc->last_trace_buf0 = kzalloc(sizeof(u32) *
							rproc->last_trace_len0,
							GFP_KERNEL);
				if (!rproc->last_trace_buf0) {
					ret = -ENOMEM;
					goto unlock;
				}
				DEBUGFS_ADD(trace0_last);
			}
		} else {
			dev_err(dev, "can't ioremap trace buffer0\n");
			ret = -EIO;
			goto unlock;
		}
	}
	if (trace_da1) {
		ret = rproc_da_to_pa(rproc->memory_maps, trace_da1, &pa);
		if (ret)
			goto unlock;
		rproc->trace_buf1 = (__force void *)
				ioremap_nocache(pa, rproc->trace_len1);
		if (rproc->trace_buf1) {
			DEBUGFS_ADD(trace1);
			if (!rproc->last_trace_buf1) {
				rproc->last_trace_buf1 = kzalloc(sizeof(u32) *
							rproc->last_trace_len1,
							GFP_KERNEL);
				if (!rproc->last_trace_buf1) {
					ret = -ENOMEM;
					goto unlock;
				}
				DEBUGFS_ADD(trace1_last);
			}
		} else {
			dev_err(dev, "can't ioremap trace buffer1\n");
			ret = -EIO;
			goto unlock;
		}
	}

	/*
	 * post-process crash-dump buffers, as we cannot rely on the order of
	 * the crash-dump section and the carveout sections.
	 *
	 * crash-dump memory _is_ normal memory, so we cast away the __iomem to
	 * make sparse happy
	 */
	if (cdump_da0) {
		ret = rproc_da_to_pa(rproc->memory_maps, cdump_da0, &pa);
		if (ret)
			goto unlock;
		rproc->cdump_buf0 = (__force void *)
				ioremap_nocache(pa, rproc->cdump_len0);
		if (rproc->cdump_buf0)
			DEBUGFS_ADD(cdump0);
		else {
			dev_err(dev, "can't ioremap cdump buffer0\n");
			ret = -EIO;
			goto unlock;
		}
	}
	if (cdump_da1) {
		ret = rproc_da_to_pa(rproc->memory_maps, cdump_da1, &pa);
		if (ret)
			goto unlock;
		rproc->cdump_buf1 = (__force void *)
				ioremap_nocache(pa, rproc->cdump_len1);
		if (rproc->cdump_buf1)
			DEBUGFS_ADD(cdump1);
		else {
			dev_err(dev, "can't ioremap cdump buffer1\n");
			ret = -EIO;
		}
	}

unlock:
	mutex_unlock(&rproc->tlock);

error:
	if (ret && rproc->dbg_dir) {
		debugfs_remove_recursive(rproc->dbg_dir);
		rproc->dbg_dir = NULL;
	}
	return ret;
}
static int rproc_process_fw(struct rproc *rproc, struct fw_section *section,
		int left, u64 *bootaddr)
{
	struct device *dev = rproc->dev;
	phys_addr_t pa;
	u32 len, type;
	u64 da;
	int ret = 0;
	void *ptr;
	bool copy;

	/* first section should be FW_RESOURCE section */
	if (section->type != FW_RESOURCE) {
		dev_err(dev, "first section is not FW_RESOURCE: type %u found",
			section->type);
		ret = -EINVAL;
		goto exit;
	}

	while (left > sizeof(struct fw_section)) {
		da = section->da;
		len = section->len;
		type = section->type;
		copy = true;

		dev_dbg(dev, "section: type %d da 0x%llx len 0x%x\n",
			type, da, len);

		left -= sizeof(struct fw_section);
		if (left < section->len) {
			dev_err(dev, "BIOS image is truncated\n");
			ret = -EINVAL;
			break;
		}

		/* a resource table needs special handling */
		if (section->type == FW_RESOURCE) {
			ret = rproc_handle_resources(rproc,
					(struct fw_resource *) section->content,
					len, bootaddr);
			if (ret)
				break;
		}

		if (section->type <= FW_DATA) {
			ret = rproc_da_to_pa(rproc->memory_maps, da, &pa);
			if (ret) {
				dev_err(dev, "rproc_da_to_pa failed: %d\n",
					ret);
				break;
			}
		} else if (rproc->secure_mode) {
			pa = da;
			if (section->type == FW_MMU)
				rproc->secure_ttb = (void *)pa;
		} else
			copy = false;

		dev_dbg(dev, "da 0x%llx pa 0x%x len 0x%x\n", da, pa, len);

		if (copy) {
			/* ioremapping normal memory, so make sparse happy */
			ptr = (__force void *) ioremap_nocache(pa, len);
			if (!ptr) {
				dev_err(dev, "can't ioremap 0x%x\n", pa);
				ret = -ENOMEM;
				break;
			}

			memcpy(ptr, section->content, len);

			/* iounmap normal memory, so make sparse happy */
			iounmap((__force void __iomem *) ptr);
		}

		section = (struct fw_section *)(section->content + len);
		left -= len;
	}

exit:
	return ret;
}
static void rproc_loader_cont(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;
	struct device *dev = rproc->dev;
	const char *fwfile = rproc->firmware;
	u64 bootaddr = 0;
	struct fw_header *image;
	struct fw_section *section;
	int left, ret;

	if (!fw) {
		dev_err(dev, "%s: failed to load %s\n", __func__, fwfile);
		goto complete_fw;
	}

	dev_info(dev, "Loaded BIOS image %s, size %d\n", fwfile, fw->size);

	/* make sure this image is sane */
	if (fw->size < sizeof(struct fw_header)) {
		dev_err(dev, "Image is too small\n");
		goto out;
	}

	image = (struct fw_header *) fw->data;

	if (memcmp(image->magic, "RPRC", 4)) {
		dev_err(dev, "Image is corrupted (bad magic)\n");
		goto out;
	}

	dev_info(dev, "BIOS image version is %d\n", image->version);

	rproc->header = kzalloc(image->header_len, GFP_KERNEL);
	if (!rproc->header) {
		dev_err(dev, "%s: kzalloc failed\n", __func__);
		goto out;
	}
	memcpy(rproc->header, image->header, image->header_len);
	rproc->header_len = image->header_len;

	/* Ensure we recognize this BIOS version: */
	if (image->version != RPROC_BIOS_VERSION) {
		dev_err(dev, "Expected BIOS version: %d!\n",
			RPROC_BIOS_VERSION);
		goto out;
	}

	/* now process the image, section by section */
	section = (struct fw_section *)(image->header + image->header_len);

	left = fw->size - sizeof(struct fw_header) - image->header_len;

	ret = rproc_process_fw(rproc, section, left, &bootaddr);
	if (ret) {
		dev_err(dev, "Failed to process the image: %d\n", ret);
		goto out;
	}

	rproc_start(rproc, bootaddr);

out:
	release_firmware(fw);
complete_fw:
	/* allow all contexts calling rproc_put() to proceed */
	complete_all(&rproc->firmware_loading_complete);
}
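
/*
 * Sketch of the on-disk image layout consumed above, inferred from the
 * parsing code (the authoritative definitions are struct fw_header and
 * struct fw_section in the remoteproc headers):
 *
 *	struct fw_header	magic "RPRC", version, header_len, header[]
 *	struct fw_section	type FW_RESOURCE, da, len, content[]
 *	struct fw_section	type FW_TEXT/FW_DATA/..., da, len, content[]
 *	...
 *
 * The first section must be the resource table; rproc_process_fw() walks
 * the sections in order, and rproc_start() runs only if all of them were
 * handled successfully.
 */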
static int rproc_loader(struct rproc *rproc)
{
	const char *fwfile = rproc->firmware;
	struct device *dev = rproc->dev;
	int ret;

	if (!fwfile) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		return -EINVAL;
	}

	/*
	 * allow building remoteproc as built-in kernel code, without
	 * hanging the boot process
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, fwfile,
			dev, GFP_KERNEL, rproc, rproc_loader_cont);
	if (ret < 0) {
		dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
		return ret;
	}

	return 0;
}
int rproc_set_secure(const char *name, bool enable)
{
	struct rproc *rproc;
	int ret;

	rproc = __find_rproc_by_name(name);
	if (!rproc) {
		pr_err("can't find remote processor %s\n", name);
		return -ENODEV;
	}

	/*
	 * set the secure_mode here, the secure_ttb will be filled up during
	 * the reload process.
	 */
	if (mutex_lock_interruptible(&rproc->secure_lock))
		return -EINTR;
	rproc->secure_mode = enable;
	rproc->secure_ttb = NULL;
	rproc->secure_ok = false;
	init_completion(&rproc->secure_restart);

	/*
	 * restart the processor, the mode will dictate regular load or
	 * secure load
	 */
	_event_notify(rproc, RPROC_SECURE, (void *)enable);

	/* block until the restart is complete */
	if (wait_for_completion_interruptible(&rproc->secure_restart)) {
		pr_err("error waiting for restart completion\n");
		ret = -EINTR;
		goto out;
	}

	ret = rproc->secure_ok ? 0 : -EACCES;
out:
	mutex_unlock(&rproc->secure_lock);

	return ret;
}
EXPORT_SYMBOL(rproc_set_secure);

int rproc_error_notify(struct rproc *rproc)
{
	return _event_notify(rproc, RPROC_ERROR, NULL);
}
EXPORT_SYMBOL_GPL(rproc_error_notify);

struct rproc *rproc_get(const char *name)
{
	struct rproc *rproc, *ret = NULL;
	struct device *dev;
	int err;

	rproc = __find_rproc_by_name(name);
	if (!rproc) {
		pr_err("can't find remote processor %s\n", name);
		return NULL;
	}

	dev = rproc->dev;

	err = mutex_lock_interruptible(&rproc->lock);
	if (err) {
		dev_err(dev, "can't lock remote processor %s\n", name);
		return NULL;
	}

	if (rproc->state == RPROC_CRASHED) {
		mutex_unlock(&rproc->lock);
		if (wait_for_completion_interruptible(&rproc->error_comp)) {
			dev_err(dev, "error waiting for error completion\n");
			return NULL;
		}
		mutex_lock(&rproc->lock);
	}

	/* prevent underlying implementation from being removed */
	if (!try_module_get(rproc->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		goto unlock_mutex;
	}

	/* bail if rproc is already powered up */
	if (rproc->count++) {
		ret = rproc;
		goto unlock_mutex;
	}

	/* rproc_put() calls should wait until the async loader completes */
	init_completion(&rproc->firmware_loading_complete);

	dev_info(dev, "powering up %s\n", name);

	err = rproc_loader(rproc);
	if (err) {
		dev_err(dev, "failed to load rproc %s\n", rproc->name);
		complete_all(&rproc->firmware_loading_complete);
		module_put(rproc->owner);
		--rproc->count;
		goto unlock_mutex;
	}

	rproc->state = RPROC_LOADING;
	ret = rproc;

unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(rproc_get);
void rproc_put(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret;

	/* make sure rproc is not loading now */
	wait_for_completion(&rproc->firmware_loading_complete);

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	if (!rproc->count) {
		dev_warn(dev, "asymmetric rproc_put\n");
		ret = -EINVAL;
		goto out;
	}

	/* if the remote proc is still needed, bail out */
	if (--rproc->count)
		goto out;

	if (mutex_lock_interruptible(&rproc->tlock))
		goto out;

	if (rproc->trace_buf0)
		/* iounmap normal memory, so make sparse happy */
		iounmap((__force void __iomem *) rproc->trace_buf0);
	if (rproc->trace_buf1)
		/* iounmap normal memory, so make sparse happy */
		iounmap((__force void __iomem *) rproc->trace_buf1);
	rproc->trace_buf0 = rproc->trace_buf1 = NULL;

	if (rproc->cdump_buf0)
		/* iounmap normal memory, so make sparse happy */
		iounmap((__force void __iomem *) rproc->cdump_buf0);
	if (rproc->cdump_buf1)
		/* iounmap normal memory, so make sparse happy */
		iounmap((__force void __iomem *) rproc->cdump_buf1);
	rproc->cdump_buf0 = rproc->cdump_buf1 = NULL;

	mutex_unlock(&rproc->tlock);

	rproc_reset_poolmem(rproc);
	memset(rproc->memory_maps, 0, sizeof(rproc->memory_maps));
	kfree(rproc->header);

	/*
	 * make sure rproc is really running before powering it off.
	 * this is important, because the fw loading might have failed.
	 */
	if (rproc->state == RPROC_RUNNING || rproc->state == RPROC_CRASHED) {
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
		/*
		 * Call resume, it will cancel any pending autosuspend,
		 * so that no callback is executed after the device is stopped.
		 * Device stop function takes care of shutting down the device.
		 */
		pm_runtime_get_sync(rproc->dev);
		pm_runtime_put_noidle(rproc->dev);
		if (!rproc->secure_reset)
			pm_runtime_disable(rproc->dev);

		pm_runtime_set_suspended(rproc->dev);
#endif
		ret = rproc->ops->stop(rproc);
		if (ret) {
			dev_err(dev, "can't stop rproc %s: %d\n", rproc->name,
				ret);
			goto out;
		}
		if (rproc->ops->watchdog_exit) {
			ret = rproc->ops->watchdog_exit(rproc);
			if (ret) {
				dev_err(rproc->dev, "error watchdog_exit %d\n",
					ret);
				goto out;
			}
		}
		if (rproc->ops->iommu_exit) {
			ret = rproc->ops->iommu_exit(rproc);
			if (ret) {
				dev_err(rproc->dev, "error iommu_exit %d\n",
					ret);
				goto out;
			}
		}
	}

	if (rproc->state == RPROC_CRASHED)
		complete_all(&rproc->error_comp);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

out:
	mutex_unlock(&rproc->lock);
	if (!ret)
		module_put(rproc->owner);
}
EXPORT_SYMBOL_GPL(rproc_put);
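
/*
 * Minimal client-side usage sketch (assumes a processor registered under the
 * name "ipu"; error handling trimmed for brevity):
 *
 *	struct rproc *rproc = rproc_get("ipu");	// boots it on first get
 *	if (!rproc)
 *		return -ENODEV;
 *	// ... communicate with the remote side ...
 *	rproc_put(rproc);			// powers it off on last put
 *
 * get/put are refcounted, so nested users trigger only one boot and one
 * shutdown.
 */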
static void rproc_error_work(struct work_struct *work)
{
	struct rproc *rproc = container_of(work, struct rproc, error_work);

	dev_dbg(rproc->dev, "%s\n", __func__);
	_event_notify(rproc, RPROC_ERROR, NULL);
}

int rproc_event_register(struct rproc *rproc, struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&rproc->nbh, nb);
}
EXPORT_SYMBOL_GPL(rproc_event_register);

int rproc_event_unregister(struct rproc *rproc, struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&rproc->nbh, nb);
}
EXPORT_SYMBOL_GPL(rproc_event_unregister);

void rproc_last_busy(struct rproc *rproc)
{
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	struct device *dev = rproc->dev;

	mutex_lock(&rproc->pm_lock);
	if (pm_runtime_suspended(dev) ||
			!pm_runtime_autosuspend_expiration(dev)) {
		pm_runtime_mark_last_busy(dev);
		mutex_unlock(&rproc->pm_lock);
		/*
		 * if the remote processor is suspended, we can not wake it
		 * up (that would abort system suspend), instead state that
		 * the remote processor needs to be woken up on system resume.
		 */
		mutex_lock(&rproc->lock);
		if (rproc->state == RPROC_SUSPENDED) {
			rproc->need_resume = true;
			mutex_unlock(&rproc->lock);
			return;
		}
		mutex_unlock(&rproc->lock);
		pm_runtime_get_sync(dev);
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return;
	}
	pm_runtime_mark_last_busy(dev);
	mutex_unlock(&rproc->pm_lock);
#endif
}
EXPORT_SYMBOL(rproc_last_busy);
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
static int rproc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc = platform_get_drvdata(pdev);
	int ret = 0;

	dev_dbg(dev, "Enter %s\n", __func__);

	mutex_lock(&rproc->lock);
	if (rproc->state != RPROC_SUSPENDED) {
		mutex_unlock(&rproc->lock);
		return 0;
	}

	if (!rproc->need_resume)
		goto unlock;

	rproc->need_resume = false;
	pm_runtime_get_sync(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
unlock:
	rproc->state = (ret) ? RPROC_CRASHED : RPROC_RUNNING;
	mutex_unlock(&rproc->lock);
	if (ret) {
		_event_notify(rproc, RPROC_ERROR, NULL);
		dev_err(dev, "Error resuming %d\n", ret);
	}
	return ret;
}
static int rproc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc = platform_get_drvdata(pdev);
	int ret = 0;

	dev_dbg(dev, "Enter %s\n", __func__);

	mutex_lock(&rproc->lock);
	if (rproc->state != RPROC_RUNNING) {
		mutex_unlock(&rproc->lock);
		return 0;
	}

	if (pm_runtime_suspended(dev))
		goto out;
	/*
	 * If it is not runtime suspended, the remote processor is still
	 * doing something.  However, we need to stop it.
	 */
	dev_dbg(dev, "%s: will be forced to suspend\n", rproc->name);
	rproc->force_suspend = true;
	ret = pm_runtime_suspend(dev);
	rproc->force_suspend = false;
	if (ret)
		goto out;
	/*
	 * As the remote processor had to be forced to suspend, it was
	 * executing some task, so it needs to be woken up on system resume.
	 */
	rproc->need_resume = true;
out:
	if (!ret)
		rproc->state = RPROC_SUSPENDED;
	mutex_unlock(&rproc->lock);

	return ret;
}
static int rproc_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc = platform_get_drvdata(pdev);
	int ret = 0;

	dev_dbg(dev, "Enter %s\n", __func__);

	if (rproc->ops->resume)
		ret = rproc->ops->resume(rproc);

	if (!ret)
		_event_notify(rproc, RPROC_RESUME, NULL);

	return 0;
}
static int rproc_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc = platform_get_drvdata(pdev);
	int ret = 0;
	unsigned to;

	dev_dbg(dev, "Enter %s\n", __func__);

	if (rproc->state == RPROC_SUSPENDED)
		return 0;

	mutex_lock(&rproc->pm_lock);

	if (pm_runtime_autosuspend_expiration(dev) && !rproc->force_suspend) {
		ret = -EBUSY;
		goto abort;
	}

	/*
	 * Notify RPROC_PRE_SUSPEND only when the suspend is not forced.
	 * Users can use the pre-suspend callback to cancel autosuspend, but
	 * when the suspend is forced, there is no need to notify them.
	 */
	if (!rproc->force_suspend)
		ret = _event_notify(rproc, RPROC_PRE_SUSPEND, NULL);
	/*
	 * If an rproc user vetoes the suspend, it is still using rproc,
	 * so abort the suspend.
	 */
	if (ret) {
		dev_dbg(dev, "suspend aborted by user %d\n", ret);
		ret = -EBUSY;
		goto abort;
	}
	/* Now call the machine-specific suspend function (if it exists) */
	if (rproc->ops->suspend)
		ret = rproc->ops->suspend(rproc, rproc->force_suspend);
	/*
	 * If it fails with -EBUSY/-EAGAIN, the remote processor is still
	 * running, but the rproc users were not aware of that, so abort the
	 * suspend.  If it is a different error, there is something wrong with
	 * the remote processor.  Return that error to the PM runtime
	 * framework, which will disable autosuspend.
	 */
	if (ret) {
		dev_dbg(dev, "suspend aborted by remote processor %d\n", ret);
		if (ret != -EBUSY && ret != -EAGAIN)
			dev_err(dev, "suspend error %d\n", ret);
		goto abort;
	}
	/* we are not interested in the returned value */
	_event_notify(rproc, RPROC_POS_SUSPEND, NULL);
	mutex_unlock(&rproc->pm_lock);

	return 0;
abort:
	pm_runtime_mark_last_busy(dev);
	to = jiffies_to_msecs(pm_runtime_autosuspend_expiration(dev) - jiffies);
	pm_schedule_suspend(dev, to);
	dev->power.timer_autosuspends = 1;
	mutex_unlock(&rproc->pm_lock);
	return ret;
}
const struct dev_pm_ops rproc_gen_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(rproc_suspend, rproc_resume)
	SET_RUNTIME_PM_OPS(rproc_runtime_suspend, rproc_runtime_resume, NULL)
};
#endif
int
rproc_set_constraints(struct rproc *rproc, enum rproc_constraint type, long v)
{
	int ret;
	char *cname[] = {"scale", "latency", "bandwidth"};
	int (*func)(struct rproc *, long);

	switch (type) {
	case RPROC_CONSTRAINT_SCALE:
		func = rproc->ops->scale;
		break;
	case RPROC_CONSTRAINT_LATENCY:
		func = rproc->ops->set_lat;
		break;
	case RPROC_CONSTRAINT_BANDWIDTH:
		func = rproc->ops->set_bw;
		break;
	default:
		dev_err(rproc->dev, "invalid constraint\n");
		return -EINVAL;
	}

	if (!func) {
		dev_err(rproc->dev, "%s: no %s constraint\n",
			__func__, cname[type]);
		return -EINVAL;
	}

	mutex_lock(&rproc->lock);
	if (rproc->state == RPROC_OFFLINE) {
		pr_err("%s: rproc inactive\n", __func__);
		mutex_unlock(&rproc->lock);
		return -EPERM;
	}

	dev_dbg(rproc->dev, "set %s constraint %ld\n", cname[type], v);
	ret = func(rproc, v);
	if (ret)
		dev_err(rproc->dev, "error %s constraint\n", cname[type]);
	mutex_unlock(&rproc->lock);

	return ret;
}
EXPORT_SYMBOL(rproc_set_constraints);
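
/*
 * Illustrative call (the value is made up; its units are defined by the
 * platform-specific ops->set_lat implementation):
 *
 *	ret = rproc_set_constraints(rproc, RPROC_CONSTRAINT_LATENCY, 10);
 *	if (ret)
 *		dev_warn(rproc->dev, "could not set latency constraint\n");
 */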
int rproc_register(struct device *dev, const char *name,
				const struct rproc_ops *ops,
				const char *firmware,
				struct rproc_mem_pool *memory_pool,
				struct module *owner,
				unsigned sus_timeout)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc;

	if (!dev || !name || !ops)
		return -EINVAL;

	rproc = kzalloc(sizeof(struct rproc), GFP_KERNEL);
	if (!rproc) {
		dev_err(dev, "%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	rproc->dev = dev;
	rproc->name = name;
	rproc->ops = ops;
	rproc->firmware = firmware;
	rproc->owner = owner;
	rproc->memory_pool = memory_pool;
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	rproc->sus_timeout = sus_timeout;
	mutex_init(&rproc->pm_lock);
#endif
	mutex_init(&rproc->lock);
	mutex_init(&rproc->secure_lock);
	mutex_init(&rproc->tlock);
	INIT_WORK(&rproc->error_work, rproc_error_work);
	BLOCKING_INIT_NOTIFIER_HEAD(&rproc->nbh);

	rproc->state = RPROC_OFFLINE;

	rproc->qos_request = kzalloc(sizeof(*rproc->qos_request),
			GFP_KERNEL);
	if (!rproc->qos_request) {
		kfree(rproc);
		return -ENOMEM;
	}

	pm_qos_add_request(rproc->qos_request, PM_QOS_CPU_DMA_LATENCY,
				PM_QOS_DEFAULT_VALUE);

	rproc->secure_mode = false;
	rproc->secure_ttb = NULL;
	init_completion(&rproc->secure_restart);

	spin_lock(&rprocs_lock);
	list_add_tail(&rproc->next, &rprocs);
	spin_unlock(&rprocs_lock);

	platform_set_drvdata(pdev, rproc);

	dev_info(dev, "%s is available\n", name);

	if (!rproc_dbg)
		goto out;

	rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg);
	if (!rproc->dbg_dir) {
		dev_err(dev, "can't create debugfs dir\n");
		goto out;
	}

	debugfs_create_file("name", 0444, rproc->dbg_dir, rproc,
		&rproc_name_ops);

	debugfs_create_file("version", 0444, rproc->dbg_dir, rproc,
		&rproc_version_ops);
out:
	return 0;
}
EXPORT_SYMBOL_GPL(rproc_register);

int rproc_unregister(const char *name)
{
	struct rproc *rproc;

	rproc = __find_rproc_by_name(name);
	if (!rproc) {
		pr_err("can't find remote processor %s\n", name);
		return -EINVAL;
	}

	dev_info(rproc->dev, "removing %s\n", name);

	if (rproc->dbg_dir)
		debugfs_remove_recursive(rproc->dbg_dir);

	spin_lock(&rprocs_lock);
	list_del(&rproc->next);
	spin_unlock(&rprocs_lock);

	rproc->secure_mode = false;
	rproc->secure_ttb = NULL;
	pm_qos_remove_request(rproc->qos_request);
	kfree(rproc->qos_request);
	kfree(rproc->last_trace_buf0);
	kfree(rproc->last_trace_buf1);
	kfree(rproc);

	return 0;
}
EXPORT_SYMBOL_GPL(rproc_unregister);

static int __init remoteproc_init(void)
{
	if (debugfs_initialized()) {
		rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
		if (!rproc_dbg)
			pr_err("can't create debugfs dir\n");
	}

	return 0;
}
/* must be ready in time for device_initcall users */
subsys_initcall(remoteproc_init);

static void __exit remoteproc_exit(void)
{
	if (rproc_dbg)
		debugfs_remove(rproc_dbg);
}
module_exit(remoteproc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Remote Processor Framework");