/*
 * Remote Processor Framework
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Mark Grosen <mgrosen@ti.com>
 * Brian Swetland <swetland@google.com>
 * Fernando Guzman Lugo <fernando.lugo@ti.com>
 * Robert Tivy <rtivy@ti.com>
 * Armando Uribe De Leon <x0095078@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/remoteproc.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <plat/remoteproc.h>
/* list of available remote processors on this board */
static LIST_HEAD(rprocs);
static DEFINE_SPINLOCK(rprocs_lock);
/* debugfs parent dir */
static struct dentry *rproc_dbg;
static ssize_t rproc_format_trace_buf(struct rproc *rproc, char __user *userbuf,
					size_t count, loff_t *ppos,
					const void *src, int size)
{
	const char *buf = (const char *) src;
	ssize_t num_copied = 0;
	int *w_idx;
	int i, w_pos, ret = 0;

	if (mutex_lock_interruptible(&rproc->tlock))
		return -EINTR;

	/* When src is NULL, the remoteproc is offline. */
	if (!src) {
		ret = -EIO;
		goto unlock;
	}

	if (size < 2 * sizeof(u32)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Assume write_idx is the penultimate word in the trace buffer */
	size = size - (sizeof(u32) * 2);
	w_idx = (int *)(buf + size);
	w_pos = *w_idx;

	/* dump the older data first: from the write index to the end ... */
	for (i = w_pos; i < size && buf[i]; i++)
		;
	num_copied = simple_read_from_buffer(userbuf, count, ppos, src, i);

	/* ... then wrap around: from the beginning up to the write index */
	for (i = 0; i < w_pos && buf[i]; i++)
		;
	num_copied += simple_read_from_buffer(userbuf, count, ppos, src, i);

	ret = num_copied;
unlock:
	mutex_unlock(&rproc->tlock);
	return ret;
}
static ssize_t rproc_name_read(struct file *filp, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	struct rproc *rproc = filp->private_data;
	/* need room for the name, a newline and a terminating null */
	char buf[RPROC_MAX_NAME + 2];
	int i;

	i = snprintf(buf, RPROC_MAX_NAME + 2, "%s\n", rproc->name);

	return simple_read_from_buffer(userbuf, count, ppos, buf, i);
}
static ssize_t rproc_version_read(struct file *filp, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	struct rproc *rproc = filp->private_data;
	char *pch;
	int len;

	if (!rproc->header)
		return 0;

	pch = strstr(rproc->header, "version:");
	if (!pch)
		return 0;

	pch += strlen("version:") + 1;
	len = rproc->header_len - (pch - rproc->header);

	return simple_read_from_buffer(userbuf, count, ppos, pch, len);
}
static int rproc_open_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
#define DEBUGFS_READONLY_FILE(name, v, l)				\
static ssize_t name## _rproc_read(struct file *filp,			\
		char __user *ubuf, size_t count, loff_t *ppos)		\
{									\
	struct rproc *rproc = filp->private_data;			\
	return rproc_format_trace_buf(rproc, ubuf, count, ppos, v, l);	\
}									\
									\
static const struct file_operations name ##_rproc_ops = {		\
	.read = name ##_rproc_read,					\
	.open = rproc_open_generic,					\
	.llseek = generic_file_llseek,					\
};
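/*
 * For reference, an invocation such as DEBUGFS_READONLY_FILE(trace0,
 * rproc->trace_buf0, rproc->trace_len0) (made further below) expands
 * roughly to:
 *
 *	static ssize_t trace0_rproc_read(struct file *filp,
 *			char __user *ubuf, size_t count, loff_t *ppos)
 *	{
 *		struct rproc *rproc = filp->private_data;
 *		return rproc_format_trace_buf(rproc, ubuf, count, ppos,
 *				rproc->trace_buf0, rproc->trace_len0);
 *	}
 *
 *	static const struct file_operations trace0_rproc_ops = { ... };
 *
 * i.e. one read-only debugfs file per trace/crash-dump buffer, all backed
 * by rproc_format_trace_buf() above.
 */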
#ifdef CONFIG_REMOTEPROC_CORE_DUMP

/* + 1 for the notes segment */
#define NUM_PHDR (RPROC_MAX_MEM_ENTRIES + 1)

#define CORE_STR "CORE"

/* Intermediate core-dump-file format */
struct core_rproc {
	struct rproc *rproc;
	/* ELF state */
	u16 e_phnum;

	struct core {
		struct elfhdr elf;
		struct elf_phdr phdr[NUM_PHDR];
		struct {
			struct elf_note note_prstatus;
			char name[sizeof(CORE_STR)];
			struct elf_prstatus prstatus __aligned(4);
		} core_note __packed __aligned(4);
	} core __packed;

	loff_t offset;
};
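/*
 * Resulting core file layout, as assembled by setup_rproc_elf_core_dump()
 * and streamed out by core_rproc_read() below:
 *
 *	struct elfhdr               ELF file header (ET_CORE, EM_ARM)
 *	struct elf_phdr[NUM_PHDR]   one PT_LOAD entry per dumped region,
 *	                            plus one PT_NOTE entry
 *	core_note                   NT_PRSTATUS note carrying the regs
 *	<padding>                   header block rounded up to PAGE_SIZE
 *	memory segments             contents of each .core memory region
 */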
/* Return the number of segments to be written to the core file */
static int rproc_core_map_count(const struct rproc *rproc)
{
	int i = 0;
	int count = 0;

	for (;; i++) {
		if (!rproc->memory_maps[i].size)
			break;
		if (!rproc->memory_maps[i].core)
			continue;
		count++;
	}

	/* The Ducati has a low number of segments */
	if (count > NUM_PHDR - 1)
		return -1;

	return count;
}
/* Copied from fs/binfmt_elf.c */
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELFCLASS32;
	elf->e_ident[EI_DATA] = ELFDATA2LSB;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELFOSABI_NONE;

	elf->e_type = ET_CORE;
	elf->e_machine = EM_ARM;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = EF_ARM_EABI_VER5;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
}
static void fill_elf_segment_headers(struct core_rproc *d)
{
	int i = 0;
	int hi = 0;
	loff_t offset = d->offset;
	u32 size;

	for (;; i++) {
		size = d->rproc->memory_maps[i].size;
		if (!size)
			break;

		if (!d->rproc->memory_maps[i].core)
			continue;

		BUG_ON(hi >= d->e_phnum - 1);

		d->core.phdr[hi].p_type = PT_LOAD;
		d->core.phdr[hi].p_offset = offset;
		d->core.phdr[hi].p_vaddr = d->rproc->memory_maps[i].da;
		d->core.phdr[hi].p_paddr = d->rproc->memory_maps[i].pa;
		d->core.phdr[hi].p_filesz = size;
		d->core.phdr[hi].p_memsz = size;
		/* FIXME: get these from the Ducati */
		d->core.phdr[hi].p_flags = PF_R | PF_W | PF_X;

		pr_debug("%s: phdr type %d f_off %08x va %08x pa %08x fl %x\n",
			__func__,
			d->core.phdr[hi].p_type,
			d->core.phdr[hi].p_offset,
			d->core.phdr[hi].p_vaddr,
			d->core.phdr[hi].p_paddr,
			d->core.phdr[hi].p_flags);

		offset += size;
		hi++;
	}
}
static int setup_rproc_elf_core_dump(struct core_rproc *d)
{
	int __phnum;
	struct elf_phdr *nphdr;
	struct exc_regs *xregs = d->rproc->cdump_buf1;
	struct pt_regs *regs =
		(struct pt_regs *)&d->core.core_note.prstatus.pr_reg;

	memset(&d->core.elf, 0, sizeof(d->core.elf));

	__phnum = rproc_core_map_count(d->rproc);
	if (__phnum < 0 || __phnum > ARRAY_SIZE(d->core.phdr))
		return -EIO;
	d->e_phnum = __phnum + 1; /* + 1 for notes */

	pr_info("number of segments: %d\n", d->e_phnum);

	fill_elf_header(&d->core.elf, d->e_phnum);

	nphdr = d->core.phdr + __phnum;
	nphdr->p_type = PT_NOTE;

	/* The notes start right after the phdr array. Adjust p_filesz
	 * accordingly if you add more notes
	 */
	nphdr->p_filesz = sizeof(d->core.core_note);
	nphdr->p_offset = offsetof(struct core, core_note);

	d->core.core_note.note_prstatus.n_namesz = sizeof(CORE_STR);
	d->core.core_note.note_prstatus.n_descsz =
		sizeof(struct elf_prstatus);
	d->core.core_note.note_prstatus.n_type = NT_PRSTATUS;
	memcpy(d->core.core_note.name, CORE_STR, sizeof(CORE_STR));

	remoteproc_fill_pt_regs(regs, xregs);

	/* We ignore the NVIC registers for now */

	d->offset = sizeof(struct core);
	d->offset = roundup(d->offset, PAGE_SIZE);
	fill_elf_segment_headers(d);

	return 0;
}
static int core_rproc_open(struct inode *inode, struct file *filp)
{
	struct core_rproc *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->rproc = inode->i_private;
	filp->private_data = d;

	setup_rproc_elf_core_dump(d);

	{
		const struct rproc *rproc;
		int i;

		rproc = d->rproc;
		for (i = 0; rproc->memory_maps[i].size; i++) {
			pr_info("%s: memory_map[%d] pa %08x sz %d core %d\n",
				__func__, i,
				rproc->memory_maps[i].pa,
				rproc->memory_maps[i].size,
				rproc->memory_maps[i].core);
		}
	}

	return 0;
}
static int core_rproc_release(struct inode *inode, struct file *filp)
{
	pr_info("%s\n", __func__);
	kfree(filp->private_data);
	return 0;
}
/* Given an offset to read from, return the index of the memory-map region to
 * read from, adjusting the offset to be relative to that region's start.
 */
static int rproc_memory_map_index(const struct rproc *rproc, loff_t *off)
{
	int i = 0;

	for (;; i++) {
		int size = rproc->memory_maps[i].size;

		if (!size)
			break;
		if (!rproc->memory_maps[i].core)
			continue;
		if (*off < size)
			return i;

		*off -= size;
	}

	return -1;
}
static ssize_t core_rproc_write(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off)
{
	char cmd[10];
	int cmdlen;
	struct core_rproc *d = filp->private_data;
	struct rproc *rproc = d->rproc;

	cmdlen = min(sizeof(cmd) - 1, count);
	if (copy_from_user(cmd, buffer, cmdlen))
		return -EFAULT;
	cmd[cmdlen] = '\0';

	if (!strncmp(cmd, "enable", 6)) {
		pr_info("remoteproc %s halt on crash ENABLED\n", rproc->name);
		rproc->halt_on_crash = true;
		return count;
	} else if (!strncmp(cmd, "disable", 7)) {
		pr_info("remoteproc %s halt on crash DISABLED\n", rproc->name);
		rproc->halt_on_crash = false;
		/* If halt-on-crash is disabled after the remote processor
		 * has already crashed, we let crash recovery continue (so
		 * the crash can get handled otherwise) as well.
		 */
		if (rproc->state != RPROC_CRASHED)
			return count;
	} else if (strncmp(cmd, "continue", 8)) {
		pr_err("%s: invalid command: expecting \"enable\", "
			"\"disable\", or \"continue\"\n", __func__);
		return -EINVAL;
	}

	if (rproc->state == RPROC_CRASHED) {
		pr_info("remoteproc %s: resuming crash recovery\n",
			rproc->name);
		blocking_notifier_call_chain(&rproc->nbh, RPROC_ERROR, NULL);
	}

	return count;
}
static ssize_t core_rproc_read(struct file *filp,
			char __user *userbuf, size_t count, loff_t *ppos)
{
	const struct core_rproc *d = filp->private_data;
	const struct rproc *rproc = d->rproc;
	ssize_t copied = 0;
	size_t remaining = count;

	pr_debug("%s count %d off %lld\n", __func__, count, *ppos);

	/* copy the ELF and segment header first */
	if (*ppos < d->offset) {
		copied = simple_read_from_buffer(userbuf, count,
					ppos, &d->core, d->offset);
		if (copied < 0) {
			pr_err("%s: could not copy ELF header\n", __func__);
			return -EIO;
		}

		pr_debug("%s: copied %d/%lld from ELF header\n", __func__,
			copied, d->offset);
		remaining -= copied;
	}

	while (remaining) {
		loff_t pos;
		int index;
		size_t remaining_in_region;
		const struct rproc_mem_entry *r;
		void __iomem *kvaddr;

		pos = *ppos - d->offset;
		index = rproc_memory_map_index(rproc, &pos);
		if (index < 0) {
			pr_info("%s: EOF at off %lld\n", __func__, *ppos);
			break;
		}

		r = &rproc->memory_maps[index];

		remaining_in_region = r->size - pos;
		if (remaining_in_region > remaining)
			remaining_in_region = remaining;

		pr_debug("%s: iomap 0x%x size %d\n", __func__, r->pa, r->size);
		kvaddr = ioremap(r->pa, r->size);
		if (!kvaddr) {
			pr_err("%s: iomap error: region %d (phys 0x%08x size %d)\n",
				__func__, index, r->pa, r->size);
			return -EIO;
		}

		pr_debug("%s: off %lld -> [%d](pa 0x%08x off %lld sz %d)\n",
			__func__,
			*ppos, index, r->pa, pos, r->size);

		if (copy_to_user(userbuf + copied, kvaddr + pos,
				remaining_in_region)) {
			pr_err("%s: copy_to_user error\n", __func__);
			iounmap(kvaddr);
			return -EFAULT;
		}

		iounmap(kvaddr);

		copied += remaining_in_region;
		*ppos += remaining_in_region;
		BUG_ON(remaining < remaining_in_region);
		remaining -= remaining_in_region;
	}

	return copied;
}
static const struct file_operations core_rproc_ops = {
	.read = core_rproc_read,
	.write = core_rproc_write,
	.open = core_rproc_open,
	.release = core_rproc_release,
	.llseek = generic_file_llseek,
};
#endif /* CONFIG_REMOTEPROC_CORE_DUMP */
static const struct file_operations rproc_name_ops = {
	.read = rproc_name_read,
	.open = rproc_open_generic,
	.llseek = generic_file_llseek,
};

static const struct file_operations rproc_version_ops = {
	.read = rproc_version_read,
	.open = rproc_open_generic,
	.llseek = generic_file_llseek,
};
DEBUGFS_READONLY_FILE(trace0, rproc->trace_buf0, rproc->trace_len0);
DEBUGFS_READONLY_FILE(trace1, rproc->trace_buf1, rproc->trace_len1);
DEBUGFS_READONLY_FILE(trace0_last, rproc->last_trace_buf0,
		rproc->last_trace_len0);
DEBUGFS_READONLY_FILE(trace1_last, rproc->last_trace_buf1,
		rproc->last_trace_len1);
DEBUGFS_READONLY_FILE(cdump0, rproc->cdump_buf0, rproc->cdump_len0);
DEBUGFS_READONLY_FILE(cdump1, rproc->cdump_buf1, rproc->cdump_len1);
#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, 0444, rproc->dbg_dir,		\
			rproc, &name## _rproc_ops)
/**
 * __find_rproc_by_name - find a registered remote processor by name
 * @name: name of the remote processor
 *
 * Internal function that returns the rproc @name, or NULL if @name does
 * not exist.
 */
static struct rproc *__find_rproc_by_name(const char *name)
{
	struct rproc *rproc;
	struct list_head *tmp;

	spin_lock(&rprocs_lock);

	list_for_each(tmp, &rprocs) {
		rproc = list_entry(tmp, struct rproc, next);
		if (!strcmp(rproc->name, name))
			break;
		rproc = NULL;
	}

	spin_unlock(&rprocs_lock);

	return rproc;
}
/**
 * rproc_da_to_pa - convert a device (virtual) address to its physical address
 * @maps: the remote processor's memory mappings array
 * @da: a device address (as seen by the remote processor)
 * @pa: pointer to the physical address result
 *
 * This function converts @da to its physical address (pa) by going through
 * @maps, looking for a mapping that contains @da, and then calculating the
 * requested pa.
 *
 * On success 0 is returned, and the @pa is updated with the result.
 * Otherwise, -EINVAL is returned.
 */
static int
rproc_da_to_pa(const struct rproc_mem_entry *maps, u64 da, phys_addr_t *pa)
{
	int i;
	u64 offset;

	for (i = 0; maps[i].size; i++) {
		const struct rproc_mem_entry *me = &maps[i];

		if (da >= me->da && da < (me->da + me->size)) {
			offset = da - me->da;
			pr_debug("%s: matched mem entry no. %d\n",
				__func__, i);
			*pa = me->pa + offset;
			return 0;
		}
	}

	return -EINVAL;
}
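/*
 * Illustrative example (hypothetical values): with a mappings array
 *
 *	{ .da = 0x20000000, .pa = 0x9d000000, .size = 0x100000 }, { 0 }
 *
 * a lookup of da 0x20001000 matches the first entry and yields
 * pa = 0x9d000000 + (0x20001000 - 0x20000000) = 0x9d001000, while a da
 * outside every entry returns -EINVAL and leaves *pa untouched.
 */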
static int rproc_mmu_fault_isr(struct rproc *rproc, u64 da, u32 flags)
{
	dev_err(rproc->dev, "%s\n", __func__);
	schedule_work(&rproc->error_work);

	return -EIO;
}

static int rproc_watchdog_isr(struct rproc *rproc)
{
	dev_err(rproc->dev, "%s\n", __func__);
	schedule_work(&rproc->error_work);

	return -EIO;
}
static int rproc_crash(struct rproc *rproc)
{
	init_completion(&rproc->error_comp);
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	pm_runtime_dont_use_autosuspend(rproc->dev);
#endif
	if (rproc->ops->dump_registers)
		rproc->ops->dump_registers(rproc);

	if (rproc->trace_buf0 && rproc->last_trace_buf0)
		memcpy(rproc->last_trace_buf0, rproc->trace_buf0,
				rproc->last_trace_len0);
	if (rproc->trace_buf1 && rproc->last_trace_buf1)
		memcpy(rproc->last_trace_buf1, rproc->trace_buf1,
				rproc->last_trace_len1);
	rproc->state = RPROC_CRASHED;

	return 0;
}
static int _event_notify(struct rproc *rproc, int type, void *data)
{
	if (type == RPROC_ERROR) {
		mutex_lock(&rproc->lock);
		/* only notify first crash */
		if (rproc->state == RPROC_CRASHED) {
			mutex_unlock(&rproc->lock);
			return 0;
		}
		rproc_crash(rproc);
		mutex_unlock(&rproc->lock);
		/* If halt_on_crash do not notify the error */
		pr_info("remoteproc: %s has crashed\n", rproc->name);
		if (rproc->halt_on_crash) {
			/* FIXME: send uevent here */
			pr_info("remoteproc: %s: halt-on-crash enabled: "
				"deferring crash recovery\n", rproc->name);
			return 0;
		}
	}

	return blocking_notifier_call_chain(&rproc->nbh, type, data);
}
/**
 * rproc_start - power on the remote processor and let it start running
 * @rproc: the remote processor
 * @bootaddr: address of first instruction to execute (optional)
 *
 * Start a remote processor (i.e. power it on, take it out of reset, etc..)
 */
static void rproc_start(struct rproc *rproc, u64 bootaddr)
{
	struct device *dev = rproc->dev;
	int err;

	err = mutex_lock_interruptible(&rproc->lock);
	if (err) {
		dev_err(dev, "can't lock remote processor %d\n", err);
		return;
	}

	if (rproc->ops->iommu_init) {
		err = rproc->ops->iommu_init(rproc, rproc_mmu_fault_isr);
		if (err) {
			dev_err(dev, "can't configure iommu %d\n", err);
			goto error;
		}
	}

	if (rproc->ops->watchdog_init) {
		err = rproc->ops->watchdog_init(rproc, rproc_watchdog_isr);
		if (err) {
			dev_err(dev, "can't configure watchdog timer %d\n",
				err);
			goto wdt_error;
		}
	}

#ifdef CONFIG_REMOTEPROC_CORE_DUMP
	debugfs_create_file("core", 0400, rproc->dbg_dir,
			rproc, &core_rproc_ops);
#endif

	err = rproc->ops->start(rproc, bootaddr);
	if (err) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, err);
		goto start_error;
	}

#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, rproc->sus_timeout);
	pm_runtime_get_noresume(rproc->dev);
	pm_runtime_set_active(rproc->dev);
	if (!rproc->secure_mode)
		pm_runtime_enable(rproc->dev);
	pm_runtime_mark_last_busy(rproc->dev);
	pm_runtime_put_autosuspend(rproc->dev);
#endif

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);
	rproc->secure_ok = true;
	complete_all(&rproc->secure_restart);
	mutex_unlock(&rproc->lock);

	return;

	/*
	 * signal always, as we would need a notification in both the
	 * normal->secure & secure->normal mode transitions, otherwise
	 * we would have to introduce one more variable.
	 */
start_error:
	if (rproc->ops->watchdog_exit)
		rproc->ops->watchdog_exit(rproc);
wdt_error:
	if (rproc->ops->iommu_exit)
		rproc->ops->iommu_exit(rproc);
error:
	rproc->secure_ok = false;
	complete_all(&rproc->secure_restart);
	mutex_unlock(&rproc->lock);
}
static void rproc_reset_poolmem(struct rproc *rproc)
{
	struct rproc_mem_pool *pool = rproc->memory_pool;

	if (!pool || !pool->mem_base || !pool->mem_size) {
		pr_warn("invalid pool\n");
		return;
	}

	pool->cur_base = pool->mem_base;
	pool->cur_size = pool->mem_size;
}
static int rproc_add_mem_entry(struct rproc *rproc, struct fw_resource *rsc)
{
	struct rproc_mem_entry *me = rproc->memory_maps;
	int i = 0;
	int ret = 0;

	while (me->da || me->pa || me->size) {
		me++;
		i++;
	}
	if (i == RPROC_MAX_MEM_ENTRIES) {
		ret = -ENOSPC;
	} else {
		me->da = rsc->da;
		me->pa = (phys_addr_t)rsc->pa;
		me->size = rsc->len;
#ifdef CONFIG_REMOTEPROC_CORE_DUMP
		/* FIXME: ION heaps are reported as RSC_CARVEOUT. We need a
		 * better way to understand which sections are for
		 * code/stack/heap/static data, and which belong to the
		 * carveouts we don't care about in a core dump.
		 * Perhaps the ION carveout should be reported as RSC_DEVMEM.
		 */
		me->core = (rsc->type == RSC_CARVEOUT && rsc->pa != 0xba300000);
#endif
	}

	return ret;
}
static int rproc_alloc_poolmem(struct rproc *rproc, u32 size, phys_addr_t *pa)
{
	struct rproc_mem_pool *pool = rproc->memory_pool;

	*pa = 0;
	if (!pool || !pool->mem_base || !pool->mem_size) {
		pr_warn("invalid pool\n");
		return -EINVAL;
	}
	if (pool->cur_size < size) {
		pr_warn("out of carveout memory\n");
		return -ENOMEM;
	}

	*pa = pool->cur_base;
	pool->cur_base += size;
	pool->cur_size -= size;

	return 0;
}
static int rproc_check_poolmem(struct rproc *rproc, u32 size, phys_addr_t pa)
{
	struct rproc_mem_pool *pool = rproc->memory_pool;

	if (!pool || !pool->st_base || !pool->st_size) {
		pr_warn("invalid pool\n");
		return -EINVAL;
	}
	if (pa < pool->st_base || pa + size > pool->st_base + pool->st_size) {
		pr_warn("section size does not fit within carveout memory\n");
		return -ENOSPC;
	}

	return 0;
}
static int rproc_handle_resources(struct rproc *rproc, struct fw_resource *rsc,
		int len, u64 *bootaddr)
{
	struct device *dev = rproc->dev;
	phys_addr_t pa;
	u64 da;
	u64 trace_da0 = 0;
	u64 trace_da1 = 0;
	u64 cdump_da0 = 0;
	u64 cdump_da1 = 0;
	int ret = 0;

	while (len >= sizeof(*rsc) && !ret) {
		da = rsc->da;
		pa = rsc->pa;

		dev_dbg(dev, "resource: type %d, da 0x%llx, pa 0x%llx, "
			"mapped pa: 0x%x, len 0x%x, reserved 0x%x, "
			"name %s\n", rsc->type, rsc->da, rsc->pa, pa,
			rsc->len, rsc->reserved, rsc->name);

		if (rsc->reserved)
			dev_warn(dev, "nonzero reserved\n");

		switch (rsc->type) {
		case RSC_TRACE:
			if (trace_da0 && trace_da1) {
				dev_warn(dev, "skipping extra trace rsc %s\n",
						rsc->name);
				break;
			}
			/* store the da for processing at the end */
			if (!trace_da0) {
				rproc->trace_len0 = rsc->len;
				rproc->last_trace_len0 = rsc->len;
				trace_da0 = da;
			} else {
				rproc->trace_len1 = rsc->len;
				rproc->last_trace_len1 = rsc->len;
				trace_da1 = da;
			}
			break;
		case RSC_CRASHDUMP:
			if (rproc->cdump_buf0 && rproc->cdump_buf1) {
				dev_warn(dev, "skipping extra cdump rsc %s\n",
						rsc->name);
				break;
			}
			/* store the da for processing at the end */
			if (!cdump_da0) {
				rproc->cdump_len0 = rsc->len;
				cdump_da0 = da;
			} else {
				rproc->cdump_len1 = rsc->len;
				cdump_da1 = da;
			}
			break;
		case RSC_BOOTADDR:
			*bootaddr = da;
			break;
		case RSC_DEVMEM:
			ret = rproc_add_mem_entry(rproc, rsc);
			if (ret)
				dev_err(dev, "can't add mem_entry %s\n",
							rsc->name);
			break;
		case RSC_CARVEOUT:
			if (!pa) {
				ret = rproc_alloc_poolmem(rproc, rsc->len, &pa);
				if (ret) {
					dev_err(dev, "can't alloc poolmem %s\n",
								rsc->name);
					break;
				}
				rsc->pa = pa;
			} else {
				ret = rproc_check_poolmem(rproc, rsc->len, pa);
				if (ret) {
					dev_err(dev, "static memory for %s "
						"doesn't belong to poolmem\n",
						rsc->name);
					break;
				}
			}
			ret = rproc_add_mem_entry(rproc, rsc);
			if (ret)
				dev_err(dev, "can't add mem_entry %s\n",
							rsc->name);
			break;
		default:
			/* we don't support much right now. so use dbg lvl */
			dev_dbg(dev, "unsupported resource type %d\n",
							rsc->type);
			break;
		}

		rsc++;
		len -= sizeof(*rsc);
	}

	if (ret)
		goto out;

	/*
	 * post-process trace buffers, as we cannot rely on the order of the
	 * trace section and the carveout sections.
	 *
	 * trace buffer memory _is_ normal memory, so we cast away the
	 * __iomem to make sparse happy
	 */
	if (mutex_lock_interruptible(&rproc->tlock))
		return -EINTR;

	if (trace_da0) {
		ret = rproc_da_to_pa(rproc->memory_maps, trace_da0, &pa);
		if (ret)
			goto unlock;
		rproc->trace_buf0 = (__force void *)
				ioremap_nocache(pa, rproc->trace_len0);
		if (rproc->trace_buf0) {
			DEBUGFS_ADD(trace0);
			if (!rproc->last_trace_buf0) {
				rproc->last_trace_buf0 = kzalloc(sizeof(u32) *
						rproc->last_trace_len0,
						GFP_KERNEL);
				if (!rproc->last_trace_buf0) {
					ret = -ENOMEM;
					goto unlock;
				}
				DEBUGFS_ADD(trace0_last);
			}
		} else {
			dev_err(dev, "can't ioremap trace buffer0\n");
			ret = -EIO;
			goto unlock;
		}
	}

	if (trace_da1) {
		ret = rproc_da_to_pa(rproc->memory_maps, trace_da1, &pa);
		if (ret)
			goto unlock;
		rproc->trace_buf1 = (__force void *)
				ioremap_nocache(pa, rproc->trace_len1);
		if (rproc->trace_buf1) {
			DEBUGFS_ADD(trace1);
			if (!rproc->last_trace_buf1) {
				rproc->last_trace_buf1 = kzalloc(sizeof(u32) *
						rproc->last_trace_len1,
						GFP_KERNEL);
				if (!rproc->last_trace_buf1) {
					ret = -ENOMEM;
					goto unlock;
				}
				DEBUGFS_ADD(trace1_last);
			}
		} else {
			dev_err(dev, "can't ioremap trace buffer1\n");
			ret = -EIO;
			goto unlock;
		}
	}

	/*
	 * post-process crash-dump buffers, as we cannot rely on the order of
	 * the crash-dump section and the carveout sections.
	 *
	 * crash-dump memory _is_ normal memory, so we cast away the __iomem
	 * to make sparse happy
	 */
	if (cdump_da0) {
		ret = rproc_da_to_pa(rproc->memory_maps, cdump_da0, &pa);
		if (ret)
			goto unlock;
		rproc->cdump_buf0 = (__force void *)
				ioremap_nocache(pa, rproc->cdump_len0);
		if (rproc->cdump_buf0) {
			DEBUGFS_ADD(cdump0);
		} else {
			dev_err(dev, "can't ioremap cdump buffer0\n");
			ret = -EIO;
			goto unlock;
		}
	}

	if (cdump_da1) {
		ret = rproc_da_to_pa(rproc->memory_maps, cdump_da1, &pa);
		if (ret)
			goto unlock;
		rproc->cdump_buf1 = (__force void *)
				ioremap_nocache(pa, rproc->cdump_len1);
		if (rproc->cdump_buf1) {
			DEBUGFS_ADD(cdump1);
		} else {
			dev_err(dev, "can't ioremap cdump buffer1\n");
			ret = -EIO;
		}
	}

unlock:
	mutex_unlock(&rproc->tlock);

out:
	if (ret && rproc->dbg_dir) {
		debugfs_remove_recursive(rproc->dbg_dir);
		rproc->dbg_dir = NULL;
	}

	return ret;
}
static int rproc_process_fw(struct rproc *rproc, struct fw_section *section,
		int left, u64 *bootaddr)
{
	struct device *dev = rproc->dev;
	phys_addr_t pa;
	u32 len, type;
	u64 da;
	void *ptr;
	int ret = 0;

	/* first section should be FW_RESOURCE section */
	if (section->type != FW_RESOURCE) {
		dev_err(dev, "first section is not FW_RESOURCE: type %u found",
			section->type);
		return -EINVAL;
	}

	while (left > sizeof(struct fw_section)) {
		da = section->da;
		len = section->len;
		type = section->type;

		dev_dbg(dev, "section: type %d da 0x%llx len 0x%x\n",
			type, da, len);

		left -= sizeof(struct fw_section);
		if (left < section->len) {
			dev_err(dev, "BIOS image is truncated\n");
			ret = -EINVAL;
			break;
		}

		/* a resource table needs special handling */
		if (section->type == FW_RESOURCE) {
			ret = rproc_handle_resources(rproc,
					(struct fw_resource *) section->content,
					len, bootaddr);
			if (ret)
				break;
		}

		if (section->type <= FW_DATA) {
			ret = rproc_da_to_pa(rproc->memory_maps, da, &pa);
			if (ret) {
				dev_err(dev, "rproc_da_to_pa failed:%d\n", ret);
				break;
			}
		} else if (rproc->secure_mode) {
			pa = da;
			if (section->type == FW_MMU)
				rproc->secure_ttb = (void *)pa;
		} else
			goto skip_copy;

		dev_dbg(dev, "da 0x%llx pa 0x%x len 0x%x\n", da, pa, len);

		/* ioremaping normal memory, so make sparse happy */
		ptr = (__force void *) ioremap_nocache(pa, len);
		if (!ptr) {
			dev_err(dev, "can't ioremap 0x%x\n", pa);
			ret = -ENOMEM;
			break;
		}

		memcpy(ptr, section->content, len);

		/* iounmap normal memory, so make sparse happy */
		iounmap((__force void __iomem *) ptr);

skip_copy:
		section = (struct fw_section *)(section->content + len);
		left -= len;
	}

	return ret;
}
static void rproc_loader_cont(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;
	struct device *dev = rproc->dev;
	const char *fwfile = rproc->firmware;
	u64 bootaddr = 0;
	struct fw_header *image;
	struct fw_section *section;
	int left, ret;

	if (!fw) {
		dev_err(dev, "%s: failed to load %s\n", __func__, fwfile);
		goto complete_fw;
	}

	dev_info(dev, "Loaded BIOS image %s, size %d\n", fwfile, fw->size);

	/* make sure this image is sane */
	if (fw->size < sizeof(struct fw_header)) {
		dev_err(dev, "Image is too small\n");
		goto out;
	}

	image = (struct fw_header *) fw->data;

	if (memcmp(image->magic, "RPRC", 4)) {
		dev_err(dev, "Image is corrupted (bad magic)\n");
		goto out;
	}

	dev_info(dev, "BIOS image version is %d\n", image->version);

	rproc->header = kzalloc(image->header_len, GFP_KERNEL);
	if (!rproc->header) {
		dev_err(dev, "%s: kzalloc failed\n", __func__);
		goto out;
	}
	memcpy(rproc->header, image->header, image->header_len);
	rproc->header_len = image->header_len;

	/* Ensure we recognize this BIOS version: */
	if (image->version != RPROC_BIOS_VERSION) {
		dev_err(dev, "Expected BIOS version: %d!\n",
			RPROC_BIOS_VERSION);
		goto out;
	}

	/* now process the image, section by section */
	section = (struct fw_section *)(image->header + image->header_len);

	left = fw->size - sizeof(struct fw_header) - image->header_len;

	ret = rproc_process_fw(rproc, section, left, &bootaddr);
	if (ret) {
		dev_err(dev, "Failed to process the image: %d\n", ret);
		goto out;
	}

	rproc_start(rproc, bootaddr);

out:
	release_firmware(fw);
complete_fw:
	/* allow all contexts calling rproc_put() to proceed */
	complete_all(&rproc->firmware_loading_complete);
}
static int rproc_loader(struct rproc *rproc)
{
	const char *fwfile = rproc->firmware;
	struct device *dev = rproc->dev;
	int ret;

	if (!fwfile) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		return -EINVAL;
	}

	/*
	 * allow building remoteproc as built-in kernel code, without
	 * hanging the boot process
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, fwfile,
			dev, GFP_KERNEL, rproc, rproc_loader_cont);
	if (ret < 0) {
		dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
		return ret;
	}

	return 0;
}
int rproc_set_secure(const char *name, bool enable)
{
	struct rproc *rproc;
	int ret;

	rproc = __find_rproc_by_name(name);
	if (!rproc) {
		pr_err("can't find remote processor %s\n", name);
		return -ENODEV;
	}

	/*
	 * set the secure_mode here, the secure_ttb will be filled up during
	 * the reload process.
	 */
	if (mutex_lock_interruptible(&rproc->secure_lock))
		return -EINTR;
	rproc->secure_mode = enable;
	rproc->secure_ttb = NULL;
	rproc->secure_ok = false;
	init_completion(&rproc->secure_restart);

	/*
	 * restart the processor, the mode will dictate regular load or
	 * secure load
	 */
	_event_notify(rproc, RPROC_SECURE, (void *)enable);

	/* block until the restart is complete */
	if (wait_for_completion_interruptible(&rproc->secure_restart)) {
		pr_err("error waiting restart completion\n");
		ret = -EINTR;
		goto out;
	}

	ret = rproc->secure_ok ? 0 : -EACCES;
out:
	mutex_unlock(&rproc->secure_lock);

	return ret;
}
EXPORT_SYMBOL(rproc_set_secure);
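/*
 * Illustrative caller (sketch; "ipu" is a hypothetical rproc name):
 *
 *	int ret = rproc_set_secure("ipu", true);
 *	if (ret)	// 0 on success, -EACCES if the secure reload failed
 *		pr_err("secure reload failed: %d\n", ret);
 *
 * Note that the call blocks until the remote processor has been restarted
 * in the requested mode.
 */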
int rproc_error_notify(struct rproc *rproc)
{
	return _event_notify(rproc, RPROC_ERROR, NULL);
}
EXPORT_SYMBOL_GPL(rproc_error_notify);
struct rproc *rproc_get(const char *name)
{
	struct rproc *rproc, *ret = NULL;
	struct device *dev;
	int err;

	rproc = __find_rproc_by_name(name);
	if (!rproc) {
		pr_err("can't find remote processor %s\n", name);
		return NULL;
	}

	dev = rproc->dev;

	err = mutex_lock_interruptible(&rproc->lock);
	if (err) {
		dev_err(dev, "can't lock remote processor %s\n", name);
		return NULL;
	}

	if (rproc->state == RPROC_CRASHED) {
		mutex_unlock(&rproc->lock);
		if (wait_for_completion_interruptible(&rproc->error_comp)) {
			dev_err(dev, "error waiting error completion\n");
			return NULL;
		}
		mutex_lock(&rproc->lock);
	}

	/* prevent underlying implementation from being removed */
	if (!try_module_get(rproc->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		goto unlock_mutex;
	}

	/* bail if rproc is already powered up */
	if (rproc->count++) {
		ret = rproc;
		goto unlock_mutex;
	}

	/* rproc_put() calls should wait until async loader completes */
	init_completion(&rproc->firmware_loading_complete);

	dev_info(dev, "powering up %s\n", name);

	err = rproc_loader(rproc);
	if (err) {
		dev_err(dev, "failed to load rproc %s\n", rproc->name);
		complete_all(&rproc->firmware_loading_complete);
		module_put(rproc->owner);
		--rproc->count;
		goto unlock_mutex;
	}

	rproc->state = RPROC_LOADING;
	ret = rproc;

unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(rproc_get);
void rproc_put(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret;

	/* make sure rproc is not loading now */
	wait_for_completion(&rproc->firmware_loading_complete);

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	if (!rproc->count) {
		dev_warn(dev, "asymmetric rproc_put\n");
		goto out;
	}

	/* if the remote proc is still needed, bail out */
	if (--rproc->count)
		goto out;

	if (mutex_lock_interruptible(&rproc->tlock))
		goto out;

	if (rproc->trace_buf0)
		/* iounmap normal memory, so make sparse happy */
		iounmap((__force void __iomem *) rproc->trace_buf0);
	if (rproc->trace_buf1)
		/* iounmap normal memory, so make sparse happy */
		iounmap((__force void __iomem *) rproc->trace_buf1);
	rproc->trace_buf0 = rproc->trace_buf1 = NULL;

	if (rproc->cdump_buf0)
		/* iounmap normal memory, so make sparse happy */
		iounmap((__force void __iomem *) rproc->cdump_buf0);
	if (rproc->cdump_buf1)
		/* iounmap normal memory, so make sparse happy */
		iounmap((__force void __iomem *) rproc->cdump_buf1);
	rproc->cdump_buf0 = rproc->cdump_buf1 = NULL;

	mutex_unlock(&rproc->tlock);

	rproc_reset_poolmem(rproc);
	memset(rproc->memory_maps, 0, sizeof(rproc->memory_maps));
	kfree(rproc->header);

	/*
	 * make sure rproc is really running before powering it off.
	 * this is important, because the fw loading might have failed.
	 */
	if (rproc->state == RPROC_RUNNING || rproc->state == RPROC_CRASHED) {
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
		/*
		 * Call resume, it will cancel any pending autosuspend,
		 * so that no callback is executed after the device is stopped.
		 * Device stop function takes care of shutting down the device.
		 */
		pm_runtime_get_sync(rproc->dev);
		pm_runtime_put_noidle(rproc->dev);
		if (!rproc->secure_reset)
			pm_runtime_disable(rproc->dev);

		pm_runtime_set_suspended(rproc->dev);
#endif
		ret = rproc->ops->stop(rproc);
		if (ret) {
			dev_err(dev, "can't stop rproc %s: %d\n", rproc->name,
								ret);
		}
		if (rproc->ops->watchdog_exit) {
			ret = rproc->ops->watchdog_exit(rproc);
			if (ret) {
				dev_err(rproc->dev, "error watchdog_exit %d\n",
					ret);
			}
		}
		if (rproc->ops->iommu_exit) {
			ret = rproc->ops->iommu_exit(rproc);
			if (ret) {
				dev_err(rproc->dev, "error iommu_exit %d\n",
					ret);
			}
		}
	}

	if (rproc->state == RPROC_CRASHED)
		complete_all(&rproc->error_comp);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

out:
	mutex_unlock(&rproc->lock);

	module_put(rproc->owner);
}
EXPORT_SYMBOL_GPL(rproc_put);
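/*
 * Typical usage of the rproc_get()/rproc_put() pair (illustrative sketch;
 * the rproc name "ipu" and the function name are made up):
 */
#if 0
static int example_use_rproc(void)
{
	struct rproc *rproc;

	rproc = rproc_get("ipu");	/* powers up on first user */
	if (!rproc)
		return -ENODEV;

	/* ... communicate with the remote processor ... */

	rproc_put(rproc);		/* powers down on last user */
	return 0;
}
#endif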
static void rproc_error_work(struct work_struct *work)
{
	struct rproc *rproc = container_of(work, struct rproc, error_work);

	dev_dbg(rproc->dev, "%s\n", __func__);
	_event_notify(rproc, RPROC_ERROR, NULL);
}
int rproc_event_register(struct rproc *rproc, struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&rproc->nbh, nb);
}
EXPORT_SYMBOL_GPL(rproc_event_register);

int rproc_event_unregister(struct rproc *rproc, struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&rproc->nbh, nb);
}
EXPORT_SYMBOL_GPL(rproc_event_unregister);
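/*
 * Sketch of a client watching for crashes through the notifier chain
 * (illustrative; the handler name and its policy are made up):
 */
#if 0
static int example_rproc_event(struct notifier_block *nb,
		unsigned long event, void *data)
{
	if (event == RPROC_ERROR)
		pr_err("remote processor crashed, cleaning up\n");
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_rproc_event,
};

/* rproc_event_register(rproc, &example_nb); */
#endif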
void rproc_last_busy(struct rproc *rproc)
{
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	struct device *dev = rproc->dev;

	mutex_lock(&rproc->pm_lock);
	if (pm_runtime_suspended(dev) ||
			!pm_runtime_autosuspend_expiration(dev)) {
		pm_runtime_mark_last_busy(dev);
		mutex_unlock(&rproc->pm_lock);
		/*
		 * if the remote processor is suspended, we can not wake it
		 * up (that would abort system suspend), instead state that
		 * the remote processor needs to be woken up on system resume.
		 */
		mutex_lock(&rproc->lock);
		if (rproc->state == RPROC_SUSPENDED) {
			rproc->need_resume = true;
			mutex_unlock(&rproc->lock);
			return;
		}
		mutex_unlock(&rproc->lock);
		pm_runtime_get_sync(dev);
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return;
	}
	pm_runtime_mark_last_busy(dev);
	mutex_unlock(&rproc->pm_lock);
#endif
}
EXPORT_SYMBOL(rproc_last_busy);
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
static int rproc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc = platform_get_drvdata(pdev);
	int ret = 0;

	dev_dbg(dev, "Enter %s\n", __func__);

	mutex_lock(&rproc->lock);
	if (rproc->state != RPROC_SUSPENDED) {
		mutex_unlock(&rproc->lock);
		return 0;
	}

	if (!rproc->need_resume)
		goto unlock;

	rproc->need_resume = false;
	pm_runtime_get_sync(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
unlock:
	rproc->state = (ret) ? RPROC_CRASHED : RPROC_RUNNING;
	mutex_unlock(&rproc->lock);
	if (ret) {
		_event_notify(rproc, RPROC_ERROR, NULL);
		dev_err(dev, "Error resuming %d\n", ret);
	}

	return ret;
}
static int rproc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc = platform_get_drvdata(pdev);
	int ret = 0;

	dev_dbg(dev, "Enter %s\n", __func__);

	mutex_lock(&rproc->lock);
	if (rproc->state != RPROC_RUNNING) {
		mutex_unlock(&rproc->lock);
		return 0;
	}

	if (pm_runtime_suspended(dev))
		goto out;
	/*
	 * If it is not runtime suspended, the remote processor is still
	 * doing something. However we need to stop it.
	 */
	dev_dbg(dev, "%s: will be forced to suspend\n", rproc->name);

	rproc->force_suspend = true;
	ret = pm_runtime_suspend(dev);
	rproc->force_suspend = false;
	if (ret)
		goto unlock;
	/*
	 * As the remote processor had to be forced to suspend, it was
	 * executing some task, so it needs to be woken up on system resume
	 */
	rproc->need_resume = true;
out:
	rproc->state = RPROC_SUSPENDED;
unlock:
	mutex_unlock(&rproc->lock);

	return ret;
}
static int rproc_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc = platform_get_drvdata(pdev);
	int ret = 0;

	dev_dbg(dev, "Enter %s\n", __func__);

	if (rproc->ops->resume)
		ret = rproc->ops->resume(rproc);

	if (!ret)
		_event_notify(rproc, RPROC_RESUME, NULL);

	return ret;
}
static int rproc_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc = platform_get_drvdata(pdev);
	unsigned int to;
	int ret = 0;

	dev_dbg(dev, "Enter %s\n", __func__);

	if (rproc->state == RPROC_SUSPENDED)
		return 0;

	mutex_lock(&rproc->pm_lock);

	if (pm_runtime_autosuspend_expiration(dev) && !rproc->force_suspend) {
		ret = -EBUSY;
		goto abort;
	}

	/*
	 * Notify PROC_PRE_SUSPEND only when the suspend is not forced.
	 * Users can use the pre-suspend callback to cancel autosuspend, but
	 * when the suspend is forced, there is no need to notify them.
	 */
	if (!rproc->force_suspend)
		ret = _event_notify(rproc, RPROC_PRE_SUSPEND, NULL);
	/*
	 * If an rproc user rejects the suspend, that means it is still using
	 * rproc. Let's abort the suspend.
	 */
	if (ret) {
		dev_dbg(dev, "suspend aborted by user %d\n", ret);
		ret = -EBUSY;
		goto abort;
	}

	/* Now call machine-specific suspend function (if exist) */
	if (rproc->ops->suspend)
		ret = rproc->ops->suspend(rproc, rproc->force_suspend);
	/*
	 * If it fails with -EBUSY/EAGAIN, the remote processor is still
	 * running, but rproc users were not aware of that, so lets abort
	 * suspend. If it is a different error, there is something wrong with
	 * the remote processor. Return that error to the pm runtime
	 * framework, which will disable autosuspend.
	 */
	if (ret) {
		dev_dbg(dev, "suspend aborted by remote processor %d\n", ret);
		if (ret != -EBUSY && ret != -EAGAIN)
			dev_err(dev, "suspend error %d", ret);
		goto abort;
	}

	/* we are not interested in the returned value */
	_event_notify(rproc, RPROC_POS_SUSPEND, NULL);
	mutex_unlock(&rproc->pm_lock);

	return 0;

abort:
	pm_runtime_mark_last_busy(dev);
	to = jiffies_to_msecs(pm_runtime_autosuspend_expiration(dev) - jiffies);
	pm_schedule_suspend(dev, to);
	dev->power.timer_autosuspends = 1;
	mutex_unlock(&rproc->pm_lock);

	return ret;
}
const struct dev_pm_ops rproc_gen_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(rproc_suspend, rproc_resume)
	SET_RUNTIME_PM_OPS(rproc_runtime_suspend, rproc_runtime_resume, NULL)
};
#endif /* CONFIG_REMOTE_PROC_AUTOSUSPEND */
int
rproc_set_constraints(struct rproc *rproc, enum rproc_constraint type, long v)
{
	int ret;
	char *cname[] = {"scale", "latency", "bandwidth"};
	int (*func)(struct rproc *, long);

	switch (type) {
	case RPROC_CONSTRAINT_SCALE:
		func = rproc->ops->scale;
		break;
	case RPROC_CONSTRAINT_LATENCY:
		func = rproc->ops->set_lat;
		break;
	case RPROC_CONSTRAINT_BANDWIDTH:
		func = rproc->ops->set_bw;
		break;
	default:
		dev_err(rproc->dev, "invalid constraint\n");
		return -EINVAL;
	}

	if (!func) {
		dev_err(rproc->dev, "%s: no %s constraint\n",
			__func__, cname[type]);
		return -EINVAL;
	}

	mutex_lock(&rproc->lock);
	if (rproc->state == RPROC_OFFLINE) {
		pr_err("%s: rproc inactive\n", __func__);
		mutex_unlock(&rproc->lock);
		return -EPERM;
	}

	dev_dbg(rproc->dev, "set %s constraint %ld\n", cname[type], v);
	ret = func(rproc, v);
	if (ret)
		dev_err(rproc->dev, "error %s constraint\n", cname[type]);
	mutex_unlock(&rproc->lock);

	return ret;
}
EXPORT_SYMBOL(rproc_set_constraints);
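/*
 * Illustrative call (the value is made up; the corresponding op must be
 * provided by the platform-specific rproc_ops for the request to succeed):
 *
 *	ret = rproc_set_constraints(rproc, RPROC_CONSTRAINT_LATENCY, 10);
 *
 * Failures are reported per constraint, using the cname[] table above.
 */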
int rproc_register(struct device *dev, const char *name,
			const struct rproc_ops *ops,
			const char *firmware,
			struct rproc_mem_pool *memory_pool,
			struct module *owner,
			unsigned sus_timeout)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct rproc *rproc;

	if (!dev || !name || !ops)
		return -EINVAL;

	rproc = kzalloc(sizeof(struct rproc), GFP_KERNEL);
	if (!rproc) {
		dev_err(dev, "%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	rproc->dev = dev;
	rproc->name = name;
	rproc->ops = ops;
	rproc->firmware = firmware;
	rproc->owner = owner;
	rproc->memory_pool = memory_pool;
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	rproc->sus_timeout = sus_timeout;
	mutex_init(&rproc->pm_lock);
#endif
	mutex_init(&rproc->lock);
	mutex_init(&rproc->secure_lock);
	mutex_init(&rproc->tlock);
	INIT_WORK(&rproc->error_work, rproc_error_work);
	BLOCKING_INIT_NOTIFIER_HEAD(&rproc->nbh);

	rproc->state = RPROC_OFFLINE;

	rproc->qos_request = kzalloc(sizeof(*rproc->qos_request),
			GFP_KERNEL);
	if (!rproc->qos_request) {
		kfree(rproc);
		return -ENOMEM;
	}

	pm_qos_add_request(rproc->qos_request, PM_QOS_CPU_DMA_LATENCY,
				PM_QOS_DEFAULT_VALUE);

	rproc->secure_mode = false;
	rproc->secure_ttb = NULL;
	init_completion(&rproc->secure_restart);

	spin_lock(&rprocs_lock);
	list_add_tail(&rproc->next, &rprocs);
	spin_unlock(&rprocs_lock);

	platform_set_drvdata(pdev, rproc);

	dev_info(dev, "%s is available\n", name);

	if (!rproc_dbg)
		goto out;

	rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg);
	if (!rproc->dbg_dir) {
		dev_err(dev, "can't create debugfs dir\n");
		goto out;
	}

	debugfs_create_file("name", 0444, rproc->dbg_dir, rproc,
							&rproc_name_ops);
	debugfs_create_file("version", 0444, rproc->dbg_dir, rproc,
							&rproc_version_ops);
out:
	return 0;
}
EXPORT_SYMBOL_GPL(rproc_register);
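/*
 * Sketch of a platform driver registering a remote processor from its
 * probe routine (the names, firmware file, and the 5000 ms autosuspend
 * timeout are illustrative, not taken from a real board file):
 */
#if 0
static int example_rproc_probe(struct platform_device *pdev)
{
	return rproc_register(&pdev->dev, "ipu", &example_rproc_ops,
			"example-fw.bin", NULL /* memory pool */,
			THIS_MODULE, 5000 /* ms autosuspend */);
}
#endif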
int rproc_unregister(const char *name)
{
	struct rproc *rproc;

	rproc = __find_rproc_by_name(name);
	if (!rproc) {
		pr_err("can't find remote processor %s\n", name);
		return -EINVAL;
	}

	dev_info(rproc->dev, "removing %s\n", name);

	if (rproc->dbg_dir)
		debugfs_remove_recursive(rproc->dbg_dir);

	spin_lock(&rprocs_lock);
	list_del(&rproc->next);
	spin_unlock(&rprocs_lock);

	rproc->secure_mode = false;
	rproc->secure_ttb = NULL;
	pm_qos_remove_request(rproc->qos_request);
	kfree(rproc->qos_request);
	kfree(rproc->last_trace_buf0);
	kfree(rproc->last_trace_buf1);
	kfree(rproc);

	return 0;
}
EXPORT_SYMBOL_GPL(rproc_unregister);
static int __init remoteproc_init(void)
{
	if (debugfs_initialized()) {
		rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
		if (!rproc_dbg)
			pr_err("can't create debugfs dir\n");
	}

	return 0;
}
/* must be ready in time for device_initcall users */
subsys_initcall(remoteproc_init);
static void __exit remoteproc_exit(void)
{
	debugfs_remove(rproc_dbg);
}
module_exit(remoteproc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Remote Processor Framework");