arch/mips/kernel/perf_event.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 */
#include <linux/perf_event.h>
#include <linux/sched/task_stack.h>

#include <asm/stacktrace.h>
/* Callchain handling code. */

/*
 * Leave userspace callchain empty for now. When we find a way to trace
 * the user stack callchains, we will add it here.
 */
static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
                                    unsigned long reg29)
{
        unsigned long *sp = (unsigned long *)reg29;
        unsigned long addr;

        while (!kstack_end(sp)) {
                addr = *sp++;
                if (__kernel_text_address(addr)) {
                        perf_callchain_store(entry, addr);
                        if (entry->nr >= entry->max_stack)
                                break;
                }
        }
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                           struct pt_regs *regs)
{
        unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;

        if (raw_show_trace || !__kernel_text_address(pc)) {
                unsigned long stack_page =
                        (unsigned long)task_stack_page(current);
                if (stack_page && sp >= stack_page &&
                    sp <= stack_page + THREAD_SIZE - 32)
                        save_raw_perf_callchain(entry, sp);
                return;
        }
        do {
                perf_callchain_store(entry, pc);
                if (entry->nr >= entry->max_stack)
                        break;
                pc = unwind_stack(current, &sp, pc, &ra);
        } while (pc);
#else
        save_raw_perf_callchain(entry, sp);
#endif
}
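
For readers outside the kernel tree, the following standalone C sketch illustrates the raw-scan idea behind save_raw_perf_callchain(): walk the stack word by word and record any value that falls inside the text range. The fabricated stack image, text-range bounds, looks_like_text(), and record_addr() are hypothetical stand-ins for kstack_end(), __kernel_text_address(), and perf_callchain_store(); this is an illustration of the heuristic, not kernel code.

#include <stdio.h>

/* Hypothetical text-segment bounds standing in for __kernel_text_address(). */
#define FAKE_TEXT_START 0x80100000UL
#define FAKE_TEXT_END   0x80200000UL

static int looks_like_text(unsigned long addr)
{
        return addr >= FAKE_TEXT_START && addr < FAKE_TEXT_END;
}

/* Stand-in for perf_callchain_store(): just print the captured entry. */
static void record_addr(unsigned long addr)
{
        printf("callchain entry: 0x%lx\n", addr);
}

/*
 * Raw scan: walk a word-aligned "stack" from sp to its end and record
 * every value that points into the text range, the same heuristic the
 * kernel code applies when no unwinder information is available.
 */
static void raw_scan(const unsigned long *sp, const unsigned long *stack_end)
{
        while (sp < stack_end) {
                unsigned long addr = *sp++;
                if (looks_like_text(addr))
                        record_addr(addr);
        }
}

int main(void)
{
        /* Fabricated stack image: data words mixed with two "return addresses". */
        unsigned long stack[] = {
                0x00000010UL, 0x80123456UL, 0xdeadbeefUL, 0x80145678UL, 0x0UL,
        };

        raw_scan(stack, stack + sizeof(stack) / sizeof(stack[0]));
        return 0;
}

The sketch mirrors the trade-off the kernel code makes: the scan can report false positives (any stale text-looking word on the stack is recorded), which is why perf_callchain_kernel() prefers unwind_stack() when CONFIG_KALLSYMS is available and only falls back to the raw scan otherwise.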