/* arch/powerpc/perf/perf_regs.c */

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 Anju T, IBM Corporation.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>
#include <asm/perf_regs.h>

u64 PERF_REG_EXTENDED_MASK;

#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)

#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK))
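
/*
 * pt_regs_offset[] maps each PERF_REG_POWERPC_* index to the offset of the
 * corresponding field inside struct pt_regs. PERF_REG_EXTENDED_MASK is a
 * variable rather than a constant; it is expected to be filled in by the
 * platform PMU init code on ISA v3.0/v3.1 CPUs, which is why REG_RESERVED
 * cannot be evaluated at compile time.
 */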
static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
        PT_REGS_OFFSET(PERF_REG_POWERPC_R0,  gpr[0]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R1,  gpr[1]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R2,  gpr[2]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R3,  gpr[3]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R4,  gpr[4]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R5,  gpr[5]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R6,  gpr[6]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R7,  gpr[7]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R8,  gpr[8]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R9,  gpr[9]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R10, gpr[10]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R11, gpr[11]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R12, gpr[12]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R13, gpr[13]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R14, gpr[14]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R15, gpr[15]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R16, gpr[16]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R17, gpr[17]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R18, gpr[18]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R19, gpr[19]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R20, gpr[20]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R21, gpr[21]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R22, gpr[22]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R23, gpr[23]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R24, gpr[24]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R25, gpr[25]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R26, gpr[26]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R27, gpr[27]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R28, gpr[28]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R29, gpr[29]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R30, gpr[30]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_R31, gpr[31]),
        PT_REGS_OFFSET(PERF_REG_POWERPC_NIP, nip),
        PT_REGS_OFFSET(PERF_REG_POWERPC_MSR, msr),
        PT_REGS_OFFSET(PERF_REG_POWERPC_ORIG_R3, orig_gpr3),
        PT_REGS_OFFSET(PERF_REG_POWERPC_CTR, ctr),
        PT_REGS_OFFSET(PERF_REG_POWERPC_LINK, link),
        PT_REGS_OFFSET(PERF_REG_POWERPC_XER, xer),
        PT_REGS_OFFSET(PERF_REG_POWERPC_CCR, ccr),
#ifdef CONFIG_PPC64
        PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, softe),
#else
        PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, mq),
#endif
        PT_REGS_OFFSET(PERF_REG_POWERPC_TRAP, trap),
        PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
        PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
        PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
        PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
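
/*
 * Note on the SIER and MMCRA slots above: they intentionally alias the dar
 * and dsisr fields. On book3s the perf interrupt code (perf_read_regs() in
 * core-book3s.c) stashes SIER in regs->dar and MMCRA in regs->dsisr before
 * the sample is written out, so reading those offsets yields the PMU
 * registers rather than the fault registers.
 */
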
/* Function to return the extended register values */
static u64 get_ext_regs_value(int idx)
{
        switch (idx) {
        case PERF_REG_POWERPC_MMCR0:
                return mfspr(SPRN_MMCR0);
        case PERF_REG_POWERPC_MMCR1:
                return mfspr(SPRN_MMCR1);
        case PERF_REG_POWERPC_MMCR2:
                return mfspr(SPRN_MMCR2);
#ifdef CONFIG_PPC64
        case PERF_REG_POWERPC_MMCR3:
                return mfspr(SPRN_MMCR3);
        case PERF_REG_POWERPC_SIER2:
                return mfspr(SPRN_SIER2);
        case PERF_REG_POWERPC_SIER3:
                return mfspr(SPRN_SIER3);
#endif
        default:
                return 0;
        }
}
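
/*
 * Return the sampled value for register index @idx: indices below
 * PERF_REG_POWERPC_MAX are read from the interrupted pt_regs via
 * pt_regs_offset[], while indices in the extended range (MMCR0..SIER3)
 * are read live from the SPRs by get_ext_regs_value().
 */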
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
        u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX;

        if (cpu_has_feature(CPU_FTR_ARCH_31))
                perf_reg_extended_max = PERF_REG_MAX_ISA_31;
        else if (cpu_has_feature(CPU_FTR_ARCH_300))
                perf_reg_extended_max = PERF_REG_MAX_ISA_300;

        if (idx == PERF_REG_POWERPC_SIER &&
           (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
            IS_ENABLED(CONFIG_PPC32) ||
            !is_sier_available()))
                return 0;

        if (idx == PERF_REG_POWERPC_MMCRA &&
           (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
            IS_ENABLED(CONFIG_PPC32)))
                return 0;

        if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max)
                return get_ext_regs_value(idx);

        /*
         * If idx refers to a value beyond the supported
         * registers, return 0 with a warning.
         */
        if (WARN_ON_ONCE(idx >= perf_reg_extended_max))
                return 0;

        return regs_get_register(regs, pt_regs_offset[idx]);
}
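
/*
 * Reject a sample_regs mask that is empty or that requests bits outside
 * the register sets advertised for this platform (REG_RESERVED).
 */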
int perf_reg_validate(u64 mask)
{
        if (!mask || mask & REG_RESERVED)
                return -EINVAL;
        return 0;
}
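
/*
 * Report which register ABI applies to @task's samples: 64-bit unless the
 * task is a 32-bit task (TIF_32BIT) or the kernel itself is 32-bit.
 */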
u64 perf_reg_abi(struct task_struct *task)
{
#ifdef CONFIG_PPC64
        if (!test_tsk_thread_flag(task, TIF_32BIT))
                return PERF_SAMPLE_REGS_ABI_64;
        else
#endif
        return PERF_SAMPLE_REGS_ABI_32;
}
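
/*
 * Hand the generic perf code the user-level register state for the current
 * task. Note that the @regs argument is not consulted here; the registers
 * always come from task_pt_regs(current).
 */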
void perf_get_regs_user(struct perf_regs *regs_user,
                        struct pt_regs *regs)
{
        regs_user->regs = task_pt_regs(current);
        regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
                                 PERF_SAMPLE_REGS_ABI_NONE;
}
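
/*
 * Illustrative userspace usage (a sketch, not part of this file): a
 * profiler asks for these registers through perf_event_attr, e.g.
 *
 *      attr.sample_type     |= PERF_SAMPLE_REGS_USER;
 *      attr.sample_regs_user = (1ULL << PERF_REG_POWERPC_NIP) |
 *                              (1ULL << PERF_REG_POWERPC_R1);
 *
 * The core perf code validates such masks with perf_reg_validate() and then
 * calls perf_reg_value()/perf_get_regs_user() above when writing samples.
 */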