// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>

#ifdef CONFIG_X86_32
#define PERF_REG_X86_MAX PERF_REG_X86_32_MAX
#else
#define PERF_REG_X86_MAX PERF_REG_X86_64_MAX
#endif

#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)
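
/*
 * Map each PERF_REG_X86_* sample-register index to the byte offset of the
 * corresponding field in struct pt_regs; perf_reg_value() uses this table
 * with regs_get_register() to read a sampled register.
 */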
static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
	PT_REGS_OFFSET(PERF_REG_X86_AX, ax),
	PT_REGS_OFFSET(PERF_REG_X86_BX, bx),
	PT_REGS_OFFSET(PERF_REG_X86_CX, cx),
	PT_REGS_OFFSET(PERF_REG_X86_DX, dx),
	PT_REGS_OFFSET(PERF_REG_X86_SI, si),
	PT_REGS_OFFSET(PERF_REG_X86_DI, di),
	PT_REGS_OFFSET(PERF_REG_X86_BP, bp),
	PT_REGS_OFFSET(PERF_REG_X86_SP, sp),
	PT_REGS_OFFSET(PERF_REG_X86_IP, ip),
	PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags),
	PT_REGS_OFFSET(PERF_REG_X86_CS, cs),
	PT_REGS_OFFSET(PERF_REG_X86_SS, ss),
#ifdef CONFIG_X86_32
	PT_REGS_OFFSET(PERF_REG_X86_DS, ds),
	PT_REGS_OFFSET(PERF_REG_X86_ES, es),
	PT_REGS_OFFSET(PERF_REG_X86_FS, fs),
	PT_REGS_OFFSET(PERF_REG_X86_GS, gs),
#else
	/*
	 * The pt_regs struct does not store
	 * ds, es, fs, gs in 64 bit mode.
	 */
	(unsigned int) -1,
	(unsigned int) -1,
	(unsigned int) -1,
	(unsigned int) -1,
#endif
#ifdef CONFIG_X86_64
	PT_REGS_OFFSET(PERF_REG_X86_R8, r8),
	PT_REGS_OFFSET(PERF_REG_X86_R9, r9),
	PT_REGS_OFFSET(PERF_REG_X86_R10, r10),
	PT_REGS_OFFSET(PERF_REG_X86_R11, r11),
	PT_REGS_OFFSET(PERF_REG_X86_R12, r12),
	PT_REGS_OFFSET(PERF_REG_X86_R13, r13),
	PT_REGS_OFFSET(PERF_REG_X86_R14, r14),
	PT_REGS_OFFSET(PERF_REG_X86_R15, r15),
#endif
};
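
/*
 * Return the value of the register selected by a PERF_REG_X86_* index from
 * the given pt_regs; out-of-range indices warn once and return 0.
 */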
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
		return 0;

	return regs_get_register(regs, pt_regs_offset[idx]);
}
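
/* Mask bits at or above PERF_REG_X86_MAX do not name a supported register. */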
#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL))

#ifdef CONFIG_X86_32
int perf_reg_validate(u64 mask)
{
	if (!mask || mask & REG_RESERVED)
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}

void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
#else /* CONFIG_X86_64 */
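/*
 * On 64-bit, pt_regs does not hold ds, es, fs or gs, so those registers
 * cannot be sampled; perf_reg_validate() rejects any mask requesting them.
 */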
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
		       (1ULL << PERF_REG_X86_ES) | \
		       (1ULL << PERF_REG_X86_FS) | \
		       (1ULL << PERF_REG_X86_GS))

int perf_reg_validate(u64 mask)
{
	if (!mask || mask & REG_RESERVED)
		return -EINVAL;

	if (mask & REG_NOSUPPORT)
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_IA32))
		return PERF_SAMPLE_REGS_ABI_32;
	else
		return PERF_SAMPLE_REGS_ABI_64;
}
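
/*
 * Snapshot the user-visible register state for a sample.  This can be
 * called in NMI context, so the fields known to be saved are copied into
 * the caller-supplied regs_user_copy rather than pointing at live data.
 */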
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	struct pt_regs *user_regs = task_pt_regs(current);

	/*
	 * If we're in an NMI that interrupted task_pt_regs setup, then
	 * we can't sample user regs at all.  This check isn't really
	 * sufficient, though, as we could be in an NMI inside an interrupt
	 * that happened during task_pt_regs setup.
	 */
	if (regs->sp > (unsigned long)&user_regs->r11 &&
	    regs->sp <= (unsigned long)(user_regs + 1)) {
		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
		regs_user->regs = NULL;
		return;
	}

	/*
	 * These registers are always saved on 64-bit syscall entry.
	 * On 32-bit entry points, they are saved too except r8..r11.
	 */
	regs_user_copy->ip = user_regs->ip;
	regs_user_copy->ax = user_regs->ax;
	regs_user_copy->cx = user_regs->cx;
	regs_user_copy->dx = user_regs->dx;
	regs_user_copy->si = user_regs->si;
	regs_user_copy->di = user_regs->di;
	regs_user_copy->r8 = user_regs->r8;
	regs_user_copy->r9 = user_regs->r9;
	regs_user_copy->r10 = user_regs->r10;
	regs_user_copy->r11 = user_regs->r11;
	regs_user_copy->orig_ax = user_regs->orig_ax;
	regs_user_copy->flags = user_regs->flags;
	regs_user_copy->sp = user_regs->sp;
	regs_user_copy->cs = user_regs->cs;
	regs_user_copy->ss = user_regs->ss;

	/*
	 * Store user space frame-pointer value on sample
	 * to facilitate stack unwinding for cases when
	 * user space executable code has such support
	 * enabled at compile time:
	 */
	regs_user_copy->bp = user_regs->bp;
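
	/*
	 * The remaining general-purpose registers are not guaranteed to be
	 * saved on every entry path, so report them as unavailable.
	 */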
	regs_user_copy->bx = -1;
	regs_user_copy->r12 = -1;
	regs_user_copy->r13 = -1;
	regs_user_copy->r14 = -1;
	regs_user_copy->r15 = -1;

	/*
	 * For this to be at all useful, we need a reasonable guess for
	 * the ABI.  Be careful: we're in NMI context, and we're
	 * considering current to be the current task, so we should
	 * be careful not to look at any other percpu variables that might
	 * change during context switches.
	 */
	regs_user->abi = user_64bit_mode(user_regs) ?
		PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;

	regs_user->regs = regs_user_copy;
}
#endif /* CONFIG_X86_32 */