/*
 * Xtensa hardware breakpoints/watchpoints handling functions
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cadence Design Systems Inc.
 */

#include <linux/hw_breakpoint.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <variant/core.h>

/* Breakpoint currently in use for each IBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);

/* Watchpoint currently in use for each DBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[XCHAL_NUM_DBREAK]);

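/*
 * Note: the generic hw_breakpoint layer queries the number of available
 * slots per type; on xtensa this is fixed at core-configuration time by
 * XCHAL_NUM_IBREAK and XCHAL_NUM_DBREAK.
 */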
int hw_breakpoint_slots(int type)
{
        switch (type) {
        case TYPE_INST:
                return XCHAL_NUM_IBREAK;
        case TYPE_DATA:
                return XCHAL_NUM_DBREAK;
        default:
                pr_warn("unknown slot type: %d\n", type);
                return 0;
        }
}

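/*
 * Note: a breakpoint counts as a kernel-space breakpoint only when the
 * whole watched range lies at or above TASK_SIZE, i.e. both its first
 * and its last byte are kernel addresses.
 */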
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        unsigned int len;
        unsigned long va;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        va = info->address;
        len = bp->attr.bp_len;

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

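/*
 * Note on the conversion below: only power-of-two lengths between 1 and
 * 64 bytes are accepted, and bp_addr must be aligned to bp_len (e.g. a
 * 4-byte watchpoint needs a 4-byte-aligned address). The 64-byte ceiling
 * presumably matches what the DBREAKC address mask can express.
 */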
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_X:
                info->type = XTENSA_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                info->type = XTENSA_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                info->type = XTENSA_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        info->len = bp->attr.bp_len;
        if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len))
                return -EINVAL;

        /* Address */
        info->address = bp->attr.bp_addr;
        if (info->address & (info->len - 1))
                return -EINVAL;

        return 0;
}

int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        int ret;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp);
        return ret;
}

int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}

static void xtensa_wsr(unsigned long v, u8 sr)
{
        /* We don't have an indexed wsr, and creating the instruction
         * dynamically doesn't seem worth it given how small XCHAL_NUM_IBREAK
         * and XCHAL_NUM_DBREAK are. Thus the switch. In case the build breaks
         * here, the switch below needs to be extended.
         */
        BUILD_BUG_ON(XCHAL_NUM_IBREAK > 2);
        BUILD_BUG_ON(XCHAL_NUM_DBREAK > 2);

        switch (sr) {
#if XCHAL_NUM_IBREAK > 0
        case SREG_IBREAKA + 0:
                WSR(v, SREG_IBREAKA + 0);
                break;
#endif
#if XCHAL_NUM_IBREAK > 1
        case SREG_IBREAKA + 1:
                WSR(v, SREG_IBREAKA + 1);
                break;
#endif

#if XCHAL_NUM_DBREAK > 0
        case SREG_DBREAKA + 0:
                WSR(v, SREG_DBREAKA + 0);
                break;
        case SREG_DBREAKC + 0:
                WSR(v, SREG_DBREAKC + 0);
                break;
#endif
#if XCHAL_NUM_DBREAK > 1
        case SREG_DBREAKA + 1:
                WSR(v, SREG_DBREAKA + 1);
                break;
        case SREG_DBREAKC + 1:
                WSR(v, SREG_DBREAKC + 1);
                break;
#endif
        }
}

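/*
 * Slot bookkeeping: alloc_slot()/free_slot() linearly scan the small
 * per-CPU bp_on_reg/wp_on_reg arrays and return the claimed or released
 * register index, or -EBUSY if no matching entry is found.
 */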
static int alloc_slot(struct perf_event **slot, size_t n,
                      struct perf_event *bp)
{
        size_t i;

        for (i = 0; i < n; ++i) {
                if (!slot[i]) {
                        slot[i] = bp;
                        return i;
                }
        }
        return -EBUSY;
}

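/*
 * Program IBREAKA[reg] with the target address and set the matching bit
 * in IBREAKENABLE so the instruction breakpoint starts to hit.
 */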
static void set_ibreak_regs(int reg, struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned long ibreakenable;

        xtensa_wsr(info->address, SREG_IBREAKA + reg);
        RSR(ibreakenable, SREG_IBREAKENABLE);
        WSR(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
}

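/*
 * The DBREAKC value combines an address mask with the LOAD/STORE enable
 * bits. "DBREAKC_MASK_MASK & -info->len" clears the low log2(len) mask
 * bits for a power-of-two length; assuming the usual 6-bit mask field,
 * len = 8 yields 0x38, so the low three address bits are ignored and the
 * whole aligned 8-byte region is watched.
 */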
static void set_dbreak_regs(int reg, struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len;

        if (info->type & XTENSA_BREAKPOINT_LOAD)
                dbreakc |= DBREAKC_LOAD_MASK;
        if (info->type & XTENSA_BREAKPOINT_STORE)
                dbreakc |= DBREAKC_STOR_MASK;

        xtensa_wsr(info->address, SREG_DBREAKA + reg);
        xtensa_wsr(dbreakc, SREG_DBREAKC + reg);
}

int arch_install_hw_breakpoint(struct perf_event *bp)
{
        int i;

        if (counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                i = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
                if (i < 0)
                        return i;
                set_ibreak_regs(i, bp);
        } else {
                /* Watchpoint */
                i = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
                if (i < 0)
                        return i;
                set_dbreak_regs(i, bp);
        }
        return 0;
}

static int free_slot(struct perf_event **slot, size_t n,
                     struct perf_event *bp)
{
        size_t i;

        for (i = 0; i < n; ++i) {
                if (slot[i] == bp) {
                        slot[i] = NULL;
                        return i;
                }
        }
        return -EBUSY;
}

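/*
 * Uninstalling reverses arch_install_hw_breakpoint(): an instruction
 * breakpoint is disabled by clearing its IBREAKENABLE bit, a data
 * breakpoint by writing 0 to its DBREAKC register.
 */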
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int i;

        if (info->type == XTENSA_BREAKPOINT_EXECUTE) {
                unsigned long ibreakenable;

                /* Breakpoint */
                i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
                if (i >= 0) {
                        RSR(ibreakenable, SREG_IBREAKENABLE);
                        WSR(ibreakenable & ~(1 << i), SREG_IBREAKENABLE);
                }
        } else {
                /* Watchpoint */
                i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
                if (i >= 0)
                        xtensa_wsr(0, SREG_DBREAKC + i);
        }
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        int i;
        struct thread_struct *t = &tsk->thread;

        for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
                if (t->ptrace_bp[i]) {
                        unregister_hw_breakpoint(t->ptrace_bp[i]);
                        t->ptrace_bp[i] = NULL;
                }
        }
        for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
                if (t->ptrace_wp[i]) {
                        unregister_hw_breakpoint(t->ptrace_wp[i]);
                        t->ptrace_wp[i] = NULL;
                }
        }
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
        memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
}

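/*
 * Re-arm every data breakpoint installed on this CPU. This undoes the
 * temporary disarming done by check_hw_breakpoint() for hits taken in
 * kernel mode, and clears TIF_DB_DISABLED again.
 */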
void restore_dbreak(void)
{
        int i;

        for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
                struct perf_event *bp = this_cpu_ptr(wp_on_reg)[i];

                if (bp)
                        set_dbreak_regs(i, bp);
        }
        clear_thread_flag(TIF_DB_DISABLED);
}

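/*
 * Debug-exception dispatch: DEBUGCAUSE says whether an instruction or a
 * data breakpoint fired. Instruction hits are matched against the PC of
 * each installed event; for data hits the DBNUM field names the DBREAK
 * register that triggered. A data hit taken in kernel mode is not
 * reported to perf; instead the watchpoint is disarmed and
 * TIF_DB_DISABLED is set so that restore_dbreak() can re-arm it later.
 */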
int check_hw_breakpoint(struct pt_regs *regs)
{
        if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
                int i;
                struct perf_event **bp = this_cpu_ptr(bp_on_reg);

                for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
                        if (bp[i] && !bp[i]->attr.disabled &&
                            regs->pc == bp[i]->attr.bp_addr)
                                perf_bp_event(bp[i], regs);
                }
                return 0;
        } else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
                struct perf_event **bp = this_cpu_ptr(wp_on_reg);
                int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
                        DEBUGCAUSE_DBNUM_SHIFT;

                if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
                        if (user_mode(regs)) {
                                perf_bp_event(bp[dbnum], regs);
                        } else {
                                set_thread_flag(TIF_DB_DISABLED);
                                xtensa_wsr(0, SREG_DBREAKC + dbnum);
                        }
                } else {
                        WARN_ONCE(1,
                                  "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
                                  dbnum);
                }
                return 0;
        }
        return -ENOENT;
}