/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/sched/signal.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each cpu.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
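/*
 * Note: until register_sh_ubc() below swaps in a real UBC descriptor,
 * ubc_dummy's num_events of zero means the channel-search loops in
 * arch_install_hw_breakpoint() and arch_uninstall_hw_breakpoint() find
 * nothing and bail out before touching any hardware.
 */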
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int i;

        for (i = 0; i < sh_ubc->num_events; i++) {
                struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

                if (!*slot) {
                        *slot = bp;
                        break;
                }
        }

        if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
                return -EBUSY;

        clk_enable(sh_ubc->clk);
        sh_ubc->enable(info, i);

        return 0;
}
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int i;

        for (i = 0; i < sh_ubc->num_events; i++) {
                struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

                if (*slot == bp) {
                        *slot = NULL;
                        break;
                }
        }

        if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
                return;

        sh_ubc->disable(info, i);
        clk_disable(sh_ubc->clk);
}
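/*
 * The clk_enable()/clk_disable() calls in the install and uninstall paths
 * are paired, so the UBC interface clock is only kept running while at
 * least one breakpoint is actually installed.
 */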
static int get_hbp_len(u16 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case SH_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case SH_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case SH_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case SH_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }

        return len_in_bytes;
}
/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
        unsigned int len;
        unsigned long va;

        va = hw->address;
        len = get_hbp_len(hw->len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
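/*
 * A breakpoint counts as kernel-space only if both the first and last
 * byte of the watched range sit at or above TASK_SIZE. hw_breakpoint_handler()
 * below uses this to decide whether the SIGTRAP should be delivered to
 * user space.
 */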
int arch_bp_generic_fields(int sh_len, int sh_type,
                           int *gen_len, int *gen_type)
{
        /* Len */
        switch (sh_len) {
        case SH_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case SH_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case SH_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case SH_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /* Type */
        switch (sh_type) {
        case SH_BREAKPOINT_READ:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case SH_BREAKPOINT_WRITE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case SH_BREAKPOINT_RW:
                *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
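/*
 * arch_bp_generic_fields() is the inverse of the attr -> arch mapping done
 * by arch_build_bp_info() below, translating UBC length/type encodings back
 * into the generic HW_BREAKPOINT_* values.
 */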
static int arch_build_bp_info(struct perf_event *bp,
                              const struct perf_event_attr *attr,
                              struct arch_hw_breakpoint *hw)
{
        hw->address = attr->bp_addr;

        /* Len */
        switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
                hw->len = SH_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                hw->len = SH_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                hw->len = SH_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                hw->len = SH_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /* Type */
        switch (attr->bp_type) {
        case HW_BREAKPOINT_R:
                hw->type = SH_BREAKPOINT_READ;
                break;
        case HW_BREAKPOINT_W:
                hw->type = SH_BREAKPOINT_WRITE;
                break;
        case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
                hw->type = SH_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
                             const struct perf_event_attr *attr,
                             struct arch_hw_breakpoint *hw)
{
        unsigned int align;
        int ret;

        ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                return ret;

        ret = -EINVAL;

        switch (hw->len) {
        case SH_BREAKPOINT_LEN_1:
                align = 0;
                break;
        case SH_BREAKPOINT_LEN_2:
                align = 1;
                break;
        case SH_BREAKPOINT_LEN_4:
                align = 3;
                break;
        case SH_BREAKPOINT_LEN_8:
                align = 7;
                break;
        default:
                return ret;
        }

        /*
         * Check that the low-order bits of the address are appropriate
         * for the alignment implied by len.
         */
        if (hw->address & align)
                return -EINVAL;

        return 0;
}
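/*
 * Illustration of the alignment rule above (not from the original source):
 * a HW_BREAKPOINT_LEN_4 request maps to SH_BREAKPOINT_LEN_4, so align is 3
 * and any address with either of the two low-order bits set (e.g. 0x8c001002)
 * fails the (hw->address & align) test and is rejected with -EINVAL.
 */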
/*
 * Release the user breakpoints used by ptrace.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        int i;
        struct thread_struct *t = &tsk->thread;

        for (i = 0; i < sh_ubc->num_events; i++) {
                unregister_hw_breakpoint(t->ptrace_bps[i]);
                t->ptrace_bps[i] = NULL;
        }
}
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
        int cpu, i, rc = NOTIFY_STOP;
        struct perf_event *bp;
        unsigned int cmf, resume_mask;

        /*
         * Do an early return if none of the channels triggered.
         */
        cmf = sh_ubc->triggered_mask();
        if (unlikely(!cmf))
                return NOTIFY_DONE;

        /*
         * By default, resume all of the active channels.
         */
        resume_mask = sh_ubc->active_mask();

        /*
         * Disable breakpoints during exception handling.
         */
        sh_ubc->disable_all();

        cpu = get_cpu();
        for (i = 0; i < sh_ubc->num_events; i++) {
                unsigned long event_mask = (1 << i);

                if (likely(!(cmf & event_mask)))
                        continue;

                /*
                 * The counter may be concurrently released, but that can only
                 * occur from a call_rcu() path. We can therefore safely fetch
                 * the breakpoint, use its callback and touch its counter
                 * while we are in an rcu_read_lock() path.
                 */
                rcu_read_lock();

                bp = per_cpu(bp_per_reg[i], cpu);
                if (bp)
                        rc = NOTIFY_DONE;

                /*
                 * Reset the condition match flag to denote completion of
                 * exception handling.
                 */
                sh_ubc->clear_triggered_mask(event_mask);

                /*
                 * bp can be NULL due to concurrent perf counter
                 * removal.
                 */
                if (!bp) {
                        rcu_read_unlock();
                        break;
                }

                /*
                 * Don't restore the channel if the breakpoint is from
                 * ptrace, as it always operates in one-shot mode.
                 */
                if (bp->overflow_handler == ptrace_triggered)
                        resume_mask &= ~(1 << i);

                perf_bp_event(bp, args->regs);

                /* Deliver the signal to userspace */
                if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
                        force_sig_fault(SIGTRAP, TRAP_HWBKPT,
                                        (void __user *)NULL, current);
                }

                rcu_read_unlock();
        }

        if (cmf == 0)
                rc = NOTIFY_DONE;

        sh_ubc->enable_all(resume_mask);

        put_cpu();

        return rc;
}
BUILD_TRAP_HANDLER(breakpoint)
{
        unsigned long ex = lookup_exception_vector();
        TRAP_HANDLER_DECL;

        notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                              unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val != DIE_BREAKPOINT)
                return NOTIFY_DONE;

        /*
         * If the breakpoint hasn't been triggered by the UBC, it's
         * probably from a debugger, so don't do anything more here.
         *
         * This also permits the UBC interface clock to remain off for
         * non-UBC breakpoints, as we don't need to check the triggered
         * or active channel masks.
         */
        if (args->trapnr != sh_ubc->trap_nr)
                return NOTIFY_DONE;

        return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
        /* TODO */
}
int register_sh_ubc(struct sh_ubc *ubc)
{
        /* Bail if it's already assigned */
        if (sh_ubc != &ubc_dummy)
                return -EBUSY;
        sh_ubc = ubc;

        pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

        WARN_ON(ubc->num_events > HBP_NUM);

        return 0;
}
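/*
 * For reference, a minimal sketch of how a request reaches the arch hooks
 * above, loosely following the generic hw_breakpoint usage in
 * samples/hw_breakpoint (illustrative only; the symbol name and handler
 * are placeholders):
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("some_symbol");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wp = register_wide_hw_breakpoint(&attr, my_handler, NULL);
 *
 * The generic layer then calls hw_breakpoint_arch_parse() to validate the
 * request and arch_install_hw_breakpoint() to claim a UBC channel on each
 * CPU.
 */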