arch/tile/kernel/kgdb.c
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE-Gx KGDB support.
 */
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/sched/task_stack.h>

#include <asm/cacheflush.h>
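/*
 * singlestep_insn is the Tile-Gx breakpoint bundle with DIE_SSTEPBP encoded
 * in its spare low bits, so the trap path reports the planted single-step
 * breakpoint as DIE_SSTEPBP rather than as an ordinary breakpoint.
 * stepped_addr and stepped_instr record where the bundle was planted and
 * which original bundle it temporarily replaced.
 */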
static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
static unsigned long stepped_addr;
static tile_bundle_bits stepped_instr;
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
        { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0])},
        { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1])},
        { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2])},
        { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3])},
        { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4])},
        { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5])},
        { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6])},
        { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7])},
        { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8])},
        { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9])},
        { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10])},
        { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11])},
        { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12])},
        { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13])},
        { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14])},
        { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15])},
        { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16])},
        { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17])},
        { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18])},
        { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19])},
        { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20])},
        { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21])},
        { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22])},
        { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23])},
        { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24])},
        { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25])},
        { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26])},
        { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27])},
        { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28])},
        { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29])},
        { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30])},
        { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31])},
        { "r32", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[32])},
        { "r33", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[33])},
        { "r34", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[34])},
        { "r35", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[35])},
        { "r36", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[36])},
        { "r37", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[37])},
        { "r38", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[38])},
        { "r39", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[39])},
        { "r40", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[40])},
        { "r41", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[41])},
        { "r42", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[42])},
        { "r43", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[43])},
        { "r44", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[44])},
        { "r45", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[45])},
        { "r46", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[46])},
        { "r47", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[47])},
        { "r48", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[48])},
        { "r49", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[49])},
        { "r50", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[50])},
        { "r51", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[51])},
        { "r52", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[52])},
        { "tp", GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
        { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
        { "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, lr)},
        { "sn", GDB_SIZEOF_REG, -1},
        { "idn0", GDB_SIZEOF_REG, -1},
        { "idn1", GDB_SIZEOF_REG, -1},
        { "udn0", GDB_SIZEOF_REG, -1},
        { "udn1", GDB_SIZEOF_REG, -1},
        { "udn2", GDB_SIZEOF_REG, -1},
        { "udn3", GDB_SIZEOF_REG, -1},
        { "zero", GDB_SIZEOF_REG, -1},
        { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc)},
        { "faultnum", GDB_SIZEOF_REG, offsetof(struct pt_regs, faultnum)},
};
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (regno >= DBG_MAX_REG_NUM || regno < 0)
                return NULL;

        if (dbg_reg_def[regno].offset != -1)
                memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
                       dbg_reg_def[regno].size);
        else
                memset(mem, 0, dbg_reg_def[regno].size);
        return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (regno >= DBG_MAX_REG_NUM || regno < 0)
                return -EINVAL;

        if (dbg_reg_def[regno].offset != -1)
                memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
                       dbg_reg_def[regno].size);
        return 0;
}
/*
 * Similar to pt_regs_to_gdb_regs() except that the process is sleeping and
 * so we may not be able to get all the info.
 */
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
        struct pt_regs *thread_regs;
        const int NGPRS = TREG_LAST_GPR + 1;

        if (task == NULL)
                return;

        thread_regs = task_pt_regs(task);
        memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
        memset(&gdb_regs[NGPRS], 0,
               (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long));
        gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
        gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
        regs->pc = pc;
}
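/*
 * Round up the other CPUs into the debugger.  kgdb_roundup_cpus() briefly
 * enables interrupts because smp_call_function() cannot safely be issued
 * with interrupts disabled; each CPU that takes the IPI parks itself in the
 * debugger via kgdb_call_nmi_hook() -> kgdb_nmicallback().
 */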
static void kgdb_call_nmi_hook(void *ignored)
{
        kgdb_nmicallback(raw_smp_processor_id(), NULL);
}

void kgdb_roundup_cpus(unsigned long flags)
{
        local_irq_enable();
        smp_call_function(kgdb_call_nmi_hook, NULL, 0);
        local_irq_disable();
}
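/*
 * Code patching (breakpoints and the single-step bundle) does not go through
 * the normal kernel text address: writable_address() below maps a
 * core-kernel text address to its writable alias via ktext_writable_addr(),
 * since the primary text mapping is not necessarily writable, and passes
 * module text addresses through unchanged.
 */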
/*
 * Convert a kernel address to the writable kernel text mapping.
 */
static unsigned long writable_address(unsigned long addr)
{
        unsigned long ret = 0;

        if (core_kernel_text(addr))
                ret = ktext_writable_addr(addr);
        else if (is_module_text_address(addr))
                ret = addr;
        else
                pr_err("Unknown virtual address 0x%lx\n", addr);

        return ret;
}
/*
 * Calculate the new address for after a step.
 */
static unsigned long get_step_address(struct pt_regs *regs)
{
        int src_reg;
        int jump_off;
        int br_off;
        unsigned long addr;
        unsigned int opcode;
        tile_bundle_bits bundle;

        /* Move to the next instruction by default. */
        addr = regs->pc + TILEGX_BUNDLE_SIZE_IN_BYTES;
        bundle = *(unsigned long *)instruction_pointer(regs);

        /* 0: X mode, Otherwise: Y mode. */
        if (bundle & TILEGX_BUNDLE_MODE_MASK) {
                if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
                    get_RRROpcodeExtension_Y1(bundle) ==
                    UNARY_RRR_1_OPCODE_Y1) {
                        opcode = get_UnaryOpcodeExtension_Y1(bundle);

                        switch (opcode) {
                        case JALR_UNARY_OPCODE_Y1:
                        case JALRP_UNARY_OPCODE_Y1:
                        case JR_UNARY_OPCODE_Y1:
                        case JRP_UNARY_OPCODE_Y1:
                                src_reg = get_SrcA_Y1(bundle);
                                dbg_get_reg(src_reg, &addr, regs);
                                break;
                        }
                }
        } else if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
                if (get_RRROpcodeExtension_X1(bundle) ==
                    UNARY_RRR_0_OPCODE_X1) {
                        opcode = get_UnaryOpcodeExtension_X1(bundle);

                        switch (opcode) {
                        case JALR_UNARY_OPCODE_X1:
                        case JALRP_UNARY_OPCODE_X1:
                        case JR_UNARY_OPCODE_X1:
                        case JRP_UNARY_OPCODE_X1:
                                src_reg = get_SrcA_X1(bundle);
                                dbg_get_reg(src_reg, &addr, regs);
                                break;
                        }
                }
        } else if (get_Opcode_X1(bundle) == JUMP_OPCODE_X1) {
                opcode = get_JumpOpcodeExtension_X1(bundle);

                switch (opcode) {
                case JAL_JUMP_OPCODE_X1:
                case J_JUMP_OPCODE_X1:
                        jump_off = sign_extend(get_JumpOff_X1(bundle), 27);
                        addr = regs->pc +
                                (jump_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
                        break;
                }
        } else if (get_Opcode_X1(bundle) == BRANCH_OPCODE_X1) {
                br_off = 0;
                opcode = get_BrType_X1(bundle);

                switch (opcode) {
                case BEQZT_BRANCH_OPCODE_X1:
                case BEQZ_BRANCH_OPCODE_X1:
                        if (get_SrcA_X1(bundle) == 0)
                                br_off = get_BrOff_X1(bundle);
                        break;
                case BGEZT_BRANCH_OPCODE_X1:
                case BGEZ_BRANCH_OPCODE_X1:
                        if (get_SrcA_X1(bundle) >= 0)
                                br_off = get_BrOff_X1(bundle);
                        break;
                case BGTZT_BRANCH_OPCODE_X1:
                case BGTZ_BRANCH_OPCODE_X1:
                        if (get_SrcA_X1(bundle) > 0)
                                br_off = get_BrOff_X1(bundle);
                        break;
                case BLBCT_BRANCH_OPCODE_X1:
                case BLBC_BRANCH_OPCODE_X1:
                        if (!(get_SrcA_X1(bundle) & 1))
                                br_off = get_BrOff_X1(bundle);
                        break;
                case BLBST_BRANCH_OPCODE_X1:
                case BLBS_BRANCH_OPCODE_X1:
                        if (get_SrcA_X1(bundle) & 1)
                                br_off = get_BrOff_X1(bundle);
                        break;
                case BLEZT_BRANCH_OPCODE_X1:
                case BLEZ_BRANCH_OPCODE_X1:
                        if (get_SrcA_X1(bundle) <= 0)
                                br_off = get_BrOff_X1(bundle);
                        break;
                case BLTZT_BRANCH_OPCODE_X1:
                case BLTZ_BRANCH_OPCODE_X1:
                        if (get_SrcA_X1(bundle) < 0)
                                br_off = get_BrOff_X1(bundle);
                        break;
                case BNEZT_BRANCH_OPCODE_X1:
                case BNEZ_BRANCH_OPCODE_X1:
                        if (get_SrcA_X1(bundle) != 0)
                                br_off = get_BrOff_X1(bundle);
                        break;
                }

                if (br_off != 0) {
                        br_off = sign_extend(br_off, 17);
                        addr = regs->pc +
                                (br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
                }
        }

        return addr;
}
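/*
 * Worked example of the branch-target arithmetic above (assuming the usual
 * 8-byte Tile-Gx bundle, i.e. TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES == 3, and a
 * hypothetical PC): a taken conditional branch at PC 0xfffffff700001000
 * whose raw 17-bit BrOff field is 0x1fffe sign-extends to -2, giving a step
 * target of 0xfffffff700001000 + (-2 << 3) = 0xfffffff700000ff0.
 */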
/*
 * Replace the next instruction after the current instruction with a
 * breakpoint instruction.
 */
static void do_single_step(struct pt_regs *regs)
{
        unsigned long addr_wr;

        /* Determine where the target instruction will send us to. */
        stepped_addr = get_step_address(regs);
        probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
                          BREAK_INSTR_SIZE);

        addr_wr = writable_address(stepped_addr);
        probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
                           BREAK_INSTR_SIZE);
        smp_wmb();
        flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}

static void undo_single_step(struct pt_regs *regs)
{
        unsigned long addr_wr;

        if (stepped_instr == 0)
                return;

        addr_wr = writable_address(stepped_addr);
        probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
                           BREAK_INSTR_SIZE);
        stepped_instr = 0;
        smp_wmb();
        flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}
/*
 * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
 * then try to fall into the debugger.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
        int ret;
        unsigned long flags;
        struct die_args *args = (struct die_args *)ptr;
        struct pt_regs *regs = args->regs;

#ifdef CONFIG_KPROBES
        /*
         * Return immediately if the kprobes fault notifier has set
         * DIE_PAGE_FAULT.
         */
        if (cmd == DIE_PAGE_FAULT)
                return NOTIFY_DONE;
#endif /* CONFIG_KPROBES */

        switch (cmd) {
        case DIE_BREAK:
        case DIE_COMPILED_BPT:
                break;
        case DIE_SSTEPBP:
                local_irq_save(flags);
                kgdb_handle_exception(0, SIGTRAP, 0, regs);
                local_irq_restore(flags);
                return NOTIFY_STOP;
        default:
                /* Userspace events, ignore. */
                if (user_mode(regs))
                        return NOTIFY_DONE;
        }

        local_irq_save(flags);
        ret = kgdb_handle_exception(args->trapnr, args->signr, args->err, regs);
        local_irq_restore(flags);
        if (ret)
                return NOTIFY_DONE;

        return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
        .notifier_call = kgdb_notify,
};
/*
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcom_in_buffer: The buffer of the packet we have read.
 * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
 * @regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well as packets to set / remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here.  The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
                               char *remcom_in_buffer, char *remcom_out_buffer,
                               struct pt_regs *regs)
{
        char *ptr;
        unsigned long address;

        /* Undo any stepping we may have done. */
        undo_single_step(regs);

        switch (remcom_in_buffer[0]) {
        case 'c':
        case 's':
        case 'D':
        case 'k':
                /*
                 * Try to read optional parameter, pc unchanged if no parm.
                 * If this was a compiled-in breakpoint, we need to move
                 * to the next instruction or we will just breakpoint
                 * over and over again.
                 */
                ptr = &remcom_in_buffer[1];
                if (kgdb_hex2long(&ptr, &address))
                        regs->pc = address;
                else if (*(unsigned long *)regs->pc == compiled_bpt)
                        regs->pc += BREAK_INSTR_SIZE;

                if (remcom_in_buffer[0] == 's') {
                        do_single_step(regs);
                        kgdb_single_step = 1;
                        atomic_set(&kgdb_cpu_doing_single_step,
                                   raw_smp_processor_id());
                } else
                        atomic_set(&kgdb_cpu_doing_single_step, -1);

                return 0;
        }

        return -1; /* this means that we do not want to exit from the handler */
}
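/*
 * Illustrative packet handling (the address below is hypothetical): a bare
 * "c" continues at the current PC, stepping past a compiled-in breakpoint if
 * that is what we stopped on; "c400080" resumes at 0x400080; and "s" plants
 * the single-step breakpoint computed by get_step_address() and records this
 * CPU in kgdb_cpu_doing_single_step before resuming.
 */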
struct kgdb_arch arch_kgdb_ops;

/*
 * kgdb_arch_init - Perform any architecture specific initialization.
 *
 * This function will handle the initialization of any architecture
 * specific callbacks.
 */
int kgdb_arch_init(void)
{
        tile_bundle_bits bundle = TILEGX_BPT_BUNDLE;

        memcpy(arch_kgdb_ops.gdb_bpt_instr, &bundle, BREAK_INSTR_SIZE);
        return register_die_notifier(&kgdb_notifier);
}

/*
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
        unregister_die_notifier(&kgdb_notifier);
}
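/*
 * Software breakpoints: kgdb_arch_set_breakpoint() saves the original bundle
 * in bpt->saved_instr and writes the breakpoint bundle through the writable
 * text alias; kgdb_arch_remove_breakpoint() writes the saved bundle back.
 * Both flush the icache range afterwards so the patched bundle is what gets
 * fetched.
 */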
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
        int err;
        unsigned long addr_wr = writable_address(bpt->bpt_addr);

        if (addr_wr == 0)
                return -1;

        err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
                                BREAK_INSTR_SIZE);
        if (err)
                return err;

        err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
                                 BREAK_INSTR_SIZE);
        smp_wmb();
        flush_icache_range((unsigned long)bpt->bpt_addr,
                           (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
        return err;
}

int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
        int err;
        unsigned long addr_wr = writable_address(bpt->bpt_addr);

        if (addr_wr == 0)
                return -1;

        err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
                                 BREAK_INSTR_SIZE);
        smp_wmb();
        flush_icache_range((unsigned long)bpt->bpt_addr,
                           (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
        return err;
}