arch/tile/kernel/backtrace.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/string.h>

#include <asm/backtrace.h>

#include <arch/chip.h>

#include <asm/opcode-tile.h>

#define TREG_SP 54
#define TREG_LR 55

#if TILE_CHIP >= 10
#define tile_bundle_bits tilegx_bundle_bits
#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE
#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
#define tile_decoded_instruction tilegx_decoded_instruction
#define tile_mnemonic tilegx_mnemonic
#define parse_insn_tile parse_insn_tilegx
#define TILE_OPC_IRET TILEGX_OPC_IRET
#define TILE_OPC_ADDI TILEGX_OPC_ADDI
#define TILE_OPC_ADDLI TILEGX_OPC_ADDLI
#define TILE_OPC_INFO TILEGX_OPC_INFO
#define TILE_OPC_INFOL TILEGX_OPC_INFOL
#define TILE_OPC_JRP TILEGX_OPC_JRP
#define TILE_OPC_MOVE TILEGX_OPC_MOVE
#define OPCODE_STORE TILEGX_OPC_ST
typedef long long bt_int_reg_t;
#else
#define OPCODE_STORE TILE_OPC_SW
typedef int bt_int_reg_t;
#endif

/** A decoded bundle used for backtracer analysis. */
struct BacktraceBundle {
        tile_bundle_bits bits;
        int num_insns;
        struct tile_decoded_instruction
                insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
};

/* This implementation only makes sense for native tools. */
/** Default function to read memory. */
static bool bt_read_memory(void *result, VirtualAddress addr,
                           unsigned int size, void *extra)
{
        /* FIXME: this should do some horrible signal stuff to catch
         * SEGV cleanly and fail.
         *
         * Or else the caller should do the setjmp for efficiency.
         */

        memcpy(result, (const void *)addr, size);
        return true;
}
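
/*
 * Illustrative sketch (not part of this file): a client that cannot simply
 * dereference target addresses, e.g. one walking a different address space,
 * would pass its own BacktraceMemoryReader to backtrace_init() instead of
 * relying on bt_read_memory().  Here my_checked_copy() is a hypothetical
 * stand-in for whatever fault-safe copy routine the client has available:
 *
 *      static bool my_read_memory(void *result, VirtualAddress addr,
 *                                 unsigned int size, void *extra)
 *      {
 *              return my_checked_copy(result, addr, size) == 0;
 *      }
 */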

/** Locates an instruction inside the given bundle that
 * has the specified mnemonic, and whose first 'num_operands_to_match'
 * operands exactly match those in 'operand_values'.
 */
static const struct tile_decoded_instruction *find_matching_insn(
        const struct BacktraceBundle *bundle,
        tile_mnemonic mnemonic,
        const int *operand_values,
        int num_operands_to_match)
{
        int i, j;
        bool match;

        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->mnemonic != mnemonic)
                        continue;

                match = true;
                for (j = 0; j < num_operands_to_match; j++) {
                        if (operand_values[j] != insn->operand_values[j]) {
                                match = false;
                                break;
                        }
                }

                if (match)
                        return insn;
        }

        return NULL;
}

/** Does this bundle contain an 'iret' instruction? */
static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
{
        return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
}

/** Does this bundle contain an 'addi sp, sp, OFFSET' or
 * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
 */
static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
{
        static const int vals[2] = { TREG_SP, TREG_SP };

        const struct tile_decoded_instruction *insn =
                find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
        if (insn == NULL)
                insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
#if TILE_CHIP >= 10
        if (insn == NULL)
                insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2);
        if (insn == NULL)
                insn = find_matching_insn(bundle, TILEGX_OPC_ADDXI, vals, 2);
#endif
        if (insn == NULL)
                return false;

        *adjust = insn->operand_values[2];
        return true;
}
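
/*
 * For example (illustrative values only): a prolog bundle containing
 * 'addi sp, sp, -88' yields *adjust == -88, while the matching epilog's
 * 'addli sp, sp, 88' yields *adjust == 88; the sign is what
 * find_caller_pc_and_caller_sp() below uses to tell prolog from epilog.
 */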

/** Does this bundle contain any 'info OP' or 'infol OP'
 * instruction, and if so, what are their OP?  Note that OP is interpreted
 * as an unsigned value by this code since that's what the caller wants.
 * Returns the number of info ops found.
 */
static int bt_get_info_ops(const struct BacktraceBundle *bundle,
                           int operands[MAX_INFO_OPS_PER_BUNDLE])
{
        int num_ops = 0;
        int i;

        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->mnemonic == TILE_OPC_INFO ||
                    insn->opcode->mnemonic == TILE_OPC_INFOL) {
                        operands[num_ops++] = insn->operand_values[0];
                }
        }

        return num_ops;
}

/** Does this bundle contain a jrp instruction, and if so, to which
 * register is it jumping?
 */
static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
{
        const struct tile_decoded_instruction *insn =
                find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
        if (insn == NULL)
                return false;

        *target_reg = insn->operand_values[0];
        return true;
}

/** Does this bundle modify the specified register in any way? */
static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
{
        int i, j;
        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->implicitly_written_register == reg)
                        return true;

                for (j = 0; j < insn->opcode->num_operands; j++)
                        if (insn->operands[j]->is_dest_reg &&
                            insn->operand_values[j] == reg)
                                return true;
        }

        return false;
}

/** Does this bundle modify sp? */
static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
{
        return bt_modifies_reg(bundle, TREG_SP);
}

/** Does this bundle modify lr? */
static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
{
        return bt_modifies_reg(bundle, TREG_LR);
}

/** Does this bundle contain the instruction 'move fp, sp'? */
static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
{
        static const int vals[2] = { 52, TREG_SP };
        return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
}

/** Does this bundle contain a store of lr to sp? */
static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
{
        static const int vals[2] = { TREG_SP, TREG_LR };
        return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL;
}
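
/*
 * Illustrative sketch (not part of this file): a typical fixed-frame prolog
 * that the helpers above are looking for might read roughly as
 *
 *      sw    sp, lr            // bt_has_sw_sp_lr: spill the return address
 *      move  r52, sp           // bt_has_move_r52_sp: save the caller's sp
 *      addi  sp, sp, -104      // bt_has_addi_sp: allocate the frame
 *
 * with the matching epilog restoring sp by a positive amount and returning
 * via 'jrp lr' (bt_has_jrp).  The instruction order and frame size shown
 * here are made up for illustration.
 */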

#if TILE_CHIP >= 10
/** Track moveli values placed into registers. */
static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
                                    int moveli_args[])
{
        int i;
        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->mnemonic == TILEGX_OPC_MOVELI) {
                        int reg = insn->operand_values[0];
                        moveli_args[reg] = insn->operand_values[1];
                }
        }
}

/** Does this bundle contain an 'add sp, sp, reg' instruction
 * from a register that we saw a moveli into, and if so, what
 * is the value in the register?
 */
static bool bt_has_add_sp(const struct BacktraceBundle *bundle, int *adjust,
                          int moveli_args[])
{
        static const int vals[2] = { TREG_SP, TREG_SP };

        const struct tile_decoded_instruction *insn =
                find_matching_insn(bundle, TILEGX_OPC_ADDX, vals, 2);
        if (insn) {
                int reg = insn->operand_values[2];
                if (moveli_args[reg]) {
                        *adjust = moveli_args[reg];
                        return true;
                }
        }
        return false;
}
#endif
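
/*
 * Illustrative sketch (not part of this file): a TILE-Gx -m32 frame too
 * large for an addi/addli immediate might be torn down with a two-step
 * sequence such as
 *
 *      moveli  r10, 16384      // frame size staged in a scratch register
 *      addx    sp, sp, r10     // restore the caller's sp in the epilog
 *
 * bt_update_moveli() remembers the value written by the moveli, and
 * bt_has_add_sp() then reports it as the sp adjustment.  The register and
 * size used here are made up.
 */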

/** Locates the caller's PC and SP for a program starting at the
 * given address.
 */
static void find_caller_pc_and_caller_sp(CallerLocation *location,
                                         const VirtualAddress start_pc,
                                         BacktraceMemoryReader read_memory_func,
                                         void *read_memory_func_extra)
{
        /* Have we explicitly decided what the sp is,
         * rather than just the default?
         */
        bool sp_determined = false;

        /* Has any bundle seen so far modified lr? */
        bool lr_modified = false;

        /* Have we seen a move from sp to fp? */
        bool sp_moved_to_r52 = false;

        /* Have we seen a terminating bundle? */
        bool seen_terminating_bundle = false;

        /* Cut down on round-trip reading overhead by reading several
         * bundles at a time.
         */
        tile_bundle_bits prefetched_bundles[32];
        int num_bundles_prefetched = 0;
        int next_bundle = 0;
        VirtualAddress pc;

#if TILE_CHIP >= 10
        /* Naively try to track moveli values to support addx for -m32. */
        int moveli_args[TILEGX_NUM_REGISTERS] = { 0 };
#endif

        /* Default to assuming that the caller's sp is the current sp.
         * This is necessary to handle the case where we start backtracing
         * right at the end of the epilog.
         */
        location->sp_location = SP_LOC_OFFSET;
        location->sp_offset = 0;

        /* Default to having no idea where the caller PC is. */
        location->pc_location = PC_LOC_UNKNOWN;

        /* Don't even try if the PC is not aligned. */
        if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
                return;

        for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {

                struct BacktraceBundle bundle;
                int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
                int one_ago, jrp_reg;
                bool has_jrp;

                if (next_bundle >= num_bundles_prefetched) {
                        /* Prefetch some bytes, but don't cross a page
                         * boundary since that might cause a read failure we
                         * don't care about if we only need the first few
                         * bytes.  Note: we don't care what the actual page
                         * size is; using the minimum possible page size will
                         * prevent any problems.
                         */
                        unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
                        if (bytes_to_prefetch > sizeof prefetched_bundles)
                                bytes_to_prefetch = sizeof prefetched_bundles;

                        if (!read_memory_func(prefetched_bundles, pc,
                                              bytes_to_prefetch,
                                              read_memory_func_extra)) {
                                if (pc == start_pc) {
                                        /* The program probably called a bad
                                         * address, such as a NULL pointer.
                                         * So treat this as if we are at the
                                         * start of the function prolog so the
                                         * backtrace will show how we got here.
                                         */
                                        location->pc_location = PC_LOC_IN_LR;
                                        return;
                                }

                                /* Unreadable address.  Give up. */
                                break;
                        }

                        next_bundle = 0;
                        num_bundles_prefetched =
                                bytes_to_prefetch / sizeof(tile_bundle_bits);
                }
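
                /*
                 * Worked example of the size computation above: if pc were
                 * 0x10fe0, then 4096 - (pc & 4095) == 32, so only the last
                 * 32 bytes (four 8-byte bundles) of that page are fetched;
                 * otherwise the read is capped at sizeof(prefetched_bundles),
                 * i.e. 32 * sizeof(tile_bundle_bits) bytes.
                 */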

                /* Decode the next bundle. */
                bundle.bits = prefetched_bundles[next_bundle++];
                bundle.num_insns =
                        parse_insn_tile(bundle.bits, pc, bundle.insns);
                num_info_ops = bt_get_info_ops(&bundle, info_operands);

                /* First look at any one_ago info ops if they are interesting,
                 * since they should shadow any non-one-ago info ops.
                 */
                for (one_ago = (pc != start_pc) ? 1 : 0;
                     one_ago >= 0; one_ago--) {
                        int i;
                        for (i = 0; i < num_info_ops; i++) {
                                int info_operand = info_operands[i];
                                if (info_operand < CALLER_UNKNOWN_BASE) {
                                        /* Weird; reserved value, ignore it. */
                                        continue;
                                }

                                if (info_operand & ENTRY_POINT_INFO_OP) {
                                        /* This info op is ignored by the backtracer. */
                                        continue;
                                }

                                /* Skip info ops which are not in the
                                 * "one_ago" mode we want right now.
                                 */
                                if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
                                    != (one_ago != 0))
                                        continue;

                                /* Clear the flag to make later checking
                                 * easier. */
                                info_operand &= ~ONE_BUNDLE_AGO_FLAG;

                                /* Default to looking at PC_IN_LR_FLAG. */
                                if (info_operand & PC_IN_LR_FLAG)
                                        location->pc_location =
                                                PC_LOC_IN_LR;
                                else
                                        location->pc_location =
                                                PC_LOC_ON_STACK;

                                switch (info_operand) {
                                case CALLER_UNKNOWN_BASE:
                                        location->pc_location = PC_LOC_UNKNOWN;
                                        location->sp_location = SP_LOC_UNKNOWN;
                                        return;

                                case CALLER_SP_IN_R52_BASE:
                                case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
                                        location->sp_location = SP_LOC_IN_R52;
                                        return;

                                default:
                                {
                                        const unsigned int val = info_operand
                                                - CALLER_SP_OFFSET_BASE;
                                        const unsigned int sp_offset =
                                                (val >> NUM_INFO_OP_FLAGS) * 8;
                                        if (sp_offset < 32768) {
                                                /* This is a properly encoded
                                                 * SP offset. */
                                                location->sp_location =
                                                        SP_LOC_OFFSET;
                                                location->sp_offset =
                                                        sp_offset;
                                                return;
                                        } else {
                                                /* This looked like an SP
                                                 * offset, but it's outside
                                                 * the legal range, so this
                                                 * must be an unrecognized
                                                 * info operand.  Ignore it.
                                                 */
                                        }
                                }
                                        break;
                                }
                        }
                }
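
                /*
                 * Worked example of the decoding in the 'default' case above
                 * (only the arithmetic as written; no header constants are
                 * assumed): with val = info_operand - CALLER_SP_OFFSET_BASE,
                 * if (val >> NUM_INFO_OP_FLAGS) == 12 then sp_offset is
                 * 12 * 8 == 96 bytes, which is below the 32768 limit and is
                 * accepted as the caller's sp offset.
                 */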

                if (seen_terminating_bundle) {
                        /* We saw a terminating bundle during the previous
                         * iteration, so we were only looking for an info op.
                         */
                        break;
                }

                if (bundle.bits == 0) {
                        /* Wacky terminating bundle.  Stop looping, and hope
                         * we've already seen enough to find the caller.
                         */
                        break;
                }

                /*
                 * Try to determine caller's SP.
                 */

                if (!sp_determined) {
                        int adjust;
                        if (bt_has_addi_sp(&bundle, &adjust)
#if TILE_CHIP >= 10
                            || bt_has_add_sp(&bundle, &adjust, moveli_args)
#endif
                                ) {
                                location->sp_location = SP_LOC_OFFSET;

                                if (adjust <= 0) {
                                        /* We are in prolog about to adjust
                                         * SP. */
                                        location->sp_offset = 0;
                                } else {
                                        /* We are in epilog restoring SP. */
                                        location->sp_offset = adjust;
                                }

                                sp_determined = true;
                        } else {
                                if (bt_has_move_r52_sp(&bundle)) {
                                        /* Maybe in prolog, creating an
                                         * alloca-style frame.  But maybe in
                                         * the middle of a fixed-size frame
                                         * clobbering r52 with SP.
                                         */
                                        sp_moved_to_r52 = true;
                                }

                                if (bt_modifies_sp(&bundle)) {
                                        if (sp_moved_to_r52) {
                                                /* We saw SP get saved into
                                                 * r52 earlier (or now), which
                                                 * must have been in the
                                                 * prolog, so we now know that
                                                 * SP is still holding the
                                                 * caller's sp value.
                                                 */
                                                location->sp_location =
                                                        SP_LOC_OFFSET;
                                                location->sp_offset = 0;
                                        } else {
                                                /* Someone must have saved
                                                 * aside the caller's SP value
                                                 * into r52, so r52 holds the
                                                 * current value.
                                                 */
                                                location->sp_location =
                                                        SP_LOC_IN_R52;
                                        }
                                        sp_determined = true;
                                }
                        }

#if TILE_CHIP >= 10
                        /* Track moveli arguments for -m32 mode. */
                        bt_update_moveli(&bundle, moveli_args);
#endif
                }

                if (bt_has_iret(&bundle)) {
                        /* This is a terminating bundle. */
                        seen_terminating_bundle = true;
                        continue;
                }

                /*
                 * Try to determine caller's PC.
                 */

                jrp_reg = -1;
                has_jrp = bt_has_jrp(&bundle, &jrp_reg);
                if (has_jrp)
                        seen_terminating_bundle = true;

                if (location->pc_location == PC_LOC_UNKNOWN) {
                        if (has_jrp) {
                                if (jrp_reg == TREG_LR && !lr_modified) {
                                        /* Looks like a leaf function, or else
                                         * lr is already restored. */
                                        location->pc_location =
                                                PC_LOC_IN_LR;
                                } else {
                                        location->pc_location =
                                                PC_LOC_ON_STACK;
                                }
                        } else if (bt_has_sw_sp_lr(&bundle)) {
                                /* In prolog, spilling initial lr to stack. */
                                location->pc_location = PC_LOC_IN_LR;
                        } else if (bt_modifies_lr(&bundle)) {
                                lr_modified = true;
                        }
                }
        }
}

void backtrace_init(BacktraceIterator *state,
                    BacktraceMemoryReader read_memory_func,
                    void *read_memory_func_extra,
                    VirtualAddress pc, VirtualAddress lr,
                    VirtualAddress sp, VirtualAddress r52)
{
        CallerLocation location;
        VirtualAddress fp, initial_frame_caller_pc;

        if (read_memory_func == NULL) {
                read_memory_func = bt_read_memory;
        }

        /* Find out where we are in the initial frame. */
        find_caller_pc_and_caller_sp(&location, pc,
                                     read_memory_func, read_memory_func_extra);

        switch (location.sp_location) {
        case SP_LOC_UNKNOWN:
                /* Give up. */
                fp = -1;
                break;

        case SP_LOC_IN_R52:
                fp = r52;
                break;

        case SP_LOC_OFFSET:
                fp = sp + location.sp_offset;
                break;

        default:
                /* Give up. */
                fp = -1;
                break;
        }

        /* If the frame pointer is not aligned to the basic word size
         * something terrible happened and we should mark it as invalid.
         */
        if (fp % sizeof(bt_int_reg_t) != 0)
                fp = -1;

        /* -1 means "don't know initial_frame_caller_pc". */
        initial_frame_caller_pc = -1;

        switch (location.pc_location) {
        case PC_LOC_UNKNOWN:
                /* Give up. */
                fp = -1;
                break;

        case PC_LOC_IN_LR:
                if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
                        /* Give up. */
                        fp = -1;
                } else {
                        initial_frame_caller_pc = lr;
                }
                break;

        case PC_LOC_ON_STACK:
                /* Leave initial_frame_caller_pc as -1,
                 * meaning check the stack.
                 */
                break;

        default:
                /* Give up. */
                fp = -1;
                break;
        }

        state->pc = pc;
        state->sp = sp;
        state->fp = fp;
        state->initial_frame_caller_pc = initial_frame_caller_pc;
        state->read_memory_func = read_memory_func;
        state->read_memory_func_extra = read_memory_func_extra;
}

/* Handle the case where the register holds more bits than the VA. */
static bool valid_addr_reg(bt_int_reg_t reg)
{
        return ((VirtualAddress)reg == reg);
}
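
/*
 * For example, on a configuration where bt_int_reg_t is wider than
 * VirtualAddress, a saved 64-bit value such as 0xffffffff80000000 would be
 * truncated by the cast, the comparison would fail, and the bogus address
 * would be rejected by the caller below.
 */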

bool backtrace_next(BacktraceIterator *state)
{
        VirtualAddress next_fp, next_pc;
        bt_int_reg_t next_frame[2];

        if (state->fp == -1) {
                /* No parent frame. */
                return false;
        }

        /* Try to read the frame linkage data chaining to the next function. */
        if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame,
                                     state->read_memory_func_extra)) {
                return false;
        }

        next_fp = next_frame[1];
        if (!valid_addr_reg(next_frame[1]) ||
            next_fp % sizeof(bt_int_reg_t) != 0) {
                /* Caller's frame pointer is suspect, so give up. */
                return false;
        }

        if (state->initial_frame_caller_pc != -1) {
                /* We must be in the initial stack frame and already know the
                 * caller PC.
                 */
                next_pc = state->initial_frame_caller_pc;

                /* Force reading stack next time, in case we were in the
                 * initial frame.  We don't do this above just to paranoidly
                 * avoid changing the struct at all when we return false.
                 */
                state->initial_frame_caller_pc = -1;
        } else {
                /* Get the caller PC from the frame linkage area. */
                next_pc = next_frame[0];
                if (!valid_addr_reg(next_frame[0]) || next_pc == 0 ||
                    next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
                        /* The PC is suspect, so give up. */
                        return false;
                }
        }

        /* Update state to become the caller's stack frame. */
        state->pc = next_pc;
        state->sp = state->fp;
        state->fp = next_fp;

        return true;
}
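
/*
 * Illustrative usage sketch (not part of this file): a caller typically
 * seeds an iterator with a register snapshot and then walks the frames
 * until backtrace_next() fails, e.g.
 *
 *      BacktraceIterator it;
 *
 *      backtrace_init(&it, NULL, NULL, pc, lr, sp, r52);
 *      do {
 *              printk("  frame: pc %#lx sp %#lx\n",
 *                     (unsigned long)it.pc, (unsigned long)it.sp);
 *      } while (backtrace_next(&it));
 *
 * Passing NULL for read_memory_func selects the default bt_read_memory(),
 * which is only safe when the addresses being walked are directly
 * dereferenceable in the current address space.
 */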