/*
 *  linux/arch/arm/mm/alignment.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2001 Russell King
 *  Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
 *  - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
 *    Copyright (C) 1996, Cygnus Software Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>
#include <asm/opcodes.h>

#include "fault.h"
#include "mm.h"
/*
 * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
 * /proc/sys/debug/alignment, modified and integrated into
 * Linux 2.1 by Russell King
 *
 * Speed optimisations and better fault handling by Russell King.
 *
 * *** NOTE ***
 * This code is not portable to processors with late data abort handling.
 */
#define CODING_BITS(i)	(i & 0x0e000000)
#define COND_BITS(i)	(i & 0xf0000000)

#define LDST_I_BIT(i)	(i & (1 << 26))		/* Immediate constant */
#define LDST_P_BIT(i)	(i & (1 << 24))		/* Preindex */
#define LDST_U_BIT(i)	(i & (1 << 23))		/* Add offset */
#define LDST_W_BIT(i)	(i & (1 << 21))		/* Writeback */
#define LDST_L_BIT(i)	(i & (1 << 20))		/* Load */

#define LDST_P_EQ_U(i)	((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)
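/*
 * Illustrative note (not from the original source): LDST_P_EQ_U() is true
 * exactly when the P bit (24) equals the U bit (23).  For example, with
 * i = 0x01800000 both bits are set, so (i ^ (i >> 1)) has bit 23 clear and
 * the macro evaluates true; with i = 0x01000000 (P=1, U=0) it is false.
 */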
#define LDSTHD_I_BIT(i)	(i & (1 << 22))		/* double/half-word immed */
#define LDM_S_BIT(i)	(i & (1 << 22))		/* write CPSR from SPSR */

#define RN_BITS(i)	((i >> 16) & 15)	/* Rn */
#define RD_BITS(i)	((i >> 12) & 15)	/* Rd */
#define RM_BITS(i)	(i & 15)		/* Rm */

#define REGMASK_BITS(i)	(i & 0xffff)
#define OFFSET_BITS(i)	(i & 0x0fff)

#define IS_SHIFT(i)	(i & 0x0ff0)
#define SHIFT_BITS(i)	((i >> 7) & 0x1f)
#define SHIFT_TYPE(i)	(i & 0x60)
#define SHIFT_LSL	0x00
#define SHIFT_LSR	0x20
#define SHIFT_ASR	0x40
#define SHIFT_RORRRX	0x60

#define BAD_INSTR	0xdeadc0de
/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3: first halfword is one of
 * e800h, f000h or f800h */
#define IS_T32(hi16) \
	(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
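/*
 * Illustrative note (not from the original source): the first halfword of a
 * 32-bit Thumb-2 instruction starts with 0b11101/0b11110/0b11111, i.e. lies
 * in 0xe800-0xffff.  For example IS_T32(0xe92d) is true ("stmdb sp!" prefix),
 * while IS_T32(0x6848) is false (a 16-bit "ldr" encoding).
 */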
static unsigned long ai_user;
static unsigned long ai_sys;
static void *ai_sys_last_pc;
static unsigned long ai_skipped;
static unsigned long ai_half;
static unsigned long ai_word;
static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
static unsigned long cr_no_alignment;

core_param(alignment, ai_usermode, int, 0600);
#define UM_WARN		(1 << 0)
#define UM_FIXUP	(1 << 1)
#define UM_SIGNAL	(1 << 2)
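/*
 * Illustrative note (not from the original source): ai_usermode is the OR of
 * the UM_* bits and can be set to 0-5 via the "alignment=" kernel parameter
 * or by writing to /proc/cpu/alignment.  For example, "alignment=3" selects
 * UM_WARN | UM_FIXUP (warn about each user fault and fix it up), while
 * "alignment=4" (UM_SIGNAL) delivers SIGBUS instead of fixing up.
 */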
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
	return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U;
}
static int safe_usermode(int new_usermode, bool warn)
{
	/*
	 * ARMv6 and later CPUs can perform unaligned accesses for
	 * most single load and store instructions up to word size.
	 * LDM, STM, LDRD and STRD still need to be handled.
	 *
	 * Ignoring the alignment fault is not an option on these
	 * CPUs since we spin re-faulting the instruction without
	 * making any progress.
	 */
	if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
		new_usermode |= UM_FIXUP;

		if (warn)
			pr_warn("alignment: ignoring faults is unsafe on this CPU.  Defaulting to fixup mode.\n");
	}

	return new_usermode;
}
#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
	"ignored",
	"warn",
	"fixup",
	"fixup+warn",
	"signal",
	"signal+warn"
};
static int alignment_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "User:\t\t%lu\n", ai_user);
	seq_printf(m, "System:\t\t%lu (%pF)\n", ai_sys, ai_sys_last_pc);
	seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
	seq_printf(m, "Half:\t\t%lu\n", ai_half);
	seq_printf(m, "Word:\t\t%lu\n", ai_word);
	if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
		seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
	seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
	seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
			usermode_action[ai_usermode]);

	return 0;
}
static int alignment_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, alignment_proc_show, NULL);
}
static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
				    size_t count, loff_t *pos)
{
	char mode;

	if (count > 0) {
		if (get_user(mode, buffer))
			return -EFAULT;
		if (mode >= '0' && mode <= '5')
			ai_usermode = safe_usermode(mode - '0', true);
	}
	return count;
}
static const struct file_operations alignment_proc_fops = {
	.open		= alignment_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= alignment_proc_write,
};
#endif /* CONFIG_PROC_FS */
union offset_union {
	unsigned long un;
	  signed long sn;
};

#define TYPE_ERROR	0
#define TYPE_FAULT	1
#define TYPE_LDST	2
#define TYPE_DONE	3

#ifdef __ARMEB__
#define BE		1
#define FIRST_BYTE_16	"mov %1, %1, ror #8\n"
#define FIRST_BYTE_32	"mov %1, %1, ror #24\n"
#define NEXT_BYTE	"ror #24"
#else
#define BE		0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE	"lsr #8"
#endif
#define __get8_unaligned_check(ins,val,addr,err)	\
	__asm__(					\
	ARM(	"1: "ins" %1, [%2], #1\n" )		\
	THUMB(	"1: "ins" %1, [%2]\n" )			\
	THUMB(	"   add %2, %2, #1\n" )			\
	"2:\n"						\
	" .pushsection .text.fixup,\"ax\"\n"		\
	" .align 2\n"					\
	"3: mov %0, #1\n"				\
	"   b 2b\n"					\
	" .popsection\n"				\
	" .pushsection __ex_table,\"a\"\n"		\
	" .align 3\n"					\
	" .long 1b, 3b\n"				\
	" .popsection\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))
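/*
 * Illustrative note (not from the original source): the "1:" load above is
 * paired, via the __ex_table entry, with the "3:" fixup stub in .text.fixup.
 * If the byte access faults, the exception handler branches to the stub,
 * which sets err to 1 and resumes at "2:"; callers then "goto fault".
 */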
#define __get16_unaligned_check(ins,val,addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(ins,v,a,err);	\
		val =  v << ((BE) ? 8 : 0);		\
		__get8_unaligned_check(ins,v,a,err);	\
		val |= v << ((BE) ? 0 : 8);		\
		if (err)				\
			goto fault;			\
	} while (0)
#define get16_unaligned_check(val,addr) \
	__get16_unaligned_check("ldrb",val,addr)

#define get16t_unaligned_check(val,addr) \
	__get16_unaligned_check("ldrbt",val,addr)
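/*
 * Illustrative note (not from the original source): the "t" variants use
 * ldrbt/strbt, the unprivileged forms of the byte access, so fixups of
 * user-mode faults are performed with user access rights and cannot be
 * abused to read or write protected kernel memory.
 */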
#define __get32_unaligned_check(ins,val,addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(ins,v,a,err);	\
		val =  v << ((BE) ? 24 : 0);		\
		__get8_unaligned_check(ins,v,a,err);	\
		val |= v << ((BE) ? 16 : 8);		\
		__get8_unaligned_check(ins,v,a,err);	\
		val |= v << ((BE) ? 8 : 16);		\
		__get8_unaligned_check(ins,v,a,err);	\
		val |= v << ((BE) ? 0 : 24);		\
		if (err)				\
			goto fault;			\
	} while (0)
#define get32_unaligned_check(val,addr) \
	__get32_unaligned_check("ldrb",val,addr)

#define get32t_unaligned_check(val,addr) \
	__get32_unaligned_check("ldrbt",val,addr)
#define __put16_unaligned_check(ins,val,addr)			\
	do {							\
		unsigned int err = 0, v = val, a = addr;	\
		__asm__( FIRST_BYTE_16				\
		ARM(	"1: "ins" %1, [%2], #1\n" )		\
		THUMB(	"1: "ins" %1, [%2]\n" )			\
		THUMB(	"   add %2, %2, #1\n" )			\
		" mov %1, %1, "NEXT_BYTE"\n"			\
		"2: "ins" %1, [%2]\n"				\
		"3:\n"						\
		" .pushsection .text.fixup,\"ax\"\n"		\
		" .align 2\n"					\
		"4: mov %0, #1\n"				\
		"   b 3b\n"					\
		" .popsection\n"				\
		" .pushsection __ex_table,\"a\"\n"		\
		" .align 3\n"					\
		" .long 1b, 4b\n"				\
		" .long 2b, 4b\n"				\
		" .popsection\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)		\
		: "0" (err), "1" (v), "2" (a));			\
		if (err)					\
			goto fault;				\
	} while (0)
#define put16_unaligned_check(val,addr)	\
	__put16_unaligned_check("strb",val,addr)

#define put16t_unaligned_check(val,addr) \
	__put16_unaligned_check("strbt",val,addr)
#define __put32_unaligned_check(ins,val,addr)			\
	do {							\
		unsigned int err = 0, v = val, a = addr;	\
		__asm__( FIRST_BYTE_32				\
		ARM(	"1: "ins" %1, [%2], #1\n" )		\
		THUMB(	"1: "ins" %1, [%2]\n" )			\
		THUMB(	"   add %2, %2, #1\n" )			\
		" mov %1, %1, "NEXT_BYTE"\n"			\
		ARM(	"2: "ins" %1, [%2], #1\n" )		\
		THUMB(	"2: "ins" %1, [%2]\n" )			\
		THUMB(	"   add %2, %2, #1\n" )			\
		" mov %1, %1, "NEXT_BYTE"\n"			\
		ARM(	"3: "ins" %1, [%2], #1\n" )		\
		THUMB(	"3: "ins" %1, [%2]\n" )			\
		THUMB(	"   add %2, %2, #1\n" )			\
		" mov %1, %1, "NEXT_BYTE"\n"			\
		"4: "ins" %1, [%2]\n"				\
		"5:\n"						\
		" .pushsection .text.fixup,\"ax\"\n"		\
		" .align 2\n"					\
		"6: mov %0, #1\n"				\
		"   b 5b\n"					\
		" .popsection\n"				\
		" .pushsection __ex_table,\"a\"\n"		\
		" .align 3\n"					\
		" .long 1b, 6b\n"				\
		" .long 2b, 6b\n"				\
		" .long 3b, 6b\n"				\
		" .long 4b, 6b\n"				\
		" .popsection\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)		\
		: "0" (err), "1" (v), "2" (a));			\
		if (err)					\
			goto fault;				\
	} while (0)
#define put32_unaligned_check(val,addr)	 \
	__put32_unaligned_check("strb", val, addr)

#define put32t_unaligned_check(val,addr) \
	__put32_unaligned_check("strbt", val, addr)
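/*
 * Illustrative note (not from the original source): on little-endian, storing
 * v = 0xaabbccdd byte-by-byte emits 0xdd at addr, then NEXT_BYTE ("lsr #8")
 * exposes 0xcc for addr+1, 0xbb for addr+2 and 0xaa for addr+3 - exactly the
 * little-endian image of the word.  On big-endian, FIRST_BYTE_32 rotates the
 * most significant byte into place first.
 */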
static void
do_alignment_finish_ldst(unsigned long addr, unsigned long instr,
			 struct pt_regs *regs, union offset_union offset)
{
	if (!LDST_U_BIT(instr))
		offset.un = -offset.un;

	if (!LDST_P_BIT(instr))
		addr += offset.un;

	if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
		regs->uregs[RN_BITS(instr)] = addr;
}
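/*
 * Illustrative note (not from the original source): for a post-indexed
 * access such as "ldr r0, [r1], #4" (P=0, U=1), the load itself uses the
 * faulting address while the code above writes addr + 4 back to r1; for
 * "ldr r0, [r1, #-4]!" (P=1, W=1) the already-offset faulting address is
 * written back unchanged.
 */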
static int
do_alignment_ldrhstrh(unsigned long addr, unsigned long instr,
		      struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);

	ai_half += 1;

	if (user_mode(regs))
		goto user;

	if (LDST_L_BIT(instr)) {
		unsigned long val;
		get16_unaligned_check(val, addr);

		/* signed half-word? */
		if (instr & 0x40)
			val = (signed long)((signed short) val);

		regs->uregs[rd] = val;
	} else
		put16_unaligned_check(regs->uregs[rd], addr);

	return TYPE_LDST;

 user:
	if (LDST_L_BIT(instr)) {
		unsigned long val;
		unsigned int __ua_flags = uaccess_save_and_enable();

		get16t_unaligned_check(val, addr);
		uaccess_restore(__ua_flags);

		/* signed half-word? */
		if (instr & 0x40)
			val = (signed long)((signed short) val);

		regs->uregs[rd] = val;
	} else {
		unsigned int __ua_flags = uaccess_save_and_enable();
		put16t_unaligned_check(regs->uregs[rd], addr);
		uaccess_restore(__ua_flags);
	}

	return TYPE_LDST;

 fault:
	return TYPE_FAULT;
}
static int
do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
		      struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);
	unsigned int rd2;
	int load;

	if ((instr & 0xfe000000) == 0xe8000000) {
		/* ARMv7 Thumb-2 32-bit LDRD/STRD */
		rd2 = (instr >> 8) & 0xf;
		load = !!(LDST_L_BIT(instr));
	} else if (((rd & 1) == 1) || (rd == 14))
		goto bad;
	else {
		load = ((instr & 0xf0) == 0xd0);
		rd2 = rd + 1;
	}

	ai_dword += 1;

	if (user_mode(regs))
		goto user;

	if (load) {
		unsigned long val;
		get32_unaligned_check(val, addr);
		regs->uregs[rd] = val;
		get32_unaligned_check(val, addr + 4);
		regs->uregs[rd2] = val;
	} else {
		put32_unaligned_check(regs->uregs[rd], addr);
		put32_unaligned_check(regs->uregs[rd2], addr + 4);
	}

	return TYPE_LDST;

 user:
	if (load) {
		unsigned long val, val2;
		unsigned int __ua_flags = uaccess_save_and_enable();

		get32t_unaligned_check(val, addr);
		get32t_unaligned_check(val2, addr + 4);

		uaccess_restore(__ua_flags);

		regs->uregs[rd] = val;
		regs->uregs[rd2] = val2;
	} else {
		unsigned int __ua_flags = uaccess_save_and_enable();
		put32t_unaligned_check(regs->uregs[rd], addr);
		put32t_unaligned_check(regs->uregs[rd2], addr + 4);
		uaccess_restore(__ua_flags);
	}

	return TYPE_LDST;

 bad:
	return TYPE_ERROR;

 fault:
	return TYPE_FAULT;
}
static int
do_alignment_ldrstr(unsigned long addr, unsigned long instr,
		    struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);

	ai_word += 1;

	if ((!LDST_P_BIT(instr) && LDST_W_BIT(instr)) || user_mode(regs))
		goto trans;

	if (LDST_L_BIT(instr)) {
		unsigned long val;
		get32_unaligned_check(val, addr);
		regs->uregs[rd] = val;
	} else
		put32_unaligned_check(regs->uregs[rd], addr);
	return TYPE_LDST;

 trans:
	if (LDST_L_BIT(instr)) {
		unsigned long val;
		unsigned int __ua_flags = uaccess_save_and_enable();

		get32t_unaligned_check(val, addr);
		uaccess_restore(__ua_flags);
		regs->uregs[rd] = val;
	} else {
		unsigned int __ua_flags = uaccess_save_and_enable();

		put32t_unaligned_check(regs->uregs[rd], addr);
		uaccess_restore(__ua_flags);
	}
	return TYPE_LDST;

 fault:
	return TYPE_FAULT;
}
/*
 * LDM/STM alignment handler.
 *
 * There are 4 variants of this instruction:
 *
 * B = rn pointer before instruction, A = rn pointer after instruction
 *              ------ increasing address ----->
 *	        |    | r0 | r1 | ... | rx |    |
 * PU = 01             B                    A
 * PU = 11        B                    A
 * PU = 00        A                    B
 * PU = 10             A                    B
 */
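/*
 * Illustrative note (not from the original source): for "ldmia r0!, {r1-r3}"
 * faulting with r0 = 0x8004 (PU = 01), the handler below computes
 * nr_regs = 12, eaddr = 0x8004 and newaddr = 0x8010; r1-r3 are loaded from
 * eaddr, eaddr + 4 and eaddr + 8, and the W bit makes r0 = newaddr.
 */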
static int
do_alignment_ldmstm(unsigned long addr, unsigned long instr,
		    struct pt_regs *regs)
{
	unsigned int rd, rn, correction, nr_regs, regbits;
	unsigned long eaddr, newaddr;

	if (LDM_S_BIT(instr))
		goto bad;

	correction = 4; /* processor implementation defined */
	regs->ARM_pc += correction;

	ai_multi += 1;

	/* count the number of registers in the mask to be transferred */
	nr_regs = hweight16(REGMASK_BITS(instr)) * 4;

	rn = RN_BITS(instr);
	newaddr = eaddr = regs->uregs[rn];

	if (!LDST_U_BIT(instr))
		nr_regs = -nr_regs;
	newaddr += nr_regs;
	if (!LDST_U_BIT(instr))
		eaddr = newaddr;

	if (LDST_P_EQ_U(instr))	/* U = P */
		eaddr += 4;

	/*
	 * For alignment faults on the ARM922T/ARM920T the MMU makes
	 * the FSR (and hence addr) equal to the updated base address
	 * of the multiple access rather than the restored value.
	 * Switch this message off if we've got a ARM92[02], otherwise
	 * [ls]dm alignment faults are noisy!
	 */
#if !(defined CONFIG_CPU_ARM922T) && !(defined CONFIG_CPU_ARM920T)
	/*
	 * This is a "hint" - we already have eaddr worked out by the
	 * processor for us.
	 */
	if (addr != eaddr) {
		pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
			"addr = %08lx, eaddr = %08lx\n",
			instruction_pointer(regs), instr, addr, eaddr);
		show_regs(regs);
	}
#endif

	if (user_mode(regs)) {
		unsigned int __ua_flags = uaccess_save_and_enable();

		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
		     regbits >>= 1, rd += 1)
			if (regbits & 1) {
				if (LDST_L_BIT(instr)) {
					unsigned int val;
					get32t_unaligned_check(val, eaddr);
					regs->uregs[rd] = val;
				} else
					put32t_unaligned_check(regs->uregs[rd], eaddr);
				eaddr += 4;
			}
		uaccess_restore(__ua_flags);
	} else {
		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
		     regbits >>= 1, rd += 1)
			if (regbits & 1) {
				if (LDST_L_BIT(instr)) {
					unsigned int val;
					get32_unaligned_check(val, eaddr);
					regs->uregs[rd] = val;
				} else
					put32_unaligned_check(regs->uregs[rd], eaddr);
				eaddr += 4;
			}
	}

	if (LDST_W_BIT(instr))
		regs->uregs[rn] = newaddr;

	if (!LDST_L_BIT(instr) || !(REGMASK_BITS(instr) & (1 << 15)))
		regs->ARM_pc -= correction;

	return TYPE_DONE;

 fault:
	regs->ARM_pc -= correction;
	return TYPE_FAULT;

 bad:
	pr_err("Alignment trap: not handling ldm with s-bit set\n");
	return TYPE_ERROR;
}
/*
 * Convert Thumb ld/st instruction forms to equivalent ARM instructions so
 * we can reuse ARM userland alignment fault fixups for Thumb.
 *
 * This implementation was initially based on the algorithm found in
 * gdb/sim/arm/thumbemu.c. It is basically just a code reduction of same
 * to convert only Thumb ld/st instruction forms to equivalent ARM forms.
 *
 * NOTES:
 * 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections.
 * 2. If for some reason we're passed a non-ld/st Thumb instruction to
 *    decode, we return 0xdeadc0de. This should never happen under normal
 *    circumstances but if it does, we've got other problems to deal with
 *    elsewhere and we obviously can't fix those problems here.
 */
static unsigned long
thumb2arm(u16 tinstr)
{
	u32 L = (tinstr & (1<<11)) >> 11;

	switch ((tinstr & 0xf800) >> 11) {
	/* 6.5.1 Format 1: */
	case 0x6000 >> 11:				/* 7.1.52 STR(1) */
	case 0x6800 >> 11:				/* 7.1.26 LDR(1) */
	case 0x7000 >> 11:				/* 7.1.55 STRB(1) */
	case 0x7800 >> 11:				/* 7.1.30 LDRB(1) */
		return 0xe5800000 |
			((tinstr & (1<<12)) << (22-12)) |	/* fixup */
			(L<<20) |				/* L==1? */
			((tinstr & (7<<0)) << (12-0)) |		/* Rd */
			((tinstr & (7<<3)) << (16-3)) |		/* Rn */
			((tinstr & (31<<6)) >>			/* immed_5 */
				(6 - ((tinstr & (1<<12)) ? 0 : 2)));
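	/*
	 * Illustrative note (not from the original source): the Thumb
	 * encoding 0x6848 ("ldr r0, [r1, #4]", immed_5 = 1) maps through
	 * the expression above to 0xe5910004, the ARM "ldr r0, [r1, #4]".
	 * Word and byte forms scale immed_5 differently, hence the
	 * conditional shift on bit 12.
	 */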
	case 0x8000 >> 11:				/* 7.1.57 STRH(1) */
	case 0x8800 >> 11:				/* 7.1.32 LDRH(1) */
		return 0xe1c000b0 |
			(L<<20) |				/* L==1? */
			((tinstr & (7<<0)) << (12-0)) |		/* Rd */
			((tinstr & (7<<3)) << (16-3)) |		/* Rn */
			((tinstr & (7<<6)) >> (6-1)) |		/* immed_5[2:0] */
			((tinstr & (3<<9)) >> (9-8));		/* immed_5[4:3] */
	/* 6.5.1 Format 2: */
	case 0x5000 >> 11:
	case 0x5800 >> 11:
		{
			static const u32 subset[8] = {
				0xe7800000,		/* 7.1.53 STR(2) */
				0xe18000b0,		/* 7.1.58 STRH(2) */
				0xe7c00000,		/* 7.1.56 STRB(2) */
				0xe19000d0,		/* 7.1.34 LDRSB */
				0xe7900000,		/* 7.1.27 LDR(2) */
				0xe19000b0,		/* 7.1.33 LDRH(2) */
				0xe7d00000,		/* 7.1.31 LDRB(2) */
				0xe19000f0		/* 7.1.35 LDRSH */
			};
			return subset[(tinstr & (7<<9)) >> 9] |
			    ((tinstr & (7<<0)) << (12-0)) |	/* Rd */
			    ((tinstr & (7<<3)) << (16-3)) |	/* Rn */
			    ((tinstr & (7<<6)) >> (6-0));	/* Rm */
		}
	/* 6.5.1 Format 3: */
	case 0x4800 >> 11:				/* 7.1.28 LDR(3) */
		/* NOTE: This case is not technically possible. We're
		 *	 loading 32-bit memory data via PC relative
		 *	 addressing mode. So we can and should eliminate
		 *	 this case. But I'll leave it here for now.
		 */
		return 0xe59f0000 |
			((tinstr & (7<<8)) << (12-8)) |		/* Rd */
			((tinstr & 255) << (2-0));		/* immed_8 */
	/* 6.5.1 Format 4: */
	case 0x9000 >> 11:				/* 7.1.54 STR(3) */
	case 0x9800 >> 11:				/* 7.1.29 LDR(4) */
		return 0xe58d0000 |
			(L<<20) |				/* L==1? */
			((tinstr & (7<<8)) << (12-8)) |		/* Rd */
			((tinstr & 255) << 2);			/* immed_8 */
	/* 6.6.1 Format 1: */
	case 0xc000 >> 11:				/* 7.1.51 STMIA */
	case 0xc800 >> 11:				/* 7.1.25 LDMIA */
		{
			u32 Rn = (tinstr & (7<<8)) >> 8;
			u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;

			return 0xe8800000 | W | (L<<20) | (Rn<<16) |
				(tinstr&255);			/* register_list */
		}
688 case 0xb000 >> 11: /* 7.1.48 PUSH */
689 case 0xb800 >> 11: /* 7.1.47 POP */
690 if ((tinstr
& (3 << 9)) == 0x0400) {
691 static const u32 subset
[4] = {
692 0xe92d0000, /* STMDB sp!,{registers} */
693 0xe92d4000, /* STMDB sp!,{registers,lr} */
694 0xe8bd0000, /* LDMIA sp!,{registers} */
695 0xe8bd8000 /* LDMIA sp!,{registers,pc} */
697 return subset
[(L
<<1) | ((tinstr
& (1<<8)) >> 8)] |
698 (tinstr
& 255); /* register_list */
700 /* Else fall through for illegal instruction case */
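#if 0
/*
 * Hypothetical self-test sketch (not part of the original file), showing a
 * couple of conversions worked out by hand against the cases above.
 */
static void __init thumb2arm_selftest(void)
{
	/* Thumb "ldr r0, [r1, #4]" -> ARM "ldr r0, [r1, #4]" */
	BUG_ON(thumb2arm(0x6848) != 0xe5910004);
	/* Thumb "push {r0-r2, lr}" -> ARM "stmdb sp!, {r0-r2, lr}" */
	BUG_ON(thumb2arm(0xb507) != 0xe92d4007);
}
#endif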
/*
 * Convert Thumb-2 32 bit LDM, STM, LDRD, STRD to equivalent instruction
 * handlable by ARM alignment handler, also find the corresponding handler,
 * so that we can reuse ARM userland alignment fault fixups for Thumb.
 *
 * @pinstr: original Thumb-2 instruction; returns new handlable instruction
 * @regs: register context.
 * @poffset: return offset from faulted addr for later writeback
 *
 * NOTES:
 * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
 * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt).
 */
static void *
do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
			    union offset_union *poffset)
{
	unsigned long instr = *pinstr;
	u16 tinst1 = (instr >> 16) & 0xffff;
	u16 tinst2 = instr & 0xffff;

	switch (tinst1 & 0xffe0) {
	/* A6.3.5 Load/Store multiple */
	case 0xe880:		/* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
	case 0xe8a0:		/* ...above writeback version */
	case 0xe900:		/* STMDB/STMFD, LDMDB/LDMEA */
	case 0xe920:		/* ...above writeback version */
		/* no need offset decision since handler calculates it */
		return do_alignment_ldmstm;

	case 0xf840:		/* POP/PUSH T3 (single register) */
		if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
			u32 L = !!(LDST_L_BIT(instr));
			const u32 subset[2] = {
				0xe92d0000,	/* STMDB sp!,{registers} */
				0xe8bd0000,	/* LDMIA sp!,{registers} */
			};
			*pinstr = subset[L] | (1<<RD_BITS(instr));
			return do_alignment_ldmstm;
		}
		/* Else fall through for illegal instruction case */
		break;

	/* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
	case 0xe860:
	case 0xe960:
	case 0xe8e0:
	case 0xe9e0:
		poffset->un = (tinst2 & 0xff) << 2;
	case 0xe940:
	case 0xe9c0:
		return do_alignment_ldrdstrd;

	/*
	 * No need to handle load/store instructions up to word size
	 * since ARMv6 and later CPUs can perform unaligned accesses.
	 */
	default:
		break;
	}

	return NULL;
}
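/*
 * Illustrative note (not from the original source): the Thumb-2 LDRD/STRD
 * immediate is scaled by 4, so "ldrd r0, r1, [r2, #8]" carries imm8 = 2 and
 * the handler above records poffset->un = 2 << 2 = 8 for the writeback fixup.
 */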
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	union offset_union uninitialized_var(offset);
	unsigned long instr = 0, instrptr;
	int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
	unsigned int type;
	unsigned int fault;
	u16 tinstr = 0;
	int isize = 4;
	int thumb2_32b = 0;

	if (interrupts_enabled(regs))
		local_irq_enable();

	instrptr = instruction_pointer(regs);
)) {
788 u16
*ptr
= (u16
*)(instrptr
& ~1);
789 fault
= probe_kernel_address(ptr
, tinstr
);
790 tinstr
= __mem_to_opcode_thumb16(tinstr
);
792 if (cpu_architecture() >= CPU_ARCH_ARMv7
&&
796 fault
= probe_kernel_address(ptr
+ 1, tinst2
);
797 tinst2
= __mem_to_opcode_thumb16(tinst2
);
798 instr
= __opcode_thumb32_compose(tinstr
, tinst2
);
802 instr
= thumb2arm(tinstr
);
806 fault
= probe_kernel_address((void *)instrptr
, instr
);
807 instr
= __mem_to_opcode_arm(instr
);
	if (fault) {
		type = TYPE_FAULT;
		goto bad_or_fault;
	}

	if (user_mode(regs))
		goto user;

	ai_sys += 1;
	ai_sys_last_pc = (void *)instruction_pointer(regs);

 fixup:
	regs->ARM_pc += isize;
	switch (CODING_BITS(instr)) {
	case 0x00000000:	/* 3.13.4 load/store instruction extensions */
		if (LDSTHD_I_BIT(instr))
			offset.un = (instr & 0xf00) >> 4 | (instr & 15);
		else
			offset.un = regs->uregs[RM_BITS(instr)];

		if ((instr & 0x000000f0) == 0x000000b0 ||	/* LDRH, STRH */
		    (instr & 0x001000f0) == 0x001000f0)		/* LDRSH */
			handler = do_alignment_ldrhstrh;
		else if ((instr & 0x001000f0) == 0x000000d0 ||	/* LDRD */
			 (instr & 0x001000f0) == 0x000000f0)	/* STRD */
			handler = do_alignment_ldrdstrd;
		else if ((instr & 0x01f00ff0) == 0x01000090)	/* SWP */
			goto swp;
		else
			goto bad;
		break;
	case 0x04000000:	/* ldr or str immediate */
		if (COND_BITS(instr) == 0xf0000000)	/* NEON VLDn, VSTn */
			goto bad;
		offset.un = OFFSET_BITS(instr);
		handler = do_alignment_ldrstr;
		break;
	case 0x06000000:	/* ldr or str register */
		offset.un = regs->uregs[RM_BITS(instr)];

		if (IS_SHIFT(instr)) {
			unsigned int shiftval = SHIFT_BITS(instr);

			switch (SHIFT_TYPE(instr)) {
			case SHIFT_LSL:
				offset.un <<= shiftval;
				break;

			case SHIFT_LSR:
				offset.un >>= shiftval;
				break;

			case SHIFT_ASR:
				offset.sn >>= shiftval;
				break;

			case SHIFT_RORRRX:
				if (shiftval == 0) {
					offset.un >>= 1;
					if (regs->ARM_cpsr & PSR_C_BIT)
						offset.un |= 1 << 31;
				} else
					offset.un = offset.un >> shiftval |
							offset.un << (32 - shiftval);
				break;
			}
		}
		handler = do_alignment_ldrstr;
		break;
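	/*
	 * Illustrative note (not from the original source): for
	 * "ldr r0, [r1, r2, lsl #2]" the shift decode above yields
	 * offset.un = r2 << 2; a zero shift amount in the ROR encoding
	 * means RRX, which shifts the carry flag into bit 31.
	 */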
	case 0x08000000:	/* ldm or stm, or thumb-2 32bit instruction */
		if (thumb2_32b) {
			offset.un = 0;
			handler = do_alignment_t32_to_handler(&instr, regs, &offset);
		} else {
			offset.un = 0;
			handler = do_alignment_ldmstm;
		}
		break;

	default:
		goto bad;
	}

	if (!handler)
		goto bad;
	type = handler(addr, instr, regs);

	if (type == TYPE_ERROR || type == TYPE_FAULT) {
		regs->ARM_pc -= isize;
		goto bad_or_fault;
	}

	if (type == TYPE_LDST)
		do_alignment_finish_ldst(addr, instr, regs, offset);

	return 0;
 bad_or_fault:
	if (type == TYPE_ERROR)
		goto bad;
	/*
	 * We got a fault - fix it up, or die.
	 */
	do_bad_area(addr, fsr, regs);
	return 0;
 swp:
	pr_err("Alignment trap: not handling swp instruction\n");

 bad:
	/*
	 * Oops, we didn't handle the instruction.
	 */
	pr_err("Alignment trap: not handling instruction "
		"%0*lx at [<%08lx>]\n",
		isize << 1,
		isize == 2 ? tinstr : instr, instrptr);
	ai_skipped += 1;
	return 1;
 user:
	ai_user += 1;

	if (ai_usermode & UM_WARN)
		printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
			"Address=0x%08lx FSR 0x%03x\n", current->comm,
			task_pid_nr(current), instrptr,
			isize << 1,
			isize == 2 ? tinstr : instr,
			addr, fsr);

	if (ai_usermode & UM_FIXUP)
		goto fixup;

	if (ai_usermode & UM_SIGNAL) {
		siginfo_t si;

		si.si_signo = SIGBUS;
		si.si_errno = 0;
		si.si_code = BUS_ADRALN;
		si.si_addr = (void __user *)addr;

		force_sig_info(si.si_signo, &si, current);
	} else {
		/*
		 * We're about to disable the alignment trap and return to
		 * user space.  But if an interrupt occurs before actually
		 * reaching user space, then the IRQ vector entry code will
		 * notice that we were still in kernel space and therefore
		 * the alignment trap won't be re-enabled in that case as it
		 * is presumed to be always on from kernel space.
		 * Let's prevent that race by disabling interrupts here (they
		 * are disabled on the way back to user space anyway in
		 * entry-common.S) and disable the alignment trap only if
		 * there is no work pending for this thread.
		 */
		raw_local_irq_disable();
		if (!(current_thread_info()->flags & _TIF_WORK_MASK))
			set_cr(cr_no_alignment);
	}

	return 0;
}
static int __init noalign_setup(char *__unused)
{
	set_cr(__clear_cr(CR_A));
	return 1;
}
__setup("noalign", noalign_setup);
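/*
 * Illustrative note (not from the original source): booting with "noalign"
 * on the kernel command line clears the A bit in the control register, so
 * the CPU stops raising alignment faults altogether and this handler is
 * never entered for them.
 */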
/*
 * This needs to be done after sysctl_init, otherwise sys/ will be
 * overwritten.  Actually, this shouldn't be in sys/ at all since
 * it isn't a sysctl, and it doesn't contain sysctl information.
 * We now locate it in /proc/cpu/alignment instead.
 */
static int __init alignment_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *res;

	res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
			  &alignment_proc_fops);
	if (!res)
		return -ENOMEM;
#endif

	if (cpu_is_v6_unaligned()) {
		set_cr(__clear_cr(CR_A));
		ai_usermode = safe_usermode(ai_usermode, false);
	}

	cr_no_alignment = get_cr() & ~CR_A;

	hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
			"alignment exception");

	/*
	 * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
	 * fault, not as alignment error.
	 *
	 * TODO: handle ARMv6K properly. Runtime check for 'K' extension is
	 * needed.
	 */
	if (cpu_architecture() <= CPU_ARCH_ARMv6) {
		hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
				"alignment exception");
	}

	return 0;
}

fs_initcall(alignment_init);