mm: page_alloc: do not treat a zone that cannot be used for dirty pages as "full"
[linux/fpc-iii.git] / arch / mips / kernel / unaligned.c
blobb897dde93e7a489637b7a64d174772cb2bd64a05
1 /*
2 * Handle unaligned accesses by emulation.
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
11 * This file contains exception handler for address error exception with the
12 * special capability to execute faulting instructions in software. The
13 * handler does not try to handle the case when the program counter points
14 * to an address not aligned to a word boundary.
16 * Putting data to unaligned addresses is a bad practice even on Intel where
17 * only the performance is affected. Much worse is that such code is non-
18 * portable. Due to several programs that die on MIPS due to alignment
19 * problems I decided to implement this handler anyway though I originally
20 * didn't intend to do this at all for user code.
22 * For now I enable fixing of address errors by default to make life easier.
23 * However, I intend to disable this sometime in the future when the alignment
24 * problems with user programs have been fixed. For programmers this is the
25 * right way to go.
27 * Fixing address errors is a per process option. The option is inherited
28 * across fork(2) and execve(2) calls. If you really want to use the
29 * option in your user programs - I discourage the use of the software
30 * emulation strongly - use the following code in your userland stuff:
32 * #include <sys/sysmips.h>
34 * ...
35 * sysmips(MIPS_FIXADE, x);
36 * ...
38 * The argument x is 0 for disabling software emulation, enabled otherwise.
40 * Below a little program to play around with this feature.
42 * #include <stdio.h>
43 * #include <sys/sysmips.h>
45 * struct foo {
46 * unsigned char bar[8];
47 * };
49 * main(int argc, char *argv[])
50 * {
51 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
52 * unsigned int *p = (unsigned int *) (x.bar + 3);
53 * int i;
55 * if (argc > 1)
56 * sysmips(MIPS_FIXADE, atoi(argv[1]));
58 * printf("*p = %08lx\n", *p);
60 * *p = 0xdeadface;
62 * for(i = 0; i <= 7; i++)
63 * printf("%02x ", x.bar[i]);
64 * printf("\n");
65 * }
67 * Coprocessor loads are not supported; I think this case is unimportant
68 * in practice.
70 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
71 * exception for the R6000.
72 * A store crossing a page boundary might be executed only partially.
73 * Undo the partial store in this case.
75 #include <linux/context_tracking.h>
76 #include <linux/mm.h>
77 #include <linux/signal.h>
78 #include <linux/smp.h>
79 #include <linux/sched.h>
80 #include <linux/debugfs.h>
81 #include <linux/perf_event.h>
83 #include <asm/asm.h>
84 #include <asm/branch.h>
85 #include <asm/byteorder.h>
86 #include <asm/cop2.h>
87 #include <asm/fpu.h>
88 #include <asm/fpu_emulator.h>
89 #include <asm/inst.h>
90 #include <asm/uaccess.h>
91 #include <asm/fpu.h>
92 #include <asm/fpu_emulator.h>
/*
 * Two-level stringification helper: STR(PTR) expands the PTR macro
 * (the register-width pointer directive, .word or .dword) before
 * turning it into a string for the inline-asm __ex_table entries below.
 */
94 #define STR(x) __STR(x)
95 #define __STR(x) #x
/*
 * Policy for handling an unaligned access from user space:
 * emulate silently, send a signal, or emulate and dump state.
 * NOTE(review): the closing "};" of this enum (original line 101)
 * appears to have been lost in extraction of this file.
 */
97 enum {
98 UNALIGNED_ACTION_QUIET,
99 UNALIGNED_ACTION_SIGNAL,
100 UNALIGNED_ACTION_SHOW,
/* Emulation counter and selected action, exposed via debugfs when built in;
 * without CONFIG_DEBUG_FS the action is hard-wired to QUIET. */
102 #ifdef CONFIG_DEBUG_FS
103 static u32 unaligned_instructions;
104 static u32 unaligned_action;
105 #else
106 #define unaligned_action UNALIGNED_ACTION_QUIET
107 #endif
108 extern void show_registers(struct pt_regs *regs);
/*
 * Big-endian unaligned access primitives.
 *
 * Each macro emits inline asm that performs the access byte-wise (lb/lbu/sb)
 * or via the unaligned-capable lwl/lwr, ldl/ldr, swl/swr, sdl/sdr pairs,
 * sets `res` to 0 on success, and registers __ex_table entries so a fault
 * on either access jumps to the .fixup stub, which stores -EFAULT in `res`.
 * Comments cannot be placed inside the macro bodies: they are single
 * backslash-continued preprocessor definitions.
 */
110 #ifdef __BIG_ENDIAN
/* Load signed halfword: MSB via lb (sign-extends), LSB via lbu. */
111 #define LoadHW(addr, value, res) \
112 __asm__ __volatile__ (".set\tnoat\n" \
113 "1:\tlb\t%0, 0(%2)\n" \
114 "2:\tlbu\t$1, 1(%2)\n\t" \
115 "sll\t%0, 0x8\n\t" \
116 "or\t%0, $1\n\t" \
117 "li\t%1, 0\n" \
118 "3:\t.set\tat\n\t" \
119 ".insn\n\t" \
120 ".section\t.fixup,\"ax\"\n\t" \
121 "4:\tli\t%1, %3\n\t" \
122 "j\t3b\n\t" \
123 ".previous\n\t" \
124 ".section\t__ex_table,\"a\"\n\t" \
125 STR(PTR)"\t1b, 4b\n\t" \
126 STR(PTR)"\t2b, 4b\n\t" \
127 ".previous" \
128 : "=&r" (value), "=r" (res) \
129 : "r" (addr), "i" (-EFAULT));
/* Load (signed) word via the lwl/lwr unaligned pair. */
131 #define LoadW(addr, value, res) \
132 __asm__ __volatile__ ( \
133 "1:\tlwl\t%0, (%2)\n" \
134 "2:\tlwr\t%0, 3(%2)\n\t" \
135 "li\t%1, 0\n" \
136 "3:\n\t" \
137 ".insn\n\t" \
138 ".section\t.fixup,\"ax\"\n\t" \
139 "4:\tli\t%1, %3\n\t" \
140 "j\t3b\n\t" \
141 ".previous\n\t" \
142 ".section\t__ex_table,\"a\"\n\t" \
143 STR(PTR)"\t1b, 4b\n\t" \
144 STR(PTR)"\t2b, 4b\n\t" \
145 ".previous" \
146 : "=&r" (value), "=r" (res) \
147 : "r" (addr), "i" (-EFAULT));
/* Load unsigned halfword: both bytes via lbu, so no sign extension. */
149 #define LoadHWU(addr, value, res) \
150 __asm__ __volatile__ ( \
151 ".set\tnoat\n" \
152 "1:\tlbu\t%0, 0(%2)\n" \
153 "2:\tlbu\t$1, 1(%2)\n\t" \
154 "sll\t%0, 0x8\n\t" \
155 "or\t%0, $1\n\t" \
156 "li\t%1, 0\n" \
157 "3:\n\t" \
158 ".insn\n\t" \
159 ".set\tat\n\t" \
160 ".section\t.fixup,\"ax\"\n\t" \
161 "4:\tli\t%1, %3\n\t" \
162 "j\t3b\n\t" \
163 ".previous\n\t" \
164 ".section\t__ex_table,\"a\"\n\t" \
165 STR(PTR)"\t1b, 4b\n\t" \
166 STR(PTR)"\t2b, 4b\n\t" \
167 ".previous" \
168 : "=&r" (value), "=r" (res) \
169 : "r" (addr), "i" (-EFAULT));
/* Load word unsigned (64-bit only): lwl/lwr then dsll/dsrl to zero-extend. */
171 #define LoadWU(addr, value, res) \
172 __asm__ __volatile__ ( \
173 "1:\tlwl\t%0, (%2)\n" \
174 "2:\tlwr\t%0, 3(%2)\n\t" \
175 "dsll\t%0, %0, 32\n\t" \
176 "dsrl\t%0, %0, 32\n\t" \
177 "li\t%1, 0\n" \
178 "3:\n\t" \
179 ".insn\n\t" \
180 "\t.section\t.fixup,\"ax\"\n\t" \
181 "4:\tli\t%1, %3\n\t" \
182 "j\t3b\n\t" \
183 ".previous\n\t" \
184 ".section\t__ex_table,\"a\"\n\t" \
185 STR(PTR)"\t1b, 4b\n\t" \
186 STR(PTR)"\t2b, 4b\n\t" \
187 ".previous" \
188 : "=&r" (value), "=r" (res) \
189 : "r" (addr), "i" (-EFAULT));
/* Load doubleword via the ldl/ldr unaligned pair (64-bit only). */
191 #define LoadDW(addr, value, res) \
192 __asm__ __volatile__ ( \
193 "1:\tldl\t%0, (%2)\n" \
194 "2:\tldr\t%0, 7(%2)\n\t" \
195 "li\t%1, 0\n" \
196 "3:\n\t" \
197 ".insn\n\t" \
198 "\t.section\t.fixup,\"ax\"\n\t" \
199 "4:\tli\t%1, %3\n\t" \
200 "j\t3b\n\t" \
201 ".previous\n\t" \
202 ".section\t__ex_table,\"a\"\n\t" \
203 STR(PTR)"\t1b, 4b\n\t" \
204 STR(PTR)"\t2b, 4b\n\t" \
205 ".previous" \
206 : "=&r" (value), "=r" (res) \
207 : "r" (addr), "i" (-EFAULT));
/* Store halfword as two sb instructions (low byte first). */
209 #define StoreHW(addr, value, res) \
210 __asm__ __volatile__ ( \
211 ".set\tnoat\n" \
212 "1:\tsb\t%1, 1(%2)\n\t" \
213 "srl\t$1, %1, 0x8\n" \
214 "2:\tsb\t$1, 0(%2)\n\t" \
215 ".set\tat\n\t" \
216 "li\t%0, 0\n" \
217 "3:\n\t" \
218 ".insn\n\t" \
219 ".section\t.fixup,\"ax\"\n\t" \
220 "4:\tli\t%0, %3\n\t" \
221 "j\t3b\n\t" \
222 ".previous\n\t" \
223 ".section\t__ex_table,\"a\"\n\t" \
224 STR(PTR)"\t1b, 4b\n\t" \
225 STR(PTR)"\t2b, 4b\n\t" \
226 ".previous" \
227 : "=r" (res) \
228 : "r" (value), "r" (addr), "i" (-EFAULT));
/* Store word via the swl/swr unaligned pair. */
230 #define StoreW(addr, value, res) \
231 __asm__ __volatile__ ( \
232 "1:\tswl\t%1,(%2)\n" \
233 "2:\tswr\t%1, 3(%2)\n\t" \
234 "li\t%0, 0\n" \
235 "3:\n\t" \
236 ".insn\n\t" \
237 ".section\t.fixup,\"ax\"\n\t" \
238 "4:\tli\t%0, %3\n\t" \
239 "j\t3b\n\t" \
240 ".previous\n\t" \
241 ".section\t__ex_table,\"a\"\n\t" \
242 STR(PTR)"\t1b, 4b\n\t" \
243 STR(PTR)"\t2b, 4b\n\t" \
244 ".previous" \
245 : "=r" (res) \
246 : "r" (value), "r" (addr), "i" (-EFAULT));
/* Store doubleword via the sdl/sdr unaligned pair (64-bit only). */
248 #define StoreDW(addr, value, res) \
249 __asm__ __volatile__ ( \
250 "1:\tsdl\t%1,(%2)\n" \
251 "2:\tsdr\t%1, 7(%2)\n\t" \
252 "li\t%0, 0\n" \
253 "3:\n\t" \
254 ".insn\n\t" \
255 ".section\t.fixup,\"ax\"\n\t" \
256 "4:\tli\t%0, %3\n\t" \
257 "j\t3b\n\t" \
258 ".previous\n\t" \
259 ".section\t__ex_table,\"a\"\n\t" \
260 STR(PTR)"\t1b, 4b\n\t" \
261 STR(PTR)"\t2b, 4b\n\t" \
262 ".previous" \
263 : "=r" (res) \
264 : "r" (value), "r" (addr), "i" (-EFAULT));
265 #endif
/*
 * Little-endian unaligned access primitives — mirror images of the
 * big-endian versions above: byte offsets and the lwl/lwr (etc.)
 * displacements are swapped, everything else (fixup stubs, __ex_table
 * entries, `res` error reporting) is identical.
 */
267 #ifdef __LITTLE_ENDIAN
/* Load signed halfword: MSB at offset 1 via lb, LSB at offset 0 via lbu. */
268 #define LoadHW(addr, value, res) \
269 __asm__ __volatile__ (".set\tnoat\n" \
270 "1:\tlb\t%0, 1(%2)\n" \
271 "2:\tlbu\t$1, 0(%2)\n\t" \
272 "sll\t%0, 0x8\n\t" \
273 "or\t%0, $1\n\t" \
274 "li\t%1, 0\n" \
275 "3:\t.set\tat\n\t" \
276 ".insn\n\t" \
277 ".section\t.fixup,\"ax\"\n\t" \
278 "4:\tli\t%1, %3\n\t" \
279 "j\t3b\n\t" \
280 ".previous\n\t" \
281 ".section\t__ex_table,\"a\"\n\t" \
282 STR(PTR)"\t1b, 4b\n\t" \
283 STR(PTR)"\t2b, 4b\n\t" \
284 ".previous" \
285 : "=&r" (value), "=r" (res) \
286 : "r" (addr), "i" (-EFAULT));
/* Load (signed) word via lwl/lwr with little-endian displacements. */
288 #define LoadW(addr, value, res) \
289 __asm__ __volatile__ ( \
290 "1:\tlwl\t%0, 3(%2)\n" \
291 "2:\tlwr\t%0, (%2)\n\t" \
292 "li\t%1, 0\n" \
293 "3:\n\t" \
294 ".insn\n\t" \
295 ".section\t.fixup,\"ax\"\n\t" \
296 "4:\tli\t%1, %3\n\t" \
297 "j\t3b\n\t" \
298 ".previous\n\t" \
299 ".section\t__ex_table,\"a\"\n\t" \
300 STR(PTR)"\t1b, 4b\n\t" \
301 STR(PTR)"\t2b, 4b\n\t" \
302 ".previous" \
303 : "=&r" (value), "=r" (res) \
304 : "r" (addr), "i" (-EFAULT));
/* Load unsigned halfword: both bytes via lbu, no sign extension. */
306 #define LoadHWU(addr, value, res) \
307 __asm__ __volatile__ ( \
308 ".set\tnoat\n" \
309 "1:\tlbu\t%0, 1(%2)\n" \
310 "2:\tlbu\t$1, 0(%2)\n\t" \
311 "sll\t%0, 0x8\n\t" \
312 "or\t%0, $1\n\t" \
313 "li\t%1, 0\n" \
314 "3:\n\t" \
315 ".insn\n\t" \
316 ".set\tat\n\t" \
317 ".section\t.fixup,\"ax\"\n\t" \
318 "4:\tli\t%1, %3\n\t" \
319 "j\t3b\n\t" \
320 ".previous\n\t" \
321 ".section\t__ex_table,\"a\"\n\t" \
322 STR(PTR)"\t1b, 4b\n\t" \
323 STR(PTR)"\t2b, 4b\n\t" \
324 ".previous" \
325 : "=&r" (value), "=r" (res) \
326 : "r" (addr), "i" (-EFAULT));
/* Load word unsigned (64-bit only): zero-extend with dsll/dsrl. */
328 #define LoadWU(addr, value, res) \
329 __asm__ __volatile__ ( \
330 "1:\tlwl\t%0, 3(%2)\n" \
331 "2:\tlwr\t%0, (%2)\n\t" \
332 "dsll\t%0, %0, 32\n\t" \
333 "dsrl\t%0, %0, 32\n\t" \
334 "li\t%1, 0\n" \
335 "3:\n\t" \
336 ".insn\n\t" \
337 "\t.section\t.fixup,\"ax\"\n\t" \
338 "4:\tli\t%1, %3\n\t" \
339 "j\t3b\n\t" \
340 ".previous\n\t" \
341 ".section\t__ex_table,\"a\"\n\t" \
342 STR(PTR)"\t1b, 4b\n\t" \
343 STR(PTR)"\t2b, 4b\n\t" \
344 ".previous" \
345 : "=&r" (value), "=r" (res) \
346 : "r" (addr), "i" (-EFAULT));
/* Load doubleword via ldl/ldr (64-bit only). */
348 #define LoadDW(addr, value, res) \
349 __asm__ __volatile__ ( \
350 "1:\tldl\t%0, 7(%2)\n" \
351 "2:\tldr\t%0, (%2)\n\t" \
352 "li\t%1, 0\n" \
353 "3:\n\t" \
354 ".insn\n\t" \
355 "\t.section\t.fixup,\"ax\"\n\t" \
356 "4:\tli\t%1, %3\n\t" \
357 "j\t3b\n\t" \
358 ".previous\n\t" \
359 ".section\t__ex_table,\"a\"\n\t" \
360 STR(PTR)"\t1b, 4b\n\t" \
361 STR(PTR)"\t2b, 4b\n\t" \
362 ".previous" \
363 : "=&r" (value), "=r" (res) \
364 : "r" (addr), "i" (-EFAULT));
/* Store halfword as two sb instructions (low byte at offset 0). */
366 #define StoreHW(addr, value, res) \
367 __asm__ __volatile__ ( \
368 ".set\tnoat\n" \
369 "1:\tsb\t%1, 0(%2)\n\t" \
370 "srl\t$1,%1, 0x8\n" \
371 "2:\tsb\t$1, 1(%2)\n\t" \
372 ".set\tat\n\t" \
373 "li\t%0, 0\n" \
374 "3:\n\t" \
375 ".insn\n\t" \
376 ".section\t.fixup,\"ax\"\n\t" \
377 "4:\tli\t%0, %3\n\t" \
378 "j\t3b\n\t" \
379 ".previous\n\t" \
380 ".section\t__ex_table,\"a\"\n\t" \
381 STR(PTR)"\t1b, 4b\n\t" \
382 STR(PTR)"\t2b, 4b\n\t" \
383 ".previous" \
384 : "=r" (res) \
385 : "r" (value), "r" (addr), "i" (-EFAULT));
/* Store word via swl/swr. */
387 #define StoreW(addr, value, res) \
388 __asm__ __volatile__ ( \
389 "1:\tswl\t%1, 3(%2)\n" \
390 "2:\tswr\t%1, (%2)\n\t" \
391 "li\t%0, 0\n" \
392 "3:\n\t" \
393 ".insn\n\t" \
394 ".section\t.fixup,\"ax\"\n\t" \
395 "4:\tli\t%0, %3\n\t" \
396 "j\t3b\n\t" \
397 ".previous\n\t" \
398 ".section\t__ex_table,\"a\"\n\t" \
399 STR(PTR)"\t1b, 4b\n\t" \
400 STR(PTR)"\t2b, 4b\n\t" \
401 ".previous" \
402 : "=r" (res) \
403 : "r" (value), "r" (addr), "i" (-EFAULT));
/* Store doubleword via sdl/sdr (64-bit only). */
405 #define StoreDW(addr, value, res) \
406 __asm__ __volatile__ ( \
407 "1:\tsdl\t%1, 7(%2)\n" \
408 "2:\tsdr\t%1, (%2)\n\t" \
409 "li\t%0, 0\n" \
410 "3:\n\t" \
411 ".insn\n\t" \
412 ".section\t.fixup,\"ax\"\n\t" \
413 "4:\tli\t%0, %3\n\t" \
414 "j\t3b\n\t" \
415 ".previous\n\t" \
416 ".section\t__ex_table,\"a\"\n\t" \
417 STR(PTR)"\t1b, 4b\n\t" \
418 STR(PTR)"\t2b, 4b\n\t" \
419 ".previous" \
420 : "=r" (res) \
421 : "r" (value), "r" (addr), "i" (-EFAULT));
422 #endif
/*
 * Decode the classic-MIPS instruction that caused the address-error
 * exception and emulate the unaligned load/store in software.
 *
 * @regs: trap frame; epc/regs are updated on success, rolled back on fault
 * @addr: the faulting (unaligned) data address
 * @pc:   address of the faulting instruction
 *
 * On success the destination register is written and the epc advanced
 * (compute_return_epc handles branch delay slots by restoring origpc/$31
 * on the fault path). On failure the process is signalled: SIGBUS for
 * un-emulatable accesses, SIGSEGV for faults with no fixup, SIGILL for
 * unknown opcodes.
 *
 * NOTE(review): the web scrape that produced this text dropped brace-only
 * lines (function/switch braces) and blank lines, and fused the original
 * line numbers into each line; code below is kept byte-identical to it.
 */
424 static void emulate_load_store_insn(struct pt_regs *regs,
425 void __user *addr, unsigned int __user *pc)
427 union mips_instruction insn;
428 unsigned long value;
429 unsigned int res;
430 unsigned long origpc;
431 unsigned long orig31;
432 void __user *fault_addr = NULL;
434 origpc = (unsigned long)pc;
435 orig31 = regs->regs[31];
437 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
440 * This load never faults.
442 __get_user(insn.word, pc);
444 switch (insn.i_format.opcode) {
446 * These are instructions that a compiler doesn't generate. We
447 * can assume therefore that the code is MIPS-aware and
448 * really buggy. Emulating these instructions would break the
449 * semantics anyway.
451 case ll_op:
452 case lld_op:
453 case sc_op:
454 case scd_op:
457 * For these instructions the only way to create an address
458 * error is an attempted access to kernel/supervisor address
459 * space.
461 case ldl_op:
462 case ldr_op:
463 case lwl_op:
464 case lwr_op:
465 case sdl_op:
466 case sdr_op:
467 case swl_op:
468 case swr_op:
469 case lb_op:
470 case lbu_op:
471 case sb_op:
472 goto sigbus;
475 * The remaining opcodes are the ones that are really of
476 * interest.
478 case lh_op:
479 if (!access_ok(VERIFY_READ, addr, 2))
480 goto sigbus;
482 LoadHW(addr, value, res);
483 if (res)
484 goto fault;
485 compute_return_epc(regs);
486 regs->regs[insn.i_format.rt] = value;
487 break;
489 case lw_op:
490 if (!access_ok(VERIFY_READ, addr, 4))
491 goto sigbus;
493 LoadW(addr, value, res);
494 if (res)
495 goto fault;
496 compute_return_epc(regs);
497 regs->regs[insn.i_format.rt] = value;
498 break;
500 case lhu_op:
501 if (!access_ok(VERIFY_READ, addr, 2))
502 goto sigbus;
504 LoadHWU(addr, value, res);
505 if (res)
506 goto fault;
507 compute_return_epc(regs);
508 regs->regs[insn.i_format.rt] = value;
509 break;
511 case lwu_op:
512 #ifdef CONFIG_64BIT
514 * A 32-bit kernel might be running on a 64-bit processor. But
515 * if we're on a 32-bit processor and an i-cache incoherency
516 * or race makes us see a 64-bit instruction here the sdl/sdr
517 * would blow up, so for now we don't handle unaligned 64-bit
518 * instructions on 32-bit kernels.
520 if (!access_ok(VERIFY_READ, addr, 4))
521 goto sigbus;
523 LoadWU(addr, value, res);
524 if (res)
525 goto fault;
526 compute_return_epc(regs);
527 regs->regs[insn.i_format.rt] = value;
528 break;
529 #endif /* CONFIG_64BIT */
531 /* Cannot handle 64-bit instructions in 32-bit kernel */
532 goto sigill;
534 case ld_op:
535 #ifdef CONFIG_64BIT
537 * A 32-bit kernel might be running on a 64-bit processor. But
538 * if we're on a 32-bit processor and an i-cache incoherency
539 * or race makes us see a 64-bit instruction here the sdl/sdr
540 * would blow up, so for now we don't handle unaligned 64-bit
541 * instructions on 32-bit kernels.
543 if (!access_ok(VERIFY_READ, addr, 8))
544 goto sigbus;
546 LoadDW(addr, value, res);
547 if (res)
548 goto fault;
549 compute_return_epc(regs);
550 regs->regs[insn.i_format.rt] = value;
551 break;
552 #endif /* CONFIG_64BIT */
554 /* Cannot handle 64-bit instructions in 32-bit kernel */
555 goto sigill;
557 case sh_op:
558 if (!access_ok(VERIFY_WRITE, addr, 2))
559 goto sigbus;
561 compute_return_epc(regs);
562 value = regs->regs[insn.i_format.rt];
563 StoreHW(addr, value, res);
564 if (res)
565 goto fault;
566 break;
568 case sw_op:
569 if (!access_ok(VERIFY_WRITE, addr, 4))
570 goto sigbus;
572 compute_return_epc(regs);
573 value = regs->regs[insn.i_format.rt];
574 StoreW(addr, value, res);
575 if (res)
576 goto fault;
577 break;
579 case sd_op:
580 #ifdef CONFIG_64BIT
582 * A 32-bit kernel might be running on a 64-bit processor. But
583 * if we're on a 32-bit processor and an i-cache incoherency
584 * or race makes us see a 64-bit instruction here the sdl/sdr
585 * would blow up, so for now we don't handle unaligned 64-bit
586 * instructions on 32-bit kernels.
588 if (!access_ok(VERIFY_WRITE, addr, 8))
589 goto sigbus;
591 compute_return_epc(regs);
592 value = regs->regs[insn.i_format.rt];
593 StoreDW(addr, value, res);
594 if (res)
595 goto fault;
596 break;
597 #endif /* CONFIG_64BIT */
599 /* Cannot handle 64-bit instructions in 32-bit kernel */
600 goto sigill;
602 case lwc1_op:
603 case ldc1_op:
604 case swc1_op:
605 case sdc1_op:
606 die_if_kernel("Unaligned FP access in kernel code", regs);
607 BUG_ON(!used_math());
609 lose_fpu(1); /* Save FPU state for the emulator. */
610 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
611 &fault_addr);
612 own_fpu(1); /* Restore FPU state. */
614 /* Signal if something went wrong. */
615 process_fpemu_return(res, fault_addr);
617 if (res == 0)
618 break;
619 return;
622 * COP2 is available to implementor for application specific use.
623 * It's up to applications to register a notifier chain and do
624 * whatever they have to do, including possible sending of signals.
626 case lwc2_op:
627 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
628 break;
630 case ldc2_op:
631 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
632 break;
634 case swc2_op:
635 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
636 break;
638 case sdc2_op:
639 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
640 break;
642 default:
644 * Pheeee... We encountered a yet unknown instruction or
645 * cache coherence problem. Die sucker, die ...
647 goto sigill;
650 #ifdef CONFIG_DEBUG_FS
651 unaligned_instructions++;
652 #endif
654 return;
656 fault:
657 /* roll back jump/branch */
658 regs->cp0_epc = origpc;
659 regs->regs[31] = orig31;
660 /* Did we have an exception handler installed? */
661 if (fixup_exception(regs))
662 return;
664 die_if_kernel("Unhandled kernel unaligned access", regs);
665 force_sig(SIGSEGV, current);
667 return;
669 sigbus:
670 die_if_kernel("Unhandled kernel unaligned access", regs);
671 force_sig(SIGBUS, current);
673 return;
675 sigill:
676 die_if_kernel
677 ("Unhandled kernel unaligned access or invalid instruction", regs);
678 force_sig(SIGILL, current);
/* Both tables are indexed by the 3-bit register field of 16-bit
 * microMIPS/MIPS16e encodings (see the mm16_rb_format users below);
 * entries are the corresponding 32-bit GPR numbers. The store table
 * maps encoding 0 to $0 instead of $16. */
681 /* Recode table from 16-bit register notation to 32-bit GPR. */
682 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
684 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
685 const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
/*
 * Emulate an unaligned load/store for a microMIPS instruction.
 *
 * @regs: trap frame; cp0_epc/regs updated on success, rolled back on fault
 * @addr: the faulting (unaligned) data address
 *
 * Fetches one or two halfwords at epc (microMIPS instructions are 16 or
 * 32 bits), plus the following instruction so mm_isBranchInstr() can
 * resolve branch delay slots: if the faulting access is in a delay slot,
 * the *next* instruction is the one to emulate and contpc is the branch
 * target. On success cp0_epc is set to contpc; on fault origpc/$31 are
 * restored and the usual SIGSEGV/SIGBUS/SIGILL handling applies.
 *
 * NOTE(review): brace-only and blank lines were dropped by the scrape
 * that produced this text; code is kept byte-identical to it.
 */
687 static void emulate_load_store_microMIPS(struct pt_regs *regs,
688 void __user *addr)
690 unsigned long value;
691 unsigned int res;
692 int i;
693 unsigned int reg = 0, rvar;
694 unsigned long orig31;
695 u16 __user *pc16;
696 u16 halfword;
697 unsigned int word;
698 unsigned long origpc, contpc;
699 union mips_instruction insn;
700 struct mm_decoded_insn mminsn;
701 void __user *fault_addr = NULL;
703 origpc = regs->cp0_epc;
704 orig31 = regs->regs[31];
706 mminsn.micro_mips_mode = 1;
709 * This load never faults.
711 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
712 __get_user(halfword, pc16);
713 pc16++;
714 contpc = regs->cp0_epc + 2;
715 word = ((unsigned int)halfword << 16);
716 mminsn.pc_inc = 2;
/* First halfword indicates a 32-bit encoding: fetch the second half. */
718 if (!mm_insn_16bit(halfword)) {
719 __get_user(halfword, pc16);
720 pc16++;
721 contpc = regs->cp0_epc + 4;
722 mminsn.pc_inc = 4;
723 word |= halfword;
725 mminsn.insn = word;
/* Also fetch the following instruction, needed for delay-slot handling. */
727 if (get_user(halfword, pc16))
728 goto fault;
729 mminsn.next_pc_inc = 2;
730 word = ((unsigned int)halfword << 16);
732 if (!mm_insn_16bit(halfword)) {
733 pc16++;
734 if (get_user(halfword, pc16))
735 goto fault;
736 mminsn.next_pc_inc = 4;
737 word |= halfword;
739 mminsn.next_insn = word;
741 insn = (union mips_instruction)(mminsn.insn);
/* In a delay slot the faulting access is the *next* instruction. */
742 if (mm_isBranchInstr(regs, mminsn, &contpc))
743 insn = (union mips_instruction)(mminsn.next_insn);
745 /* Parse instruction to find what to do */
747 switch (insn.mm_i_format.opcode) {
749 case mm_pool32a_op:
750 switch (insn.mm_x_format.func) {
751 case mm_lwxs_op:
752 reg = insn.mm_x_format.rd;
753 goto loadW;
756 goto sigbus;
758 case mm_pool32b_op:
759 switch (insn.mm_m_format.func) {
/* lwp: load a pair of words into reg and reg+1. */
760 case mm_lwp_func:
761 reg = insn.mm_m_format.rd;
762 if (reg == 31)
763 goto sigbus;
765 if (!access_ok(VERIFY_READ, addr, 8))
766 goto sigbus;
768 LoadW(addr, value, res);
769 if (res)
770 goto fault;
771 regs->regs[reg] = value;
772 addr += 4;
773 LoadW(addr, value, res);
774 if (res)
775 goto fault;
776 regs->regs[reg + 1] = value;
777 goto success;
779 case mm_swp_func:
780 reg = insn.mm_m_format.rd;
781 if (reg == 31)
782 goto sigbus;
784 if (!access_ok(VERIFY_WRITE, addr, 8))
785 goto sigbus;
787 value = regs->regs[reg];
788 StoreW(addr, value, res);
789 if (res)
790 goto fault;
791 addr += 4;
792 value = regs->regs[reg + 1];
793 StoreW(addr, value, res);
794 if (res)
795 goto fault;
796 goto success;
798 case mm_ldp_func:
799 #ifdef CONFIG_64BIT
800 reg = insn.mm_m_format.rd;
801 if (reg == 31)
802 goto sigbus;
804 if (!access_ok(VERIFY_READ, addr, 16))
805 goto sigbus;
807 LoadDW(addr, value, res);
808 if (res)
809 goto fault;
810 regs->regs[reg] = value;
811 addr += 8;
812 LoadDW(addr, value, res);
813 if (res)
814 goto fault;
815 regs->regs[reg + 1] = value;
816 goto success;
817 #endif /* CONFIG_64BIT */
819 goto sigill;
821 case mm_sdp_func:
822 #ifdef CONFIG_64BIT
823 reg = insn.mm_m_format.rd;
824 if (reg == 31)
825 goto sigbus;
827 if (!access_ok(VERIFY_WRITE, addr, 16))
828 goto sigbus;
830 value = regs->regs[reg];
831 StoreDW(addr, value, res);
832 if (res)
833 goto fault;
834 addr += 8;
835 value = regs->regs[reg + 1];
836 StoreDW(addr, value, res);
837 if (res)
838 goto fault;
839 goto success;
840 #endif /* CONFIG_64BIT */
842 goto sigill;
/* lwm32: load multiple — $16..$23 (+$30 when field==9), plus $31 if bit 4. */
844 case mm_lwm32_func:
845 reg = insn.mm_m_format.rd;
846 rvar = reg & 0xf;
847 if ((rvar > 9) || !reg)
848 goto sigill;
849 if (reg & 0x10) {
850 if (!access_ok
851 (VERIFY_READ, addr, 4 * (rvar + 1)))
852 goto sigbus;
853 } else {
854 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
855 goto sigbus;
857 if (rvar == 9)
858 rvar = 8;
859 for (i = 16; rvar; rvar--, i++) {
860 LoadW(addr, value, res);
861 if (res)
862 goto fault;
863 addr += 4;
864 regs->regs[i] = value;
866 if ((reg & 0xf) == 9) {
867 LoadW(addr, value, res);
868 if (res)
869 goto fault;
870 addr += 4;
871 regs->regs[30] = value;
873 if (reg & 0x10) {
874 LoadW(addr, value, res);
875 if (res)
876 goto fault;
877 regs->regs[31] = value;
879 goto success;
881 case mm_swm32_func:
882 reg = insn.mm_m_format.rd;
883 rvar = reg & 0xf;
884 if ((rvar > 9) || !reg)
885 goto sigill;
886 if (reg & 0x10) {
887 if (!access_ok
888 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
889 goto sigbus;
890 } else {
891 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
892 goto sigbus;
894 if (rvar == 9)
895 rvar = 8;
896 for (i = 16; rvar; rvar--, i++) {
897 value = regs->regs[i];
898 StoreW(addr, value, res);
899 if (res)
900 goto fault;
901 addr += 4;
903 if ((reg & 0xf) == 9) {
904 value = regs->regs[30];
905 StoreW(addr, value, res);
906 if (res)
907 goto fault;
908 addr += 4;
910 if (reg & 0x10) {
911 value = regs->regs[31];
912 StoreW(addr, value, res);
913 if (res)
914 goto fault;
916 goto success;
918 case mm_ldm_func:
919 #ifdef CONFIG_64BIT
920 reg = insn.mm_m_format.rd;
921 rvar = reg & 0xf;
922 if ((rvar > 9) || !reg)
923 goto sigill;
924 if (reg & 0x10) {
925 if (!access_ok
926 (VERIFY_READ, addr, 8 * (rvar + 1)))
927 goto sigbus;
928 } else {
929 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
930 goto sigbus;
932 if (rvar == 9)
933 rvar = 8;
935 for (i = 16; rvar; rvar--, i++) {
936 LoadDW(addr, value, res);
937 if (res)
938 goto fault;
939 addr += 4; /* NOTE(review): LoadDW reads 8 bytes — this looks like it should be addr += 8 (as in the $30 step below and in the sdm path); verify against upstream fix */
940 regs->regs[i] = value;
942 if ((reg & 0xf) == 9) {
943 LoadDW(addr, value, res);
944 if (res)
945 goto fault;
946 addr += 8;
947 regs->regs[30] = value;
949 if (reg & 0x10) {
950 LoadDW(addr, value, res);
951 if (res)
952 goto fault;
953 regs->regs[31] = value;
955 goto success;
956 #endif /* CONFIG_64BIT */
958 goto sigill;
960 case mm_sdm_func:
961 #ifdef CONFIG_64BIT
962 reg = insn.mm_m_format.rd;
963 rvar = reg & 0xf;
964 if ((rvar > 9) || !reg)
965 goto sigill;
966 if (reg & 0x10) {
967 if (!access_ok
968 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
969 goto sigbus;
970 } else {
971 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
972 goto sigbus;
974 if (rvar == 9)
975 rvar = 8;
977 for (i = 16; rvar; rvar--, i++) {
978 value = regs->regs[i];
979 StoreDW(addr, value, res);
980 if (res)
981 goto fault;
982 addr += 8;
984 if ((reg & 0xf) == 9) {
985 value = regs->regs[30];
986 StoreDW(addr, value, res);
987 if (res)
988 goto fault;
989 addr += 8;
991 if (reg & 0x10) {
992 value = regs->regs[31];
993 StoreDW(addr, value, res);
994 if (res)
995 goto fault;
997 goto success;
998 #endif /* CONFIG_64BIT */
1000 goto sigill;
1002 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1005 goto sigbus;
1007 case mm_pool32c_op:
1008 switch (insn.mm_m_format.func) {
1009 case mm_lwu_func:
1010 reg = insn.mm_m_format.rd;
1011 goto loadWU;
1014 /* LL,SC,LLD,SCD are not serviced */
1015 goto sigbus;
1017 case mm_pool32f_op:
1018 switch (insn.mm_x_format.func) {
1019 case mm_lwxc1_func:
1020 case mm_swxc1_func:
1021 case mm_ldxc1_func:
1022 case mm_sdxc1_func:
1023 goto fpu_emul;
1026 goto sigbus;
1028 case mm_ldc132_op:
1029 case mm_sdc132_op:
1030 case mm_lwc132_op:
1031 case mm_swc132_op:
1032 fpu_emul:
1033 /* roll back jump/branch */
1034 regs->cp0_epc = origpc;
1035 regs->regs[31] = orig31;
1037 die_if_kernel("Unaligned FP access in kernel code", regs);
1038 BUG_ON(!used_math());
1039 BUG_ON(!is_fpu_owner());
1041 lose_fpu(1); /* save the FPU state for the emulator */
1042 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1043 &fault_addr);
1044 own_fpu(1); /* restore FPU state */
1046 /* If something went wrong, signal */
1047 process_fpemu_return(res, fault_addr);
1049 if (res == 0)
1050 goto success;
1051 return;
1053 case mm_lh32_op:
1054 reg = insn.mm_i_format.rt;
1055 goto loadHW;
1057 case mm_lhu32_op:
1058 reg = insn.mm_i_format.rt;
1059 goto loadHWU;
1061 case mm_lw32_op:
1062 reg = insn.mm_i_format.rt;
1063 goto loadW;
1065 case mm_sh32_op:
1066 reg = insn.mm_i_format.rt;
1067 goto storeHW;
1069 case mm_sw32_op:
1070 reg = insn.mm_i_format.rt;
1071 goto storeW;
1073 case mm_ld32_op:
1074 reg = insn.mm_i_format.rt;
1075 goto loadDW;
1077 case mm_sd32_op:
1078 reg = insn.mm_i_format.rt;
1079 goto storeDW;
1081 case mm_pool16c_op:
1082 switch (insn.mm16_m_format.func) {
/* 16-bit lwm: load $16..($16+rlist) then $31. */
1083 case mm_lwm16_op:
1084 reg = insn.mm16_m_format.rlist;
1085 rvar = reg + 1;
1086 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1087 goto sigbus;
1089 for (i = 16; rvar; rvar--, i++) {
1090 LoadW(addr, value, res);
1091 if (res)
1092 goto fault;
1093 addr += 4;
1094 regs->regs[i] = value;
1096 LoadW(addr, value, res);
1097 if (res)
1098 goto fault;
1099 regs->regs[31] = value;
1101 goto success;
1103 case mm_swm16_op:
1104 reg = insn.mm16_m_format.rlist;
1105 rvar = reg + 1;
1106 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1107 goto sigbus;
1109 for (i = 16; rvar; rvar--, i++) {
1110 value = regs->regs[i];
1111 StoreW(addr, value, res);
1112 if (res)
1113 goto fault;
1114 addr += 4;
1116 value = regs->regs[31];
1117 StoreW(addr, value, res);
1118 if (res)
1119 goto fault;
1121 goto success;
1125 goto sigbus;
1127 case mm_lhu16_op:
1128 reg = reg16to32[insn.mm16_rb_format.rt];
1129 goto loadHWU;
1131 case mm_lw16_op:
1132 reg = reg16to32[insn.mm16_rb_format.rt];
1133 goto loadW;
1135 case mm_sh16_op:
1136 reg = reg16to32st[insn.mm16_rb_format.rt];
1137 goto storeHW;
1139 case mm_sw16_op:
1140 reg = reg16to32st[insn.mm16_rb_format.rt];
1141 goto storeW;
1143 case mm_lwsp16_op:
1144 reg = insn.mm16_r5_format.rt;
1145 goto loadW;
1147 case mm_swsp16_op:
1148 reg = insn.mm16_r5_format.rt;
1149 goto storeW;
1151 case mm_lwgp16_op:
1152 reg = reg16to32[insn.mm16_r3_format.rt];
1153 goto loadW;
1155 default:
1156 goto sigill;
/* Shared tails: `reg` selects the GPR, `addr` the unaligned location. */
1159 loadHW:
1160 if (!access_ok(VERIFY_READ, addr, 2))
1161 goto sigbus;
1163 LoadHW(addr, value, res);
1164 if (res)
1165 goto fault;
1166 regs->regs[reg] = value;
1167 goto success;
1169 loadHWU:
1170 if (!access_ok(VERIFY_READ, addr, 2))
1171 goto sigbus;
1173 LoadHWU(addr, value, res);
1174 if (res)
1175 goto fault;
1176 regs->regs[reg] = value;
1177 goto success;
1179 loadW:
1180 if (!access_ok(VERIFY_READ, addr, 4))
1181 goto sigbus;
1183 LoadW(addr, value, res);
1184 if (res)
1185 goto fault;
1186 regs->regs[reg] = value;
1187 goto success;
1189 loadWU:
1190 #ifdef CONFIG_64BIT
1192 * A 32-bit kernel might be running on a 64-bit processor. But
1193 * if we're on a 32-bit processor and an i-cache incoherency
1194 * or race makes us see a 64-bit instruction here the sdl/sdr
1195 * would blow up, so for now we don't handle unaligned 64-bit
1196 * instructions on 32-bit kernels.
1198 if (!access_ok(VERIFY_READ, addr, 4))
1199 goto sigbus;
1201 LoadWU(addr, value, res);
1202 if (res)
1203 goto fault;
1204 regs->regs[reg] = value;
1205 goto success;
1206 #endif /* CONFIG_64BIT */
1208 /* Cannot handle 64-bit instructions in 32-bit kernel */
1209 goto sigill;
1211 loadDW:
1212 #ifdef CONFIG_64BIT
1214 * A 32-bit kernel might be running on a 64-bit processor. But
1215 * if we're on a 32-bit processor and an i-cache incoherency
1216 * or race makes us see a 64-bit instruction here the sdl/sdr
1217 * would blow up, so for now we don't handle unaligned 64-bit
1218 * instructions on 32-bit kernels.
1220 if (!access_ok(VERIFY_READ, addr, 8))
1221 goto sigbus;
1223 LoadDW(addr, value, res);
1224 if (res)
1225 goto fault;
1226 regs->regs[reg] = value;
1227 goto success;
1228 #endif /* CONFIG_64BIT */
1230 /* Cannot handle 64-bit instructions in 32-bit kernel */
1231 goto sigill;
1233 storeHW:
1234 if (!access_ok(VERIFY_WRITE, addr, 2))
1235 goto sigbus;
1237 value = regs->regs[reg];
1238 StoreHW(addr, value, res);
1239 if (res)
1240 goto fault;
1241 goto success;
1243 storeW:
1244 if (!access_ok(VERIFY_WRITE, addr, 4))
1245 goto sigbus;
1247 value = regs->regs[reg];
1248 StoreW(addr, value, res);
1249 if (res)
1250 goto fault;
1251 goto success;
1253 storeDW:
1254 #ifdef CONFIG_64BIT
1256 * A 32-bit kernel might be running on a 64-bit processor. But
1257 * if we're on a 32-bit processor and an i-cache incoherency
1258 * or race makes us see a 64-bit instruction here the sdl/sdr
1259 * would blow up, so for now we don't handle unaligned 64-bit
1260 * instructions on 32-bit kernels.
1262 if (!access_ok(VERIFY_WRITE, addr, 8))
1263 goto sigbus;
1265 value = regs->regs[reg];
1266 StoreDW(addr, value, res);
1267 if (res)
1268 goto fault;
1269 goto success;
1270 #endif /* CONFIG_64BIT */
1272 /* Cannot handle 64-bit instructions in 32-bit kernel */
1273 goto sigill;
1275 success:
1276 regs->cp0_epc = contpc; /* advance or branch */
1278 #ifdef CONFIG_DEBUG_FS
1279 unaligned_instructions++;
1280 #endif
1281 return;
1283 fault:
1284 /* roll back jump/branch */
1285 regs->cp0_epc = origpc;
1286 regs->regs[31] = orig31;
1287 /* Did we have an exception handler installed? */
1288 if (fixup_exception(regs))
1289 return;
1291 die_if_kernel("Unhandled kernel unaligned access", regs);
1292 force_sig(SIGSEGV, current);
1294 return;
1296 sigbus:
1297 die_if_kernel("Unhandled kernel unaligned access", regs);
1298 force_sig(SIGBUS, current);
1300 return;
1302 sigill:
1303 die_if_kernel
1304 ("Unhandled kernel unaligned access or invalid instruction", regs);
1305 force_sig(SIGILL, current);
1308 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1310 unsigned long value;
1311 unsigned int res;
1312 int reg;
1313 unsigned long orig31;
1314 u16 __user *pc16;
1315 unsigned long origpc;
1316 union mips16e_instruction mips16inst, oldinst;
1318 origpc = regs->cp0_epc;
1319 orig31 = regs->regs[31];
1320 pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1322 * This load never faults.
1324 __get_user(mips16inst.full, pc16);
1325 oldinst = mips16inst;
1327 /* skip EXTEND instruction */
1328 if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1329 pc16++;
1330 __get_user(mips16inst.full, pc16);
1331 } else if (delay_slot(regs)) {
1332 /* skip jump instructions */
1333 /* JAL/JALX are 32 bits but have OPCODE in first short int */
1334 if (mips16inst.ri.opcode == MIPS16e_jal_op)
1335 pc16++;
1336 pc16++;
1337 if (get_user(mips16inst.full, pc16))
1338 goto sigbus;
1341 switch (mips16inst.ri.opcode) {
1342 case MIPS16e_i64_op: /* I64 or RI64 instruction */
1343 switch (mips16inst.i64.func) { /* I64/RI64 func field check */
1344 case MIPS16e_ldpc_func:
1345 case MIPS16e_ldsp_func:
1346 reg = reg16to32[mips16inst.ri64.ry];
1347 goto loadDW;
1349 case MIPS16e_sdsp_func:
1350 reg = reg16to32[mips16inst.ri64.ry];
1351 goto writeDW;
1353 case MIPS16e_sdrasp_func:
1354 reg = 29; /* GPRSP */
1355 goto writeDW;
1358 goto sigbus;
1360 case MIPS16e_swsp_op:
1361 case MIPS16e_lwpc_op:
1362 case MIPS16e_lwsp_op:
1363 reg = reg16to32[mips16inst.ri.rx];
1364 break;
1366 case MIPS16e_i8_op:
1367 if (mips16inst.i8.func != MIPS16e_swrasp_func)
1368 goto sigbus;
1369 reg = 29; /* GPRSP */
1370 break;
1372 default:
1373 reg = reg16to32[mips16inst.rri.ry];
1374 break;
1377 switch (mips16inst.ri.opcode) {
1379 case MIPS16e_lb_op:
1380 case MIPS16e_lbu_op:
1381 case MIPS16e_sb_op:
1382 goto sigbus;
1384 case MIPS16e_lh_op:
1385 if (!access_ok(VERIFY_READ, addr, 2))
1386 goto sigbus;
1388 LoadHW(addr, value, res);
1389 if (res)
1390 goto fault;
1391 MIPS16e_compute_return_epc(regs, &oldinst);
1392 regs->regs[reg] = value;
1393 break;
1395 case MIPS16e_lhu_op:
1396 if (!access_ok(VERIFY_READ, addr, 2))
1397 goto sigbus;
1399 LoadHWU(addr, value, res);
1400 if (res)
1401 goto fault;
1402 MIPS16e_compute_return_epc(regs, &oldinst);
1403 regs->regs[reg] = value;
1404 break;
1406 case MIPS16e_lw_op:
1407 case MIPS16e_lwpc_op:
1408 case MIPS16e_lwsp_op:
1409 if (!access_ok(VERIFY_READ, addr, 4))
1410 goto sigbus;
1412 LoadW(addr, value, res);
1413 if (res)
1414 goto fault;
1415 MIPS16e_compute_return_epc(regs, &oldinst);
1416 regs->regs[reg] = value;
1417 break;
1419 case MIPS16e_lwu_op:
1420 #ifdef CONFIG_64BIT
1422 * A 32-bit kernel might be running on a 64-bit processor. But
1423 * if we're on a 32-bit processor and an i-cache incoherency
1424 * or race makes us see a 64-bit instruction here the sdl/sdr
1425 * would blow up, so for now we don't handle unaligned 64-bit
1426 * instructions on 32-bit kernels.
1428 if (!access_ok(VERIFY_READ, addr, 4))
1429 goto sigbus;
1431 LoadWU(addr, value, res);
1432 if (res)
1433 goto fault;
1434 MIPS16e_compute_return_epc(regs, &oldinst);
1435 regs->regs[reg] = value;
1436 break;
1437 #endif /* CONFIG_64BIT */
1439 /* Cannot handle 64-bit instructions in 32-bit kernel */
1440 goto sigill;
1442 case MIPS16e_ld_op:
1443 loadDW:
1444 #ifdef CONFIG_64BIT
1446 * A 32-bit kernel might be running on a 64-bit processor. But
1447 * if we're on a 32-bit processor and an i-cache incoherency
1448 * or race makes us see a 64-bit instruction here the sdl/sdr
1449 * would blow up, so for now we don't handle unaligned 64-bit
1450 * instructions on 32-bit kernels.
1452 if (!access_ok(VERIFY_READ, addr, 8))
1453 goto sigbus;
1455 LoadDW(addr, value, res);
1456 if (res)
1457 goto fault;
1458 MIPS16e_compute_return_epc(regs, &oldinst);
1459 regs->regs[reg] = value;
1460 break;
1461 #endif /* CONFIG_64BIT */
1463 /* Cannot handle 64-bit instructions in 32-bit kernel */
1464 goto sigill;
1466 case MIPS16e_sh_op:
1467 if (!access_ok(VERIFY_WRITE, addr, 2))
1468 goto sigbus;
1470 MIPS16e_compute_return_epc(regs, &oldinst);
1471 value = regs->regs[reg];
1472 StoreHW(addr, value, res);
1473 if (res)
1474 goto fault;
1475 break;
1477 case MIPS16e_sw_op:
1478 case MIPS16e_swsp_op:
1479 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
1480 if (!access_ok(VERIFY_WRITE, addr, 4))
1481 goto sigbus;
1483 MIPS16e_compute_return_epc(regs, &oldinst);
1484 value = regs->regs[reg];
1485 StoreW(addr, value, res);
1486 if (res)
1487 goto fault;
1488 break;
1490 case MIPS16e_sd_op:
1491 writeDW:
1492 #ifdef CONFIG_64BIT
1494 * A 32-bit kernel might be running on a 64-bit processor. But
1495 * if we're on a 32-bit processor and an i-cache incoherency
1496 * or race makes us see a 64-bit instruction here the sdl/sdr
1497 * would blow up, so for now we don't handle unaligned 64-bit
1498 * instructions on 32-bit kernels.
1500 if (!access_ok(VERIFY_WRITE, addr, 8))
1501 goto sigbus;
1503 MIPS16e_compute_return_epc(regs, &oldinst);
1504 value = regs->regs[reg];
1505 StoreDW(addr, value, res);
1506 if (res)
1507 goto fault;
1508 break;
1509 #endif /* CONFIG_64BIT */
1511 /* Cannot handle 64-bit instructions in 32-bit kernel */
1512 goto sigill;
1514 default:
1516 * Pheeee... We encountered an yet unknown instruction or
1517 * cache coherence problem. Die sucker, die ...
1519 goto sigill;
1522 #ifdef CONFIG_DEBUG_FS
1523 unaligned_instructions++;
1524 #endif
1526 return;
1528 fault:
1529 /* roll back jump/branch */
1530 regs->cp0_epc = origpc;
1531 regs->regs[31] = orig31;
1532 /* Did we have an exception handler installed? */
1533 if (fixup_exception(regs))
1534 return;
1536 die_if_kernel("Unhandled kernel unaligned access", regs);
1537 force_sig(SIGSEGV, current);
1539 return;
1541 sigbus:
1542 die_if_kernel("Unhandled kernel unaligned access", regs);
1543 force_sig(SIGBUS, current);
1545 return;
1547 sigill:
1548 die_if_kernel
1549 ("Unhandled kernel unaligned access or invalid instruction", regs);
1550 force_sig(SIGILL, current);
1553 asmlinkage void do_ade(struct pt_regs *regs)
1555 enum ctx_state prev_state;
1556 unsigned int __user *pc;
1557 mm_segment_t seg;
1559 prev_state = exception_enter();
1560 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1561 1, regs, regs->cp0_badvaddr);
1563 * Did we catch a fault trying to load an instruction?
1565 if (regs->cp0_badvaddr == regs->cp0_epc)
1566 goto sigbus;
1568 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1569 goto sigbus;
1570 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1571 goto sigbus;
1574 * Do branch emulation only if we didn't forward the exception.
1575 * This is all so but ugly ...
1579 * Are we running in microMIPS mode?
1581 if (get_isa16_mode(regs->cp0_epc)) {
1583 * Did we catch a fault trying to load an instruction in
1584 * 16-bit mode?
1586 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1587 goto sigbus;
1588 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1589 show_registers(regs);
1591 if (cpu_has_mmips) {
1592 seg = get_fs();
1593 if (!user_mode(regs))
1594 set_fs(KERNEL_DS);
1595 emulate_load_store_microMIPS(regs,
1596 (void __user *)regs->cp0_badvaddr);
1597 set_fs(seg);
1599 return;
1602 if (cpu_has_mips16) {
1603 seg = get_fs();
1604 if (!user_mode(regs))
1605 set_fs(KERNEL_DS);
1606 emulate_load_store_MIPS16e(regs,
1607 (void __user *)regs->cp0_badvaddr);
1608 set_fs(seg);
1610 return;
1613 goto sigbus;
1616 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1617 show_registers(regs);
1618 pc = (unsigned int __user *)exception_epc(regs);
1620 seg = get_fs();
1621 if (!user_mode(regs))
1622 set_fs(KERNEL_DS);
1623 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1624 set_fs(seg);
1626 return;
1628 sigbus:
1629 die_if_kernel("Kernel unaligned instruction access", regs);
1630 force_sig(SIGBUS, current);
1633 * XXX On return from the signal handler we should advance the epc
1635 exception_exit(prev_state);
#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;

/*
 * Expose the unaligned-access counters under the MIPS debugfs directory:
 * "unaligned_instructions" (read-only count of emulated accesses) and
 * "unaligned_action" (read/write emulation policy knob).
 *
 * Returns 0 on success, -ENODEV when the parent directory was never
 * created, or -ENOMEM when a debugfs entry could not be allocated.
 */
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (!d)
		return -ENOMEM;
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (!d)
		return -ENOMEM;
	return 0;
}
__initcall(debugfs_unaligned);
#endif