Linux 4.2.2
arch/mips/kernel/mips-r2-to-r6-emul.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
6 * Copyright (c) 2014 Imagination Technologies Ltd.
7 * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
8 * Author: Markos Chandras <markos.chandras@imgtec.com>
10 * MIPS R2 user space instruction emulator for MIPS R6
13 #include <linux/bug.h>
14 #include <linux/compiler.h>
15 #include <linux/debugfs.h>
16 #include <linux/init.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/ptrace.h>
20 #include <linux/seq_file.h>
22 #include <asm/asm.h>
23 #include <asm/branch.h>
24 #include <asm/break.h>
25 #include <asm/fpu.h>
26 #include <asm/fpu_emulator.h>
27 #include <asm/inst.h>
28 #include <asm/mips-r2-to-r6-emul.h>
29 #include <asm/local.h>
30 #include <asm/ptrace.h>
31 #include <asm/uaccess.h>
33 #ifdef CONFIG_64BIT
34 #define ADDIU "daddiu "
35 #define INS "dins "
36 #define EXT "dext "
37 #else
38 #define ADDIU "addiu "
39 #define INS "ins "
40 #define EXT "ext "
41 #endif /* CONFIG_64BIT */
43 #define SB "sb "
44 #define LB "lb "
45 #define LL "ll "
46 #define SC "sc "
48 DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
49 DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
50 DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
52 extern const unsigned int fpucondbit[8];
54 #define MIPS_R2_EMUL_TOTAL_PASS 10
56 int mipsr2_emulation = 0;
58 static int __init mipsr2emu_enable(char *s)
60 mipsr2_emulation = 1;
62 	pr_info("MIPS R2-to-R6 Emulator Enabled!\n");
64 return 1;
66 __setup("mipsr2emu", mipsr2emu_enable);
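/*
 * Usage: the __setup() hook above means the emulator is enabled by passing
 * the bare "mipsr2emu" token on the kernel command line, for example
 *
 *	bootargs = "console=ttyS0,115200 mipsr2emu"
 *
 * (the console setting is just a placeholder; only "mipsr2emu" matters here).
 */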
68 /**
69  * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in the delay
70  * slot directly, for performance, instead of the traditional (and rather
71  * slow) approach of using a stack trampoline.
72 * @regs: Process register set
73 * @ir: Instruction
75 static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
77 switch (MIPSInst_OPCODE(ir)) {
78 case addiu_op:
79 if (MIPSInst_RT(ir))
80 regs->regs[MIPSInst_RT(ir)] =
81 (s32)regs->regs[MIPSInst_RS(ir)] +
82 (s32)MIPSInst_SIMM(ir);
83 return 0;
84 case daddiu_op:
85 if (config_enabled(CONFIG_32BIT))
86 break;
88 if (MIPSInst_RT(ir))
89 regs->regs[MIPSInst_RT(ir)] =
90 (s64)regs->regs[MIPSInst_RS(ir)] +
91 (s64)MIPSInst_SIMM(ir);
92 return 0;
93 case lwc1_op:
94 case swc1_op:
95 case cop1_op:
96 case cop1x_op:
97 /* FPU instructions in delay slot */
98 return -SIGFPE;
99 case spec_op:
100 switch (MIPSInst_FUNC(ir)) {
101 case or_op:
102 if (MIPSInst_RD(ir))
103 regs->regs[MIPSInst_RD(ir)] =
104 regs->regs[MIPSInst_RS(ir)] |
105 regs->regs[MIPSInst_RT(ir)];
106 return 0;
107 case sll_op:
108 if (MIPSInst_RS(ir))
109 break;
111 if (MIPSInst_RD(ir))
112 regs->regs[MIPSInst_RD(ir)] =
113 (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
114 MIPSInst_FD(ir));
115 return 0;
116 case srl_op:
117 if (MIPSInst_RS(ir))
118 break;
120 if (MIPSInst_RD(ir))
121 regs->regs[MIPSInst_RD(ir)] =
122 (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
123 MIPSInst_FD(ir));
124 return 0;
125 case addu_op:
126 if (MIPSInst_FD(ir))
127 break;
129 if (MIPSInst_RD(ir))
130 regs->regs[MIPSInst_RD(ir)] =
131 (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
132 (u32)regs->regs[MIPSInst_RT(ir)]);
133 return 0;
134 case subu_op:
135 if (MIPSInst_FD(ir))
136 break;
138 if (MIPSInst_RD(ir))
139 regs->regs[MIPSInst_RD(ir)] =
140 (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
141 (u32)regs->regs[MIPSInst_RT(ir)]);
142 return 0;
143 case dsll_op:
144 if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
145 break;
147 if (MIPSInst_RD(ir))
148 regs->regs[MIPSInst_RD(ir)] =
149 (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
150 MIPSInst_FD(ir));
151 return 0;
152 case dsrl_op:
153 if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
154 break;
156 if (MIPSInst_RD(ir))
157 regs->regs[MIPSInst_RD(ir)] =
158 (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
159 MIPSInst_FD(ir));
160 return 0;
161 case daddu_op:
162 if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
163 break;
165 if (MIPSInst_RD(ir))
166 regs->regs[MIPSInst_RD(ir)] =
167 (u64)regs->regs[MIPSInst_RS(ir)] +
168 (u64)regs->regs[MIPSInst_RT(ir)];
169 return 0;
170 case dsubu_op:
171 if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
172 break;
174 if (MIPSInst_RD(ir))
175 regs->regs[MIPSInst_RD(ir)] =
176 (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
177 (u64)regs->regs[MIPSInst_RT(ir)]);
178 return 0;
180 break;
181 default:
182 pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
183 ir, MIPSInst_OPCODE(ir));
186 return SIGILL;
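/*
 * For reference, a minimal sketch of what the MIPSInst_*() accessors used
 * above extract, assuming the standard MIPS32 R-/I-type encoding (opcode in
 * bits 31..26, rs 25..21, rt 20..16, rd 15..11, shift amount 10..6, function
 * 5..0, sign-extended immediate in the low 16 bits). The real accessors come
 * from the headers included at the top of this file; the helper below is
 * illustrative only and is not called anywhere.
 */
static inline void mips_inst_fields_sketch(u32 ir)
{
	u32 opcode = (ir >> 26) & 0x3f;		/* MIPSInst_OPCODE() */
	u32 rs	   = (ir >> 21) & 0x1f;		/* MIPSInst_RS() */
	u32 rt	   = (ir >> 16) & 0x1f;		/* MIPSInst_RT() */
	u32 rd	   = (ir >> 11) & 0x1f;		/* MIPSInst_RD() */
	u32 sa	   = (ir >> 6) & 0x1f;		/* shift amount, read via MIPSInst_FD() above */
	s32 simm   = (s16)(ir & 0xffff);	/* MIPSInst_SIMM() */

	(void)opcode; (void)rs; (void)rt; (void)rd; (void)sa; (void)simm;
}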
190 * movf_func - Emulate a MOVF instruction
191 * @regs: Process register set
192 * @ir: Instruction
194 * Returns 0 since it always succeeds.
196 static int movf_func(struct pt_regs *regs, u32 ir)
198 u32 csr;
199 u32 cond;
201 csr = current->thread.fpu.fcr31;
202 cond = fpucondbit[MIPSInst_RT(ir) >> 2];
204 if (((csr & cond) == 0) && MIPSInst_RD(ir))
205 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
207 MIPS_R2_STATS(movs);
209 return 0;
213 * movt_func - Emulate a MOVT instruction
214 * @regs: Process register set
215 * @ir: Instruction
217 * Returns 0 since it always succeeds.
219 static int movt_func(struct pt_regs *regs, u32 ir)
221 u32 csr;
222 u32 cond;
224 csr = current->thread.fpu.fcr31;
225 cond = fpucondbit[MIPSInst_RT(ir) >> 2];
227 if (((csr & cond) != 0) && MIPSInst_RD(ir))
228 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
230 MIPS_R2_STATS(movs);
232 return 0;
236 * jr_func - Emulate a JR instruction.
237  * @regs: Process register set
238 * @ir: Instruction
240  * Returns SIGILL if the JR was in a delay slot, SIGEMT if we
241  * can't compute the EPC, SIGSEGV if we can't access the
242  * userland instruction, or 0 on success.
244 static int jr_func(struct pt_regs *regs, u32 ir)
246 int err;
247 unsigned long cepc, epc, nepc;
248 u32 nir;
250 if (delay_slot(regs))
251 return SIGILL;
253 /* EPC after the RI/JR instruction */
254 nepc = regs->cp0_epc;
255 /* Roll back to the reserved R2 JR instruction */
256 regs->cp0_epc -= 4;
257 epc = regs->cp0_epc;
258 err = __compute_return_epc(regs);
260 if (err < 0)
261 return SIGEMT;
264 /* Computed EPC */
265 cepc = regs->cp0_epc;
267 /* Get DS instruction */
268 err = __get_user(nir, (u32 __user *)nepc);
269 if (err)
270 return SIGSEGV;
272 MIPS_R2BR_STATS(jrs);
274 	/* If nir == 0 (NOP), then there is nothing else to do */
275 if (nir) {
277 		 * A negative err means an FPU instruction in the BD slot,
278 		 * zero err means 'BD-slot emulation done'.
279 		 * For anything else we fall back to trampoline emulation.
281 err = mipsr6_emul(regs, nir);
282 if (err > 0) {
283 regs->cp0_epc = nepc;
284 err = mips_dsemul(regs, nir, cepc);
285 if (err == SIGILL)
286 err = SIGEMT;
287 MIPS_R2_STATS(dsemul);
291 return err;
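/*
 * Worked example with made-up addresses: if the trapping JR sits at
 * 0x00400100, cp0_epc has already been advanced past it by the caller, so
 * nepc = 0x00400104 (the delay-slot instruction). The code above rolls
 * cp0_epc back to epc = 0x00400100 so that __compute_return_epc() can
 * evaluate the jump, after which cepc holds the jump target (the value of
 * register rs). The instruction fetched from nepc is then either handled by
 * the fast path in mipsr6_emul() or bounced to mips_dsemul() with cepc as
 * the continuation address.
 */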
295 * movz_func - Emulate a MOVZ instruction
296 * @regs: Process register set
297 * @ir: Instruction
299 * Returns 0 since it always succeeds.
301 static int movz_func(struct pt_regs *regs, u32 ir)
303 if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
304 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
305 MIPS_R2_STATS(movs);
307 return 0;
311  * movn_func - Emulate a MOVN instruction
312 * @regs: Process register set
313 * @ir: Instruction
315 * Returns 0 since it always succeeds.
317 static int movn_func(struct pt_regs *regs, u32 ir)
319 if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
320 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
321 MIPS_R2_STATS(movs);
323 return 0;
327 * mfhi_func - Emulate a MFHI instruction
328 * @regs: Process register set
329 * @ir: Instruction
331 * Returns 0 since it always succeeds.
333 static int mfhi_func(struct pt_regs *regs, u32 ir)
335 if (MIPSInst_RD(ir))
336 regs->regs[MIPSInst_RD(ir)] = regs->hi;
338 MIPS_R2_STATS(hilo);
340 return 0;
344 * mthi_func - Emulate a MTHI instruction
345 * @regs: Process register set
346 * @ir: Instruction
348 * Returns 0 since it always succeeds.
350 static int mthi_func(struct pt_regs *regs, u32 ir)
352 regs->hi = regs->regs[MIPSInst_RS(ir)];
354 MIPS_R2_STATS(hilo);
356 return 0;
360 * mflo_func - Emulate a MFLO instruction
361 * @regs: Process register set
362 * @ir: Instruction
364 * Returns 0 since it always succeeds.
366 static int mflo_func(struct pt_regs *regs, u32 ir)
368 if (MIPSInst_RD(ir))
369 regs->regs[MIPSInst_RD(ir)] = regs->lo;
371 MIPS_R2_STATS(hilo);
373 return 0;
377 * mtlo_func - Emulate a MTLO instruction
378 * @regs: Process register set
379 * @ir: Instruction
381 * Returns 0 since it always succeeds.
383 static int mtlo_func(struct pt_regs *regs, u32 ir)
385 regs->lo = regs->regs[MIPSInst_RS(ir)];
387 MIPS_R2_STATS(hilo);
389 return 0;
393 * mult_func - Emulate a MULT instruction
394 * @regs: Process register set
395 * @ir: Instruction
397 * Returns 0 since it always succeeds.
399 static int mult_func(struct pt_regs *regs, u32 ir)
401 s64 res;
402 s32 rt, rs;
404 rt = regs->regs[MIPSInst_RT(ir)];
405 rs = regs->regs[MIPSInst_RS(ir)];
406 res = (s64)rt * (s64)rs;
408 rs = res;
409 regs->lo = (s64)rs;
410 rt = res >> 32;
411 res = (s64)rt;
412 regs->hi = res;
414 MIPS_R2_STATS(muls);
416 return 0;
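/*
 * A compact sketch of the HI/LO split done above, with a concrete value.
 * The helper name is made up for illustration and it is not called anywhere.
 */
static inline void mult_split_sketch(void)
{
	s32 rs = -2, rt = 3;
	s64 prod = (s64)rs * (s64)rt;		/* full 64-bit signed product: -6 */
	s64 lo = (s64)(s32)prod;		/* low 32 bits, sign-extended: -6 */
	s64 hi = (s64)(s32)(prod >> 32);	/* high 32 bits, sign-extended: -1 */

	/* mult_func() stores lo into regs->lo and hi into regs->hi. */
	(void)lo; (void)hi;
}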
420 * multu_func - Emulate a MULTU instruction
421 * @regs: Process register set
422 * @ir: Instruction
424 * Returns 0 since it always succeeds.
426 static int multu_func(struct pt_regs *regs, u32 ir)
428 u64 res;
429 u32 rt, rs;
431 rt = regs->regs[MIPSInst_RT(ir)];
432 rs = regs->regs[MIPSInst_RS(ir)];
433 res = (u64)rt * (u64)rs;
434 rt = res;
435 regs->lo = (s64)rt;
436 regs->hi = (s64)(res >> 32);
438 MIPS_R2_STATS(muls);
440 return 0;
444 * div_func - Emulate a DIV instruction
445 * @regs: Process register set
446 * @ir: Instruction
448 * Returns 0 since it always succeeds.
450 static int div_func(struct pt_regs *regs, u32 ir)
452 s32 rt, rs;
454 rt = regs->regs[MIPSInst_RT(ir)];
455 rs = regs->regs[MIPSInst_RS(ir)];
457 regs->lo = (s64)(rs / rt);
458 regs->hi = (s64)(rs % rt);
460 MIPS_R2_STATS(divs);
462 return 0;
466 * divu_func - Emulate a DIVU instruction
467 * @regs: Process register set
468 * @ir: Instruction
470 * Returns 0 since it always succeeds.
472 static int divu_func(struct pt_regs *regs, u32 ir)
474 u32 rt, rs;
476 rt = regs->regs[MIPSInst_RT(ir)];
477 rs = regs->regs[MIPSInst_RS(ir)];
479 regs->lo = (s64)(rs / rt);
480 regs->hi = (s64)(rs % rt);
482 MIPS_R2_STATS(divs);
484 return 0;
488 * dmult_func - Emulate a DMULT instruction
489 * @regs: Process register set
490 * @ir: Instruction
492 * Returns 0 on success or SIGILL for 32-bit kernels.
494 static int dmult_func(struct pt_regs *regs, u32 ir)
496 s64 res;
497 s64 rt, rs;
499 if (config_enabled(CONFIG_32BIT))
500 return SIGILL;
502 rt = regs->regs[MIPSInst_RT(ir)];
503 rs = regs->regs[MIPSInst_RS(ir)];
504 res = rt * rs;
506 regs->lo = res;
507 __asm__ __volatile__(
508 "dmuh %0, %1, %2\t\n"
509 : "=r"(res)
510 : "r"(rt), "r"(rs));
512 regs->hi = res;
514 MIPS_R2_STATS(muls);
516 return 0;
520 * dmultu_func - Emulate a DMULTU instruction
521 * @regs: Process register set
522 * @ir: Instruction
524 * Returns 0 on success or SIGILL for 32-bit kernels.
526 static int dmultu_func(struct pt_regs *regs, u32 ir)
528 u64 res;
529 u64 rt, rs;
531 if (config_enabled(CONFIG_32BIT))
532 return SIGILL;
534 rt = regs->regs[MIPSInst_RT(ir)];
535 rs = regs->regs[MIPSInst_RS(ir)];
536 res = rt * rs;
538 regs->lo = res;
539 __asm__ __volatile__(
540 "dmuhu %0, %1, %2\t\n"
541 : "=r"(res)
542 : "r"(rt), "r"(rs));
544 regs->hi = res;
546 MIPS_R2_STATS(muls);
548 return 0;
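/*
 * Rough equivalent of what the "dmuh"/"dmuhu" R6 instructions above compute,
 * written with the compiler's 128-bit type. This assumes unsigned __int128
 * is available (it is with GCC on 64-bit targets) and is illustrative only.
 */
#ifdef CONFIG_64BIT
static inline void dmultu_sketch(u64 rt, u64 rs, u64 *lo, u64 *hi)
{
	unsigned __int128 prod = (unsigned __int128)rt * rs;

	*lo = (u64)prod;		/* what regs->lo receives above */
	*hi = (u64)(prod >> 64);	/* what "dmuhu" places in regs->hi */
}
#endif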
552 * ddiv_func - Emulate a DDIV instruction
553 * @regs: Process register set
554 * @ir: Instruction
556 * Returns 0 on success or SIGILL for 32-bit kernels.
558 static int ddiv_func(struct pt_regs *regs, u32 ir)
560 s64 rt, rs;
562 if (config_enabled(CONFIG_32BIT))
563 return SIGILL;
565 rt = regs->regs[MIPSInst_RT(ir)];
566 rs = regs->regs[MIPSInst_RS(ir)];
568 regs->lo = rs / rt;
569 regs->hi = rs % rt;
571 MIPS_R2_STATS(divs);
573 return 0;
577 * ddivu_func - Emulate a DDIVU instruction
578 * @regs: Process register set
579 * @ir: Instruction
581 * Returns 0 on success or SIGILL for 32-bit kernels.
583 static int ddivu_func(struct pt_regs *regs, u32 ir)
585 u64 rt, rs;
587 if (config_enabled(CONFIG_32BIT))
588 return SIGILL;
590 rt = regs->regs[MIPSInst_RT(ir)];
591 rs = regs->regs[MIPSInst_RS(ir)];
593 regs->lo = rs / rt;
594 regs->hi = rs % rt;
596 MIPS_R2_STATS(divs);
598 return 0;
601 /* R6 removed instructions for the SPECIAL opcode */
602 static struct r2_decoder_table spec_op_table[] = {
603 { 0xfc1ff83f, 0x00000008, jr_func },
604 { 0xfc00ffff, 0x00000018, mult_func },
605 { 0xfc00ffff, 0x00000019, multu_func },
606 { 0xfc00ffff, 0x0000001c, dmult_func },
607 { 0xfc00ffff, 0x0000001d, dmultu_func },
608 { 0xffff07ff, 0x00000010, mfhi_func },
609 { 0xfc1fffff, 0x00000011, mthi_func },
610 { 0xffff07ff, 0x00000012, mflo_func },
611 { 0xfc1fffff, 0x00000013, mtlo_func },
612 { 0xfc0307ff, 0x00000001, movf_func },
613 { 0xfc0307ff, 0x00010001, movt_func },
614 { 0xfc0007ff, 0x0000000a, movz_func },
615 { 0xfc0007ff, 0x0000000b, movn_func },
616 { 0xfc00ffff, 0x0000001a, div_func },
617 { 0xfc00ffff, 0x0000001b, divu_func },
618 { 0xfc00ffff, 0x0000001e, ddiv_func },
619 { 0xfc00ffff, 0x0000001f, ddivu_func },
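/*
 * How the {mask, code} pairs above match encodings, using MULT as an
 * example: an R2 MULT is SPECIAL (opcode 0) with function field 0x18 and
 * the rd/sa fields required to be zero, so the mask 0xfc00ffff keeps the
 * opcode plus the low 16 bits, the code 0x00000018 is the only accepted
 * value, and rs/rt are left unmasked because they are free operands. The
 * looser mask 0xfc0007ff used for MOVZ/MOVN additionally leaves rd unmasked
 * because those instructions write a destination register.
 */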
624 * madd_func - Emulate a MADD instruction
625 * @regs: Process register set
626 * @ir: Instruction
628 * Returns 0 since it always succeeds.
630 static int madd_func(struct pt_regs *regs, u32 ir)
632 s64 res;
633 s32 rt, rs;
635 rt = regs->regs[MIPSInst_RT(ir)];
636 rs = regs->regs[MIPSInst_RS(ir)];
637 res = (s64)rt * (s64)rs;
638 rt = regs->hi;
639 rs = regs->lo;
640 res += ((((s64)rt) << 32) | (u32)rs);
642 rt = res;
643 regs->lo = (s64)rt;
644 rs = res >> 32;
645 regs->hi = (s64)rs;
647 MIPS_R2_STATS(dsps);
649 return 0;
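/*
 * The accumulate above, restated: HI:LO together form one 64-bit
 * accumulator. A minimal sketch (the helper name is illustrative only):
 */
static inline s64 hilo_to_s64_sketch(u32 hi, u32 lo)
{
	return ((s64)(s32)hi << 32) | lo;
}
/*
 * madd adds the new 64-bit product to this value (msub subtracts it), and
 * the result is split back into HI and LO exactly as in mult_func() above.
 */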
653 * maddu_func - Emulate a MADDU instruction
654 * @regs: Process register set
655 * @ir: Instruction
657 * Returns 0 since it always succeeds.
659 static int maddu_func(struct pt_regs *regs, u32 ir)
661 u64 res;
662 u32 rt, rs;
664 rt = regs->regs[MIPSInst_RT(ir)];
665 rs = regs->regs[MIPSInst_RS(ir)];
666 res = (u64)rt * (u64)rs;
667 rt = regs->hi;
668 rs = regs->lo;
669 res += ((((s64)rt) << 32) | (u32)rs);
671 rt = res;
672 regs->lo = (s64)rt;
673 rs = res >> 32;
674 regs->hi = (s64)rs;
676 MIPS_R2_STATS(dsps);
678 return 0;
682 * msub_func - Emulate a MSUB instruction
683 * @regs: Process register set
684 * @ir: Instruction
686 * Returns 0 since it always succeeds.
688 static int msub_func(struct pt_regs *regs, u32 ir)
690 s64 res;
691 s32 rt, rs;
693 rt = regs->regs[MIPSInst_RT(ir)];
694 rs = regs->regs[MIPSInst_RS(ir)];
695 res = (s64)rt * (s64)rs;
696 rt = regs->hi;
697 rs = regs->lo;
698 res = ((((s64)rt) << 32) | (u32)rs) - res;
700 rt = res;
701 regs->lo = (s64)rt;
702 rs = res >> 32;
703 regs->hi = (s64)rs;
705 MIPS_R2_STATS(dsps);
707 return 0;
711 * msubu_func - Emulate a MSUBU instruction
712 * @regs: Process register set
713 * @ir: Instruction
715 * Returns 0 since it always succeeds.
717 static int msubu_func(struct pt_regs *regs, u32 ir)
719 u64 res;
720 u32 rt, rs;
722 rt = regs->regs[MIPSInst_RT(ir)];
723 rs = regs->regs[MIPSInst_RS(ir)];
724 res = (u64)rt * (u64)rs;
725 rt = regs->hi;
726 rs = regs->lo;
727 res = ((((s64)rt) << 32) | (u32)rs) - res;
729 rt = res;
730 regs->lo = (s64)rt;
731 rs = res >> 32;
732 regs->hi = (s64)rs;
734 MIPS_R2_STATS(dsps);
736 return 0;
740 * mul_func - Emulate a MUL instruction
741 * @regs: Process register set
742 * @ir: Instruction
744 * Returns 0 since it always succeeds.
746 static int mul_func(struct pt_regs *regs, u32 ir)
748 s64 res;
749 s32 rt, rs;
751 if (!MIPSInst_RD(ir))
752 return 0;
753 rt = regs->regs[MIPSInst_RT(ir)];
754 rs = regs->regs[MIPSInst_RS(ir)];
755 res = (s64)rt * (s64)rs;
757 rs = res;
758 regs->regs[MIPSInst_RD(ir)] = (s64)rs;
760 MIPS_R2_STATS(muls);
762 return 0;
766 * clz_func - Emulate a CLZ instruction
767 * @regs: Process register set
768 * @ir: Instruction
770 * Returns 0 since it always succeeds.
772 static int clz_func(struct pt_regs *regs, u32 ir)
774 u32 res;
775 u32 rs;
777 if (!MIPSInst_RD(ir))
778 return 0;
780 rs = regs->regs[MIPSInst_RS(ir)];
781 __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
782 regs->regs[MIPSInst_RD(ir)] = res;
784 MIPS_R2_STATS(bops);
786 return 0;
790 * clo_func - Emulate a CLO instruction
791 * @regs: Process register set
792 * @ir: Instruction
794 * Returns 0 since it always succeeds.
797 static int clo_func(struct pt_regs *regs, u32 ir)
799 u32 res;
800 u32 rs;
802 if (!MIPSInst_RD(ir))
803 return 0;
805 rs = regs->regs[MIPSInst_RS(ir)];
806 __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
807 regs->regs[MIPSInst_RD(ir)] = res;
809 MIPS_R2_STATS(bops);
811 return 0;
815 * dclz_func - Emulate a DCLZ instruction
816 * @regs: Process register set
817 * @ir: Instruction
819 * Returns 0 since it always succeeds.
821 static int dclz_func(struct pt_regs *regs, u32 ir)
823 u64 res;
824 u64 rs;
826 if (config_enabled(CONFIG_32BIT))
827 return SIGILL;
829 if (!MIPSInst_RD(ir))
830 return 0;
832 rs = regs->regs[MIPSInst_RS(ir)];
833 __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
834 regs->regs[MIPSInst_RD(ir)] = res;
836 MIPS_R2_STATS(bops);
838 return 0;
842 * dclo_func - Emulate a DCLO instruction
843 * @regs: Process register set
844 * @ir: Instruction
846 * Returns 0 since it always succeeds.
848 static int dclo_func(struct pt_regs *regs, u32 ir)
850 u64 res;
851 u64 rs;
853 if (config_enabled(CONFIG_32BIT))
854 return SIGILL;
856 if (!MIPSInst_RD(ir))
857 return 0;
859 rs = regs->regs[MIPSInst_RS(ir)];
860 __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
861 regs->regs[MIPSInst_RD(ir)] = res;
863 MIPS_R2_STATS(bops);
865 return 0;
868 /* R6 removed instructions for the SPECIAL2 opcode */
869 static struct r2_decoder_table spec2_op_table[] = {
870 { 0xfc00ffff, 0x70000000, madd_func },
871 { 0xfc00ffff, 0x70000001, maddu_func },
872 { 0xfc0007ff, 0x70000002, mul_func },
873 { 0xfc00ffff, 0x70000004, msub_func },
874 { 0xfc00ffff, 0x70000005, msubu_func },
875 { 0xfc0007ff, 0x70000020, clz_func },
876 { 0xfc0007ff, 0x70000021, clo_func },
877 { 0xfc0007ff, 0x70000024, dclz_func },
878 { 0xfc0007ff, 0x70000025, dclo_func },
882 static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
883 struct r2_decoder_table *table)
885 struct r2_decoder_table *p;
886 int err;
888 for (p = table; p->func; p++) {
889 if ((inst & p->mask) == p->code) {
890 err = (p->func)(regs, inst);
891 return err;
894 return SIGILL;
898  * mipsr2_decoder - Decode and emulate a MIPS R2 instruction
899 * @regs: Process register set
900 * @inst: Instruction to decode and emulate
901  * @fcr31: Location where the Floating Point Control and Status Register value is returned
903 int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
905 int err = 0;
906 unsigned long vaddr;
907 u32 nir;
908 unsigned long cpc, epc, nepc, r31, res, rs, rt;
910 void __user *fault_addr = NULL;
911 int pass = 0;
913 repeat:
914 r31 = regs->regs[31];
915 epc = regs->cp0_epc;
916 err = compute_return_epc(regs);
917 if (err < 0) {
918 BUG();
919 return SIGEMT;
921 	pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n",
922 inst, epc, pass);
924 switch (MIPSInst_OPCODE(inst)) {
925 case spec_op:
926 err = mipsr2_find_op_func(regs, inst, spec_op_table);
927 if (err < 0) {
928 /* FPU instruction under JR */
929 regs->cp0_cause |= CAUSEF_BD;
930 goto fpu_emul;
932 break;
933 case spec2_op:
934 err = mipsr2_find_op_func(regs, inst, spec2_op_table);
935 break;
936 case bcond_op:
937 rt = MIPSInst_RT(inst);
938 rs = MIPSInst_RS(inst);
939 switch (rt) {
940 case tgei_op:
941 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
942 do_trap_or_bp(regs, 0, "TGEI");
944 MIPS_R2_STATS(traps);
946 break;
947 case tgeiu_op:
948 if (regs->regs[rs] >= MIPSInst_UIMM(inst))
949 do_trap_or_bp(regs, 0, "TGEIU");
951 MIPS_R2_STATS(traps);
953 break;
954 case tlti_op:
955 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
956 do_trap_or_bp(regs, 0, "TLTI");
958 MIPS_R2_STATS(traps);
960 break;
961 case tltiu_op:
962 if (regs->regs[rs] < MIPSInst_UIMM(inst))
963 do_trap_or_bp(regs, 0, "TLTIU");
965 MIPS_R2_STATS(traps);
967 break;
968 case teqi_op:
969 if (regs->regs[rs] == MIPSInst_SIMM(inst))
970 do_trap_or_bp(regs, 0, "TEQI");
972 MIPS_R2_STATS(traps);
974 break;
975 case tnei_op:
976 if (regs->regs[rs] != MIPSInst_SIMM(inst))
977 do_trap_or_bp(regs, 0, "TNEI");
979 MIPS_R2_STATS(traps);
981 break;
982 case bltzl_op:
983 case bgezl_op:
984 case bltzall_op:
985 case bgezall_op:
986 if (delay_slot(regs)) {
987 err = SIGILL;
988 break;
990 regs->regs[31] = r31;
991 regs->cp0_epc = epc;
992 err = __compute_return_epc(regs);
993 if (err < 0)
994 return SIGEMT;
995 if (err != BRANCH_LIKELY_TAKEN)
996 break;
997 cpc = regs->cp0_epc;
998 nepc = epc + 4;
999 err = __get_user(nir, (u32 __user *)nepc);
1000 if (err) {
1001 err = SIGSEGV;
1002 break;
1005 * This will probably be optimized away when
1006 * CONFIG_DEBUG_FS is not enabled
1008 switch (rt) {
1009 case bltzl_op:
1010 MIPS_R2BR_STATS(bltzl);
1011 break;
1012 case bgezl_op:
1013 MIPS_R2BR_STATS(bgezl);
1014 break;
1015 case bltzall_op:
1016 MIPS_R2BR_STATS(bltzall);
1017 break;
1018 case bgezall_op:
1019 MIPS_R2BR_STATS(bgezall);
1020 break;
1023 switch (MIPSInst_OPCODE(nir)) {
1024 case cop1_op:
1025 case cop1x_op:
1026 case lwc1_op:
1027 case swc1_op:
1028 regs->cp0_cause |= CAUSEF_BD;
1029 goto fpu_emul;
1031 if (nir) {
1032 err = mipsr6_emul(regs, nir);
1033 if (err > 0) {
1034 err = mips_dsemul(regs, nir, cpc);
1035 if (err == SIGILL)
1036 err = SIGEMT;
1037 MIPS_R2_STATS(dsemul);
1040 break;
1041 case bltzal_op:
1042 case bgezal_op:
1043 if (delay_slot(regs)) {
1044 err = SIGILL;
1045 break;
1047 regs->regs[31] = r31;
1048 regs->cp0_epc = epc;
1049 err = __compute_return_epc(regs);
1050 if (err < 0)
1051 return SIGEMT;
1052 cpc = regs->cp0_epc;
1053 nepc = epc + 4;
1054 err = __get_user(nir, (u32 __user *)nepc);
1055 if (err) {
1056 err = SIGSEGV;
1057 break;
1060 * This will probably be optimized away when
1061 * CONFIG_DEBUG_FS is not enabled
1063 switch (rt) {
1064 case bltzal_op:
1065 MIPS_R2BR_STATS(bltzal);
1066 break;
1067 case bgezal_op:
1068 MIPS_R2BR_STATS(bgezal);
1069 break;
1072 switch (MIPSInst_OPCODE(nir)) {
1073 case cop1_op:
1074 case cop1x_op:
1075 case lwc1_op:
1076 case swc1_op:
1077 regs->cp0_cause |= CAUSEF_BD;
1078 goto fpu_emul;
1080 if (nir) {
1081 err = mipsr6_emul(regs, nir);
1082 if (err > 0) {
1083 err = mips_dsemul(regs, nir, cpc);
1084 if (err == SIGILL)
1085 err = SIGEMT;
1086 MIPS_R2_STATS(dsemul);
1089 break;
1090 default:
1091 regs->regs[31] = r31;
1092 regs->cp0_epc = epc;
1093 err = SIGILL;
1094 break;
1096 break;
1098 case beql_op:
1099 case bnel_op:
1100 case blezl_op:
1101 case bgtzl_op:
1102 if (delay_slot(regs)) {
1103 err = SIGILL;
1104 break;
1106 regs->regs[31] = r31;
1107 regs->cp0_epc = epc;
1108 err = __compute_return_epc(regs);
1109 if (err < 0)
1110 return SIGEMT;
1111 if (err != BRANCH_LIKELY_TAKEN)
1112 break;
1113 cpc = regs->cp0_epc;
1114 nepc = epc + 4;
1115 err = __get_user(nir, (u32 __user *)nepc);
1116 if (err) {
1117 err = SIGSEGV;
1118 break;
1121 * This will probably be optimized away when
1122 * CONFIG_DEBUG_FS is not enabled
1124 switch (MIPSInst_OPCODE(inst)) {
1125 case beql_op:
1126 MIPS_R2BR_STATS(beql);
1127 break;
1128 case bnel_op:
1129 MIPS_R2BR_STATS(bnel);
1130 break;
1131 case blezl_op:
1132 MIPS_R2BR_STATS(blezl);
1133 break;
1134 case bgtzl_op:
1135 MIPS_R2BR_STATS(bgtzl);
1136 break;
1139 switch (MIPSInst_OPCODE(nir)) {
1140 case cop1_op:
1141 case cop1x_op:
1142 case lwc1_op:
1143 case swc1_op:
1144 regs->cp0_cause |= CAUSEF_BD;
1145 goto fpu_emul;
1147 if (nir) {
1148 err = mipsr6_emul(regs, nir);
1149 if (err > 0) {
1150 err = mips_dsemul(regs, nir, cpc);
1151 if (err == SIGILL)
1152 err = SIGEMT;
1153 MIPS_R2_STATS(dsemul);
1156 break;
1157 case lwc1_op:
1158 case swc1_op:
1159 case cop1_op:
1160 case cop1x_op:
1161 fpu_emul:
1162 regs->regs[31] = r31;
1163 regs->cp0_epc = epc;
1164 if (!used_math()) { /* First time FPU user. */
1165 err = init_fpu();
1166 set_used_math();
1168 lose_fpu(1); /* Save FPU state for the emulator. */
1170 err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1171 &fault_addr);
1172 *fcr31 = current->thread.fpu.fcr31;
1175 * We can't allow the emulated instruction to leave any of
1176 * the cause bits set in $fcr31.
1178 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
1181 		 * This is a tricky issue - lose_fpu() uses LL/SC atomics if
1182 		 * the FPU is owned, which effectively cancels a user-level LL/SC.
1183 		 * So it might seem logical not to restore FPU ownership here.
1184 		 * But a sequence of multiple FPU instructions is far more common
1185 		 * than LL-FPU-SC, so we prefer to loop here until the next
1186 		 * scheduler cycle cancels FPU ownership.
1188 own_fpu(1); /* Restore FPU state. */
1190 if (err)
1191 current->thread.cp0_baduaddr = (unsigned long)fault_addr;
1193 MIPS_R2_STATS(fpus);
1195 break;
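/*
 * FPU_CSR_ALL_X above is the mask of all FCSR Cause bits (unimplemented
 * operation, invalid operation, divide by zero, overflow, underflow,
 * inexact): writing back an FCSR value that has a Cause bit set together
 * with the matching Enable bit would raise the FP exception again
 * immediately.
 */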
1197 case lwl_op:
1198 rt = regs->regs[MIPSInst_RT(inst)];
1199 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1200 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1201 current->thread.cp0_baduaddr = vaddr;
1202 err = SIGSEGV;
1203 break;
1205 __asm__ __volatile__(
1206 " .set push\n"
1207 " .set reorder\n"
1208 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1209 "1:" LB "%1, 0(%2)\n"
1210 INS "%0, %1, 24, 8\n"
1211 " andi %1, %2, 0x3\n"
1212 " beq $0, %1, 9f\n"
1213 ADDIU "%2, %2, -1\n"
1214 "2:" LB "%1, 0(%2)\n"
1215 INS "%0, %1, 16, 8\n"
1216 " andi %1, %2, 0x3\n"
1217 " beq $0, %1, 9f\n"
1218 ADDIU "%2, %2, -1\n"
1219 "3:" LB "%1, 0(%2)\n"
1220 INS "%0, %1, 8, 8\n"
1221 " andi %1, %2, 0x3\n"
1222 " beq $0, %1, 9f\n"
1223 ADDIU "%2, %2, -1\n"
1224 "4:" LB "%1, 0(%2)\n"
1225 INS "%0, %1, 0, 8\n"
1226 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1227 "1:" LB "%1, 0(%2)\n"
1228 INS "%0, %1, 24, 8\n"
1229 ADDIU "%2, %2, 1\n"
1230 " andi %1, %2, 0x3\n"
1231 " beq $0, %1, 9f\n"
1232 "2:" LB "%1, 0(%2)\n"
1233 INS "%0, %1, 16, 8\n"
1234 ADDIU "%2, %2, 1\n"
1235 " andi %1, %2, 0x3\n"
1236 " beq $0, %1, 9f\n"
1237 "3:" LB "%1, 0(%2)\n"
1238 INS "%0, %1, 8, 8\n"
1239 ADDIU "%2, %2, 1\n"
1240 " andi %1, %2, 0x3\n"
1241 " beq $0, %1, 9f\n"
1242 "4:" LB "%1, 0(%2)\n"
1243 INS "%0, %1, 0, 8\n"
1244 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1245 "9: sll %0, %0, 0\n"
1246 "10:\n"
1247 " .insn\n"
1248 " .section .fixup,\"ax\"\n"
1249 "8: li %3,%4\n"
1250 " j 10b\n"
1251 " .previous\n"
1252 " .section __ex_table,\"a\"\n"
1253 " .word 1b,8b\n"
1254 " .word 2b,8b\n"
1255 " .word 3b,8b\n"
1256 " .word 4b,8b\n"
1257 " .previous\n"
1258 " .set pop\n"
1259 : "+&r"(rt), "=&r"(rs),
1260 "+&r"(vaddr), "+&r"(err)
1261 : "i"(SIGSEGV));
1263 if (MIPSInst_RT(inst) && !err)
1264 regs->regs[MIPSInst_RT(inst)] = rt;
1266 MIPS_R2_STATS(loads);
1268 break;
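/*
 * C-level restatement of the little-endian path of the LWL asm above, for
 * readability. This is a sketch only: the helper name is made up, it skips
 * the __ex_table fixups, and a real implementation would have to go through
 * the uaccess machinery instead of dereferencing vaddr directly.
 *
 *	static unsigned long lwl_le_sketch(unsigned long rt, unsigned long vaddr)
 *	{
 *		u32 val = rt;
 *		int shift = 24;
 *
 *		for (;;) {
 *			u8 byte = *(u8 *)vaddr;			// "lb %1, 0(%2)"
 *
 *			val &= ~(0xffu << shift);		// INS replaces one byte
 *			val |= (u32)byte << shift;
 *			if (!(vaddr & 0x3))			// reached the word boundary
 *				break;
 *			vaddr--;				// ADDIU %2, %2, -1
 *			shift -= 8;
 *		}
 *		// "sll %0, %0, 0" at label 9 sign-extends the 32-bit result
 *		return (long)(s32)val;
 *	}
 */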
1270 case lwr_op:
1271 rt = regs->regs[MIPSInst_RT(inst)];
1272 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1273 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1274 current->thread.cp0_baduaddr = vaddr;
1275 err = SIGSEGV;
1276 break;
1278 __asm__ __volatile__(
1279 " .set push\n"
1280 " .set reorder\n"
1281 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1282 "1:" LB "%1, 0(%2)\n"
1283 INS "%0, %1, 0, 8\n"
1284 ADDIU "%2, %2, 1\n"
1285 " andi %1, %2, 0x3\n"
1286 " beq $0, %1, 9f\n"
1287 "2:" LB "%1, 0(%2)\n"
1288 INS "%0, %1, 8, 8\n"
1289 ADDIU "%2, %2, 1\n"
1290 " andi %1, %2, 0x3\n"
1291 " beq $0, %1, 9f\n"
1292 "3:" LB "%1, 0(%2)\n"
1293 INS "%0, %1, 16, 8\n"
1294 ADDIU "%2, %2, 1\n"
1295 " andi %1, %2, 0x3\n"
1296 " beq $0, %1, 9f\n"
1297 "4:" LB "%1, 0(%2)\n"
1298 INS "%0, %1, 24, 8\n"
1299 " sll %0, %0, 0\n"
1300 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1301 "1:" LB "%1, 0(%2)\n"
1302 INS "%0, %1, 0, 8\n"
1303 " andi %1, %2, 0x3\n"
1304 " beq $0, %1, 9f\n"
1305 ADDIU "%2, %2, -1\n"
1306 "2:" LB "%1, 0(%2)\n"
1307 INS "%0, %1, 8, 8\n"
1308 " andi %1, %2, 0x3\n"
1309 " beq $0, %1, 9f\n"
1310 ADDIU "%2, %2, -1\n"
1311 "3:" LB "%1, 0(%2)\n"
1312 INS "%0, %1, 16, 8\n"
1313 " andi %1, %2, 0x3\n"
1314 " beq $0, %1, 9f\n"
1315 ADDIU "%2, %2, -1\n"
1316 "4:" LB "%1, 0(%2)\n"
1317 INS "%0, %1, 24, 8\n"
1318 " sll %0, %0, 0\n"
1319 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1320 "9:\n"
1321 "10:\n"
1322 " .insn\n"
1323 " .section .fixup,\"ax\"\n"
1324 "8: li %3,%4\n"
1325 " j 10b\n"
1326 " .previous\n"
1327 " .section __ex_table,\"a\"\n"
1328 " .word 1b,8b\n"
1329 " .word 2b,8b\n"
1330 " .word 3b,8b\n"
1331 " .word 4b,8b\n"
1332 " .previous\n"
1333 " .set pop\n"
1334 : "+&r"(rt), "=&r"(rs),
1335 "+&r"(vaddr), "+&r"(err)
1336 : "i"(SIGSEGV));
1337 if (MIPSInst_RT(inst) && !err)
1338 regs->regs[MIPSInst_RT(inst)] = rt;
1340 MIPS_R2_STATS(loads);
1342 break;
1344 case swl_op:
1345 rt = regs->regs[MIPSInst_RT(inst)];
1346 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1347 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
1348 current->thread.cp0_baduaddr = vaddr;
1349 err = SIGSEGV;
1350 break;
1352 __asm__ __volatile__(
1353 " .set push\n"
1354 " .set reorder\n"
1355 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1356 EXT "%1, %0, 24, 8\n"
1357 "1:" SB "%1, 0(%2)\n"
1358 " andi %1, %2, 0x3\n"
1359 " beq $0, %1, 9f\n"
1360 ADDIU "%2, %2, -1\n"
1361 EXT "%1, %0, 16, 8\n"
1362 "2:" SB "%1, 0(%2)\n"
1363 " andi %1, %2, 0x3\n"
1364 " beq $0, %1, 9f\n"
1365 ADDIU "%2, %2, -1\n"
1366 EXT "%1, %0, 8, 8\n"
1367 "3:" SB "%1, 0(%2)\n"
1368 " andi %1, %2, 0x3\n"
1369 " beq $0, %1, 9f\n"
1370 ADDIU "%2, %2, -1\n"
1371 EXT "%1, %0, 0, 8\n"
1372 "4:" SB "%1, 0(%2)\n"
1373 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1374 EXT "%1, %0, 24, 8\n"
1375 "1:" SB "%1, 0(%2)\n"
1376 ADDIU "%2, %2, 1\n"
1377 " andi %1, %2, 0x3\n"
1378 " beq $0, %1, 9f\n"
1379 EXT "%1, %0, 16, 8\n"
1380 "2:" SB "%1, 0(%2)\n"
1381 ADDIU "%2, %2, 1\n"
1382 " andi %1, %2, 0x3\n"
1383 " beq $0, %1, 9f\n"
1384 EXT "%1, %0, 8, 8\n"
1385 "3:" SB "%1, 0(%2)\n"
1386 ADDIU "%2, %2, 1\n"
1387 " andi %1, %2, 0x3\n"
1388 " beq $0, %1, 9f\n"
1389 EXT "%1, %0, 0, 8\n"
1390 "4:" SB "%1, 0(%2)\n"
1391 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1392 "9:\n"
1393 " .insn\n"
1394 " .section .fixup,\"ax\"\n"
1395 "8: li %3,%4\n"
1396 " j 9b\n"
1397 " .previous\n"
1398 " .section __ex_table,\"a\"\n"
1399 " .word 1b,8b\n"
1400 " .word 2b,8b\n"
1401 " .word 3b,8b\n"
1402 " .word 4b,8b\n"
1403 " .previous\n"
1404 " .set pop\n"
1405 : "+&r"(rt), "=&r"(rs),
1406 "+&r"(vaddr), "+&r"(err)
1407 : "i"(SIGSEGV)
1408 : "memory");
1410 MIPS_R2_STATS(stores);
1412 break;
1414 case swr_op:
1415 rt = regs->regs[MIPSInst_RT(inst)];
1416 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1417 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
1418 current->thread.cp0_baduaddr = vaddr;
1419 err = SIGSEGV;
1420 break;
1422 __asm__ __volatile__(
1423 " .set push\n"
1424 " .set reorder\n"
1425 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1426 EXT "%1, %0, 0, 8\n"
1427 "1:" SB "%1, 0(%2)\n"
1428 ADDIU "%2, %2, 1\n"
1429 " andi %1, %2, 0x3\n"
1430 " beq $0, %1, 9f\n"
1431 EXT "%1, %0, 8, 8\n"
1432 "2:" SB "%1, 0(%2)\n"
1433 ADDIU "%2, %2, 1\n"
1434 " andi %1, %2, 0x3\n"
1435 " beq $0, %1, 9f\n"
1436 EXT "%1, %0, 16, 8\n"
1437 "3:" SB "%1, 0(%2)\n"
1438 ADDIU "%2, %2, 1\n"
1439 " andi %1, %2, 0x3\n"
1440 " beq $0, %1, 9f\n"
1441 EXT "%1, %0, 24, 8\n"
1442 "4:" SB "%1, 0(%2)\n"
1443 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1444 EXT "%1, %0, 0, 8\n"
1445 "1:" SB "%1, 0(%2)\n"
1446 " andi %1, %2, 0x3\n"
1447 " beq $0, %1, 9f\n"
1448 ADDIU "%2, %2, -1\n"
1449 EXT "%1, %0, 8, 8\n"
1450 "2:" SB "%1, 0(%2)\n"
1451 " andi %1, %2, 0x3\n"
1452 " beq $0, %1, 9f\n"
1453 ADDIU "%2, %2, -1\n"
1454 EXT "%1, %0, 16, 8\n"
1455 "3:" SB "%1, 0(%2)\n"
1456 " andi %1, %2, 0x3\n"
1457 " beq $0, %1, 9f\n"
1458 ADDIU "%2, %2, -1\n"
1459 EXT "%1, %0, 24, 8\n"
1460 "4:" SB "%1, 0(%2)\n"
1461 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1462 "9:\n"
1463 " .insn\n"
1464 " .section .fixup,\"ax\"\n"
1465 "8: li %3,%4\n"
1466 " j 9b\n"
1467 " .previous\n"
1468 " .section __ex_table,\"a\"\n"
1469 " .word 1b,8b\n"
1470 " .word 2b,8b\n"
1471 " .word 3b,8b\n"
1472 " .word 4b,8b\n"
1473 " .previous\n"
1474 " .set pop\n"
1475 : "+&r"(rt), "=&r"(rs),
1476 "+&r"(vaddr), "+&r"(err)
1477 : "i"(SIGSEGV)
1478 : "memory");
1480 MIPS_R2_STATS(stores);
1482 break;
1484 case ldl_op:
1485 if (config_enabled(CONFIG_32BIT)) {
1486 err = SIGILL;
1487 break;
1490 rt = regs->regs[MIPSInst_RT(inst)];
1491 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1492 if (!access_ok(VERIFY_READ, vaddr, 8)) {
1493 current->thread.cp0_baduaddr = vaddr;
1494 err = SIGSEGV;
1495 break;
1497 __asm__ __volatile__(
1498 " .set push\n"
1499 " .set reorder\n"
1500 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1501 "1: lb %1, 0(%2)\n"
1502 " dinsu %0, %1, 56, 8\n"
1503 " andi %1, %2, 0x7\n"
1504 " beq $0, %1, 9f\n"
1505 " daddiu %2, %2, -1\n"
1506 "2: lb %1, 0(%2)\n"
1507 " dinsu %0, %1, 48, 8\n"
1508 " andi %1, %2, 0x7\n"
1509 " beq $0, %1, 9f\n"
1510 " daddiu %2, %2, -1\n"
1511 "3: lb %1, 0(%2)\n"
1512 " dinsu %0, %1, 40, 8\n"
1513 " andi %1, %2, 0x7\n"
1514 " beq $0, %1, 9f\n"
1515 " daddiu %2, %2, -1\n"
1516 "4: lb %1, 0(%2)\n"
1517 " dinsu %0, %1, 32, 8\n"
1518 " andi %1, %2, 0x7\n"
1519 " beq $0, %1, 9f\n"
1520 " daddiu %2, %2, -1\n"
1521 "5: lb %1, 0(%2)\n"
1522 " dins %0, %1, 24, 8\n"
1523 " andi %1, %2, 0x7\n"
1524 " beq $0, %1, 9f\n"
1525 " daddiu %2, %2, -1\n"
1526 "6: lb %1, 0(%2)\n"
1527 " dins %0, %1, 16, 8\n"
1528 " andi %1, %2, 0x7\n"
1529 " beq $0, %1, 9f\n"
1530 " daddiu %2, %2, -1\n"
1531 "7: lb %1, 0(%2)\n"
1532 " dins %0, %1, 8, 8\n"
1533 " andi %1, %2, 0x7\n"
1534 " beq $0, %1, 9f\n"
1535 " daddiu %2, %2, -1\n"
1536 "0: lb %1, 0(%2)\n"
1537 " dins %0, %1, 0, 8\n"
1538 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1539 "1: lb %1, 0(%2)\n"
1540 " dinsu %0, %1, 56, 8\n"
1541 " daddiu %2, %2, 1\n"
1542 " andi %1, %2, 0x7\n"
1543 " beq $0, %1, 9f\n"
1544 "2: lb %1, 0(%2)\n"
1545 " dinsu %0, %1, 48, 8\n"
1546 " daddiu %2, %2, 1\n"
1547 " andi %1, %2, 0x7\n"
1548 " beq $0, %1, 9f\n"
1549 "3: lb %1, 0(%2)\n"
1550 " dinsu %0, %1, 40, 8\n"
1551 " daddiu %2, %2, 1\n"
1552 " andi %1, %2, 0x7\n"
1553 " beq $0, %1, 9f\n"
1554 "4: lb %1, 0(%2)\n"
1555 " dinsu %0, %1, 32, 8\n"
1556 " daddiu %2, %2, 1\n"
1557 " andi %1, %2, 0x7\n"
1558 " beq $0, %1, 9f\n"
1559 "5: lb %1, 0(%2)\n"
1560 " dins %0, %1, 24, 8\n"
1561 " daddiu %2, %2, 1\n"
1562 " andi %1, %2, 0x7\n"
1563 " beq $0, %1, 9f\n"
1564 "6: lb %1, 0(%2)\n"
1565 " dins %0, %1, 16, 8\n"
1566 " daddiu %2, %2, 1\n"
1567 " andi %1, %2, 0x7\n"
1568 " beq $0, %1, 9f\n"
1569 "7: lb %1, 0(%2)\n"
1570 " dins %0, %1, 8, 8\n"
1571 " daddiu %2, %2, 1\n"
1572 " andi %1, %2, 0x7\n"
1573 " beq $0, %1, 9f\n"
1574 "0: lb %1, 0(%2)\n"
1575 " dins %0, %1, 0, 8\n"
1576 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1577 "9:\n"
1578 " .insn\n"
1579 " .section .fixup,\"ax\"\n"
1580 "8: li %3,%4\n"
1581 " j 9b\n"
1582 " .previous\n"
1583 " .section __ex_table,\"a\"\n"
1584 " .word 1b,8b\n"
1585 " .word 2b,8b\n"
1586 " .word 3b,8b\n"
1587 " .word 4b,8b\n"
1588 " .word 5b,8b\n"
1589 " .word 6b,8b\n"
1590 " .word 7b,8b\n"
1591 " .word 0b,8b\n"
1592 " .previous\n"
1593 " .set pop\n"
1594 : "+&r"(rt), "=&r"(rs),
1595 "+&r"(vaddr), "+&r"(err)
1596 : "i"(SIGSEGV));
1597 if (MIPSInst_RT(inst) && !err)
1598 regs->regs[MIPSInst_RT(inst)] = rt;
1600 MIPS_R2_STATS(loads);
1601 break;
1603 case ldr_op:
1604 if (config_enabled(CONFIG_32BIT)) {
1605 err = SIGILL;
1606 break;
1609 rt = regs->regs[MIPSInst_RT(inst)];
1610 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1611 if (!access_ok(VERIFY_READ, vaddr, 8)) {
1612 current->thread.cp0_baduaddr = vaddr;
1613 err = SIGSEGV;
1614 break;
1616 __asm__ __volatile__(
1617 " .set push\n"
1618 " .set reorder\n"
1619 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1620 "1: lb %1, 0(%2)\n"
1621 " dins %0, %1, 0, 8\n"
1622 " daddiu %2, %2, 1\n"
1623 " andi %1, %2, 0x7\n"
1624 " beq $0, %1, 9f\n"
1625 "2: lb %1, 0(%2)\n"
1626 " dins %0, %1, 8, 8\n"
1627 " daddiu %2, %2, 1\n"
1628 " andi %1, %2, 0x7\n"
1629 " beq $0, %1, 9f\n"
1630 "3: lb %1, 0(%2)\n"
1631 " dins %0, %1, 16, 8\n"
1632 " daddiu %2, %2, 1\n"
1633 " andi %1, %2, 0x7\n"
1634 " beq $0, %1, 9f\n"
1635 "4: lb %1, 0(%2)\n"
1636 " dins %0, %1, 24, 8\n"
1637 " daddiu %2, %2, 1\n"
1638 " andi %1, %2, 0x7\n"
1639 " beq $0, %1, 9f\n"
1640 "5: lb %1, 0(%2)\n"
1641 " dinsu %0, %1, 32, 8\n"
1642 " daddiu %2, %2, 1\n"
1643 " andi %1, %2, 0x7\n"
1644 " beq $0, %1, 9f\n"
1645 "6: lb %1, 0(%2)\n"
1646 " dinsu %0, %1, 40, 8\n"
1647 " daddiu %2, %2, 1\n"
1648 " andi %1, %2, 0x7\n"
1649 " beq $0, %1, 9f\n"
1650 "7: lb %1, 0(%2)\n"
1651 " dinsu %0, %1, 48, 8\n"
1652 " daddiu %2, %2, 1\n"
1653 " andi %1, %2, 0x7\n"
1654 " beq $0, %1, 9f\n"
1655 "0: lb %1, 0(%2)\n"
1656 " dinsu %0, %1, 56, 8\n"
1657 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1658 "1: lb %1, 0(%2)\n"
1659 " dins %0, %1, 0, 8\n"
1660 " andi %1, %2, 0x7\n"
1661 " beq $0, %1, 9f\n"
1662 " daddiu %2, %2, -1\n"
1663 "2: lb %1, 0(%2)\n"
1664 " dins %0, %1, 8, 8\n"
1665 " andi %1, %2, 0x7\n"
1666 " beq $0, %1, 9f\n"
1667 " daddiu %2, %2, -1\n"
1668 "3: lb %1, 0(%2)\n"
1669 " dins %0, %1, 16, 8\n"
1670 " andi %1, %2, 0x7\n"
1671 " beq $0, %1, 9f\n"
1672 " daddiu %2, %2, -1\n"
1673 "4: lb %1, 0(%2)\n"
1674 " dins %0, %1, 24, 8\n"
1675 " andi %1, %2, 0x7\n"
1676 " beq $0, %1, 9f\n"
1677 " daddiu %2, %2, -1\n"
1678 "5: lb %1, 0(%2)\n"
1679 " dinsu %0, %1, 32, 8\n"
1680 " andi %1, %2, 0x7\n"
1681 " beq $0, %1, 9f\n"
1682 " daddiu %2, %2, -1\n"
1683 "6: lb %1, 0(%2)\n"
1684 " dinsu %0, %1, 40, 8\n"
1685 " andi %1, %2, 0x7\n"
1686 " beq $0, %1, 9f\n"
1687 " daddiu %2, %2, -1\n"
1688 "7: lb %1, 0(%2)\n"
1689 " dinsu %0, %1, 48, 8\n"
1690 " andi %1, %2, 0x7\n"
1691 " beq $0, %1, 9f\n"
1692 " daddiu %2, %2, -1\n"
1693 "0: lb %1, 0(%2)\n"
1694 " dinsu %0, %1, 56, 8\n"
1695 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1696 "9:\n"
1697 " .insn\n"
1698 " .section .fixup,\"ax\"\n"
1699 "8: li %3,%4\n"
1700 " j 9b\n"
1701 " .previous\n"
1702 " .section __ex_table,\"a\"\n"
1703 " .word 1b,8b\n"
1704 " .word 2b,8b\n"
1705 " .word 3b,8b\n"
1706 " .word 4b,8b\n"
1707 " .word 5b,8b\n"
1708 " .word 6b,8b\n"
1709 " .word 7b,8b\n"
1710 " .word 0b,8b\n"
1711 " .previous\n"
1712 " .set pop\n"
1713 : "+&r"(rt), "=&r"(rs),
1714 "+&r"(vaddr), "+&r"(err)
1715 : "i"(SIGSEGV));
1716 if (MIPSInst_RT(inst) && !err)
1717 regs->regs[MIPSInst_RT(inst)] = rt;
1719 MIPS_R2_STATS(loads);
1720 break;
1722 case sdl_op:
1723 if (config_enabled(CONFIG_32BIT)) {
1724 err = SIGILL;
1725 break;
1728 rt = regs->regs[MIPSInst_RT(inst)];
1729 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1730 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
1731 current->thread.cp0_baduaddr = vaddr;
1732 err = SIGSEGV;
1733 break;
1735 __asm__ __volatile__(
1736 " .set push\n"
1737 " .set reorder\n"
1738 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1739 " dextu %1, %0, 56, 8\n"
1740 "1: sb %1, 0(%2)\n"
1741 " andi %1, %2, 0x7\n"
1742 " beq $0, %1, 9f\n"
1743 " daddiu %2, %2, -1\n"
1744 " dextu %1, %0, 48, 8\n"
1745 "2: sb %1, 0(%2)\n"
1746 " andi %1, %2, 0x7\n"
1747 " beq $0, %1, 9f\n"
1748 " daddiu %2, %2, -1\n"
1749 " dextu %1, %0, 40, 8\n"
1750 "3: sb %1, 0(%2)\n"
1751 " andi %1, %2, 0x7\n"
1752 " beq $0, %1, 9f\n"
1753 " daddiu %2, %2, -1\n"
1754 " dextu %1, %0, 32, 8\n"
1755 "4: sb %1, 0(%2)\n"
1756 " andi %1, %2, 0x7\n"
1757 " beq $0, %1, 9f\n"
1758 " daddiu %2, %2, -1\n"
1759 " dext %1, %0, 24, 8\n"
1760 "5: sb %1, 0(%2)\n"
1761 " andi %1, %2, 0x7\n"
1762 " beq $0, %1, 9f\n"
1763 " daddiu %2, %2, -1\n"
1764 " dext %1, %0, 16, 8\n"
1765 "6: sb %1, 0(%2)\n"
1766 " andi %1, %2, 0x7\n"
1767 " beq $0, %1, 9f\n"
1768 " daddiu %2, %2, -1\n"
1769 " dext %1, %0, 8, 8\n"
1770 "7: sb %1, 0(%2)\n"
1771 " andi %1, %2, 0x7\n"
1772 " beq $0, %1, 9f\n"
1773 " daddiu %2, %2, -1\n"
1774 " dext %1, %0, 0, 8\n"
1775 "0: sb %1, 0(%2)\n"
1776 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1777 " dextu %1, %0, 56, 8\n"
1778 "1: sb %1, 0(%2)\n"
1779 " daddiu %2, %2, 1\n"
1780 " andi %1, %2, 0x7\n"
1781 " beq $0, %1, 9f\n"
1782 " dextu %1, %0, 48, 8\n"
1783 "2: sb %1, 0(%2)\n"
1784 " daddiu %2, %2, 1\n"
1785 " andi %1, %2, 0x7\n"
1786 " beq $0, %1, 9f\n"
1787 " dextu %1, %0, 40, 8\n"
1788 "3: sb %1, 0(%2)\n"
1789 " daddiu %2, %2, 1\n"
1790 " andi %1, %2, 0x7\n"
1791 " beq $0, %1, 9f\n"
1792 " dextu %1, %0, 32, 8\n"
1793 "4: sb %1, 0(%2)\n"
1794 " daddiu %2, %2, 1\n"
1795 " andi %1, %2, 0x7\n"
1796 " beq $0, %1, 9f\n"
1797 " dext %1, %0, 24, 8\n"
1798 "5: sb %1, 0(%2)\n"
1799 " daddiu %2, %2, 1\n"
1800 " andi %1, %2, 0x7\n"
1801 " beq $0, %1, 9f\n"
1802 " dext %1, %0, 16, 8\n"
1803 "6: sb %1, 0(%2)\n"
1804 " daddiu %2, %2, 1\n"
1805 " andi %1, %2, 0x7\n"
1806 " beq $0, %1, 9f\n"
1807 " dext %1, %0, 8, 8\n"
1808 "7: sb %1, 0(%2)\n"
1809 " daddiu %2, %2, 1\n"
1810 " andi %1, %2, 0x7\n"
1811 " beq $0, %1, 9f\n"
1812 " dext %1, %0, 0, 8\n"
1813 "0: sb %1, 0(%2)\n"
1814 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1815 "9:\n"
1816 " .insn\n"
1817 " .section .fixup,\"ax\"\n"
1818 "8: li %3,%4\n"
1819 " j 9b\n"
1820 " .previous\n"
1821 " .section __ex_table,\"a\"\n"
1822 " .word 1b,8b\n"
1823 " .word 2b,8b\n"
1824 " .word 3b,8b\n"
1825 " .word 4b,8b\n"
1826 " .word 5b,8b\n"
1827 " .word 6b,8b\n"
1828 " .word 7b,8b\n"
1829 " .word 0b,8b\n"
1830 " .previous\n"
1831 " .set pop\n"
1832 : "+&r"(rt), "=&r"(rs),
1833 "+&r"(vaddr), "+&r"(err)
1834 : "i"(SIGSEGV)
1835 : "memory");
1837 MIPS_R2_STATS(stores);
1838 break;
1840 case sdr_op:
1841 if (config_enabled(CONFIG_32BIT)) {
1842 err = SIGILL;
1843 break;
1846 rt = regs->regs[MIPSInst_RT(inst)];
1847 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1848 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
1849 current->thread.cp0_baduaddr = vaddr;
1850 err = SIGSEGV;
1851 break;
1853 __asm__ __volatile__(
1854 " .set push\n"
1855 " .set reorder\n"
1856 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1857 " dext %1, %0, 0, 8\n"
1858 "1: sb %1, 0(%2)\n"
1859 " daddiu %2, %2, 1\n"
1860 " andi %1, %2, 0x7\n"
1861 " beq $0, %1, 9f\n"
1862 " dext %1, %0, 8, 8\n"
1863 "2: sb %1, 0(%2)\n"
1864 " daddiu %2, %2, 1\n"
1865 " andi %1, %2, 0x7\n"
1866 " beq $0, %1, 9f\n"
1867 " dext %1, %0, 16, 8\n"
1868 "3: sb %1, 0(%2)\n"
1869 " daddiu %2, %2, 1\n"
1870 " andi %1, %2, 0x7\n"
1871 " beq $0, %1, 9f\n"
1872 " dext %1, %0, 24, 8\n"
1873 "4: sb %1, 0(%2)\n"
1874 " daddiu %2, %2, 1\n"
1875 " andi %1, %2, 0x7\n"
1876 " beq $0, %1, 9f\n"
1877 " dextu %1, %0, 32, 8\n"
1878 "5: sb %1, 0(%2)\n"
1879 " daddiu %2, %2, 1\n"
1880 " andi %1, %2, 0x7\n"
1881 " beq $0, %1, 9f\n"
1882 " dextu %1, %0, 40, 8\n"
1883 "6: sb %1, 0(%2)\n"
1884 " daddiu %2, %2, 1\n"
1885 " andi %1, %2, 0x7\n"
1886 " beq $0, %1, 9f\n"
1887 " dextu %1, %0, 48, 8\n"
1888 "7: sb %1, 0(%2)\n"
1889 " daddiu %2, %2, 1\n"
1890 " andi %1, %2, 0x7\n"
1891 " beq $0, %1, 9f\n"
1892 " dextu %1, %0, 56, 8\n"
1893 "0: sb %1, 0(%2)\n"
1894 #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1895 " dext %1, %0, 0, 8\n"
1896 "1: sb %1, 0(%2)\n"
1897 " andi %1, %2, 0x7\n"
1898 " beq $0, %1, 9f\n"
1899 " daddiu %2, %2, -1\n"
1900 " dext %1, %0, 8, 8\n"
1901 "2: sb %1, 0(%2)\n"
1902 " andi %1, %2, 0x7\n"
1903 " beq $0, %1, 9f\n"
1904 " daddiu %2, %2, -1\n"
1905 " dext %1, %0, 16, 8\n"
1906 "3: sb %1, 0(%2)\n"
1907 " andi %1, %2, 0x7\n"
1908 " beq $0, %1, 9f\n"
1909 " daddiu %2, %2, -1\n"
1910 " dext %1, %0, 24, 8\n"
1911 "4: sb %1, 0(%2)\n"
1912 " andi %1, %2, 0x7\n"
1913 " beq $0, %1, 9f\n"
1914 " daddiu %2, %2, -1\n"
1915 " dextu %1, %0, 32, 8\n"
1916 "5: sb %1, 0(%2)\n"
1917 " andi %1, %2, 0x7\n"
1918 " beq $0, %1, 9f\n"
1919 " daddiu %2, %2, -1\n"
1920 " dextu %1, %0, 40, 8\n"
1921 "6: sb %1, 0(%2)\n"
1922 " andi %1, %2, 0x7\n"
1923 " beq $0, %1, 9f\n"
1924 " daddiu %2, %2, -1\n"
1925 " dextu %1, %0, 48, 8\n"
1926 "7: sb %1, 0(%2)\n"
1927 " andi %1, %2, 0x7\n"
1928 " beq $0, %1, 9f\n"
1929 " daddiu %2, %2, -1\n"
1930 " dextu %1, %0, 56, 8\n"
1931 "0: sb %1, 0(%2)\n"
1932 #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1933 "9:\n"
1934 " .insn\n"
1935 " .section .fixup,\"ax\"\n"
1936 "8: li %3,%4\n"
1937 " j 9b\n"
1938 " .previous\n"
1939 " .section __ex_table,\"a\"\n"
1940 " .word 1b,8b\n"
1941 " .word 2b,8b\n"
1942 " .word 3b,8b\n"
1943 " .word 4b,8b\n"
1944 " .word 5b,8b\n"
1945 " .word 6b,8b\n"
1946 " .word 7b,8b\n"
1947 " .word 0b,8b\n"
1948 " .previous\n"
1949 " .set pop\n"
1950 : "+&r"(rt), "=&r"(rs),
1951 "+&r"(vaddr), "+&r"(err)
1952 : "i"(SIGSEGV)
1953 : "memory");
1955 MIPS_R2_STATS(stores);
1957 break;
1958 case ll_op:
1959 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1960 if (vaddr & 0x3) {
1961 current->thread.cp0_baduaddr = vaddr;
1962 err = SIGBUS;
1963 break;
1965 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1966 current->thread.cp0_baduaddr = vaddr;
1967 err = SIGBUS;
1968 break;
1971 if (!cpu_has_rw_llb) {
1973 			 * An LL/SC block can't be safely emulated without Config5/LLB
1974 			 * being available. So it's probably time to
1975 * kill our process before things get any worse. This is
1976 * because Config5/LLB allows us to use ERETNC so that
1977 * the LLAddr/LLB bit is not cleared when we return from
1978 * an exception. MIPS R2 LL/SC instructions trap with an
1979 * RI exception so once we emulate them here, we return
1980 * back to userland with ERETNC. That preserves the
1981 * LLAddr/LLB so the subsequent SC instruction will
1982 * succeed preserving the atomic semantics of the LL/SC
1983 * block. Without that, there is no safe way to emulate
1984 * an LL/SC block in MIPSR2 userland.
1986 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
1987 err = SIGKILL;
1988 break;
1991 __asm__ __volatile__(
1992 "1:\n"
1993 "ll %0, 0(%2)\n"
1994 "2:\n"
1995 ".insn\n"
1996 ".section .fixup,\"ax\"\n"
1997 "3:\n"
1998 "li %1, %3\n"
1999 "j 2b\n"
2000 ".previous\n"
2001 ".section __ex_table,\"a\"\n"
2002 ".word 1b, 3b\n"
2003 ".previous\n"
2004 : "=&r"(res), "+&r"(err)
2005 : "r"(vaddr), "i"(SIGSEGV)
2006 : "memory");
2008 if (MIPSInst_RT(inst) && !err)
2009 regs->regs[MIPSInst_RT(inst)] = res;
2010 MIPS_R2_STATS(llsc);
2012 break;
2014 case sc_op:
2015 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2016 if (vaddr & 0x3) {
2017 current->thread.cp0_baduaddr = vaddr;
2018 err = SIGBUS;
2019 break;
2021 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
2022 current->thread.cp0_baduaddr = vaddr;
2023 err = SIGBUS;
2024 break;
2027 if (!cpu_has_rw_llb) {
2029 			 * An LL/SC block can't be safely emulated without Config5/LLB
2030 			 * being available. So it's probably time to
2031 * kill our process before things get any worse. This is
2032 * because Config5/LLB allows us to use ERETNC so that
2033 * the LLAddr/LLB bit is not cleared when we return from
2034 * an exception. MIPS R2 LL/SC instructions trap with an
2035 * RI exception so once we emulate them here, we return
2036 * back to userland with ERETNC. That preserves the
2037 * LLAddr/LLB so the subsequent SC instruction will
2038 * succeed preserving the atomic semantics of the LL/SC
2039 * block. Without that, there is no safe way to emulate
2040 * an LL/SC block in MIPSR2 userland.
2042 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2043 err = SIGKILL;
2044 break;
2047 res = regs->regs[MIPSInst_RT(inst)];
2049 __asm__ __volatile__(
2050 "1:\n"
2051 "sc %0, 0(%2)\n"
2052 "2:\n"
2053 ".insn\n"
2054 ".section .fixup,\"ax\"\n"
2055 "3:\n"
2056 "li %1, %3\n"
2057 "j 2b\n"
2058 ".previous\n"
2059 ".section __ex_table,\"a\"\n"
2060 ".word 1b, 3b\n"
2061 ".previous\n"
2062 : "+&r"(res), "+&r"(err)
2063 : "r"(vaddr), "i"(SIGSEGV));
2065 if (MIPSInst_RT(inst) && !err)
2066 regs->regs[MIPSInst_RT(inst)] = res;
2068 MIPS_R2_STATS(llsc);
2070 break;
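/*
 * For context, a typical MIPS R2 userland compare-and-swap loop of the kind
 * this code keeps working (a sketch, not taken from any particular library):
 *
 *	static inline int r2_cmpxchg_sketch(volatile int *p, int old, int newval)
 *	{
 *		int prev, tmp;
 *
 *		__asm__ __volatile__(
 *		"1:	ll	%0, 0(%2)	\n"
 *		"	bne	%0, %3, 2f	\n"
 *		"	move	%1, %4		\n"
 *		"	sc	%1, 0(%2)	\n"
 *		"	beqz	%1, 1b		\n"
 *		"2:				\n"
 *		: "=&r"(prev), "=&r"(tmp)
 *		: "r"(p), "r"(old), "r"(newval)
 *		: "memory");
 *
 *		return prev == old;
 *	}
 *
 * On R6 both the LL and the SC trap with RI and are emulated here; the
 * Config5/LLB + ERETNC combination described above is what keeps the link
 * bit alive between those two traps so the SC can still succeed.
 */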
2072 case lld_op:
2073 if (config_enabled(CONFIG_32BIT)) {
2074 err = SIGILL;
2075 break;
2078 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2079 if (vaddr & 0x7) {
2080 current->thread.cp0_baduaddr = vaddr;
2081 err = SIGBUS;
2082 break;
2084 if (!access_ok(VERIFY_READ, vaddr, 8)) {
2085 current->thread.cp0_baduaddr = vaddr;
2086 err = SIGBUS;
2087 break;
2090 if (!cpu_has_rw_llb) {
2092 			 * An LL/SC block can't be safely emulated without Config5/LLB
2093 			 * being available. So it's probably time to
2094 * kill our process before things get any worse. This is
2095 * because Config5/LLB allows us to use ERETNC so that
2096 * the LLAddr/LLB bit is not cleared when we return from
2097 * an exception. MIPS R2 LL/SC instructions trap with an
2098 * RI exception so once we emulate them here, we return
2099 * back to userland with ERETNC. That preserves the
2100 * LLAddr/LLB so the subsequent SC instruction will
2101 * succeed preserving the atomic semantics of the LL/SC
2102 * block. Without that, there is no safe way to emulate
2103 * an LL/SC block in MIPSR2 userland.
2105 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2106 err = SIGKILL;
2107 break;
2110 __asm__ __volatile__(
2111 "1:\n"
2112 "lld %0, 0(%2)\n"
2113 "2:\n"
2114 ".insn\n"
2115 ".section .fixup,\"ax\"\n"
2116 "3:\n"
2117 "li %1, %3\n"
2118 "j 2b\n"
2119 ".previous\n"
2120 ".section __ex_table,\"a\"\n"
2121 ".word 1b, 3b\n"
2122 ".previous\n"
2123 : "=&r"(res), "+&r"(err)
2124 : "r"(vaddr), "i"(SIGSEGV)
2125 : "memory");
2126 if (MIPSInst_RT(inst) && !err)
2127 regs->regs[MIPSInst_RT(inst)] = res;
2129 MIPS_R2_STATS(llsc);
2131 break;
2133 case scd_op:
2134 if (config_enabled(CONFIG_32BIT)) {
2135 err = SIGILL;
2136 break;
2139 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2140 if (vaddr & 0x7) {
2141 current->thread.cp0_baduaddr = vaddr;
2142 err = SIGBUS;
2143 break;
2145 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
2146 current->thread.cp0_baduaddr = vaddr;
2147 err = SIGBUS;
2148 break;
2151 if (!cpu_has_rw_llb) {
2153 			 * An LL/SC block can't be safely emulated without Config5/LLB
2154 			 * being available. So it's probably time to
2155 * kill our process before things get any worse. This is
2156 * because Config5/LLB allows us to use ERETNC so that
2157 * the LLAddr/LLB bit is not cleared when we return from
2158 * an exception. MIPS R2 LL/SC instructions trap with an
2159 * RI exception so once we emulate them here, we return
2160 * back to userland with ERETNC. That preserves the
2161 * LLAddr/LLB so the subsequent SC instruction will
2162 * succeed preserving the atomic semantics of the LL/SC
2163 * block. Without that, there is no safe way to emulate
2164 * an LL/SC block in MIPSR2 userland.
2166 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2167 err = SIGKILL;
2168 break;
2171 res = regs->regs[MIPSInst_RT(inst)];
2173 __asm__ __volatile__(
2174 "1:\n"
2175 "scd %0, 0(%2)\n"
2176 "2:\n"
2177 ".insn\n"
2178 ".section .fixup,\"ax\"\n"
2179 "3:\n"
2180 "li %1, %3\n"
2181 "j 2b\n"
2182 ".previous\n"
2183 ".section __ex_table,\"a\"\n"
2184 ".word 1b, 3b\n"
2185 ".previous\n"
2186 : "+&r"(res), "+&r"(err)
2187 : "r"(vaddr), "i"(SIGSEGV));
2189 if (MIPSInst_RT(inst) && !err)
2190 regs->regs[MIPSInst_RT(inst)] = res;
2192 MIPS_R2_STATS(llsc);
2194 break;
2195 case pref_op:
2196 /* skip it */
2197 break;
2198 default:
2199 err = SIGILL;
2203 	 * Let's not return to userland just yet. It's costly and
2204 	 * it's likely we have more R2 instructions to emulate.
2206 if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
2207 regs->cp0_cause &= ~CAUSEF_BD;
2208 err = get_user(inst, (u32 __user *)regs->cp0_epc);
2209 if (!err)
2210 goto repeat;
2212 if (err < 0)
2213 err = SIGSEGV;
2216 if (err && (err != SIGEMT)) {
2217 regs->regs[31] = r31;
2218 regs->cp0_epc = epc;
2221 /* Likely a MIPS R6 compatible instruction */
2222 if (pass && (err == SIGILL))
2223 err = 0;
2225 return err;
2228 #ifdef CONFIG_DEBUG_FS
2230 static int mipsr2_stats_show(struct seq_file *s, void *unused)
2233 seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
2234 seq_printf(s, "movs\t\t%ld\t%ld\n",
2235 (unsigned long)__this_cpu_read(mipsr2emustats.movs),
2236 (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
2237 seq_printf(s, "hilo\t\t%ld\t%ld\n",
2238 (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
2239 (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
2240 seq_printf(s, "muls\t\t%ld\t%ld\n",
2241 (unsigned long)__this_cpu_read(mipsr2emustats.muls),
2242 (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
2243 seq_printf(s, "divs\t\t%ld\t%ld\n",
2244 (unsigned long)__this_cpu_read(mipsr2emustats.divs),
2245 (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
2246 seq_printf(s, "dsps\t\t%ld\t%ld\n",
2247 (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
2248 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
2249 seq_printf(s, "bops\t\t%ld\t%ld\n",
2250 (unsigned long)__this_cpu_read(mipsr2emustats.bops),
2251 (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
2252 seq_printf(s, "traps\t\t%ld\t%ld\n",
2253 (unsigned long)__this_cpu_read(mipsr2emustats.traps),
2254 (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
2255 seq_printf(s, "fpus\t\t%ld\t%ld\n",
2256 (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
2257 (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
2258 seq_printf(s, "loads\t\t%ld\t%ld\n",
2259 (unsigned long)__this_cpu_read(mipsr2emustats.loads),
2260 (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
2261 seq_printf(s, "stores\t\t%ld\t%ld\n",
2262 (unsigned long)__this_cpu_read(mipsr2emustats.stores),
2263 (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
2264 seq_printf(s, "llsc\t\t%ld\t%ld\n",
2265 (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
2266 (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
2267 seq_printf(s, "dsemul\t\t%ld\t%ld\n",
2268 (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
2269 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
2270 seq_printf(s, "jr\t\t%ld\n",
2271 (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
2272 seq_printf(s, "bltzl\t\t%ld\n",
2273 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
2274 seq_printf(s, "bgezl\t\t%ld\n",
2275 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
2276 seq_printf(s, "bltzll\t\t%ld\n",
2277 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
2278 seq_printf(s, "bgezll\t\t%ld\n",
2279 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
2280 seq_printf(s, "bltzal\t\t%ld\n",
2281 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
2282 seq_printf(s, "bgezal\t\t%ld\n",
2283 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
2284 seq_printf(s, "beql\t\t%ld\n",
2285 (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
2286 seq_printf(s, "bnel\t\t%ld\n",
2287 (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
2288 seq_printf(s, "blezl\t\t%ld\n",
2289 (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
2290 seq_printf(s, "bgtzl\t\t%ld\n",
2291 (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
2293 return 0;
2296 static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
2298 mipsr2_stats_show(s, unused);
2300 __this_cpu_write((mipsr2emustats).movs, 0);
2301 __this_cpu_write((mipsr2bdemustats).movs, 0);
2302 __this_cpu_write((mipsr2emustats).hilo, 0);
2303 __this_cpu_write((mipsr2bdemustats).hilo, 0);
2304 __this_cpu_write((mipsr2emustats).muls, 0);
2305 __this_cpu_write((mipsr2bdemustats).muls, 0);
2306 __this_cpu_write((mipsr2emustats).divs, 0);
2307 __this_cpu_write((mipsr2bdemustats).divs, 0);
2308 __this_cpu_write((mipsr2emustats).dsps, 0);
2309 __this_cpu_write((mipsr2bdemustats).dsps, 0);
2310 __this_cpu_write((mipsr2emustats).bops, 0);
2311 __this_cpu_write((mipsr2bdemustats).bops, 0);
2312 __this_cpu_write((mipsr2emustats).traps, 0);
2313 __this_cpu_write((mipsr2bdemustats).traps, 0);
2314 __this_cpu_write((mipsr2emustats).fpus, 0);
2315 __this_cpu_write((mipsr2bdemustats).fpus, 0);
2316 __this_cpu_write((mipsr2emustats).loads, 0);
2317 __this_cpu_write((mipsr2bdemustats).loads, 0);
2318 __this_cpu_write((mipsr2emustats).stores, 0);
2319 __this_cpu_write((mipsr2bdemustats).stores, 0);
2320 __this_cpu_write((mipsr2emustats).llsc, 0);
2321 __this_cpu_write((mipsr2bdemustats).llsc, 0);
2322 __this_cpu_write((mipsr2emustats).dsemul, 0);
2323 __this_cpu_write((mipsr2bdemustats).dsemul, 0);
2324 __this_cpu_write((mipsr2bremustats).jrs, 0);
2325 __this_cpu_write((mipsr2bremustats).bltzl, 0);
2326 __this_cpu_write((mipsr2bremustats).bgezl, 0);
2327 __this_cpu_write((mipsr2bremustats).bltzll, 0);
2328 __this_cpu_write((mipsr2bremustats).bgezll, 0);
2329 __this_cpu_write((mipsr2bremustats).bltzal, 0);
2330 __this_cpu_write((mipsr2bremustats).bgezal, 0);
2331 __this_cpu_write((mipsr2bremustats).beql, 0);
2332 __this_cpu_write((mipsr2bremustats).bnel, 0);
2333 __this_cpu_write((mipsr2bremustats).blezl, 0);
2334 __this_cpu_write((mipsr2bremustats).bgtzl, 0);
2336 return 0;
2339 static int mipsr2_stats_open(struct inode *inode, struct file *file)
2341 return single_open(file, mipsr2_stats_show, inode->i_private);
2344 static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
2346 return single_open(file, mipsr2_stats_clear_show, inode->i_private);
2349 static const struct file_operations mipsr2_emul_fops = {
2350 .open = mipsr2_stats_open,
2351 .read = seq_read,
2352 .llseek = seq_lseek,
2353 .release = single_release,
2356 static const struct file_operations mipsr2_clear_fops = {
2357 .open = mipsr2_stats_clear_open,
2358 .read = seq_read,
2359 .llseek = seq_lseek,
2360 .release = single_release,
2364 static int __init mipsr2_init_debugfs(void)
2366 extern struct dentry *mips_debugfs_dir;
2367 struct dentry *mipsr2_emul;
2369 if (!mips_debugfs_dir)
2370 return -ENODEV;
2372 mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
2373 mips_debugfs_dir, NULL,
2374 &mipsr2_emul_fops);
2375 if (!mipsr2_emul)
2376 return -ENOMEM;
2378 mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
2379 mips_debugfs_dir, NULL,
2380 &mipsr2_clear_fops);
2381 if (!mipsr2_emul)
2382 return -ENOMEM;
2384 return 0;
2387 device_initcall(mipsr2_init_debugfs);
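/*
 * Usage note: with CONFIG_DEBUG_FS enabled and debugfs mounted in the usual
 * place, the counters printed by mipsr2_stats_show() can be read from
 * /sys/kernel/debug/mips/r2_emul_stats, and reading
 * /sys/kernel/debug/mips/r2_emul_stats_clear prints and then zeroes them.
 * The "mips" directory name comes from mips_debugfs_dir and the mount point
 * is the conventional one, so treat the exact paths as an assumption.
 */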
2389 #endif /* CONFIG_DEBUG_FS */