/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 *      MIPS R2 user space instruction emulator for MIPS R6
 */

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/branch.h>
#include <asm/break.h>
#include <asm/debug.h>
#include <asm/fpu_emulator.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/local.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

#ifdef CONFIG_64BIT
#define ADDIU "daddiu "
#else
#define ADDIU "addiu "
#endif /* CONFIG_64BIT */
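/*
 * ADDIU expands to the doubleword form on 64-bit kernels so that the
 * address arithmetic in the unaligned access assembly below operates on
 * the full register width.
 */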
DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);

extern const unsigned int fpucondbit[8];

#define MIPS_R2_EMUL_TOTAL_PASS 10

int mipsr2_emulation = 0;

static int __init mipsr2emu_enable(char *s)
{
    mipsr2_emulation = 1;

    pr_info("MIPS R2-to-R6 Emulator Enabled!");

    return 1;
}
__setup("mipsr2emu", mipsr2emu_enable);
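/*
 * The emulator is off by default; booting with "mipsr2emu" on the kernel
 * command line turns mipsr2_emulation on at early init.
 */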
/**
 * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot
 *               for performance instead of the traditional way of using a
 *               stack trampoline which is rather slow.
 * @regs: Process register set
 * @ir: Instruction
 */
static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
{
    switch (MIPSInst_OPCODE(ir)) {
    case addiu_op:
        if (MIPSInst_RT(ir))
            regs->regs[MIPSInst_RT(ir)] =
                (s32)regs->regs[MIPSInst_RS(ir)] +
                (s32)MIPSInst_SIMM(ir);
        return 0;
    case daddiu_op:
        if (config_enabled(CONFIG_32BIT))
            break;

        if (MIPSInst_RT(ir))
            regs->regs[MIPSInst_RT(ir)] =
                (s64)regs->regs[MIPSInst_RS(ir)] +
                (s64)MIPSInst_SIMM(ir);
        return 0;
    case lwc1_op:
    case swc1_op:
    case cop1_op:
    case cop1x_op:
        /* FPU instructions in delay slot */
        return -SIGFPE;
    case spec_op:
        switch (MIPSInst_FUNC(ir)) {
        case or_op:
            if (MIPSInst_RD(ir))
                regs->regs[MIPSInst_RD(ir)] =
                    regs->regs[MIPSInst_RS(ir)] |
                    regs->regs[MIPSInst_RT(ir)];
            return 0;
        case sll_op:
            if (MIPSInst_RD(ir))
                regs->regs[MIPSInst_RD(ir)] =
                    (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
                        MIPSInst_FD(ir));
            return 0;
        case srl_op:
            if (MIPSInst_RD(ir))
                regs->regs[MIPSInst_RD(ir)] =
                    (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
                        MIPSInst_FD(ir));
            return 0;
        case addu_op:
            if (MIPSInst_RD(ir))
                regs->regs[MIPSInst_RD(ir)] =
                    (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
                          (u32)regs->regs[MIPSInst_RT(ir)]);
            return 0;
        case subu_op:
            if (MIPSInst_RD(ir))
                regs->regs[MIPSInst_RD(ir)] =
                    (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
                          (u32)regs->regs[MIPSInst_RT(ir)]);
            return 0;
        case dsll_op:
            if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
                break;

            if (MIPSInst_RD(ir))
                regs->regs[MIPSInst_RD(ir)] =
                    (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
                        MIPSInst_FD(ir));
            return 0;
        case dsrl_op:
            if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
                break;

            if (MIPSInst_RD(ir))
                regs->regs[MIPSInst_RD(ir)] =
                    (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
                        MIPSInst_FD(ir));
            return 0;
        case daddu_op:
            if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
                break;

            if (MIPSInst_RD(ir))
                regs->regs[MIPSInst_RD(ir)] =
                    (u64)regs->regs[MIPSInst_RS(ir)] +
                    (u64)regs->regs[MIPSInst_RT(ir)];
            return 0;
        case dsubu_op:
            if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
                break;

            if (MIPSInst_RD(ir))
                regs->regs[MIPSInst_RD(ir)] =
                    (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
                          (u64)regs->regs[MIPSInst_RT(ir)]);
            return 0;
        }
        break;
    default:
        pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
             ir, MIPSInst_OPCODE(ir));
    }

    return SIGILL;
}
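/*
 * Return convention for mipsr6_emul(): 0 means the delay-slot instruction
 * was emulated here, a negative value means an FPU instruction was found
 * in the delay slot, and anything else sends the caller back to the slower
 * trampoline (mips_dsemul) path.
 */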
/**
 * movf_func - Emulate a MOVF instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int movf_func(struct pt_regs *regs, u32 ir)
{
    u32 csr;
    u32 cond;

    csr = current->thread.fpu.fcr31;
    cond = fpucondbit[MIPSInst_RT(ir) >> 2];

    if (((csr & cond) == 0) && MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];

    return 0;
}
/**
 * movt_func - Emulate a MOVT instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int movt_func(struct pt_regs *regs, u32 ir)
{
    u32 csr;
    u32 cond;

    csr = current->thread.fpu.fcr31;
    cond = fpucondbit[MIPSInst_RT(ir) >> 2];

    if (((csr & cond) != 0) && MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];

    return 0;
}
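/*
 * MOVF/MOVT encode the FP condition code in the upper bits of the rt
 * field, which is why fpucondbit is indexed with MIPSInst_RT(ir) >> 2.
 */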
/**
 * jr_func - Emulate a JR instruction.
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns SIGILL if JR was in delay slot, SIGEMT if we
 * can't compute the EPC, SIGSEGV if we can't access the
 * userland instruction or 0 on success.
 */
static int jr_func(struct pt_regs *regs, u32 ir)
{
    int err;
    unsigned long cepc, epc, nepc;
    u32 nir;

    if (delay_slot(regs))
        return SIGILL;

    /* EPC after the RI/JR instruction */
    nepc = regs->cp0_epc;
    /* Roll back to the reserved R2 JR instruction */
    regs->cp0_epc -= 4;
    epc = regs->cp0_epc;
    err = __compute_return_epc(regs);
    if (err < 0)
        return SIGEMT;

    /* Computed branch target */
    cepc = regs->cp0_epc;

    /* Get DS instruction */
    err = __get_user(nir, (u32 __user *)nepc);
    if (err)
        return SIGSEGV;

    MIPS_R2BR_STATS(jrs);

    /* If nir == 0(NOP), then nothing else to do */
    if (nir) {
        /*
         * Negative err means FPU instruction in BD-slot,
         * Zero err means 'BD-slot emulation done'.
         * For anything else we go back to trampoline emulation.
         */
        err = mipsr6_emul(regs, nir);
        if (err > 0) {
            regs->cp0_epc = nepc;
            err = mips_dsemul(regs, nir, cepc);
            MIPS_R2_STATS(dsemul);
        }
    }

    return err;
}
/**
 * movz_func - Emulate a MOVZ instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int movz_func(struct pt_regs *regs, u32 ir)
{
    if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];

    return 0;
}
/**
 * movn_func - Emulate a MOVN instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int movn_func(struct pt_regs *regs, u32 ir)
{
    if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];

    return 0;
}
/**
 * mfhi_func - Emulate a MFHI instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mfhi_func(struct pt_regs *regs, u32 ir)
{
    if (MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->hi;

    return 0;
}

/**
 * mthi_func - Emulate a MTHI instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mthi_func(struct pt_regs *regs, u32 ir)
{
    regs->hi = regs->regs[MIPSInst_RS(ir)];

    return 0;
}

/**
 * mflo_func - Emulate a MFLO instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mflo_func(struct pt_regs *regs, u32 ir)
{
    if (MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->lo;

    return 0;
}

/**
 * mtlo_func - Emulate a MTLO instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mtlo_func(struct pt_regs *regs, u32 ir)
{
    regs->lo = regs->regs[MIPSInst_RS(ir)];

    return 0;
}
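/*
 * The MFHI/MTHI/MFLO/MTLO helpers shuffle values between the saved HI/LO
 * accumulators in pt_regs and a general purpose register; MIPS R6 dropped
 * these instructions together with the architected HI/LO registers.
 */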
/**
 * mult_func - Emulate a MULT instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mult_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (s64)rt * (s64)rs;

    regs->lo = (s64)(s32)res;
    regs->hi = (s64)(s32)(res >> 32);

    return 0;
}

/**
 * multu_func - Emulate a MULTU instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int multu_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (u64)rt * (u64)rs;

    regs->lo = (s64)(s32)res;
    regs->hi = (s64)(res >> 32);

    return 0;
}

/**
 * div_func - Emulate a DIV instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int div_func(struct pt_regs *regs, u32 ir)
{
    s32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    regs->lo = (s64)(rs / rt);
    regs->hi = (s64)(rs % rt);

    return 0;
}

/**
 * divu_func - Emulate a DIVU instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int divu_func(struct pt_regs *regs, u32 ir)
{
    u32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    regs->lo = (s64)(rs / rt);
    regs->hi = (s64)(rs % rt);

    return 0;
}
/**
 * dmult_func - Emulate a DMULT instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int dmult_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s64 rt, rs;

    if (config_enabled(CONFIG_32BIT))
        return SIGILL;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    res = rt * rs;
    regs->lo = res;

    __asm__ __volatile__(
        "dmuh %0, %1, %2\t\n"
        : "=r"(res)
        : "r"(rt), "r"(rs));

    regs->hi = res;

    return 0;
}

/**
 * dmultu_func - Emulate a DMULTU instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int dmultu_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u64 rt, rs;

    if (config_enabled(CONFIG_32BIT))
        return SIGILL;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    res = rt * rs;
    regs->lo = res;

    __asm__ __volatile__(
        "dmuhu %0, %1, %2\t\n"
        : "=r"(res)
        : "r"(rt), "r"(rs));

    regs->hi = res;

    return 0;
}

/**
 * ddiv_func - Emulate a DDIV instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int ddiv_func(struct pt_regs *regs, u32 ir)
{
    s64 rt, rs;

    if (config_enabled(CONFIG_32BIT))
        return SIGILL;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    regs->lo = rs / rt;
    regs->hi = rs % rt;

    return 0;
}

/**
 * ddivu_func - Emulate a DDIVU instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int ddivu_func(struct pt_regs *regs, u32 ir)
{
    u64 rt, rs;

    if (config_enabled(CONFIG_32BIT))
        return SIGILL;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    regs->lo = rs / rt;
    regs->hi = rs % rt;

    return 0;
}
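/*
 * The doubleword variants (DMULT/DMULTU/DDIV/DDIVU) are only emulated on
 * 64-bit kernels; on CONFIG_32BIT they bail out with SIGILL.
 */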
/* R6 removed instructions for the SPECIAL opcode */
static struct r2_decoder_table spec_op_table[] = {
    { 0xfc1ff83f, 0x00000008, jr_func },
    { 0xfc00ffff, 0x00000018, mult_func },
    { 0xfc00ffff, 0x00000019, multu_func },
    { 0xfc00ffff, 0x0000001c, dmult_func },
    { 0xfc00ffff, 0x0000001d, dmultu_func },
    { 0xffff07ff, 0x00000010, mfhi_func },
    { 0xfc1fffff, 0x00000011, mthi_func },
    { 0xffff07ff, 0x00000012, mflo_func },
    { 0xfc1fffff, 0x00000013, mtlo_func },
    { 0xfc0307ff, 0x00000001, movf_func },
    { 0xfc0307ff, 0x00010001, movt_func },
    { 0xfc0007ff, 0x0000000a, movz_func },
    { 0xfc0007ff, 0x0000000b, movn_func },
    { 0xfc00ffff, 0x0000001a, div_func },
    { 0xfc00ffff, 0x0000001b, divu_func },
    { 0xfc00ffff, 0x0000001e, ddiv_func },
    { 0xfc00ffff, 0x0000001f, ddivu_func },
    { }
};
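/*
 * A table entry matches an instruction when (inst & mask) == code, so the
 * mask selects the fixed opcode/function bits while leaving the register
 * fields free; see mipsr2_find_op_func() below.
 */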
/**
 * madd_func - Emulate a MADD instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int madd_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (s64)rt * (s64)rs;
    rt = regs->hi;
    rs = regs->lo;
    res += ((((s64)rt) << 32) | (u32)rs);

    regs->lo = (s64)(s32)res;
    regs->hi = (s64)(s32)(res >> 32);

    return 0;
}

/**
 * maddu_func - Emulate a MADDU instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int maddu_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (u64)rt * (u64)rs;
    rt = regs->hi;
    rs = regs->lo;
    res += ((((s64)rt) << 32) | (u32)rs);

    regs->lo = (s64)(s32)res;
    regs->hi = (s64)(s32)(res >> 32);

    return 0;
}

/**
 * msub_func - Emulate a MSUB instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int msub_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (s64)rt * (s64)rs;
    rt = regs->hi;
    rs = regs->lo;
    res = ((((s64)rt) << 32) | (u32)rs) - res;

    regs->lo = (s64)(s32)res;
    regs->hi = (s64)(s32)(res >> 32);

    return 0;
}

/**
 * msubu_func - Emulate a MSUBU instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int msubu_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (u64)rt * (u64)rs;
    rt = regs->hi;
    rs = regs->lo;
    res = ((((s64)rt) << 32) | (u32)rs) - res;

    regs->lo = (s64)(s32)res;
    regs->hi = (s64)(s32)(res >> 32);

    return 0;
}

/**
 * mul_func - Emulate a MUL instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int mul_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s32 rt, rs;

    if (!MIPSInst_RD(ir))
        return 0;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (s64)rt * (s64)rs;

    rs = res;
    regs->regs[MIPSInst_RD(ir)] = (s64)rs;

    return 0;
}
/**
 * clz_func - Emulate a CLZ instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int clz_func(struct pt_regs *regs, u32 ir)
{
    u32 res;
    u32 rs;

    if (!MIPSInst_RD(ir))
        return 0;

    rs = regs->regs[MIPSInst_RS(ir)];
    __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
    regs->regs[MIPSInst_RD(ir)] = res;

    return 0;
}

/**
 * clo_func - Emulate a CLO instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int clo_func(struct pt_regs *regs, u32 ir)
{
    u32 res;
    u32 rs;

    if (!MIPSInst_RD(ir))
        return 0;

    rs = regs->regs[MIPSInst_RS(ir)];
    __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
    regs->regs[MIPSInst_RD(ir)] = res;

    return 0;
}

/**
 * dclz_func - Emulate a DCLZ instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int dclz_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u64 rs;

    if (config_enabled(CONFIG_32BIT))
        return 0;

    if (!MIPSInst_RD(ir))
        return 0;

    rs = regs->regs[MIPSInst_RS(ir)];
    __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
    regs->regs[MIPSInst_RD(ir)] = res;

    return 0;
}

/**
 * dclo_func - Emulate a DCLO instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
 */
static int dclo_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u64 rs;

    if (config_enabled(CONFIG_32BIT))
        return 0;

    if (!MIPSInst_RD(ir))
        return 0;

    rs = regs->regs[MIPSInst_RS(ir)];
    __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
    regs->regs[MIPSInst_RD(ir)] = res;

    return 0;
}
/* R6 removed instructions for the SPECIAL2 opcode */
static struct r2_decoder_table spec2_op_table[] = {
    { 0xfc00ffff, 0x70000000, madd_func },
    { 0xfc00ffff, 0x70000001, maddu_func },
    { 0xfc0007ff, 0x70000002, mul_func },
    { 0xfc00ffff, 0x70000004, msub_func },
    { 0xfc00ffff, 0x70000005, msubu_func },
    { 0xfc0007ff, 0x70000020, clz_func },
    { 0xfc0007ff, 0x70000021, clo_func },
    { 0xfc0007ff, 0x70000024, dclz_func },
    { 0xfc0007ff, 0x70000025, dclo_func },
    { }
};
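/*
 * Like spec_op_table, this table is scanned linearly and must end with an
 * empty sentinel entry so that the p->func test in mipsr2_find_op_func()
 * stops the walk.
 */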
static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
                                      struct r2_decoder_table *table)
{
    struct r2_decoder_table *p;
    int err;

    for (p = table; p->func; p++) {
        if ((inst & p->mask) == p->code) {
            err = (p->func)(regs, inst);
            return err;
        }
    }

    return SIGILL;
}
/**
 * mipsr2_decoder - Decode and emulate a MIPS R2 instruction
 * @regs: Process register set
 * @inst: Instruction to decode and emulate
 * @fcr31: Floating Point Control and Status Register returned
 */
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
    int err = 0;
    unsigned long vaddr;
    unsigned long cpc, epc, nepc, r31, res, rs, rt;
    u32 nir;
    void __user *fault_addr = NULL;
    int pass = 0;

repeat:
    r31 = regs->regs[31];
    epc = regs->cp0_epc;
    err = compute_return_epc(regs);
    if (err < 0)
        return SIGEMT;

    pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n",
             inst, epc, pass);
    switch (MIPSInst_OPCODE(inst)) {
    case spec_op:
        err = mipsr2_find_op_func(regs, inst, spec_op_table);
        if (err < 0) {
            /* FPU instruction under JR */
            regs->cp0_cause |= CAUSEF_BD;
            goto fpu_emul;
        }
        break;
    case spec2_op:
        err = mipsr2_find_op_func(regs, inst, spec2_op_table);
        break;
    case bcond_op:
        rt = MIPSInst_RT(inst);
        rs = MIPSInst_RS(inst);
        switch (rt) {
        case tgei_op:
            if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
                do_trap_or_bp(regs, 0, "TGEI");

            MIPS_R2_STATS(traps);
            break;
        case tgeiu_op:
            if (regs->regs[rs] >= MIPSInst_UIMM(inst))
                do_trap_or_bp(regs, 0, "TGEIU");

            MIPS_R2_STATS(traps);
            break;
        case tlti_op:
            if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
                do_trap_or_bp(regs, 0, "TLTI");

            MIPS_R2_STATS(traps);
            break;
        case tltiu_op:
            if (regs->regs[rs] < MIPSInst_UIMM(inst))
                do_trap_or_bp(regs, 0, "TLTIU");

            MIPS_R2_STATS(traps);
            break;
        case teqi_op:
            if (regs->regs[rs] == MIPSInst_SIMM(inst))
                do_trap_or_bp(regs, 0, "TEQI");

            MIPS_R2_STATS(traps);
            break;
        case tnei_op:
            if (regs->regs[rs] != MIPSInst_SIMM(inst))
                do_trap_or_bp(regs, 0, "TNEI");

            MIPS_R2_STATS(traps);
            break;
        case bltzl_op:
        case bgezl_op:
        case bltzall_op:
        case bgezall_op:
            if (delay_slot(regs)) {
                err = SIGILL;
                break;
            }
            regs->regs[31] = r31;
            regs->cp0_epc = epc;
            err = __compute_return_epc(regs);
            if (err < 0)
                return SIGEMT;
            if (err != BRANCH_LIKELY_TAKEN)
                break;
            cpc = regs->cp0_epc;
            nepc = epc + 4;
            err = __get_user(nir, (u32 __user *)nepc);
            if (err) {
                err = SIGSEGV;
                break;
            }
            /*
             * This will probably be optimized away when
             * CONFIG_DEBUG_FS is not enabled
             */
            switch (rt) {
            case bltzl_op:
                MIPS_R2BR_STATS(bltzl);
                break;
            case bgezl_op:
                MIPS_R2BR_STATS(bgezl);
                break;
            case bltzall_op:
                MIPS_R2BR_STATS(bltzall);
                break;
            case bgezall_op:
                MIPS_R2BR_STATS(bgezall);
                break;
            }

            switch (MIPSInst_OPCODE(nir)) {
            case cop1_op:
            case cop1x_op:
            case lwc1_op:
            case swc1_op:
                regs->cp0_cause |= CAUSEF_BD;
                goto fpu_emul;
            }
            if (nir) {
                err = mipsr6_emul(regs, nir);
                if (err > 0) {
                    err = mips_dsemul(regs, nir, cpc);
                    MIPS_R2_STATS(dsemul);
                }
            }
            break;
        case bltzal_op:
        case bgezal_op:
            if (delay_slot(regs)) {
                err = SIGILL;
                break;
            }
            regs->regs[31] = r31;
            regs->cp0_epc = epc;
            err = __compute_return_epc(regs);
            if (err < 0)
                return SIGEMT;
            cpc = regs->cp0_epc;
            nepc = epc + 4;
            err = __get_user(nir, (u32 __user *)nepc);
            if (err) {
                err = SIGSEGV;
                break;
            }
            /*
             * This will probably be optimized away when
             * CONFIG_DEBUG_FS is not enabled
             */
            switch (rt) {
            case bltzal_op:
                MIPS_R2BR_STATS(bltzal);
                break;
            case bgezal_op:
                MIPS_R2BR_STATS(bgezal);
                break;
            }

            switch (MIPSInst_OPCODE(nir)) {
            case cop1_op:
            case cop1x_op:
            case lwc1_op:
            case swc1_op:
                regs->cp0_cause |= CAUSEF_BD;
                goto fpu_emul;
            }
            if (nir) {
                err = mipsr6_emul(regs, nir);
                if (err > 0) {
                    err = mips_dsemul(regs, nir, cpc);
                    MIPS_R2_STATS(dsemul);
                }
            }
            break;
        default:
            regs->regs[31] = r31;
            regs->cp0_epc = epc;
            err = SIGILL;
            break;
        }
        break;
    case beql_op:
    case bnel_op:
    case blezl_op:
    case bgtzl_op:
        if (delay_slot(regs)) {
            err = SIGILL;
            break;
        }
        regs->regs[31] = r31;
        regs->cp0_epc = epc;
        err = __compute_return_epc(regs);
        if (err < 0)
            return SIGEMT;
        if (err != BRANCH_LIKELY_TAKEN)
            break;
        cpc = regs->cp0_epc;
        nepc = epc + 4;
        err = __get_user(nir, (u32 __user *)nepc);
        if (err) {
            err = SIGSEGV;
            break;
        }
        /*
         * This will probably be optimized away when
         * CONFIG_DEBUG_FS is not enabled
         */
        switch (MIPSInst_OPCODE(inst)) {
        case beql_op:
            MIPS_R2BR_STATS(beql);
            break;
        case bnel_op:
            MIPS_R2BR_STATS(bnel);
            break;
        case blezl_op:
            MIPS_R2BR_STATS(blezl);
            break;
        case bgtzl_op:
            MIPS_R2BR_STATS(bgtzl);
            break;
        }

        switch (MIPSInst_OPCODE(nir)) {
        case cop1_op:
        case cop1x_op:
        case lwc1_op:
        case swc1_op:
            regs->cp0_cause |= CAUSEF_BD;
            goto fpu_emul;
        }
        if (nir) {
            err = mipsr6_emul(regs, nir);
            if (err > 0) {
                err = mips_dsemul(regs, nir, cpc);
                MIPS_R2_STATS(dsemul);
            }
        }
        break;
    case lwc1_op:
    case swc1_op:
    case cop1_op:
    case cop1x_op:
fpu_emul:
        regs->regs[31] = r31;
        regs->cp0_epc = epc;
        if (!used_math()) {     /* First time FPU user. */
            err = init_fpu();
            BUG_ON(err);
        }
        lose_fpu(1);    /* Save FPU state for the emulator. */

        err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
                                       &fault_addr);
        *fcr31 = current->thread.fpu.fcr31;

        /*
         * We can't allow the emulated instruction to leave any of
         * the cause bits set in $fcr31.
         */
        current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

        /*
         * This is a tricky issue: lose_fpu() uses LL/SC atomics if
         * the FPU is owned, which effectively cancels a user level
         * LL/SC. So it could be argued that FPU ownership should not
         * be restored here. But a sequence of multiple FPU
         * instructions is far more common than LL-FPU-SC, so keep
         * re-owning the FPU until the next scheduler cycle cancels
         * the ownership.
         */
        own_fpu(1);     /* Restore FPU state. */

        if (err)
            current->thread.cp0_baduaddr = (unsigned long)fault_addr;

        MIPS_R2_STATS(fpus);

        break;
    case lwl_op:
        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_READ, vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "1:" LB "%1, 0(%2)\n"
                INS "%0, %1, 24, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "2:" LB "%1, 0(%2)\n"
                INS "%0, %1, 16, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "3:" LB "%1, 0(%2)\n"
                INS "%0, %1, 8, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "4:" LB "%1, 0(%2)\n"
                INS "%0, %1, 0, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "1:" LB "%1, 0(%2)\n"
                INS "%0, %1, 24, 8\n"
            "   andi %1, %2, 0x3\n"
            "2:" LB "%1, 0(%2)\n"
                INS "%0, %1, 16, 8\n"
            "   andi %1, %2, 0x3\n"
            "3:" LB "%1, 0(%2)\n"
                INS "%0, %1, 8, 8\n"
            "   andi %1, %2, 0x3\n"
            "4:" LB "%1, 0(%2)\n"
                INS "%0, %1, 0, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "9: sll %0, %0, 0\n"
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = rt;

        MIPS_R2_STATS(loads);
        break;
    case lwr_op:
        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_READ, vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "1:" LB "%1, 0(%2)\n"
                INS "%0, %1, 0, 8\n"
            "   andi %1, %2, 0x3\n"
            "2:" LB "%1, 0(%2)\n"
                INS "%0, %1, 8, 8\n"
            "   andi %1, %2, 0x3\n"
            "3:" LB "%1, 0(%2)\n"
                INS "%0, %1, 16, 8\n"
            "   andi %1, %2, 0x3\n"
            "4:" LB "%1, 0(%2)\n"
                INS "%0, %1, 24, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "1:" LB "%1, 0(%2)\n"
                INS "%0, %1, 0, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "2:" LB "%1, 0(%2)\n"
                INS "%0, %1, 8, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "3:" LB "%1, 0(%2)\n"
                INS "%0, %1, 16, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "4:" LB "%1, 0(%2)\n"
                INS "%0, %1, 24, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = rt;

        MIPS_R2_STATS(loads);
        break;
    case swl_op:
        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
                EXT "%1, %0, 24, 8\n"
            "1:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 16, 8\n"
            "2:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 8, 8\n"
            "3:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 0, 8\n"
            "4:" SB "%1, 0(%2)\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
                EXT "%1, %0, 24, 8\n"
            "1:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 16, 8\n"
            "2:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 8, 8\n"
            "3:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 0, 8\n"
            "4:" SB "%1, 0(%2)\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        MIPS_R2_STATS(stores);
        break;
    case swr_op:
        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
                EXT "%1, %0, 0, 8\n"
            "1:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 8, 8\n"
            "2:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 16, 8\n"
            "3:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 24, 8\n"
            "4:" SB "%1, 0(%2)\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
                EXT "%1, %0, 0, 8\n"
            "1:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 8, 8\n"
            "2:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 16, 8\n"
            "3:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 24, 8\n"
            "4:" SB "%1, 0(%2)\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        MIPS_R2_STATS(stores);
        break;
    case ldl_op:
        if (config_enabled(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_READ, vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "   dinsu %0, %1, 56, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 48, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 40, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 32, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 24, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 16, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 8, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 0, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "   dinsu %0, %1, 56, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 48, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 40, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 32, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 24, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 16, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 8, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 0, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = rt;

        MIPS_R2_STATS(loads);
        break;
    case ldr_op:
        if (config_enabled(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_READ, vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "   dins %0, %1, 0, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 8, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 16, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 24, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 32, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 40, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 48, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 56, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "   dins %0, %1, 0, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 8, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 16, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 24, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 32, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 40, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 48, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 56, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = rt;

        MIPS_R2_STATS(loads);
        break;
    case sdl_op:
        if (config_enabled(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "   dextu %1, %0, 56, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 48, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 40, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 32, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 24, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 16, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 8, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 0, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "   dextu %1, %0, 56, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 48, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 40, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 32, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 24, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 16, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 8, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 0, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        MIPS_R2_STATS(stores);
        break;
    case sdr_op:
        if (config_enabled(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "   dext %1, %0, 0, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 8, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 16, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 24, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 32, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 40, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 48, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 56, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "   dext %1, %0, 0, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 8, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 16, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 24, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 32, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 40, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 48, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 56, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        MIPS_R2_STATS(stores);
        break;
    case ll_op:
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (vaddr & 0x3) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }
        if (!access_ok(VERIFY_READ, vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }

        if (!cpu_has_rw_llb) {
            /*
             * An LL/SC block can't be safely emulated without
             * a Config5/LLB availability. So it's probably time to
             * kill our process before things get any worse. This is
             * because Config5/LLB allows us to use ERETNC so that
             * the LLAddr/LLB bit is not cleared when we return from
             * an exception. MIPS R2 LL/SC instructions trap with an
             * RI exception so once we emulate them here, we return
             * back to userland with ERETNC. That preserves the
             * LLAddr/LLB so the subsequent SC instruction will
             * succeed preserving the atomic semantics of the LL/SC
             * block. Without that, there is no safe way to emulate
             * an LL/SC block in MIPSR2 userland.
             */
            pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
            err = SIGKILL;
            break;
        }

        __asm__ __volatile__(
            "1:\n"
            "ll %0, 0(%2)\n"
            ".section .fixup,\"ax\"\n"
            ".section __ex_table,\"a\"\n"
            : "=&r"(res), "+&r"(err)
            : "r"(vaddr), "i"(SIGSEGV)
            : "memory");

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = res;
        MIPS_R2_STATS(llsc);
        break;
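        /*
         * Note: LL/LLD emulation (and SC/SCD below) is only attempted when
         * the CPU exposes Config5/LLB, as checked via cpu_has_rw_llb above;
         * otherwise the process is killed, per the comment in each case.
         */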
    case sc_op:
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (vaddr & 0x3) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }
        if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }

        if (!cpu_has_rw_llb) {
            /*
             * An LL/SC block can't be safely emulated without
             * a Config5/LLB availability. So it's probably time to
             * kill our process before things get any worse. This is
             * because Config5/LLB allows us to use ERETNC so that
             * the LLAddr/LLB bit is not cleared when we return from
             * an exception. MIPS R2 LL/SC instructions trap with an
             * RI exception so once we emulate them here, we return
             * back to userland with ERETNC. That preserves the
             * LLAddr/LLB so the subsequent SC instruction will
             * succeed preserving the atomic semantics of the LL/SC
             * block. Without that, there is no safe way to emulate
             * an LL/SC block in MIPSR2 userland.
             */
            pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
            err = SIGKILL;
            break;
        }

        res = regs->regs[MIPSInst_RT(inst)];

        __asm__ __volatile__(
            "1:\n"
            "sc %0, 0(%2)\n"
            ".section .fixup,\"ax\"\n"
            ".section __ex_table,\"a\"\n"
            : "+&r"(res), "+&r"(err)
            : "r"(vaddr), "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = res;

        MIPS_R2_STATS(llsc);
        break;
    case lld_op:
        if (config_enabled(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (vaddr & 0x7) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }
        if (!access_ok(VERIFY_READ, vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }

        if (!cpu_has_rw_llb) {
            /*
             * An LL/SC block can't be safely emulated without
             * a Config5/LLB availability. So it's probably time to
             * kill our process before things get any worse. This is
             * because Config5/LLB allows us to use ERETNC so that
             * the LLAddr/LLB bit is not cleared when we return from
             * an exception. MIPS R2 LL/SC instructions trap with an
             * RI exception so once we emulate them here, we return
             * back to userland with ERETNC. That preserves the
             * LLAddr/LLB so the subsequent SC instruction will
             * succeed preserving the atomic semantics of the LL/SC
             * block. Without that, there is no safe way to emulate
             * an LL/SC block in MIPSR2 userland.
             */
            pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
            err = SIGKILL;
            break;
        }

        __asm__ __volatile__(
            "1:\n"
            "lld %0, 0(%2)\n"
            ".section .fixup,\"ax\"\n"
            ".section __ex_table,\"a\"\n"
            : "=&r"(res), "+&r"(err)
            : "r"(vaddr), "i"(SIGSEGV)
            : "memory");

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = res;

        MIPS_R2_STATS(llsc);
        break;
    case scd_op:
        if (config_enabled(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (vaddr & 0x7) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }
        if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }

        if (!cpu_has_rw_llb) {
            /*
             * An LL/SC block can't be safely emulated without
             * a Config5/LLB availability. So it's probably time to
             * kill our process before things get any worse. This is
             * because Config5/LLB allows us to use ERETNC so that
             * the LLAddr/LLB bit is not cleared when we return from
             * an exception. MIPS R2 LL/SC instructions trap with an
             * RI exception so once we emulate them here, we return
             * back to userland with ERETNC. That preserves the
             * LLAddr/LLB so the subsequent SC instruction will
             * succeed preserving the atomic semantics of the LL/SC
             * block. Without that, there is no safe way to emulate
             * an LL/SC block in MIPSR2 userland.
             */
            pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
            err = SIGKILL;
            break;
        }

        res = regs->regs[MIPSInst_RT(inst)];

        __asm__ __volatile__(
            "1:\n"
            "scd %0, 0(%2)\n"
            ".section .fixup,\"ax\"\n"
            ".section __ex_table,\"a\"\n"
            : "+&r"(res), "+&r"(err)
            : "r"(vaddr), "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = res;

        MIPS_R2_STATS(llsc);
        break;
    }

    /*
     * Let's not return to userland just yet. It's costly and
     * it's likely we have more R2 instructions to emulate.
     */
    if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
        regs->cp0_cause &= ~CAUSEF_BD;
        err = get_user(inst, (u32 __user *)regs->cp0_epc);
        if (!err)
            goto repeat;

        if (err < 0)
            err = SIGSEGV;
    }

    if (err && (err != SIGEMT)) {
        regs->regs[31] = r31;
        regs->cp0_epc = epc;
    }

    /* Likely a MIPS R6 compatible instruction */
    if (pass && (err == SIGILL))
        err = 0;

    return err;
}
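/*
 * MIPS_R2_EMUL_TOTAL_PASS (defined above as 10) bounds how many consecutive
 * R2 instructions mipsr2_decoder() will emulate before returning to
 * userland, so a long run of trapping instructions cannot keep the CPU in
 * the kernel indefinitely.
 */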
#ifdef CONFIG_DEBUG_FS

static int mipsr2_stats_show(struct seq_file *s, void *unused)
{
    seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
    seq_printf(s, "movs\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.movs),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
    seq_printf(s, "hilo\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
    seq_printf(s, "muls\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.muls),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
    seq_printf(s, "divs\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.divs),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
    seq_printf(s, "dsps\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
    seq_printf(s, "bops\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.bops),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
    seq_printf(s, "traps\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.traps),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
    seq_printf(s, "fpus\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
    seq_printf(s, "loads\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.loads),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
    seq_printf(s, "stores\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.stores),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
    seq_printf(s, "llsc\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
    seq_printf(s, "dsemul\t\t%ld\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
           (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
    seq_printf(s, "jr\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
    seq_printf(s, "bltzl\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
    seq_printf(s, "bgezl\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
    seq_printf(s, "bltzll\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
    seq_printf(s, "bgezll\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
    seq_printf(s, "bltzal\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
    seq_printf(s, "bgezal\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
    seq_printf(s, "beql\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
    seq_printf(s, "bnel\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
    seq_printf(s, "blezl\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
    seq_printf(s, "bgtzl\t\t%ld\n",
           (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));

    return 0;
}
static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
{
    mipsr2_stats_show(s, unused);

    __this_cpu_write((mipsr2emustats).movs, 0);
    __this_cpu_write((mipsr2bdemustats).movs, 0);
    __this_cpu_write((mipsr2emustats).hilo, 0);
    __this_cpu_write((mipsr2bdemustats).hilo, 0);
    __this_cpu_write((mipsr2emustats).muls, 0);
    __this_cpu_write((mipsr2bdemustats).muls, 0);
    __this_cpu_write((mipsr2emustats).divs, 0);
    __this_cpu_write((mipsr2bdemustats).divs, 0);
    __this_cpu_write((mipsr2emustats).dsps, 0);
    __this_cpu_write((mipsr2bdemustats).dsps, 0);
    __this_cpu_write((mipsr2emustats).bops, 0);
    __this_cpu_write((mipsr2bdemustats).bops, 0);
    __this_cpu_write((mipsr2emustats).traps, 0);
    __this_cpu_write((mipsr2bdemustats).traps, 0);
    __this_cpu_write((mipsr2emustats).fpus, 0);
    __this_cpu_write((mipsr2bdemustats).fpus, 0);
    __this_cpu_write((mipsr2emustats).loads, 0);
    __this_cpu_write((mipsr2bdemustats).loads, 0);
    __this_cpu_write((mipsr2emustats).stores, 0);
    __this_cpu_write((mipsr2bdemustats).stores, 0);
    __this_cpu_write((mipsr2emustats).llsc, 0);
    __this_cpu_write((mipsr2bdemustats).llsc, 0);
    __this_cpu_write((mipsr2emustats).dsemul, 0);
    __this_cpu_write((mipsr2bdemustats).dsemul, 0);
    __this_cpu_write((mipsr2bremustats).jrs, 0);
    __this_cpu_write((mipsr2bremustats).bltzl, 0);
    __this_cpu_write((mipsr2bremustats).bgezl, 0);
    __this_cpu_write((mipsr2bremustats).bltzll, 0);
    __this_cpu_write((mipsr2bremustats).bgezll, 0);
    __this_cpu_write((mipsr2bremustats).bltzal, 0);
    __this_cpu_write((mipsr2bremustats).bgezal, 0);
    __this_cpu_write((mipsr2bremustats).beql, 0);
    __this_cpu_write((mipsr2bremustats).bnel, 0);
    __this_cpu_write((mipsr2bremustats).blezl, 0);
    __this_cpu_write((mipsr2bremustats).bgtzl, 0);

    return 0;
}
static int mipsr2_stats_open(struct inode *inode, struct file *file)
{
    return single_open(file, mipsr2_stats_show, inode->i_private);
}

static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
{
    return single_open(file, mipsr2_stats_clear_show, inode->i_private);
}

static const struct file_operations mipsr2_emul_fops = {
    .open       = mipsr2_stats_open,
    .read       = seq_read,
    .llseek     = seq_lseek,
    .release    = single_release,
};

static const struct file_operations mipsr2_clear_fops = {
    .open       = mipsr2_stats_clear_open,
    .read       = seq_read,
    .llseek     = seq_lseek,
    .release    = single_release,
};

static int __init mipsr2_init_debugfs(void)
{
    struct dentry *mipsr2_emul;

    if (!mips_debugfs_dir)
        return -ENODEV;

    mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
                                      mips_debugfs_dir, NULL,
                                      &mipsr2_emul_fops);
    if (!mipsr2_emul)
        return -ENOMEM;

    mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
                                      mips_debugfs_dir, NULL,
                                      &mipsr2_clear_fops);
    if (!mipsr2_emul)
        return -ENOMEM;

    return 0;
}

device_initcall(mipsr2_init_debugfs);

#endif /* CONFIG_DEBUG_FS */