/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * MIPS R2 user space instruction emulator for MIPS R6
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/branch.h>
#include <asm/break.h>
#include <asm/debug.h>
#include <asm/fpu_emulator.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/local.h>
#include <asm/mipsregs.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#ifdef CONFIG_64BIT
#define ADDIU "daddiu "
#else
#define ADDIU "addiu "
#endif /* CONFIG_64BIT */
#ifdef CONFIG_DEBUG_FS

static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);

#endif /* CONFIG_DEBUG_FS */

extern const unsigned int fpucondbit[8];
#define MIPS_R2_EMUL_TOTAL_PASS 10

int mipsr2_emulation = 0;

static int __init mipsr2emu_enable(char *s)
{
    mipsr2_emulation = 1;

    pr_info("MIPS R2-to-R6 Emulator Enabled!");

    return 1;
}
__setup("mipsr2emu", mipsr2emu_enable);
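/*
 * Boot-time usage (inferred from the __setup() hook above, not original
 * text): passing "mipsr2emu" on the kernel command line sets
 * mipsr2_emulation, which is presumably consulted by the trap-handling
 * path before it hands an RI exception to mipsr2_decoder() below.
 */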
/**
 * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot
 *               for performance instead of the traditional way of using a
 *               stack trampoline which is rather slow.
 * @regs: Process register set
 */
static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
{
    switch (MIPSInst_OPCODE(ir)) {
    case addiu_op:
        regs->regs[MIPSInst_RT(ir)] =
            (s32)regs->regs[MIPSInst_RS(ir)] +
            (s32)MIPSInst_SIMM(ir);
        return 0;
    case daddiu_op:
        if (IS_ENABLED(CONFIG_32BIT))
            break;

        regs->regs[MIPSInst_RT(ir)] =
            (s64)regs->regs[MIPSInst_RS(ir)] +
            (s64)MIPSInst_SIMM(ir);
        return 0;
    case lwc1_op:
    case swc1_op:
    case cop1_op:
    case cop1x_op:
        /* FPU instructions in delay slot */
        return -SIGILL;
    case spec_op:
        switch (MIPSInst_FUNC(ir)) {
        case or_op:
            regs->regs[MIPSInst_RD(ir)] =
                regs->regs[MIPSInst_RS(ir)] |
                regs->regs[MIPSInst_RT(ir)];
            return 0;
        case sll_op:
            regs->regs[MIPSInst_RD(ir)] =
                (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
                      MIPSInst_FD(ir));
            return 0;
        case srl_op:
            regs->regs[MIPSInst_RD(ir)] =
                (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
                      MIPSInst_FD(ir));
            return 0;
        case addu_op:
            regs->regs[MIPSInst_RD(ir)] =
                (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
                      (u32)regs->regs[MIPSInst_RT(ir)]);
            return 0;
        case subu_op:
            regs->regs[MIPSInst_RD(ir)] =
                (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
                      (u32)regs->regs[MIPSInst_RT(ir)]);
            return 0;
        case dsll_op:
            if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
                break;

            regs->regs[MIPSInst_RD(ir)] =
                (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
                      MIPSInst_FD(ir));
            return 0;
        case dsrl_op:
            if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
                break;

            regs->regs[MIPSInst_RD(ir)] =
                (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
                      MIPSInst_FD(ir));
            return 0;
        case daddu_op:
            if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
                break;

            regs->regs[MIPSInst_RD(ir)] =
                (u64)regs->regs[MIPSInst_RS(ir)] +
                (u64)regs->regs[MIPSInst_RT(ir)];
            return 0;
        case dsubu_op:
            if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
                break;

            regs->regs[MIPSInst_RD(ir)] =
                (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
                      (u64)regs->regs[MIPSInst_RT(ir)]);
            return 0;
        }
        break;
    default:
        pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
             ir, MIPSInst_OPCODE(ir));
    }

    return SIGILL;
}
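/*
 * Return convention used above (it mirrors the comment in jr_func()
 * below): a negative value flags an FPU instruction in the delay slot,
 * zero means the delay-slot instruction was emulated here, and anything
 * else sends the caller back to the mips_dsemul() trampoline.
 */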
/**
 * movf_func - Emulate a MOVF instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int movf_func(struct pt_regs *regs, u32 ir)
{
    u32 csr;
    u32 cond;

    csr = current->thread.fpu.fcr31;
    cond = fpucondbit[MIPSInst_RT(ir) >> 2];

    if (((csr & cond) == 0) && MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];

    MIPS_R2_STATS(movs);

    return 0;
}
/**
 * movt_func - Emulate a MOVT instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int movt_func(struct pt_regs *regs, u32 ir)
{
    u32 csr;
    u32 cond;

    csr = current->thread.fpu.fcr31;
    cond = fpucondbit[MIPSInst_RT(ir) >> 2];

    if (((csr & cond) != 0) && MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];

    MIPS_R2_STATS(movs);

    return 0;
}
/**
 * jr_func - Emulate a JR instruction.
 * @pt_regs: Process register set
 *
 * Returns SIGILL if JR was in delay slot, SIGEMT if we
 * can't compute the EPC, SIGSEGV if we can't access the
 * userland instruction or 0 on success.
 */
static int jr_func(struct pt_regs *regs, u32 ir)
{
    int err;
    unsigned long cepc, epc, nepc;
    u32 nir;

    if (delay_slot(regs))
        return SIGILL;

    /* EPC after the RI/JR instruction */
    nepc = regs->cp0_epc;
    /* Roll back to the reserved R2 JR instruction */
    regs->cp0_epc -= 4;
    epc = regs->cp0_epc;
    err = __compute_return_epc(regs);

    if (err < 0)
        return SIGEMT;

    /* Computed EPC */
    cepc = regs->cp0_epc;

    /* Get DS instruction */
    err = __get_user(nir, (u32 __user *)nepc);
    if (err)
        return SIGSEGV;

    MIPS_R2BR_STATS(jrs);

    /* If nir == 0(NOP), then nothing else to do */
    if (nir) {
        /*
         * Negative err means FPU instruction in BD-slot,
         * Zero err means 'BD-slot emulation done'
         * For anything else we go back to trampoline emulation.
         */
        err = mipsr6_emul(regs, nir);
        if (err > 0) {
            regs->cp0_epc = nepc;
            err = mips_dsemul(regs, nir, epc, cepc);
            if (err == SIGILL)
                err = SIGEMT;
            MIPS_R2_STATS(dsemul);
        }
    }

    return err;
}
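/*
 * Flow summary for jr_func() (derived from the code above): the jump
 * target is computed by rolling EPC back to the trapping JR and calling
 * __compute_return_epc(); the instruction in the delay slot is then
 * either handled by the mipsr6_emul() fastpath or, if that declines,
 * handed to the mips_dsemul() trampoline.
 */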
/**
 * movz_func - Emulate a MOVZ instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int movz_func(struct pt_regs *regs, u32 ir)
{
    if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
    MIPS_R2_STATS(movs);

    return 0;
}
/**
 * movn_func - Emulate a MOVN instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int movn_func(struct pt_regs *regs, u32 ir)
{
    if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
    MIPS_R2_STATS(movs);

    return 0;
}
/**
 * mfhi_func - Emulate a MFHI instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int mfhi_func(struct pt_regs *regs, u32 ir)
{
    if (MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->hi;

    MIPS_R2_STATS(hilo);

    return 0;
}
/**
 * mthi_func - Emulate a MTHI instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int mthi_func(struct pt_regs *regs, u32 ir)
{
    regs->hi = regs->regs[MIPSInst_RS(ir)];

    MIPS_R2_STATS(hilo);

    return 0;
}
/**
 * mflo_func - Emulate a MFLO instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int mflo_func(struct pt_regs *regs, u32 ir)
{
    if (MIPSInst_RD(ir))
        regs->regs[MIPSInst_RD(ir)] = regs->lo;

    MIPS_R2_STATS(hilo);

    return 0;
}
/**
 * mtlo_func - Emulate a MTLO instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int mtlo_func(struct pt_regs *regs, u32 ir)
{
    regs->lo = regs->regs[MIPSInst_RS(ir)];

    MIPS_R2_STATS(hilo);

    return 0;
}
/**
 * mult_func - Emulate a MULT instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int mult_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (s64)rt * (s64)rs;

    rs = res;
    regs->lo = (s64)rs;
    rt = res >> 32;
    regs->hi = (s64)rt;

    MIPS_R2_STATS(muls);

    return 0;
}
/**
 * multu_func - Emulate a MULTU instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int multu_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (u64)rt * (u64)rs;

    rt = res;
    regs->lo = (s64)(s32)rt;
    regs->hi = (s64)(s32)(res >> 32);

    MIPS_R2_STATS(muls);

    return 0;
}
/**
 * div_func - Emulate a DIV instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int div_func(struct pt_regs *regs, u32 ir)
{
    s32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    regs->lo = (s64)(rs / rt);
    regs->hi = (s64)(rs % rt);

    MIPS_R2_STATS(divs);

    return 0;
}
/**
 * divu_func - Emulate a DIVU instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int divu_func(struct pt_regs *regs, u32 ir)
{
    u32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    regs->lo = (s64)(rs / rt);
    regs->hi = (s64)(rs % rt);

    MIPS_R2_STATS(divs);

    return 0;
}
/**
 * dmult_func - Emulate a DMULT instruction
 * @regs: Process register set
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int dmult_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s64 rt, rs;

    if (IS_ENABLED(CONFIG_32BIT))
        return SIGILL;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = rt * rs;

    regs->lo = res;
    __asm__ __volatile__(
        "dmuh %0, %1, %2\t\n"
        : "=r"(res)
        : "r"(rt), "r"(rs));

    regs->hi = res;

    MIPS_R2_STATS(muls);

    return 0;
}
/**
 * dmultu_func - Emulate a DMULTU instruction
 * @regs: Process register set
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int dmultu_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u64 rt, rs;

    if (IS_ENABLED(CONFIG_32BIT))
        return SIGILL;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = rt * rs;

    regs->lo = res;
    __asm__ __volatile__(
        "dmuhu %0, %1, %2\t\n"
        : "=r"(res)
        : "r"(rt), "r"(rs));

    regs->hi = res;

    MIPS_R2_STATS(muls);

    return 0;
}
/**
 * ddiv_func - Emulate a DDIV instruction
 * @regs: Process register set
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int ddiv_func(struct pt_regs *regs, u32 ir)
{
    s64 rt, rs;

    if (IS_ENABLED(CONFIG_32BIT))
        return SIGILL;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    regs->lo = rs / rt;
    regs->hi = rs % rt;

    MIPS_R2_STATS(divs);

    return 0;
}
/**
 * ddivu_func - Emulate a DDIVU instruction
 * @regs: Process register set
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int ddivu_func(struct pt_regs *regs, u32 ir)
{
    u64 rt, rs;

    if (IS_ENABLED(CONFIG_32BIT))
        return SIGILL;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];

    regs->lo = rs / rt;
    regs->hi = rs % rt;

    MIPS_R2_STATS(divs);

    return 0;
}
/* R6 removed instructions for the SPECIAL opcode */
static const struct r2_decoder_table spec_op_table[] = {
    { 0xfc1ff83f, 0x00000008, jr_func },
    { 0xfc00ffff, 0x00000018, mult_func },
    { 0xfc00ffff, 0x00000019, multu_func },
    { 0xfc00ffff, 0x0000001c, dmult_func },
    { 0xfc00ffff, 0x0000001d, dmultu_func },
    { 0xffff07ff, 0x00000010, mfhi_func },
    { 0xfc1fffff, 0x00000011, mthi_func },
    { 0xffff07ff, 0x00000012, mflo_func },
    { 0xfc1fffff, 0x00000013, mtlo_func },
    { 0xfc0307ff, 0x00000001, movf_func },
    { 0xfc0307ff, 0x00010001, movt_func },
    { 0xfc0007ff, 0x0000000a, movz_func },
    { 0xfc0007ff, 0x0000000b, movn_func },
    { 0xfc00ffff, 0x0000001a, div_func },
    { 0xfc00ffff, 0x0000001b, divu_func },
    { 0xfc00ffff, 0x0000001e, ddiv_func },
    { 0xfc00ffff, 0x0000001f, ddivu_func },
    { }
};
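/*
 * Reading note for the decoder tables (derived from
 * mipsr2_find_op_func() below): an instruction matches an entry when
 * (inst & mask) == code. The first entry above, for example, selects JR
 * by requiring the SPECIAL opcode, rt = rd = 0 and function code 0x08,
 * while ignoring the rs (jump target) register and the hint bits.
 */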
/**
 * madd_func - Emulate a MADD instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int madd_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (s64)rt * (s64)rs;
    rt = regs->hi;
    rs = regs->lo;
    res += ((((s64)rt) << 32) | (u32)rs);

    rt = res;
    regs->lo = (s64)rt;
    rs = res >> 32;
    regs->hi = (s64)rs;

    MIPS_R2_STATS(dsps);

    return 0;
}
/**
 * maddu_func - Emulate a MADDU instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int maddu_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (u64)rt * (u64)rs;
    rt = regs->hi;
    rs = regs->lo;
    res += ((((s64)rt) << 32) | (u32)rs);

    rt = res;
    regs->lo = (s64)(s32)rt;
    rs = res >> 32;
    regs->hi = (s64)(s32)rs;

    MIPS_R2_STATS(dsps);

    return 0;
}
/**
 * msub_func - Emulate a MSUB instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int msub_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (s64)rt * (s64)rs;
    rt = regs->hi;
    rs = regs->lo;
    res = ((((s64)rt) << 32) | (u32)rs) - res;

    rt = res;
    regs->lo = (s64)rt;
    rs = res >> 32;
    regs->hi = (s64)rs;

    MIPS_R2_STATS(dsps);

    return 0;
}
/**
 * msubu_func - Emulate a MSUBU instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int msubu_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u32 rt, rs;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (u64)rt * (u64)rs;
    rt = regs->hi;
    rs = regs->lo;
    res = ((((s64)rt) << 32) | (u32)rs) - res;

    rt = res;
    regs->lo = (s64)(s32)rt;
    rs = res >> 32;
    regs->hi = (s64)(s32)rs;

    MIPS_R2_STATS(dsps);

    return 0;
}
/**
 * mul_func - Emulate a MUL instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int mul_func(struct pt_regs *regs, u32 ir)
{
    s64 res;
    s32 rt, rs;

    if (!MIPSInst_RD(ir))
        return 0;

    rt = regs->regs[MIPSInst_RT(ir)];
    rs = regs->regs[MIPSInst_RS(ir)];
    res = (s64)rt * (s64)rs;

    rs = res;
    regs->regs[MIPSInst_RD(ir)] = (s64)rs;

    MIPS_R2_STATS(muls);

    return 0;
}
/**
 * clz_func - Emulate a CLZ instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int clz_func(struct pt_regs *regs, u32 ir)
{
    u32 res;
    u32 rs;

    if (!MIPSInst_RD(ir))
        return 0;

    rs = regs->regs[MIPSInst_RS(ir)];
    __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
    regs->regs[MIPSInst_RD(ir)] = res;

    MIPS_R2_STATS(bops);

    return 0;
}
/**
 * clo_func - Emulate a CLO instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int clo_func(struct pt_regs *regs, u32 ir)
{
    u32 res;
    u32 rs;

    if (!MIPSInst_RD(ir))
        return 0;

    rs = regs->regs[MIPSInst_RS(ir)];
    __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
    regs->regs[MIPSInst_RD(ir)] = res;

    MIPS_R2_STATS(bops);

    return 0;
}
/**
 * dclz_func - Emulate a DCLZ instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int dclz_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u64 rs;

    if (IS_ENABLED(CONFIG_32BIT))
        return SIGILL;

    if (!MIPSInst_RD(ir))
        return 0;

    rs = regs->regs[MIPSInst_RS(ir)];
    __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
    regs->regs[MIPSInst_RD(ir)] = res;

    MIPS_R2_STATS(bops);

    return 0;
}
/**
 * dclo_func - Emulate a DCLO instruction
 * @regs: Process register set
 *
 * Returns 0 since it always succeeds.
 */
static int dclo_func(struct pt_regs *regs, u32 ir)
{
    u64 res;
    u64 rs;

    if (IS_ENABLED(CONFIG_32BIT))
        return SIGILL;

    if (!MIPSInst_RD(ir))
        return 0;

    rs = regs->regs[MIPSInst_RS(ir)];
    __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
    regs->regs[MIPSInst_RD(ir)] = res;

    MIPS_R2_STATS(bops);

    return 0;
}
/* R6 removed instructions for the SPECIAL2 opcode */
static const struct r2_decoder_table spec2_op_table[] = {
    { 0xfc00ffff, 0x70000000, madd_func },
    { 0xfc00ffff, 0x70000001, maddu_func },
    { 0xfc0007ff, 0x70000002, mul_func },
    { 0xfc00ffff, 0x70000004, msub_func },
    { 0xfc00ffff, 0x70000005, msubu_func },
    { 0xfc0007ff, 0x70000020, clz_func },
    { 0xfc0007ff, 0x70000021, clo_func },
    { 0xfc0007ff, 0x70000024, dclz_func },
    { 0xfc0007ff, 0x70000025, dclo_func },
    { }
};
static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
                                      const struct r2_decoder_table *table)
{
    const struct r2_decoder_table *p;
    int err;

    for (p = table; p->func; p++) {
        if ((inst & p->mask) == p->code) {
            err = (p->func)(regs, inst);
            return err;
        }
    }

    return SIGILL;
}
/**
 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
 * @regs: Process register set
 * @inst: Instruction to decode and emulate
 * @fcr31: Floating Point Control and Status Register Cause bits returned
 */
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
    int err = 0;
    unsigned long vaddr;
    unsigned long cpc, epc, nepc, r31, res, rs, rt;
    void __user *fault_addr = NULL;
    int pass = 0;
    u32 nir;

repeat:
    r31 = regs->regs[31];
    epc = regs->cp0_epc;
    err = compute_return_epc(regs);
    if (err < 0)
        return SIGEMT;

    pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n",
             inst, epc, pass);

    switch (MIPSInst_OPCODE(inst)) {
    case spec_op:
        err = mipsr2_find_op_func(regs, inst, spec_op_table);
        if (err < 0) {
            /* FPU instruction under JR */
            regs->cp0_cause |= CAUSEF_BD;
            goto fpu_emul;
        }
        break;
    case spec2_op:
        err = mipsr2_find_op_func(regs, inst, spec2_op_table);
        break;
    case bcond_op:
        rt = MIPSInst_RT(inst);
        rs = MIPSInst_RS(inst);
        switch (rt) {
        case tgei_op:
            if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
                do_trap_or_bp(regs, 0, 0, "TGEI");

            MIPS_R2_STATS(traps);

            break;
        case tgeiu_op:
            if (regs->regs[rs] >= MIPSInst_UIMM(inst))
                do_trap_or_bp(regs, 0, 0, "TGEIU");

            MIPS_R2_STATS(traps);

            break;
        case tlti_op:
            if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
                do_trap_or_bp(regs, 0, 0, "TLTI");

            MIPS_R2_STATS(traps);

            break;
        case tltiu_op:
            if (regs->regs[rs] < MIPSInst_UIMM(inst))
                do_trap_or_bp(regs, 0, 0, "TLTIU");

            MIPS_R2_STATS(traps);

            break;
        case teqi_op:
            if (regs->regs[rs] == MIPSInst_SIMM(inst))
                do_trap_or_bp(regs, 0, 0, "TEQI");

            MIPS_R2_STATS(traps);

            break;
        case tnei_op:
            if (regs->regs[rs] != MIPSInst_SIMM(inst))
                do_trap_or_bp(regs, 0, 0, "TNEI");

            MIPS_R2_STATS(traps);

            break;
        case bltzl_op:
        case bgezl_op:
        case bltzall_op:
        case bgezall_op:
            if (delay_slot(regs)) {
                err = SIGILL;
                break;
            }
            regs->regs[31] = r31;
            regs->cp0_epc = epc;
            err = __compute_return_epc(regs);
            if (err < 0)
                return SIGEMT;
            if (err != BRANCH_LIKELY_TAKEN)
                break;
            cpc = regs->cp0_epc;
            nepc = epc + 4;
            err = __get_user(nir, (u32 __user *)nepc);
            if (err) {
                err = SIGSEGV;
                break;
            }
            /*
             * This will probably be optimized away when
             * CONFIG_DEBUG_FS is not enabled
             */
            switch (rt) {
            case bltzl_op:
                MIPS_R2BR_STATS(bltzl);
                break;
            case bgezl_op:
                MIPS_R2BR_STATS(bgezl);
                break;
            case bltzall_op:
                MIPS_R2BR_STATS(bltzall);
                break;
            case bgezall_op:
                MIPS_R2BR_STATS(bgezall);
                break;
            }

            switch (MIPSInst_OPCODE(nir)) {
            case cop1_op:
            case cop1x_op:
            case lwc1_op:
            case swc1_op:
                regs->cp0_cause |= CAUSEF_BD;
                goto fpu_emul;
            }
            if (nir) {
                err = mipsr6_emul(regs, nir);
                if (err > 0) {
                    err = mips_dsemul(regs, nir, epc, cpc);
                    if (err == SIGILL)
                        err = SIGEMT;
                    MIPS_R2_STATS(dsemul);
                }
            }
            break;
        case bltzal_op:
        case bgezal_op:
            if (delay_slot(regs)) {
                err = SIGILL;
                break;
            }
            regs->regs[31] = r31;
            regs->cp0_epc = epc;
            err = __compute_return_epc(regs);
            if (err < 0)
                return SIGEMT;
            cpc = regs->cp0_epc;
            nepc = epc + 4;
            err = __get_user(nir, (u32 __user *)nepc);
            if (err) {
                err = SIGSEGV;
                break;
            }
            /*
             * This will probably be optimized away when
             * CONFIG_DEBUG_FS is not enabled
             */
            switch (rt) {
            case bltzal_op:
                MIPS_R2BR_STATS(bltzal);
                break;
            case bgezal_op:
                MIPS_R2BR_STATS(bgezal);
                break;
            }

            switch (MIPSInst_OPCODE(nir)) {
            case cop1_op:
            case cop1x_op:
            case lwc1_op:
            case swc1_op:
                regs->cp0_cause |= CAUSEF_BD;
                goto fpu_emul;
            }
            if (nir) {
                err = mipsr6_emul(regs, nir);
                if (err > 0) {
                    err = mips_dsemul(regs, nir, epc, cpc);
                    if (err == SIGILL)
                        err = SIGEMT;
                    MIPS_R2_STATS(dsemul);
                }
            }
            break;
        default:
            regs->regs[31] = r31;
            regs->cp0_epc = epc;
            err = SIGILL;
            break;
        }
        break;
    case blezl_op:
    case bgtzl_op:
        /*
         * For BLEZL and BGTZL, rt field must be set to 0. If this
         * is not the case, this may be an encoding of a MIPS R6
         * instruction, so return to CPU execution if this occurs
         */
        if (MIPSInst_RT(inst)) {
            err = SIGILL;
            break;
        }
        /* fall through */
    case beql_op:
    case bnel_op:
        if (delay_slot(regs)) {
            err = SIGILL;
            break;
        }
        regs->regs[31] = r31;
        regs->cp0_epc = epc;
        err = __compute_return_epc(regs);
        if (err < 0)
            return SIGEMT;
        if (err != BRANCH_LIKELY_TAKEN)
            break;
        cpc = regs->cp0_epc;
        nepc = epc + 4;
        err = __get_user(nir, (u32 __user *)nepc);
        if (err) {
            err = SIGSEGV;
            break;
        }
        /*
         * This will probably be optimized away when
         * CONFIG_DEBUG_FS is not enabled
         */
        switch (MIPSInst_OPCODE(inst)) {
        case beql_op:
            MIPS_R2BR_STATS(beql);
            break;
        case bnel_op:
            MIPS_R2BR_STATS(bnel);
            break;
        case blezl_op:
            MIPS_R2BR_STATS(blezl);
            break;
        case bgtzl_op:
            MIPS_R2BR_STATS(bgtzl);
            break;
        }

        switch (MIPSInst_OPCODE(nir)) {
        case cop1_op:
        case cop1x_op:
        case lwc1_op:
        case swc1_op:
            regs->cp0_cause |= CAUSEF_BD;
            goto fpu_emul;
        }
        if (nir) {
            err = mipsr6_emul(regs, nir);
            if (err > 0) {
                err = mips_dsemul(regs, nir, epc, cpc);
                if (err == SIGILL)
                    err = SIGEMT;
                MIPS_R2_STATS(dsemul);
            }
        }
        break;
    case lwc1_op:
    case swc1_op:
    case cop1_op:
    case cop1x_op:
fpu_emul:
        regs->regs[31] = r31;
        regs->cp0_epc = epc;
        if (!used_math()) {     /* First time FPU user. */
            err = init_fpu();
            if (err)
                return SIGILL;
            set_used_math();
        }
        lose_fpu(1);    /* Save FPU state for the emulator. */

        err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
                                       &fault_addr);

        /*
         * We can't allow the emulated instruction to leave any
         * enabled Cause bits set in $fcr31.
         */
        *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
        current->thread.fpu.fcr31 &= ~res;

        /*
         * this is a tricky issue - lose_fpu() uses LL/SC atomics
         * if FPU is owned and effectively cancels user level LL/SC.
         * So, it could be logical to don't restore FPU ownership here.
         * But the sequence of multiple FPU instructions is much much
         * more often than LL-FPU-SC and I prefer loop here until
         * next scheduler cycle cancels FPU ownership
         */
        own_fpu(1);     /* Restore FPU state. */

        if (err)
            current->thread.cp0_baduaddr = (unsigned long)fault_addr;

        MIPS_R2_STATS(fpus);

        break;
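    /*
     * Summary of the load/store cases below: the LWL/LWR/SWL/SWR and
     * 64-bit LDL/LDR/SDL/SDR emulations work byte by byte, loading or
     * storing one byte per step and merging it with ins/ext (dins/dext),
     * walking vaddr until it reaches an aligned boundary as tested by
     * the "andi %1, %2, 0x3" (or 0x7) instructions in the asm.
     */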
    case lwl_op:
        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "1:" LB "%1, 0(%2)\n"
                INS "%0, %1, 24, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "2:" LB "%1, 0(%2)\n"
                INS "%0, %1, 16, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "3:" LB "%1, 0(%2)\n"
                INS "%0, %1, 8, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "4:" LB "%1, 0(%2)\n"
                INS "%0, %1, 0, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "1:" LB "%1, 0(%2)\n"
                INS "%0, %1, 24, 8\n"
            "   andi %1, %2, 0x3\n"
            "2:" LB "%1, 0(%2)\n"
                INS "%0, %1, 16, 8\n"
            "   andi %1, %2, 0x3\n"
            "3:" LB "%1, 0(%2)\n"
                INS "%0, %1, 8, 8\n"
            "   andi %1, %2, 0x3\n"
            "4:" LB "%1, 0(%2)\n"
                INS "%0, %1, 0, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "9: sll %0, %0, 0\n"
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = rt;

        MIPS_R2_STATS(loads);

        break;
    case lwr_op:
        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "1:" LB "%1, 0(%2)\n"
                INS "%0, %1, 0, 8\n"
            "   andi %1, %2, 0x3\n"
            "2:" LB "%1, 0(%2)\n"
                INS "%0, %1, 8, 8\n"
            "   andi %1, %2, 0x3\n"
            "3:" LB "%1, 0(%2)\n"
                INS "%0, %1, 16, 8\n"
            "   andi %1, %2, 0x3\n"
            "4:" LB "%1, 0(%2)\n"
                INS "%0, %1, 24, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "1:" LB "%1, 0(%2)\n"
                INS "%0, %1, 0, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "2:" LB "%1, 0(%2)\n"
                INS "%0, %1, 8, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "3:" LB "%1, 0(%2)\n"
                INS "%0, %1, 16, 8\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
            "4:" LB "%1, 0(%2)\n"
                INS "%0, %1, 24, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = rt;

        MIPS_R2_STATS(loads);

        break;
    case swl_op:
        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
                EXT "%1, %0, 24, 8\n"
            "1:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 16, 8\n"
            "2:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 8, 8\n"
            "3:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 0, 8\n"
            "4:" SB "%1, 0(%2)\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
                EXT "%1, %0, 24, 8\n"
            "1:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 16, 8\n"
            "2:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 8, 8\n"
            "3:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 0, 8\n"
            "4:" SB "%1, 0(%2)\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        MIPS_R2_STATS(stores);

        break;
    case swr_op:
        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
                EXT "%1, %0, 0, 8\n"
            "1:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 8, 8\n"
            "2:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 16, 8\n"
            "3:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                EXT "%1, %0, 24, 8\n"
            "4:" SB "%1, 0(%2)\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
                EXT "%1, %0, 0, 8\n"
            "1:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 8, 8\n"
            "2:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 16, 8\n"
            "3:" SB "%1, 0(%2)\n"
            "   andi %1, %2, 0x3\n"
                ADDIU "%2, %2, -1\n"
                EXT "%1, %0, 24, 8\n"
            "4:" SB "%1, 0(%2)\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        MIPS_R2_STATS(stores);

        break;
    case ldl_op:
        if (IS_ENABLED(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "   dinsu %0, %1, 56, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 48, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 40, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 32, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 24, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 16, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 8, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 0, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "   dinsu %0, %1, 56, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 48, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 40, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 32, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 24, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 16, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 8, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 0, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = rt;

        MIPS_R2_STATS(loads);

        break;
    case ldr_op:
        if (IS_ENABLED(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "   dins %0, %1, 0, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 8, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 16, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dins %0, %1, 24, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 32, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 40, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 48, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dinsu %0, %1, 56, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "   dins %0, %1, 0, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 8, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 16, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dins %0, %1, 24, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 32, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 40, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 48, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dinsu %0, %1, 56, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = rt;

        MIPS_R2_STATS(loads);

        break;
    case sdl_op:
        if (IS_ENABLED(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "   dextu %1, %0, 56, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 48, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 40, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 32, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 24, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 16, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 8, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 0, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "   dextu %1, %0, 56, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 48, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 40, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 32, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 24, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 16, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 8, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 0, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        MIPS_R2_STATS(stores);

        break;
    case sdr_op:
        if (IS_ENABLED(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        rt = regs->regs[MIPSInst_RT(inst)];
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGSEGV;
            break;
        }
        __asm__ __volatile__(
#ifdef CONFIG_CPU_LITTLE_ENDIAN
            "   dext %1, %0, 0, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 8, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 16, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dext %1, %0, 24, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 32, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 40, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 48, 8\n"
            "   daddiu %2, %2, 1\n"
            "   andi %1, %2, 0x7\n"
            "   dextu %1, %0, 56, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
            "   dext %1, %0, 0, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 8, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 16, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dext %1, %0, 24, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 32, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 40, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 48, 8\n"
            "   andi %1, %2, 0x7\n"
            "   daddiu %2, %2, -1\n"
            "   dextu %1, %0, 56, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
            "   .section .fixup,\"ax\"\n"
            "   .section __ex_table,\"a\"\n"
            : "+&r"(rt), "=&r"(rs),
              "+&r"(vaddr), "+&r"(err)
            : "i"(SIGSEGV));

        MIPS_R2_STATS(stores);

        break;
    case ll_op:
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (vaddr & 0x3) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }
        if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }

        if (!cpu_has_rw_llb) {
            /*
             * An LL/SC block can't be safely emulated without
             * a Config5/LLB availability. So it's probably time to
             * kill our process before things get any worse. This is
             * because Config5/LLB allows us to use ERETNC so that
             * the LLAddr/LLB bit is not cleared when we return from
             * an exception. MIPS R2 LL/SC instructions trap with an
             * RI exception so once we emulate them here, we return
             * back to userland with ERETNC. That preserves the
             * LLAddr/LLB so the subsequent SC instruction will
             * succeed preserving the atomic semantics of the LL/SC
             * block. Without that, there is no safe way to emulate
             * an LL/SC block in MIPSR2 userland.
             */
            pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
            err = SIGKILL;
            break;
        }

        __asm__ __volatile__(
            "1:\n"
            "ll %0, 0(%2)\n"
            "2:\n"
            ".section .fixup,\"ax\"\n"
            ".section __ex_table,\"a\"\n"
            : "=&r"(res), "+&r"(err)
            : "r"(vaddr), "i"(SIGSEGV)
            : "memory");

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = res;
        MIPS_R2_STATS(llsc);

        break;
    case sc_op:
        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (vaddr & 0x3) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }
        if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }

        if (!cpu_has_rw_llb) {
            /*
             * An LL/SC block can't be safely emulated without
             * a Config5/LLB availability. So it's probably time to
             * kill our process before things get any worse. This is
             * because Config5/LLB allows us to use ERETNC so that
             * the LLAddr/LLB bit is not cleared when we return from
             * an exception. MIPS R2 LL/SC instructions trap with an
             * RI exception so once we emulate them here, we return
             * back to userland with ERETNC. That preserves the
             * LLAddr/LLB so the subsequent SC instruction will
             * succeed preserving the atomic semantics of the LL/SC
             * block. Without that, there is no safe way to emulate
             * an LL/SC block in MIPSR2 userland.
             */
            pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
            err = SIGKILL;
            break;
        }

        res = regs->regs[MIPSInst_RT(inst)];

        __asm__ __volatile__(
            "1:\n"
            "sc %0, 0(%2)\n"
            "2:\n"
            ".section .fixup,\"ax\"\n"
            ".section __ex_table,\"a\"\n"
            : "+&r"(res), "+&r"(err)
            : "r"(vaddr), "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = res;

        MIPS_R2_STATS(llsc);

        break;
    case lld_op:
        if (IS_ENABLED(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (vaddr & 0x7) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }
        if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }

        if (!cpu_has_rw_llb) {
            /*
             * An LL/SC block can't be safely emulated without
             * a Config5/LLB availability. So it's probably time to
             * kill our process before things get any worse. This is
             * because Config5/LLB allows us to use ERETNC so that
             * the LLAddr/LLB bit is not cleared when we return from
             * an exception. MIPS R2 LL/SC instructions trap with an
             * RI exception so once we emulate them here, we return
             * back to userland with ERETNC. That preserves the
             * LLAddr/LLB so the subsequent SC instruction will
             * succeed preserving the atomic semantics of the LL/SC
             * block. Without that, there is no safe way to emulate
             * an LL/SC block in MIPSR2 userland.
             */
            pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
            err = SIGKILL;
            break;
        }

        __asm__ __volatile__(
            "1:\n"
            "lld %0, 0(%2)\n"
            "2:\n"
            ".section .fixup,\"ax\"\n"
            ".section __ex_table,\"a\"\n"
            : "=&r"(res), "+&r"(err)
            : "r"(vaddr), "i"(SIGSEGV)
            : "memory");

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = res;

        MIPS_R2_STATS(llsc);

        break;
    case scd_op:
        if (IS_ENABLED(CONFIG_32BIT)) {
            err = SIGILL;
            break;
        }

        vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
        if (vaddr & 0x7) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }
        if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
            current->thread.cp0_baduaddr = vaddr;
            err = SIGBUS;
            break;
        }

        if (!cpu_has_rw_llb) {
            /*
             * An LL/SC block can't be safely emulated without
             * a Config5/LLB availability. So it's probably time to
             * kill our process before things get any worse. This is
             * because Config5/LLB allows us to use ERETNC so that
             * the LLAddr/LLB bit is not cleared when we return from
             * an exception. MIPS R2 LL/SC instructions trap with an
             * RI exception so once we emulate them here, we return
             * back to userland with ERETNC. That preserves the
             * LLAddr/LLB so the subsequent SC instruction will
             * succeed preserving the atomic semantics of the LL/SC
             * block. Without that, there is no safe way to emulate
             * an LL/SC block in MIPSR2 userland.
             */
            pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
            err = SIGKILL;
            break;
        }

        res = regs->regs[MIPSInst_RT(inst)];

        __asm__ __volatile__(
            "1:\n"
            "scd %0, 0(%2)\n"
            "2:\n"
            ".section .fixup,\"ax\"\n"
            ".section __ex_table,\"a\"\n"
            : "+&r"(res), "+&r"(err)
            : "r"(vaddr), "i"(SIGSEGV));

        if (MIPSInst_RT(inst) && !err)
            regs->regs[MIPSInst_RT(inst)] = res;

        MIPS_R2_STATS(llsc);

        break;
    default:
        err = SIGILL;
        break;
    }

    /*
     * Let's not return to userland just yet. It's costly and
     * it's likely we have more R2 instructions to emulate
     */
    if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
        regs->cp0_cause &= ~CAUSEF_BD;
        err = get_user(inst, (u32 __user *)regs->cp0_epc);
        if (!err)
            goto repeat;

        if (err < 0)
            err = SIGSEGV;
    }

    if (err && (err != SIGEMT)) {
        regs->regs[31] = r31;
        regs->cp0_epc = epc;
    }

    /* Likely a MIPS R6 compatible instruction */
    if (pass && (err == SIGILL))
        err = 0;

    return err;
}
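/*
 * Note on the multi-pass loop in mipsr2_decoder() above: after a
 * successful emulation the decoder fetches the next instruction itself
 * and loops, up to MIPS_R2_EMUL_TOTAL_PASS times, rather than returning
 * to userland after every single emulated instruction.
 */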
#ifdef CONFIG_DEBUG_FS

static int mipsr2_stats_show(struct seq_file *s, void *unused)
{
    seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
    seq_printf(s, "movs\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.movs),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
    seq_printf(s, "hilo\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
    seq_printf(s, "muls\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.muls),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
    seq_printf(s, "divs\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.divs),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
    seq_printf(s, "dsps\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
    seq_printf(s, "bops\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.bops),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
    seq_printf(s, "traps\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.traps),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
    seq_printf(s, "fpus\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
    seq_printf(s, "loads\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.loads),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
    seq_printf(s, "stores\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.stores),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
    seq_printf(s, "llsc\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
    seq_printf(s, "dsemul\t\t%ld\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
               (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
    seq_printf(s, "jr\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
    seq_printf(s, "bltzl\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
    seq_printf(s, "bgezl\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
    seq_printf(s, "bltzll\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
    seq_printf(s, "bgezll\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
    seq_printf(s, "bltzal\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
    seq_printf(s, "bgezal\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
    seq_printf(s, "beql\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
    seq_printf(s, "bnel\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
    seq_printf(s, "blezl\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
    seq_printf(s, "bgtzl\t\t%ld\n",
               (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));

    return 0;
}
static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
{
    mipsr2_stats_show(s, unused);

    __this_cpu_write((mipsr2emustats).movs, 0);
    __this_cpu_write((mipsr2bdemustats).movs, 0);
    __this_cpu_write((mipsr2emustats).hilo, 0);
    __this_cpu_write((mipsr2bdemustats).hilo, 0);
    __this_cpu_write((mipsr2emustats).muls, 0);
    __this_cpu_write((mipsr2bdemustats).muls, 0);
    __this_cpu_write((mipsr2emustats).divs, 0);
    __this_cpu_write((mipsr2bdemustats).divs, 0);
    __this_cpu_write((mipsr2emustats).dsps, 0);
    __this_cpu_write((mipsr2bdemustats).dsps, 0);
    __this_cpu_write((mipsr2emustats).bops, 0);
    __this_cpu_write((mipsr2bdemustats).bops, 0);
    __this_cpu_write((mipsr2emustats).traps, 0);
    __this_cpu_write((mipsr2bdemustats).traps, 0);
    __this_cpu_write((mipsr2emustats).fpus, 0);
    __this_cpu_write((mipsr2bdemustats).fpus, 0);
    __this_cpu_write((mipsr2emustats).loads, 0);
    __this_cpu_write((mipsr2bdemustats).loads, 0);
    __this_cpu_write((mipsr2emustats).stores, 0);
    __this_cpu_write((mipsr2bdemustats).stores, 0);
    __this_cpu_write((mipsr2emustats).llsc, 0);
    __this_cpu_write((mipsr2bdemustats).llsc, 0);
    __this_cpu_write((mipsr2emustats).dsemul, 0);
    __this_cpu_write((mipsr2bdemustats).dsemul, 0);
    __this_cpu_write((mipsr2bremustats).jrs, 0);
    __this_cpu_write((mipsr2bremustats).bltzl, 0);
    __this_cpu_write((mipsr2bremustats).bgezl, 0);
    __this_cpu_write((mipsr2bremustats).bltzll, 0);
    __this_cpu_write((mipsr2bremustats).bgezll, 0);
    __this_cpu_write((mipsr2bremustats).bltzall, 0);
    __this_cpu_write((mipsr2bremustats).bgezall, 0);
    __this_cpu_write((mipsr2bremustats).bltzal, 0);
    __this_cpu_write((mipsr2bremustats).bgezal, 0);
    __this_cpu_write((mipsr2bremustats).beql, 0);
    __this_cpu_write((mipsr2bremustats).bnel, 0);
    __this_cpu_write((mipsr2bremustats).blezl, 0);
    __this_cpu_write((mipsr2bremustats).bgtzl, 0);

    return 0;
}
static int mipsr2_stats_open(struct inode *inode, struct file *file)
{
    return single_open(file, mipsr2_stats_show, inode->i_private);
}

static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
{
    return single_open(file, mipsr2_stats_clear_show, inode->i_private);
}
static const struct file_operations mipsr2_emul_fops = {
    .open    = mipsr2_stats_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};

static const struct file_operations mipsr2_clear_fops = {
    .open    = mipsr2_stats_clear_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};
static int __init mipsr2_init_debugfs(void)
{
    struct dentry *mipsr2_emul;

    if (!mips_debugfs_dir)
        return -ENODEV;

    mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
                                      mips_debugfs_dir, NULL,
                                      &mipsr2_emul_fops);
    if (!mipsr2_emul)
        return -ENOMEM;

    mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
                                      mips_debugfs_dir, NULL,
                                      &mipsr2_clear_fops);
    if (!mipsr2_emul)
        return -ENOMEM;

    return 0;
}

device_initcall(mipsr2_init_debugfs);
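/*
 * Usage note (derived from the code above): the two files are created
 * under the MIPS debugfs directory (mips_debugfs_dir); reading
 * r2_emul_stats dumps the per-CPU counters, while reading
 * r2_emul_stats_clear dumps them and then zeroes them, as
 * mipsr2_stats_clear_show() does.
 */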
#endif /* CONFIG_DEBUG_FS */