/*
 * MIPS emulation for QEMU - nanoMIPS translation routines
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 * Copyright (c) 2006 Marius Groeger (FPU operations)
 * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
 * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
 * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support)
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */
/* MAJOR, P16, and P32 pool opcodes */
70 /* POOL32A instruction pool */
80 /* P.GP.W instruction pool */
87 /* P48I instruction pool */
97 /* P.U12 instruction pool */
113 /* POOL32F instruction pool */
120 /* POOL32S instruction pool */
126 /* P.LUI instruction pool */
132 /* P.GP.BH instruction pool */
143 /* P.LS.U12 instruction pool */
161 /* P.LS.S9 instruction pool */
170 /* P.BAL instruction pool */
176 /* P.J instruction pool */
183 /* P.BR1 instruction pool */
191 /* P.BR2 instruction pool */
198 /* P.BRI instruction pool */
210 /* P16.SHIFT instruction pool */
216 /* POOL16C instruction pool */
222 /* P16.A1 instruction pool */
227 /* P16.A2 instruction pool */
230 NM_P_ADDIURS5 = 0x01,
233 /* P16.ADDU instruction pool */
239 /* P16.SR instruction pool */
242 NM_RESTORE_JRC16 = 0x01,
245 /* P16.4X4 instruction pool */
251 /* P16.LB instruction pool */
258 /* P16.LH instruction pool */
265 /* P.RI instruction pool */
273 /* POOL32A0 instruction pool */
308 NM_D_E_MT_VPE = 0x56,
316 /* CRC32 instruction pool */
326 /* POOL32A5 instruction pool */
331 NM_CMPGU_EQ_QB = 0x18,
332 NM_CMPGU_LT_QB = 0x20,
333 NM_CMPGU_LE_QB = 0x28,
334 NM_CMPGDU_EQ_QB = 0x30,
335 NM_CMPGDU_LT_QB = 0x38,
336 NM_CMPGDU_LE_QB = 0x40,
337 NM_CMPU_EQ_QB = 0x48,
338 NM_CMPU_LT_QB = 0x50,
339 NM_CMPU_LE_QB = 0x58,
346 NM_ADDQH_R_PH = 0x09,
350 NM_ADDUH_R_QB = 0x29,
351 NM_SHRAV_R_PH = 0x31,
352 NM_SHRAV_R_QB = 0x39,
354 NM_SUBQH_R_PH = 0x49,
358 NM_SUBUH_R_QB = 0x69,
359 NM_SHLLV_S_PH = 0x71,
360 NM_PRECR_SRA_R_PH_W = 0x79,
362 NM_MULEU_S_PH_QBL = 0x12,
363 NM_MULEU_S_PH_QBR = 0x1a,
364 NM_MULQ_RS_PH = 0x22,
378 NM_MULEQ_S_W_PHL = 0x04,
379 NM_MULEQ_S_W_PHR = 0x0c,
382 NM_PRECR_QB_PH = 0x0d,
383 NM_PRECRQ_QB_PH = 0x15,
384 NM_PRECRQ_PH_W = 0x1d,
385 NM_PRECRQ_RS_PH_W = 0x25,
386 NM_PRECRQU_S_QB_PH = 0x2d,
399 /* POOL32A7 instruction pool */
407 /* P.SR instruction pool */
413 /* P.SHIFT instruction pool */
421 /* P.ROTX instruction pool */
426 /* P.INS instruction pool */
431 /* P.EXT instruction pool */
436 /* POOL32F_0 (fmt) instruction pool */
462 /* POOL32F_3 instruction pool */
471 /* POOL32F_5 instruction pool */
473 NM_CMP_CONDN_S = 0x00,
474 NM_CMP_CONDN_D = 0x02,
477 /* P.GP.LH instruction pool */
483 /* P.GP.SH instruction pool */
488 /* P.GP.CP1 instruction pool */
496 /* P.LS.S0 instruction pool */
519 /* P.LS.S1 instruction pool */
529 /* P.LS.E0 instruction pool */
545 /* P.PREFE instruction pool */
551 /* P.LLE instruction pool */
557 /* P.SCE instruction pool */
563 /* P.LS.WM instruction pool */
569 /* P.LS.UAWM instruction pool */
575 /* P.BR3A instruction pool */
584 /* P16.RI instruction pool */
586 NM_P16_SYSCALL = 0x01,
591 /* POOL16C_0 instruction pool */
593 NM_POOL16C_00 = 0x00,
596 /* P16.JRC instruction pool */
602 /* P.SYSCALL instruction pool */
608 /* P.TRAP instruction pool */
614 /* P.CMOVE instruction pool */
620 /* POOL32Axf instruction pool */
622 NM_POOL32AXF_1 = 0x01,
623 NM_POOL32AXF_2 = 0x02,
624 NM_POOL32AXF_4 = 0x04,
625 NM_POOL32AXF_5 = 0x05,
626 NM_POOL32AXF_7 = 0x07,
629 /* POOL32Axf_1 instruction pool */
631 NM_POOL32AXF_1_0 = 0x00,
632 NM_POOL32AXF_1_1 = 0x01,
633 NM_POOL32AXF_1_3 = 0x03,
634 NM_POOL32AXF_1_4 = 0x04,
635 NM_POOL32AXF_1_5 = 0x05,
636 NM_POOL32AXF_1_7 = 0x07,
639 /* POOL32Axf_2 instruction pool */
641 NM_POOL32AXF_2_0_7 = 0x00,
642 NM_POOL32AXF_2_8_15 = 0x01,
643 NM_POOL32AXF_2_16_23 = 0x02,
644 NM_POOL32AXF_2_24_31 = 0x03,
647 /* POOL32Axf_7 instruction pool */
654 /* POOL32Axf_1_0 instruction pool */
662 /* POOL32Axf_1_1 instruction pool */
668 /* POOL32Axf_1_3 instruction pool */
676 /* POOL32Axf_1_4 instruction pool */
682 /* POOL32Axf_1_5 instruction pool */
684 NM_MAQ_S_W_PHR = 0x0,
685 NM_MAQ_S_W_PHL = 0x1,
686 NM_MAQ_SA_W_PHR = 0x2,
687 NM_MAQ_SA_W_PHL = 0x3,
690 /* POOL32Axf_1_7 instruction pool */
698 /* POOL32Axf_2_0_7 instruction pool */
701 NM_DPAQ_S_W_PH = 0x1,
703 NM_DPSQ_S_W_PH = 0x3,
710 /* POOL32Axf_2_8_15 instruction pool */
713 NM_DPAQ_SA_L_W = 0x1,
715 NM_DPSQ_SA_L_W = 0x3,
721 /* POOL32Axf_2_16_23 instruction pool */
724 NM_DPAQX_S_W_PH = 0x1,
726 NM_DPSQX_S_W_PH = 0x3,
733 /* POOL32Axf_2_24_31 instruction pool */
736 NM_DPAQX_SA_W_PH = 0x1,
738 NM_DPSQX_SA_W_PH = 0x3,
741 NM_MULSAQ_S_W_PH = 0x6,
745 /* POOL32Axf_{4, 5} instruction pool */
764 /* nanoMIPS DSP instructions */
768 NM_PRECEQ_W_PHL = 0x28,
769 NM_PRECEQ_W_PHR = 0x30,
770 NM_PRECEQU_PH_QBL = 0x38,
771 NM_PRECEQU_PH_QBR = 0x48,
772 NM_PRECEU_PH_QBL = 0x58,
773 NM_PRECEU_PH_QBR = 0x68,
774 NM_PRECEQU_PH_QBLA = 0x39,
775 NM_PRECEQU_PH_QBRA = 0x49,
776 NM_PRECEU_PH_QBLA = 0x59,
777 NM_PRECEU_PH_QBRA = 0x69,
782 NM_RADDU_W_QB = 0x78,
788 /* PP.SR instruction pool */
792 NM_RESTORE_JRC = 0x03,
795 /* P.SR.F instruction pool */
801 /* P16.SYSCALL instruction pool */
807 /* POOL16C_00 instruction pool */
815 /* PP.LSX and PP.LSXS instruction pool */
853 /* ERETx instruction pool */
/* POOL32FxF_{0, 1} instruction pool */
885 NM_FLOOR_L_S = 0x00c,
886 NM_FLOOR_L_D = 0x10c,
888 NM_FLOOR_W_S = 0x02c,
889 NM_FLOOR_W_D = 0x12c,
895 NM_TRUNC_L_S = 0x08c,
896 NM_TRUNC_L_D = 0x18c,
897 NM_TRUNC_W_S = 0x0ac,
898 NM_TRUNC_W_D = 0x1ac,
899 NM_ROUND_L_S = 0x0cc,
900 NM_ROUND_L_D = 0x1cc,
901 NM_ROUND_W_S = 0x0ec,
902 NM_ROUND_W_D = 0x1ec,
918 /* P.LL instruction pool */
924 /* P.SC instruction pool */
930 /* P.DVP instruction pool */
/*
 * nanoMIPS decoding engine
 */

/* extraction utilities */

#define NANOMIPS_EXTRACT_RT3(op) ((op >> 7) & 0x7)
#define NANOMIPS_EXTRACT_RS3(op) ((op >> 4) & 0x7)
#define NANOMIPS_EXTRACT_RD3(op) ((op >> 1) & 0x7)
#define NANOMIPS_EXTRACT_RD5(op) ((op >> 5) & 0x1f)
#define NANOMIPS_EXTRACT_RS5(op) (op & 0x1f)

/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr3'). */
static inline int decode_gpr_gpr3(int r)
{
    static const int map[] = { 16, 17, 18, 19,  4,  5,  6,  7 };
    return map[r & 0x7];
}

/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr3.src.store'). */
static inline int decode_gpr_gpr3_src_store(int r)
{
    static const int map[] = {  0, 17, 18, 19,  4,  5,  6,  7 };
    return map[r & 0x7];
}

/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr4'). */
static inline int decode_gpr_gpr4(int r)
{
    static const int map[] = {  8,  9, 10, 11,  4,  5,  6,  7,
                               16, 17, 18, 19, 20, 21, 22, 23 };
    return map[r & 0xf];
}

/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr4.zero'). */
static inline int decode_gpr_gpr4_zero(int r)
{
    static const int map[] = {  8,  9, 10,  0,  4,  5,  6,  7,
                               16, 17, 18, 19, 20, 21, 22, 23 };
    return map[r & 0xf];
}
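/*
 * Illustrative example (not part of the decoder itself): a 16-bit
 * nanoMIPS instruction word "op" carries its 3-bit rt field in bits
 * 9..7, so it is typically decoded as
 *
 *     int rt = decode_gpr_gpr3(NANOMIPS_EXTRACT_RT3(op));
 *
 * where an encoded field value of 0 selects $16 (s0) and 4 selects
 * $4 (a0), per the gpr3 map above.
 */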
986 static void gen_ext(DisasContext *ctx, int wordsz, int rd, int rs, int rt,
989 gen_align_bits(ctx, wordsz, rd, rs, rt, wordsz - shift);
992 static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
993 uint32_t reg1, uint32_t reg2)
995 TCGv taddr = tcg_temp_new();
996 TCGv_i64 tval = tcg_temp_new_i64();
997 TCGv tmp1 = tcg_temp_new();
998 TCGv tmp2 = tcg_temp_new();
1000 gen_base_offset_addr(ctx, taddr, base, offset);
1001 tcg_gen_qemu_ld64(tval, taddr, ctx->mem_idx);
1002 if (cpu_is_bigendian(ctx)) {
1003 tcg_gen_extr_i64_tl(tmp2, tmp1, tval);
1005 tcg_gen_extr_i64_tl(tmp1, tmp2, tval);
1007 gen_store_gpr(tmp1, reg1);
1008 gen_store_gpr(tmp2, reg2);
1009 tcg_gen_st_i64(tval, cpu_env, offsetof(CPUMIPSState, llval_wp));
1010 tcg_gen_st_tl(taddr, cpu_env, offsetof(CPUMIPSState, lladdr));
1013 static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
1014 uint32_t reg1, uint32_t reg2, bool eva)
1016 TCGv taddr = tcg_temp_new();
1017 TCGv lladdr = tcg_temp_new();
1018 TCGv_i64 tval = tcg_temp_new_i64();
1019 TCGv_i64 llval = tcg_temp_new_i64();
1020 TCGv_i64 val = tcg_temp_new_i64();
1021 TCGv tmp1 = tcg_temp_new();
1022 TCGv tmp2 = tcg_temp_new();
1023 TCGLabel *lab_fail = gen_new_label();
1024 TCGLabel *lab_done = gen_new_label();
1026 gen_base_offset_addr(ctx, taddr, base, offset);
1028 tcg_gen_ld_tl(lladdr, cpu_env, offsetof(CPUMIPSState, lladdr));
1029 tcg_gen_brcond_tl(TCG_COND_NE, taddr, lladdr, lab_fail);
1031 gen_load_gpr(tmp1, reg1);
1032 gen_load_gpr(tmp2, reg2);
1034 if (cpu_is_bigendian(ctx)) {
1035 tcg_gen_concat_tl_i64(tval, tmp2, tmp1);
1037 tcg_gen_concat_tl_i64(tval, tmp1, tmp2);
1040 tcg_gen_ld_i64(llval, cpu_env, offsetof(CPUMIPSState, llval_wp));
1041 tcg_gen_atomic_cmpxchg_i64(val, taddr, llval, tval,
1042 eva ? MIPS_HFLAG_UM : ctx->mem_idx, MO_64);
1044 tcg_gen_movi_tl(cpu_gpr[reg1], 1);
1046 tcg_gen_brcond_i64(TCG_COND_EQ, val, llval, lab_done);
1048 gen_set_label(lab_fail);
1051 tcg_gen_movi_tl(cpu_gpr[reg1], 0);
1053 gen_set_label(lab_done);
1054 tcg_gen_movi_tl(lladdr, -1);
1055 tcg_gen_st_tl(lladdr, cpu_env, offsetof(CPUMIPSState, lladdr));
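/*
 * Note on the LLWP/SCWP pair above: gen_llwp() loads a 64-bit value,
 * splits it into two 32-bit GPRs (the halves swap with endianness),
 * and records the address in lladdr and the value in llval_wp.
 * gen_scwp() only succeeds when the address still matches lladdr and
 * the atomic cmpxchg still observes llval_wp in memory; the success
 * flag (1 or 0) is written to the first register of the pair.
 */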
1058 static void gen_adjust_sp(DisasContext *ctx, int u)
1060 gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], u);
1063 static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count,
1064 uint8_t gp, uint16_t u)
1067 TCGv va = tcg_temp_new();
1068 TCGv t0 = tcg_temp_new();
1070 while (counter != count) {
1071 bool use_gp = gp && (counter == count - 1);
1072 int this_rt = use_gp ? 28 : (rt & 0x10) | ((rt + counter) & 0x1f);
1073 int this_offset = -((counter + 1) << 2);
1074 gen_base_offset_addr(ctx, va, 29, this_offset);
1075 gen_load_gpr(t0, this_rt);
1076 tcg_gen_qemu_st_tl(t0, va, ctx->mem_idx,
1077 (MO_TEUL | ctx->default_tcg_memop_mask));
1081 /* adjust stack pointer */
1082 gen_adjust_sp(ctx, -u);
1085 static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count,
1086 uint8_t gp, uint16_t u)
1089 TCGv va = tcg_temp_new();
1090 TCGv t0 = tcg_temp_new();
1092 while (counter != count) {
1093 bool use_gp = gp && (counter == count - 1);
1094 int this_rt = use_gp ? 28 : (rt & 0x10) | ((rt + counter) & 0x1f);
1095 int this_offset = u - ((counter + 1) << 2);
1096 gen_base_offset_addr(ctx, va, 29, this_offset);
1097 tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx, MO_TESL |
1098 ctx->default_tcg_memop_mask);
1099 tcg_gen_ext32s_tl(t0, t0);
1100 gen_store_gpr(t0, this_rt);
1104 /* adjust stack pointer */
1105 gen_adjust_sp(ctx, u);
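/*
 * Worked example for the register-list computation in gen_save() and
 * gen_restore() above: with rt = 30 and count = 3, the expression
 * (rt & 0x10) | ((rt + counter) & 0x1f) selects $30, $31 and then
 * wraps around to $16, which matches the nanoMIPS SAVE/RESTORE
 * register list.  When the gp bit is set, the last register of the
 * list is replaced by $28 (gp).
 */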
1108 static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc,
1110 int rs, int rt, int32_t offset)
1112 target_ulong btgt = -1;
1113 int bcond_compute = 0;
1114 TCGv t0 = tcg_temp_new();
1115 TCGv t1 = tcg_temp_new();
1117 /* Load needed operands */
1121 /* Compare two registers */
1123 gen_load_gpr(t0, rs);
1124 gen_load_gpr(t1, rt);
1127 btgt = ctx->base.pc_next + insn_bytes + offset;
1130 /* Compare to zero */
1132 gen_load_gpr(t0, rs);
1135 btgt = ctx->base.pc_next + insn_bytes + offset;
1138 tcg_gen_andi_tl(t0, cpu_dspctrl, 0x3F);
1140 btgt = ctx->base.pc_next + insn_bytes + offset;
1144 /* Jump to register */
1145 if (offset != 0 && offset != 16) {
            /*
             * Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the
             * others are reserved.
             */
1150 MIPS_INVAL("jump hint");
1151 gen_reserved_instruction(ctx);
1154 gen_load_gpr(btarget, rs);
1157 MIPS_INVAL("branch/jump");
1158 gen_reserved_instruction(ctx);
1161 if (bcond_compute == 0) {
1162 /* No condition to be computed */
1164 case OPC_BEQ: /* rx == rx */
1166 ctx->hflags |= MIPS_HFLAG_B;
1168 case OPC_BGEZAL: /* 0 >= 0 */
1169 /* Always take and link */
1170 tcg_gen_movi_tl(cpu_gpr[31],
1171 ctx->base.pc_next + insn_bytes);
1172 ctx->hflags |= MIPS_HFLAG_B;
1174 case OPC_BNE: /* rx != rx */
1175 tcg_gen_movi_tl(cpu_gpr[31], ctx->base.pc_next + 8);
1176 /* Skip the instruction in the delay slot */
1177 ctx->base.pc_next += 4;
1180 ctx->hflags |= MIPS_HFLAG_BR;
1184 tcg_gen_movi_tl(cpu_gpr[rt],
1185 ctx->base.pc_next + insn_bytes);
1187 ctx->hflags |= MIPS_HFLAG_BR;
1190 MIPS_INVAL("branch/jump");
1191 gen_reserved_instruction(ctx);
1197 tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
1200 tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
1203 tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
1204 tcg_gen_movi_tl(cpu_gpr[31],
1205 ctx->base.pc_next + insn_bytes);
1208 tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 32);
1210 ctx->hflags |= MIPS_HFLAG_BC;
1213 MIPS_INVAL("conditional branch/jump");
1214 gen_reserved_instruction(ctx);
1219 ctx->btarget = btgt;
1222 if (insn_bytes == 2) {
1223 ctx->hflags |= MIPS_HFLAG_B16;
1227 static void gen_pool16c_nanomips_insn(DisasContext *ctx)
1229 int rt = decode_gpr_gpr3(NANOMIPS_EXTRACT_RT3(ctx->opcode));
1230 int rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode));
1232 switch (extract32(ctx->opcode, 2, 2)) {
1234 gen_logic(ctx, OPC_NOR, rt, rs, 0);
1237 gen_logic(ctx, OPC_AND, rt, rt, rs);
1240 gen_logic(ctx, OPC_XOR, rt, rt, rs);
1243 gen_logic(ctx, OPC_OR, rt, rt, rs);
1248 static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
1250 int rt = extract32(ctx->opcode, 21, 5);
1251 int rs = extract32(ctx->opcode, 16, 5);
1252 int rd = extract32(ctx->opcode, 11, 5);
1254 switch (extract32(ctx->opcode, 3, 7)) {
1256 switch (extract32(ctx->opcode, 10, 1)) {
1259 gen_trap(ctx, OPC_TEQ, rs, rt, -1, rd);
1263 gen_trap(ctx, OPC_TNE, rs, rt, -1, rd);
1269 gen_rdhwr(ctx, rt, rs, extract32(ctx->opcode, 11, 3));
1273 gen_bshfl(ctx, OPC_SEB, rs, rt);
1276 gen_bshfl(ctx, OPC_SEH, rs, rt);
1279 gen_shift(ctx, OPC_SLLV, rd, rt, rs);
1282 gen_shift(ctx, OPC_SRLV, rd, rt, rs);
1285 gen_shift(ctx, OPC_SRAV, rd, rt, rs);
1288 gen_shift(ctx, OPC_ROTRV, rd, rt, rs);
1291 gen_arith(ctx, OPC_ADD, rd, rs, rt);
1294 gen_arith(ctx, OPC_ADDU, rd, rs, rt);
1298 gen_arith(ctx, OPC_SUB, rd, rs, rt);
1301 gen_arith(ctx, OPC_SUBU, rd, rs, rt);
1304 switch (extract32(ctx->opcode, 10, 1)) {
1306 gen_cond_move(ctx, OPC_MOVZ, rd, rs, rt);
1309 gen_cond_move(ctx, OPC_MOVN, rd, rs, rt);
1314 gen_logic(ctx, OPC_AND, rd, rs, rt);
1317 gen_logic(ctx, OPC_OR, rd, rs, rt);
1320 gen_logic(ctx, OPC_NOR, rd, rs, rt);
1323 gen_logic(ctx, OPC_XOR, rd, rs, rt);
1326 gen_slt(ctx, OPC_SLT, rd, rs, rt);
1331 #ifndef CONFIG_USER_ONLY
1332 TCGv t0 = tcg_temp_new();
1333 switch (extract32(ctx->opcode, 10, 1)) {
1336 check_cp0_enabled(ctx);
1337 gen_helper_dvp(t0, cpu_env);
1338 gen_store_gpr(t0, rt);
1343 check_cp0_enabled(ctx);
1344 gen_helper_evp(t0, cpu_env);
1345 gen_store_gpr(t0, rt);
1351 gen_slt(ctx, OPC_SLTU, rd, rs, rt);
1356 TCGv t0 = tcg_temp_new();
1357 TCGv t1 = tcg_temp_new();
1358 TCGv t2 = tcg_temp_new();
1360 gen_load_gpr(t1, rs);
1361 gen_load_gpr(t2, rt);
1362 tcg_gen_add_tl(t0, t1, t2);
1363 tcg_gen_ext32s_tl(t0, t0);
1364 tcg_gen_xor_tl(t1, t1, t2);
1365 tcg_gen_xor_tl(t2, t0, t2);
1366 tcg_gen_andc_tl(t1, t2, t1);
1368 /* operands of same sign, result different sign */
1369 tcg_gen_setcondi_tl(TCG_COND_LT, t0, t1, 0);
1370 gen_store_gpr(t0, rd);
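            /*
             * Worked example of the overflow test above: adding
             * 0x7fffffff and 1 gives 0x80000000; both operands are
             * non-negative while the result is negative, so the flag
             * stored in rd is 1.
             */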
1374 gen_r6_muldiv(ctx, R6_OPC_MUL, rd, rs, rt);
1377 gen_r6_muldiv(ctx, R6_OPC_MUH, rd, rs, rt);
1380 gen_r6_muldiv(ctx, R6_OPC_MULU, rd, rs, rt);
1383 gen_r6_muldiv(ctx, R6_OPC_MUHU, rd, rs, rt);
1386 gen_r6_muldiv(ctx, R6_OPC_DIV, rd, rs, rt);
1389 gen_r6_muldiv(ctx, R6_OPC_MOD, rd, rs, rt);
1392 gen_r6_muldiv(ctx, R6_OPC_DIVU, rd, rs, rt);
1395 gen_r6_muldiv(ctx, R6_OPC_MODU, rd, rs, rt);
1397 #ifndef CONFIG_USER_ONLY
1399 check_cp0_enabled(ctx);
1404 gen_mfc0(ctx, cpu_gpr[rt], rs, extract32(ctx->opcode, 11, 3));
1407 check_cp0_enabled(ctx);
1409 TCGv t0 = tcg_temp_new();
1411 gen_load_gpr(t0, rt);
1412 gen_mtc0(ctx, t0, rs, extract32(ctx->opcode, 11, 3));
1417 uint8_t sc = extract32(ctx->opcode, 10, 1);
1418 TCGv t0 = tcg_temp_new();
1426 gen_store_gpr(t0, rt);
1427 } else if (rs == 0) {
1430 gen_helper_dvpe(t0, cpu_env);
1431 gen_store_gpr(t0, rt);
1433 gen_reserved_instruction(ctx);
1441 gen_store_gpr(t0, rt);
1442 } else if (rs == 0) {
1445 gen_helper_evpe(t0, cpu_env);
1446 gen_store_gpr(t0, rt);
1448 gen_reserved_instruction(ctx);
1457 TCGv t0 = tcg_temp_new();
1458 TCGv t1 = tcg_temp_new();
1460 gen_load_gpr(t0, rt);
1461 gen_load_gpr(t1, rs);
1462 gen_helper_fork(t0, t1);
1467 check_cp0_enabled(ctx);
1472 gen_mftr(env, ctx, rs, rt, extract32(ctx->opcode, 10, 1),
1473 extract32(ctx->opcode, 11, 5), extract32(ctx->opcode, 3, 1));
1477 check_cp0_enabled(ctx);
1478 gen_mttr(env, ctx, rs, rt, extract32(ctx->opcode, 10, 1),
1479 extract32(ctx->opcode, 11, 5), extract32(ctx->opcode, 3, 1));
1484 TCGv t0 = tcg_temp_new();
1486 gen_load_gpr(t0, rs);
1487 gen_helper_yield(t0, cpu_env, t0);
1488 gen_store_gpr(t0, rt);
1493 gen_reserved_instruction(ctx);
1499 static void gen_pool32axf_1_5_nanomips_insn(DisasContext *ctx, uint32_t opc,
1500 int ret, int v1, int v2)
1506 t0 = tcg_temp_new_i32();
1508 v0_t = tcg_temp_new();
1509 v1_t = tcg_temp_new();
1511 tcg_gen_movi_i32(t0, v2 >> 3);
1513 gen_load_gpr(v0_t, ret);
1514 gen_load_gpr(v1_t, v1);
1517 case NM_MAQ_S_W_PHR:
1519 gen_helper_maq_s_w_phr(t0, v1_t, v0_t, cpu_env);
1521 case NM_MAQ_S_W_PHL:
1523 gen_helper_maq_s_w_phl(t0, v1_t, v0_t, cpu_env);
1525 case NM_MAQ_SA_W_PHR:
1527 gen_helper_maq_sa_w_phr(t0, v1_t, v0_t, cpu_env);
1529 case NM_MAQ_SA_W_PHL:
1531 gen_helper_maq_sa_w_phl(t0, v1_t, v0_t, cpu_env);
1534 gen_reserved_instruction(ctx);
1540 static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
1541 int ret, int v1, int v2)
1544 TCGv t0 = tcg_temp_new();
1545 TCGv t1 = tcg_temp_new();
1546 TCGv v0_t = tcg_temp_new();
1548 gen_load_gpr(v0_t, v1);
1551 case NM_POOL32AXF_1_0:
1553 switch (extract32(ctx->opcode, 12, 2)) {
1555 gen_HILO(ctx, OPC_MFHI, v2 >> 3, ret);
1558 gen_HILO(ctx, OPC_MFLO, v2 >> 3, ret);
1561 gen_HILO(ctx, OPC_MTHI, v2 >> 3, v1);
1564 gen_HILO(ctx, OPC_MTLO, v2 >> 3, v1);
1568 case NM_POOL32AXF_1_1:
1570 switch (extract32(ctx->opcode, 12, 2)) {
1572 tcg_gen_movi_tl(t0, v2 >> 3);
1573 gen_helper_mthlip(t0, v0_t, cpu_env);
1576 tcg_gen_movi_tl(t0, v2 >> 3);
1577 gen_helper_shilo(t0, v0_t, cpu_env);
1580 gen_reserved_instruction(ctx);
1584 case NM_POOL32AXF_1_3:
1586 imm = extract32(ctx->opcode, 14, 7);
1587 switch (extract32(ctx->opcode, 12, 2)) {
1589 tcg_gen_movi_tl(t0, imm);
1590 gen_helper_rddsp(t0, t0, cpu_env);
1591 gen_store_gpr(t0, ret);
1594 gen_load_gpr(t0, ret);
1595 tcg_gen_movi_tl(t1, imm);
1596 gen_helper_wrdsp(t0, t1, cpu_env);
1599 tcg_gen_movi_tl(t0, v2 >> 3);
1600 tcg_gen_movi_tl(t1, v1);
1601 gen_helper_extp(t0, t0, t1, cpu_env);
1602 gen_store_gpr(t0, ret);
1605 tcg_gen_movi_tl(t0, v2 >> 3);
1606 tcg_gen_movi_tl(t1, v1);
1607 gen_helper_extpdp(t0, t0, t1, cpu_env);
1608 gen_store_gpr(t0, ret);
1612 case NM_POOL32AXF_1_4:
1614 tcg_gen_movi_tl(t0, v2 >> 2);
1615 switch (extract32(ctx->opcode, 12, 1)) {
1617 gen_helper_shll_qb(t0, t0, v0_t, cpu_env);
1618 gen_store_gpr(t0, ret);
1621 gen_helper_shrl_qb(t0, t0, v0_t);
1622 gen_store_gpr(t0, ret);
1626 case NM_POOL32AXF_1_5:
1627 opc = extract32(ctx->opcode, 12, 2);
1628 gen_pool32axf_1_5_nanomips_insn(ctx, opc, ret, v1, v2);
1630 case NM_POOL32AXF_1_7:
1632 tcg_gen_movi_tl(t0, v2 >> 3);
1633 tcg_gen_movi_tl(t1, v1);
1634 switch (extract32(ctx->opcode, 12, 2)) {
1636 gen_helper_extr_w(t0, t0, t1, cpu_env);
1637 gen_store_gpr(t0, ret);
1640 gen_helper_extr_r_w(t0, t0, t1, cpu_env);
1641 gen_store_gpr(t0, ret);
1644 gen_helper_extr_rs_w(t0, t0, t1, cpu_env);
1645 gen_store_gpr(t0, ret);
1648 gen_helper_extr_s_h(t0, t0, t1, cpu_env);
1649 gen_store_gpr(t0, ret);
1654 gen_reserved_instruction(ctx);
1659 static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc,
1660 TCGv v0, TCGv v1, int rd)
1664 t0 = tcg_temp_new_i32();
1666 tcg_gen_movi_i32(t0, rd >> 3);
1669 case NM_POOL32AXF_2_0_7:
1670 switch (extract32(ctx->opcode, 9, 3)) {
1673 gen_helper_dpa_w_ph(t0, v1, v0, cpu_env);
1675 case NM_DPAQ_S_W_PH:
1677 gen_helper_dpaq_s_w_ph(t0, v1, v0, cpu_env);
1681 gen_helper_dps_w_ph(t0, v1, v0, cpu_env);
1683 case NM_DPSQ_S_W_PH:
1685 gen_helper_dpsq_s_w_ph(t0, v1, v0, cpu_env);
1688 gen_reserved_instruction(ctx);
1692 case NM_POOL32AXF_2_8_15:
1693 switch (extract32(ctx->opcode, 9, 3)) {
1696 gen_helper_dpax_w_ph(t0, v0, v1, cpu_env);
1698 case NM_DPAQ_SA_L_W:
1700 gen_helper_dpaq_sa_l_w(t0, v0, v1, cpu_env);
1704 gen_helper_dpsx_w_ph(t0, v0, v1, cpu_env);
1706 case NM_DPSQ_SA_L_W:
1708 gen_helper_dpsq_sa_l_w(t0, v0, v1, cpu_env);
1711 gen_reserved_instruction(ctx);
1715 case NM_POOL32AXF_2_16_23:
1716 switch (extract32(ctx->opcode, 9, 3)) {
1719 gen_helper_dpau_h_qbl(t0, v0, v1, cpu_env);
1721 case NM_DPAQX_S_W_PH:
1723 gen_helper_dpaqx_s_w_ph(t0, v0, v1, cpu_env);
1727 gen_helper_dpsu_h_qbl(t0, v0, v1, cpu_env);
1729 case NM_DPSQX_S_W_PH:
1731 gen_helper_dpsqx_s_w_ph(t0, v0, v1, cpu_env);
1735 gen_helper_mulsa_w_ph(t0, v0, v1, cpu_env);
1738 gen_reserved_instruction(ctx);
1742 case NM_POOL32AXF_2_24_31:
1743 switch (extract32(ctx->opcode, 9, 3)) {
1746 gen_helper_dpau_h_qbr(t0, v1, v0, cpu_env);
1748 case NM_DPAQX_SA_W_PH:
1750 gen_helper_dpaqx_sa_w_ph(t0, v1, v0, cpu_env);
1754 gen_helper_dpsu_h_qbr(t0, v1, v0, cpu_env);
1756 case NM_DPSQX_SA_W_PH:
1758 gen_helper_dpsqx_sa_w_ph(t0, v1, v0, cpu_env);
1760 case NM_MULSAQ_S_W_PH:
1762 gen_helper_mulsaq_s_w_ph(t0, v1, v0, cpu_env);
1765 gen_reserved_instruction(ctx);
1770 gen_reserved_instruction(ctx);
1775 static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
1776 int rt, int rs, int rd)
1779 TCGv t0 = tcg_temp_new();
1780 TCGv t1 = tcg_temp_new();
1781 TCGv v0_t = tcg_temp_new();
1782 TCGv v1_t = tcg_temp_new();
1784 gen_load_gpr(v0_t, rt);
1785 gen_load_gpr(v1_t, rs);
1788 case NM_POOL32AXF_2_0_7:
1789 switch (extract32(ctx->opcode, 9, 3)) {
1791 case NM_DPAQ_S_W_PH:
1793 case NM_DPSQ_S_W_PH:
1794 gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd);
1799 gen_load_gpr(t0, rs);
1801 if (rd != 0 && rd != 2) {
1802 tcg_gen_shli_tl(cpu_gpr[ret], cpu_gpr[ret], 8 * rd);
1803 tcg_gen_ext32u_tl(t0, t0);
1804 tcg_gen_shri_tl(t0, t0, 8 * (4 - rd));
1805 tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
1807 tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
1813 int acc = extract32(ctx->opcode, 14, 2);
1814 TCGv_i64 t2 = tcg_temp_new_i64();
1815 TCGv_i64 t3 = tcg_temp_new_i64();
1817 gen_load_gpr(t0, rt);
1818 gen_load_gpr(t1, rs);
1819 tcg_gen_ext_tl_i64(t2, t0);
1820 tcg_gen_ext_tl_i64(t3, t1);
1821 tcg_gen_mul_i64(t2, t2, t3);
1822 tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
1823 tcg_gen_add_i64(t2, t2, t3);
1824 gen_move_low32(cpu_LO[acc], t2);
1825 gen_move_high32(cpu_HI[acc], t2);
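            /*
             * The inline sequence above is a widening multiply-accumulate:
             * the two operands are sign-extended to 64 bits, multiplied,
             * added to the 64-bit {HI, LO} pair of accumulator "acc", and
             * the result is split back into cpu_LO[acc] and cpu_HI[acc].
             */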
1831 int acc = extract32(ctx->opcode, 14, 2);
1832 TCGv_i32 t2 = tcg_temp_new_i32();
1833 TCGv_i32 t3 = tcg_temp_new_i32();
1835 if (acc || ctx->insn_flags & ISA_MIPS_R6) {
1838 gen_load_gpr(t0, rs);
1839 gen_load_gpr(t1, rt);
1840 tcg_gen_trunc_tl_i32(t2, t0);
1841 tcg_gen_trunc_tl_i32(t3, t1);
1842 tcg_gen_muls2_i32(t2, t3, t2, t3);
1843 tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
1844 tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
1849 gen_load_gpr(v1_t, rs);
1850 tcg_gen_movi_tl(t0, rd >> 3);
1851 gen_helper_extr_w(t0, t0, v1_t, cpu_env);
1852 gen_store_gpr(t0, ret);
1856 case NM_POOL32AXF_2_8_15:
1857 switch (extract32(ctx->opcode, 9, 3)) {
1859 case NM_DPAQ_SA_L_W:
1861 case NM_DPSQ_SA_L_W:
1862 gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd);
1867 int acc = extract32(ctx->opcode, 14, 2);
1868 TCGv_i64 t2 = tcg_temp_new_i64();
1869 TCGv_i64 t3 = tcg_temp_new_i64();
1871 gen_load_gpr(t0, rs);
1872 gen_load_gpr(t1, rt);
1873 tcg_gen_ext32u_tl(t0, t0);
1874 tcg_gen_ext32u_tl(t1, t1);
1875 tcg_gen_extu_tl_i64(t2, t0);
1876 tcg_gen_extu_tl_i64(t3, t1);
1877 tcg_gen_mul_i64(t2, t2, t3);
1878 tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
1879 tcg_gen_add_i64(t2, t2, t3);
1880 gen_move_low32(cpu_LO[acc], t2);
1881 gen_move_high32(cpu_HI[acc], t2);
1887 int acc = extract32(ctx->opcode, 14, 2);
1888 TCGv_i32 t2 = tcg_temp_new_i32();
1889 TCGv_i32 t3 = tcg_temp_new_i32();
1891 if (acc || ctx->insn_flags & ISA_MIPS_R6) {
1894 gen_load_gpr(t0, rs);
1895 gen_load_gpr(t1, rt);
1896 tcg_gen_trunc_tl_i32(t2, t0);
1897 tcg_gen_trunc_tl_i32(t3, t1);
1898 tcg_gen_mulu2_i32(t2, t3, t2, t3);
1899 tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
1900 tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
1905 tcg_gen_movi_tl(t0, rd >> 3);
1906 gen_helper_extr_r_w(t0, t0, v1_t, cpu_env);
1907 gen_store_gpr(t0, ret);
1910 gen_reserved_instruction(ctx);
1914 case NM_POOL32AXF_2_16_23:
1915 switch (extract32(ctx->opcode, 9, 3)) {
1917 case NM_DPAQX_S_W_PH:
1919 case NM_DPSQX_S_W_PH:
1921 gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd);
1925 tcg_gen_movi_tl(t0, rd >> 3);
1926 gen_helper_extp(t0, t0, v1_t, cpu_env);
1927 gen_store_gpr(t0, ret);
1932 int acc = extract32(ctx->opcode, 14, 2);
1933 TCGv_i64 t2 = tcg_temp_new_i64();
1934 TCGv_i64 t3 = tcg_temp_new_i64();
1936 gen_load_gpr(t0, rs);
1937 gen_load_gpr(t1, rt);
1938 tcg_gen_ext_tl_i64(t2, t0);
1939 tcg_gen_ext_tl_i64(t3, t1);
1940 tcg_gen_mul_i64(t2, t2, t3);
1941 tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
1942 tcg_gen_sub_i64(t2, t3, t2);
1943 gen_move_low32(cpu_LO[acc], t2);
1944 gen_move_high32(cpu_HI[acc], t2);
1949 tcg_gen_movi_tl(t0, rd >> 3);
1950 gen_helper_extr_rs_w(t0, t0, v1_t, cpu_env);
1951 gen_store_gpr(t0, ret);
1955 case NM_POOL32AXF_2_24_31:
1956 switch (extract32(ctx->opcode, 9, 3)) {
1958 case NM_DPAQX_SA_W_PH:
1960 case NM_DPSQX_SA_W_PH:
1961 case NM_MULSAQ_S_W_PH:
1962 gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd);
1966 tcg_gen_movi_tl(t0, rd >> 3);
1967 gen_helper_extpdp(t0, t0, v1_t, cpu_env);
1968 gen_store_gpr(t0, ret);
1973 int acc = extract32(ctx->opcode, 14, 2);
1974 TCGv_i64 t2 = tcg_temp_new_i64();
1975 TCGv_i64 t3 = tcg_temp_new_i64();
1977 gen_load_gpr(t0, rs);
1978 gen_load_gpr(t1, rt);
1979 tcg_gen_ext32u_tl(t0, t0);
1980 tcg_gen_ext32u_tl(t1, t1);
1981 tcg_gen_extu_tl_i64(t2, t0);
1982 tcg_gen_extu_tl_i64(t3, t1);
1983 tcg_gen_mul_i64(t2, t2, t3);
1984 tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
1985 tcg_gen_sub_i64(t2, t3, t2);
1986 gen_move_low32(cpu_LO[acc], t2);
1987 gen_move_high32(cpu_HI[acc], t2);
1992 tcg_gen_movi_tl(t0, rd >> 3);
1993 gen_helper_extr_s_h(t0, t0, v1_t, cpu_env);
1994 gen_store_gpr(t0, ret);
1999 gen_reserved_instruction(ctx);
2004 static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc,
2008 TCGv t0 = tcg_temp_new();
2009 TCGv v0_t = tcg_temp_new();
2011 gen_load_gpr(v0_t, rs);
2016 gen_helper_absq_s_qb(v0_t, v0_t, cpu_env);
2017 gen_store_gpr(v0_t, ret);
2021 gen_helper_absq_s_ph(v0_t, v0_t, cpu_env);
2022 gen_store_gpr(v0_t, ret);
2026 gen_helper_absq_s_w(v0_t, v0_t, cpu_env);
2027 gen_store_gpr(v0_t, ret);
2029 case NM_PRECEQ_W_PHL:
2031 tcg_gen_andi_tl(v0_t, v0_t, 0xFFFF0000);
2032 tcg_gen_ext32s_tl(v0_t, v0_t);
2033 gen_store_gpr(v0_t, ret);
2035 case NM_PRECEQ_W_PHR:
2037 tcg_gen_andi_tl(v0_t, v0_t, 0x0000FFFF);
2038 tcg_gen_shli_tl(v0_t, v0_t, 16);
2039 tcg_gen_ext32s_tl(v0_t, v0_t);
2040 gen_store_gpr(v0_t, ret);
2042 case NM_PRECEQU_PH_QBL:
2044 gen_helper_precequ_ph_qbl(v0_t, v0_t);
2045 gen_store_gpr(v0_t, ret);
2047 case NM_PRECEQU_PH_QBR:
2049 gen_helper_precequ_ph_qbr(v0_t, v0_t);
2050 gen_store_gpr(v0_t, ret);
2052 case NM_PRECEQU_PH_QBLA:
2054 gen_helper_precequ_ph_qbla(v0_t, v0_t);
2055 gen_store_gpr(v0_t, ret);
2057 case NM_PRECEQU_PH_QBRA:
2059 gen_helper_precequ_ph_qbra(v0_t, v0_t);
2060 gen_store_gpr(v0_t, ret);
2062 case NM_PRECEU_PH_QBL:
2064 gen_helper_preceu_ph_qbl(v0_t, v0_t);
2065 gen_store_gpr(v0_t, ret);
2067 case NM_PRECEU_PH_QBR:
2069 gen_helper_preceu_ph_qbr(v0_t, v0_t);
2070 gen_store_gpr(v0_t, ret);
2072 case NM_PRECEU_PH_QBLA:
2074 gen_helper_preceu_ph_qbla(v0_t, v0_t);
2075 gen_store_gpr(v0_t, ret);
2077 case NM_PRECEU_PH_QBRA:
2079 gen_helper_preceu_ph_qbra(v0_t, v0_t);
2080 gen_store_gpr(v0_t, ret);
2084 tcg_gen_ext16u_tl(v0_t, v0_t);
2085 tcg_gen_shli_tl(t0, v0_t, 16);
2086 tcg_gen_or_tl(v0_t, v0_t, t0);
2087 tcg_gen_ext32s_tl(v0_t, v0_t);
2088 gen_store_gpr(v0_t, ret);
2092 tcg_gen_ext8u_tl(v0_t, v0_t);
2093 tcg_gen_shli_tl(t0, v0_t, 8);
2094 tcg_gen_or_tl(v0_t, v0_t, t0);
2095 tcg_gen_shli_tl(t0, v0_t, 16);
2096 tcg_gen_or_tl(v0_t, v0_t, t0);
2097 tcg_gen_ext32s_tl(v0_t, v0_t);
2098 gen_store_gpr(v0_t, ret);
2102 gen_helper_bitrev(v0_t, v0_t);
2103 gen_store_gpr(v0_t, ret);
2108 TCGv tv0 = tcg_temp_new();
2110 gen_load_gpr(tv0, rt);
2111 gen_helper_insv(v0_t, cpu_env, v0_t, tv0);
2112 gen_store_gpr(v0_t, ret);
2117 gen_helper_raddu_w_qb(v0_t, v0_t);
2118 gen_store_gpr(v0_t, ret);
2121 gen_bitswap(ctx, OPC_BITSWAP, ret, rs);
2125 gen_cl(ctx, OPC_CLO, ret, rs);
2129 gen_cl(ctx, OPC_CLZ, ret, rs);
2132 gen_bshfl(ctx, OPC_WSBH, ret, rs);
2135 gen_reserved_instruction(ctx);
2140 static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
2141 int rt, int rs, int rd)
2143 TCGv t0 = tcg_temp_new();
2144 TCGv rs_t = tcg_temp_new();
2146 gen_load_gpr(rs_t, rs);
2151 tcg_gen_movi_tl(t0, rd >> 2);
2152 switch (extract32(ctx->opcode, 12, 1)) {
2155 gen_helper_shra_qb(t0, t0, rs_t);
2156 gen_store_gpr(t0, rt);
2160 gen_helper_shra_r_qb(t0, t0, rs_t);
2161 gen_store_gpr(t0, rt);
2167 tcg_gen_movi_tl(t0, rd >> 1);
2168 gen_helper_shrl_ph(t0, t0, rs_t);
2169 gen_store_gpr(t0, rt);
2176 imm = extract32(ctx->opcode, 13, 8);
2177 result = (uint32_t)imm << 24 |
2178 (uint32_t)imm << 16 |
2179 (uint32_t)imm << 8 |
2181 result = (int32_t)result;
2182 tcg_gen_movi_tl(t0, result);
2183 gen_store_gpr(t0, rt);
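        /*
         * Example of the replication above: imm = 0xa5 produces
         * result = 0xa5a5a5a5, i.e. the 8-bit immediate is copied into
         * all four byte lanes of rt (REPL.QB).
         */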
2187 gen_reserved_instruction(ctx);
2193 static void gen_pool32axf_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
2195 int rt = extract32(ctx->opcode, 21, 5);
2196 int rs = extract32(ctx->opcode, 16, 5);
2197 int rd = extract32(ctx->opcode, 11, 5);
2199 switch (extract32(ctx->opcode, 6, 3)) {
2200 case NM_POOL32AXF_1:
2202 int32_t op1 = extract32(ctx->opcode, 9, 3);
2203 gen_pool32axf_1_nanomips_insn(ctx, op1, rt, rs, rd);
2206 case NM_POOL32AXF_2:
2208 int32_t op1 = extract32(ctx->opcode, 12, 2);
2209 gen_pool32axf_2_nanomips_insn(ctx, op1, rt, rs, rd);
2212 case NM_POOL32AXF_4:
2214 int32_t op1 = extract32(ctx->opcode, 9, 7);
2215 gen_pool32axf_4_nanomips_insn(ctx, op1, rt, rs);
2218 case NM_POOL32AXF_5:
2219 switch (extract32(ctx->opcode, 9, 7)) {
2220 #ifndef CONFIG_USER_ONLY
2222 gen_cp0(env, ctx, OPC_TLBP, 0, 0);
2225 gen_cp0(env, ctx, OPC_TLBR, 0, 0);
2228 gen_cp0(env, ctx, OPC_TLBWI, 0, 0);
2231 gen_cp0(env, ctx, OPC_TLBWR, 0, 0);
2234 gen_cp0(env, ctx, OPC_TLBINV, 0, 0);
2237 gen_cp0(env, ctx, OPC_TLBINVF, 0, 0);
2240 check_cp0_enabled(ctx);
2242 TCGv t0 = tcg_temp_new();
2244 save_cpu_state(ctx, 1);
2245 gen_helper_di(t0, cpu_env);
2246 gen_store_gpr(t0, rt);
2247 /* Stop translation as we may have switched the execution mode */
2248 ctx->base.is_jmp = DISAS_STOP;
2252 check_cp0_enabled(ctx);
2254 TCGv t0 = tcg_temp_new();
2256 save_cpu_state(ctx, 1);
2257 gen_helper_ei(t0, cpu_env);
2258 gen_store_gpr(t0, rt);
2259 /* Stop translation as we may have switched the execution mode */
2260 ctx->base.is_jmp = DISAS_STOP;
2264 check_cp0_enabled(ctx);
2265 gen_load_srsgpr(rs, rt);
2268 check_cp0_enabled(ctx);
2269 gen_store_srsgpr(rs, rt);
2272 gen_cp0(env, ctx, OPC_WAIT, 0, 0);
2275 gen_cp0(env, ctx, OPC_DERET, 0, 0);
2278 gen_cp0(env, ctx, OPC_ERET, 0, 0);
2282 gen_reserved_instruction(ctx);
2286 case NM_POOL32AXF_7:
2288 int32_t op1 = extract32(ctx->opcode, 9, 3);
2289 gen_pool32axf_7_nanomips_insn(ctx, op1, rt, rs, rd);
2293 gen_reserved_instruction(ctx);
2298 /* Immediate Value Compact Branches */
2299 static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
2300 int rt, int32_t imm, int32_t offset)
2302 TCGCond cond = TCG_COND_ALWAYS;
2303 TCGv t0 = tcg_temp_new();
2304 TCGv t1 = tcg_temp_new();
2306 gen_load_gpr(t0, rt);
2307 tcg_gen_movi_tl(t1, imm);
2308 ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
2310 /* Load needed operands and calculate btarget */
2313 if (rt == 0 && imm == 0) {
2314 /* Unconditional branch */
2315 } else if (rt == 0 && imm != 0) {
2325 if (imm >= 32 && !(ctx->hflags & MIPS_HFLAG_64)) {
2326 gen_reserved_instruction(ctx);
2328 } else if (rt == 0 && opc == NM_BBEQZC) {
2329 /* Unconditional branch */
2330 } else if (rt == 0 && opc == NM_BBNEZC) {
2334 tcg_gen_shri_tl(t0, t0, imm);
2335 tcg_gen_andi_tl(t0, t0, 1);
2336 tcg_gen_movi_tl(t1, 0);
2337 if (opc == NM_BBEQZC) {
2345 if (rt == 0 && imm == 0) {
2348 } else if (rt == 0 && imm != 0) {
2349 /* Unconditional branch */
2355 if (rt == 0 && imm == 0) {
2356 /* Unconditional branch */
2365 if (rt == 0 && imm == 0) {
2366 /* Unconditional branch */
2368 cond = TCG_COND_GEU;
2372 cond = TCG_COND_LTU;
2375 MIPS_INVAL("Immediate Value Compact branch");
2376 gen_reserved_instruction(ctx);
2380 /* branch completion */
2381 clear_branch_hflags(ctx);
2382 ctx->base.is_jmp = DISAS_NORETURN;
2384 if (cond == TCG_COND_ALWAYS) {
        /* Unconditional compact branch */
2386 gen_goto_tb(ctx, 0, ctx->btarget);
2388 /* Conditional compact branch */
2389 TCGLabel *fs = gen_new_label();
2391 tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, t1, fs);
2393 gen_goto_tb(ctx, 1, ctx->btarget);
2396 gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
2400 /* P.BALRSC type nanoMIPS R6 branches: BALRSC and BRSC */
2401 static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs,
2404 TCGv t0 = tcg_temp_new();
2405 TCGv t1 = tcg_temp_new();
2408 gen_load_gpr(t0, rs);
2412 tcg_gen_movi_tl(cpu_gpr[rt], ctx->base.pc_next + 4);
2415 /* calculate btarget */
2416 tcg_gen_shli_tl(t0, t0, 1);
2417 tcg_gen_movi_tl(t1, ctx->base.pc_next + 4);
2418 gen_op_addr_add(ctx, btarget, t1, t0);
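    /*
     * I.e. the branch target is the address of the next instruction
     * plus twice the value of GPR[rs]; for BALRSC the link address was
     * already written to GPR[rt] above.
     */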
2420 /* branch completion */
2421 clear_branch_hflags(ctx);
2422 ctx->base.is_jmp = DISAS_NORETURN;
2424 /* unconditional branch to register */
2425 tcg_gen_mov_tl(cpu_PC, btarget);
2426 tcg_gen_lookup_and_goto_ptr();
2429 /* nanoMIPS Branches */
2430 static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc,
2431 int rs, int rt, int32_t offset)
2433 int bcond_compute = 0;
2434 TCGv t0 = tcg_temp_new();
2435 TCGv t1 = tcg_temp_new();
2437 /* Load needed operands and calculate btarget */
2439 /* compact branch */
2442 gen_load_gpr(t0, rs);
2443 gen_load_gpr(t1, rt);
2445 ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
2449 if (rs == 0 || rs == rt) {
2450 /* OPC_BLEZALC, OPC_BGEZALC */
2451 /* OPC_BGTZALC, OPC_BLTZALC */
2452 tcg_gen_movi_tl(cpu_gpr[31], ctx->base.pc_next + 4);
2454 gen_load_gpr(t0, rs);
2455 gen_load_gpr(t1, rt);
2457 ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
2460 ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
2464 /* OPC_BEQZC, OPC_BNEZC */
2465 gen_load_gpr(t0, rs);
2467 ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
2469 /* OPC_JIC, OPC_JIALC */
2470 TCGv tbase = tcg_temp_new();
2471 TCGv toffset = tcg_temp_new();
2473 gen_load_gpr(tbase, rt);
2474 tcg_gen_movi_tl(toffset, offset);
2475 gen_op_addr_add(ctx, btarget, tbase, toffset);
2479 MIPS_INVAL("Compact branch/jump");
2480 gen_reserved_instruction(ctx);
2484 if (bcond_compute == 0) {
        /* Unconditional compact branch */
2488 gen_goto_tb(ctx, 0, ctx->btarget);
2491 MIPS_INVAL("Compact branch/jump");
2492 gen_reserved_instruction(ctx);
2496 /* Conditional compact branch */
2497 TCGLabel *fs = gen_new_label();
2501 if (rs == 0 && rt != 0) {
2503 tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
2504 } else if (rs != 0 && rt != 0 && rs == rt) {
2506 tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
2509 tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GEU), t0, t1, fs);
2513 if (rs == 0 && rt != 0) {
2515 tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
2516 } else if (rs != 0 && rt != 0 && rs == rt) {
2518 tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
2521 tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LTU), t0, t1, fs);
2525 if (rs == 0 && rt != 0) {
2527 tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
2528 } else if (rs != 0 && rt != 0 && rs == rt) {
2530 tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
2533 tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GE), t0, t1, fs);
2537 if (rs == 0 && rt != 0) {
2539 tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
2540 } else if (rs != 0 && rt != 0 && rs == rt) {
2542 tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
2545 tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LT), t0, t1, fs);
2549 tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t0, 0, fs);
2552 MIPS_INVAL("Compact conditional branch/jump");
2553 gen_reserved_instruction(ctx);
2557 /* branch completion */
2558 clear_branch_hflags(ctx);
2559 ctx->base.is_jmp = DISAS_NORETURN;
2561 /* Generating branch here as compact branches don't have delay slot */
2562 gen_goto_tb(ctx, 1, ctx->btarget);
2565 gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
2570 /* nanoMIPS CP1 Branches */
2571 static void gen_compute_branch_cp1_nm(DisasContext *ctx, uint32_t op,
2572 int32_t ft, int32_t offset)
2574 target_ulong btarget;
2575 TCGv_i64 t0 = tcg_temp_new_i64();
2577 gen_load_fpr64(ctx, t0, ft);
2578 tcg_gen_andi_i64(t0, t0, 1);
2580 btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
2584 tcg_gen_xori_i64(t0, t0, 1);
2585 ctx->hflags |= MIPS_HFLAG_BC;
2588 /* t0 already set */
2589 ctx->hflags |= MIPS_HFLAG_BC;
2592 MIPS_INVAL("cp1 cond branch");
2593 gen_reserved_instruction(ctx);
2597 tcg_gen_trunc_i64_tl(bcond, t0);
2599 ctx->btarget = btarget;
2603 static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt)
2606 t0 = tcg_temp_new();
2607 t1 = tcg_temp_new();
2609 gen_load_gpr(t0, rs);
2610 gen_load_gpr(t1, rt);
2612 if ((extract32(ctx->opcode, 6, 1)) == 1) {
2613 /* PP.LSXS instructions require shifting */
2614 switch (extract32(ctx->opcode, 7, 4)) {
2620 tcg_gen_shli_tl(t0, t0, 1);
2628 tcg_gen_shli_tl(t0, t0, 2);
2632 tcg_gen_shli_tl(t0, t0, 3);
2635 gen_reserved_instruction(ctx);
2639 gen_op_addr_add(ctx, t0, t0, t1);
2641 switch (extract32(ctx->opcode, 7, 4)) {
2643 tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
2645 gen_store_gpr(t0, rd);
2649 tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
2651 gen_store_gpr(t0, rd);
2655 tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
2657 gen_store_gpr(t0, rd);
2660 tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
2662 gen_store_gpr(t0, rd);
2666 tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
2668 gen_store_gpr(t0, rd);
2672 gen_load_gpr(t1, rd);
2673 tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
2679 gen_load_gpr(t1, rd);
2680 tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
2686 gen_load_gpr(t1, rd);
2687 tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
2698 if (ctx->CP0_Config1 & (1 << CP0C1_FP)) {
2699 check_cp1_enabled(ctx);
2700 switch (extract32(ctx->opcode, 7, 4)) {
2703 gen_flt_ldst(ctx, OPC_LWC1, rd, t0);
2707 gen_flt_ldst(ctx, OPC_LDC1, rd, t0);
2711 gen_flt_ldst(ctx, OPC_SWC1, rd, t0);
2715 gen_flt_ldst(ctx, OPC_SDC1, rd, t0);
2719 generate_exception_err(ctx, EXCP_CpU, 1);
2723 gen_reserved_instruction(ctx);
2728 static void gen_pool32f_nanomips_insn(DisasContext *ctx)
2732 rt = extract32(ctx->opcode, 21, 5);
2733 rs = extract32(ctx->opcode, 16, 5);
2734 rd = extract32(ctx->opcode, 11, 5);
2736 if (!(ctx->CP0_Config1 & (1 << CP0C1_FP))) {
2737 gen_reserved_instruction(ctx);
2740 check_cp1_enabled(ctx);
2741 switch (extract32(ctx->opcode, 0, 3)) {
2743 switch (extract32(ctx->opcode, 3, 7)) {
2745 gen_farith(ctx, OPC_RINT_S, 0, rt, rs, 0);
2748 gen_farith(ctx, OPC_RINT_D, 0, rt, rs, 0);
2751 gen_farith(ctx, OPC_CLASS_S, 0, rt, rs, 0);
2754 gen_farith(ctx, OPC_CLASS_D, 0, rt, rs, 0);
2757 gen_farith(ctx, OPC_ADD_S, rt, rs, rd, 0);
2760 gen_farith(ctx, OPC_ADD_D, rt, rs, rd, 0);
2763 gen_farith(ctx, OPC_SUB_S, rt, rs, rd, 0);
2766 gen_farith(ctx, OPC_SUB_D, rt, rs, rd, 0);
2769 gen_farith(ctx, OPC_MUL_S, rt, rs, rd, 0);
2772 gen_farith(ctx, OPC_MUL_D, rt, rs, rd, 0);
2775 gen_farith(ctx, OPC_DIV_S, rt, rs, rd, 0);
2778 gen_farith(ctx, OPC_DIV_D, rt, rs, rd, 0);
2781 gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs);
2784 gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs);
2787 gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs);
2790 gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs);
2793 gen_sel_s(ctx, OPC_SEL_S, rd, rt, rs);
2796 gen_sel_d(ctx, OPC_SEL_D, rd, rt, rs);
2799 gen_farith(ctx, OPC_MADDF_S, rt, rs, rd, 0);
2802 gen_farith(ctx, OPC_MADDF_D, rt, rs, rd, 0);
2805 gen_farith(ctx, OPC_MSUBF_S, rt, rs, rd, 0);
2808 gen_farith(ctx, OPC_MSUBF_D, rt, rs, rd, 0);
2811 gen_reserved_instruction(ctx);
2816 switch (extract32(ctx->opcode, 3, 3)) {
2818 switch (extract32(ctx->opcode, 9, 1)) {
2820 gen_farith(ctx, OPC_MIN_S, rt, rs, rd, 0);
2823 gen_farith(ctx, OPC_MIN_D, rt, rs, rd, 0);
2828 switch (extract32(ctx->opcode, 9, 1)) {
2830 gen_farith(ctx, OPC_MAX_S, rt, rs, rd, 0);
2833 gen_farith(ctx, OPC_MAX_D, rt, rs, rd, 0);
2838 switch (extract32(ctx->opcode, 9, 1)) {
2840 gen_farith(ctx, OPC_MINA_S, rt, rs, rd, 0);
2843 gen_farith(ctx, OPC_MINA_D, rt, rs, rd, 0);
2848 switch (extract32(ctx->opcode, 9, 1)) {
2850 gen_farith(ctx, OPC_MAXA_S, rt, rs, rd, 0);
2853 gen_farith(ctx, OPC_MAXA_D, rt, rs, rd, 0);
2858 switch (extract32(ctx->opcode, 6, 8)) {
2860 gen_cp1(ctx, OPC_CFC1, rt, rs);
2863 gen_cp1(ctx, OPC_CTC1, rt, rs);
2866 gen_cp1(ctx, OPC_MFC1, rt, rs);
2869 gen_cp1(ctx, OPC_MTC1, rt, rs);
2872 gen_cp1(ctx, OPC_MFHC1, rt, rs);
2875 gen_cp1(ctx, OPC_MTHC1, rt, rs);
2878 gen_farith(ctx, OPC_CVT_S_PL, -1, rs, rt, 0);
2881 gen_farith(ctx, OPC_CVT_S_PU, -1, rs, rt, 0);
2884 switch (extract32(ctx->opcode, 6, 9)) {
2886 gen_farith(ctx, OPC_CVT_L_S, -1, rs, rt, 0);
2889 gen_farith(ctx, OPC_CVT_L_D, -1, rs, rt, 0);
2892 gen_farith(ctx, OPC_CVT_W_S, -1, rs, rt, 0);
2895 gen_farith(ctx, OPC_CVT_W_D, -1, rs, rt, 0);
2898 gen_farith(ctx, OPC_RSQRT_S, -1, rs, rt, 0);
2901 gen_farith(ctx, OPC_RSQRT_D, -1, rs, rt, 0);
2904 gen_farith(ctx, OPC_SQRT_S, -1, rs, rt, 0);
2907 gen_farith(ctx, OPC_SQRT_D, -1, rs, rt, 0);
2910 gen_farith(ctx, OPC_RECIP_S, -1, rs, rt, 0);
2913 gen_farith(ctx, OPC_RECIP_D, -1, rs, rt, 0);
2916 gen_farith(ctx, OPC_FLOOR_L_S, -1, rs, rt, 0);
2919 gen_farith(ctx, OPC_FLOOR_L_D, -1, rs, rt, 0);
2922 gen_farith(ctx, OPC_FLOOR_W_S, -1, rs, rt, 0);
2925 gen_farith(ctx, OPC_FLOOR_W_D, -1, rs, rt, 0);
2928 gen_farith(ctx, OPC_CEIL_L_S, -1, rs, rt, 0);
2931 gen_farith(ctx, OPC_CEIL_L_D, -1, rs, rt, 0);
2934 gen_farith(ctx, OPC_CEIL_W_S, -1, rs, rt, 0);
2937 gen_farith(ctx, OPC_CEIL_W_D, -1, rs, rt, 0);
2940 gen_farith(ctx, OPC_TRUNC_L_S, -1, rs, rt, 0);
2943 gen_farith(ctx, OPC_TRUNC_L_D, -1, rs, rt, 0);
2946 gen_farith(ctx, OPC_TRUNC_W_S, -1, rs, rt, 0);
2949 gen_farith(ctx, OPC_TRUNC_W_D, -1, rs, rt, 0);
2952 gen_farith(ctx, OPC_ROUND_L_S, -1, rs, rt, 0);
2955 gen_farith(ctx, OPC_ROUND_L_D, -1, rs, rt, 0);
2958 gen_farith(ctx, OPC_ROUND_W_S, -1, rs, rt, 0);
2961 gen_farith(ctx, OPC_ROUND_W_D, -1, rs, rt, 0);
2964 gen_farith(ctx, OPC_MOV_S, -1, rs, rt, 0);
2967 gen_farith(ctx, OPC_MOV_D, -1, rs, rt, 0);
2970 gen_farith(ctx, OPC_ABS_S, -1, rs, rt, 0);
2973 gen_farith(ctx, OPC_ABS_D, -1, rs, rt, 0);
2976 gen_farith(ctx, OPC_NEG_S, -1, rs, rt, 0);
2979 gen_farith(ctx, OPC_NEG_D, -1, rs, rt, 0);
2982 gen_farith(ctx, OPC_CVT_D_S, -1, rs, rt, 0);
2985 gen_farith(ctx, OPC_CVT_D_W, -1, rs, rt, 0);
2988 gen_farith(ctx, OPC_CVT_D_L, -1, rs, rt, 0);
2991 gen_farith(ctx, OPC_CVT_S_D, -1, rs, rt, 0);
2994 gen_farith(ctx, OPC_CVT_S_W, -1, rs, rt, 0);
2997 gen_farith(ctx, OPC_CVT_S_L, -1, rs, rt, 0);
3000 gen_reserved_instruction(ctx);
3009 switch (extract32(ctx->opcode, 3, 3)) {
3010 case NM_CMP_CONDN_S:
3011 gen_r6_cmp_s(ctx, extract32(ctx->opcode, 6, 5), rt, rs, rd);
3013 case NM_CMP_CONDN_D:
3014 gen_r6_cmp_d(ctx, extract32(ctx->opcode, 6, 5), rt, rs, rd);
3017 gen_reserved_instruction(ctx);
3022 gen_reserved_instruction(ctx);
3027 static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc,
3028 int rd, int rs, int rt)
3031 TCGv t0 = tcg_temp_new();
3032 TCGv v1_t = tcg_temp_new();
3033 TCGv v2_t = tcg_temp_new();
3035 gen_load_gpr(v1_t, rs);
3036 gen_load_gpr(v2_t, rt);
3041 gen_helper_cmp_eq_ph(v1_t, v2_t, cpu_env);
3045 gen_helper_cmp_lt_ph(v1_t, v2_t, cpu_env);
3049 gen_helper_cmp_le_ph(v1_t, v2_t, cpu_env);
3053 gen_helper_cmpu_eq_qb(v1_t, v2_t, cpu_env);
3057 gen_helper_cmpu_lt_qb(v1_t, v2_t, cpu_env);
3061 gen_helper_cmpu_le_qb(v1_t, v2_t, cpu_env);
3063 case NM_CMPGU_EQ_QB:
3065 gen_helper_cmpgu_eq_qb(v1_t, v1_t, v2_t);
3066 gen_store_gpr(v1_t, ret);
3068 case NM_CMPGU_LT_QB:
3070 gen_helper_cmpgu_lt_qb(v1_t, v1_t, v2_t);
3071 gen_store_gpr(v1_t, ret);
3073 case NM_CMPGU_LE_QB:
3075 gen_helper_cmpgu_le_qb(v1_t, v1_t, v2_t);
3076 gen_store_gpr(v1_t, ret);
3078 case NM_CMPGDU_EQ_QB:
3080 gen_helper_cmpgu_eq_qb(v1_t, v1_t, v2_t);
3081 tcg_gen_deposit_tl(cpu_dspctrl, cpu_dspctrl, v1_t, 24, 4);
3082 gen_store_gpr(v1_t, ret);
3084 case NM_CMPGDU_LT_QB:
3086 gen_helper_cmpgu_lt_qb(v1_t, v1_t, v2_t);
3087 tcg_gen_deposit_tl(cpu_dspctrl, cpu_dspctrl, v1_t, 24, 4);
3088 gen_store_gpr(v1_t, ret);
3090 case NM_CMPGDU_LE_QB:
3092 gen_helper_cmpgu_le_qb(v1_t, v1_t, v2_t);
3093 tcg_gen_deposit_tl(cpu_dspctrl, cpu_dspctrl, v1_t, 24, 4);
3094 gen_store_gpr(v1_t, ret);
3098 gen_helper_packrl_ph(v1_t, v1_t, v2_t);
3099 gen_store_gpr(v1_t, ret);
3103 gen_helper_pick_qb(v1_t, v1_t, v2_t, cpu_env);
3104 gen_store_gpr(v1_t, ret);
3108 gen_helper_pick_ph(v1_t, v1_t, v2_t, cpu_env);
3109 gen_store_gpr(v1_t, ret);
3113 gen_helper_addq_s_w(v1_t, v1_t, v2_t, cpu_env);
3114 gen_store_gpr(v1_t, ret);
3118 gen_helper_subq_s_w(v1_t, v1_t, v2_t, cpu_env);
3119 gen_store_gpr(v1_t, ret);
3123 gen_helper_addsc(v1_t, v1_t, v2_t, cpu_env);
3124 gen_store_gpr(v1_t, ret);
3128 gen_helper_addwc(v1_t, v1_t, v2_t, cpu_env);
3129 gen_store_gpr(v1_t, ret);
3133 switch (extract32(ctx->opcode, 10, 1)) {
3136 gen_helper_addq_ph(v1_t, v1_t, v2_t, cpu_env);
3137 gen_store_gpr(v1_t, ret);
3141 gen_helper_addq_s_ph(v1_t, v1_t, v2_t, cpu_env);
3142 gen_store_gpr(v1_t, ret);
3148 switch (extract32(ctx->opcode, 10, 1)) {
3151 gen_helper_addqh_ph(v1_t, v1_t, v2_t);
3152 gen_store_gpr(v1_t, ret);
3156 gen_helper_addqh_r_ph(v1_t, v1_t, v2_t);
3157 gen_store_gpr(v1_t, ret);
3163 switch (extract32(ctx->opcode, 10, 1)) {
3166 gen_helper_addqh_w(v1_t, v1_t, v2_t);
3167 gen_store_gpr(v1_t, ret);
3171 gen_helper_addqh_r_w(v1_t, v1_t, v2_t);
3172 gen_store_gpr(v1_t, ret);
3178 switch (extract32(ctx->opcode, 10, 1)) {
3181 gen_helper_addu_qb(v1_t, v1_t, v2_t, cpu_env);
3182 gen_store_gpr(v1_t, ret);
3186 gen_helper_addu_s_qb(v1_t, v1_t, v2_t, cpu_env);
3187 gen_store_gpr(v1_t, ret);
3193 switch (extract32(ctx->opcode, 10, 1)) {
3196 gen_helper_addu_ph(v1_t, v1_t, v2_t, cpu_env);
3197 gen_store_gpr(v1_t, ret);
3201 gen_helper_addu_s_ph(v1_t, v1_t, v2_t, cpu_env);
3202 gen_store_gpr(v1_t, ret);
3208 switch (extract32(ctx->opcode, 10, 1)) {
3211 gen_helper_adduh_qb(v1_t, v1_t, v2_t);
3212 gen_store_gpr(v1_t, ret);
3216 gen_helper_adduh_r_qb(v1_t, v1_t, v2_t);
3217 gen_store_gpr(v1_t, ret);
3223 switch (extract32(ctx->opcode, 10, 1)) {
3226 gen_helper_shra_ph(v1_t, v1_t, v2_t);
3227 gen_store_gpr(v1_t, ret);
3231 gen_helper_shra_r_ph(v1_t, v1_t, v2_t);
3232 gen_store_gpr(v1_t, ret);
3238 switch (extract32(ctx->opcode, 10, 1)) {
3241 gen_helper_shra_qb(v1_t, v1_t, v2_t);
3242 gen_store_gpr(v1_t, ret);
3246 gen_helper_shra_r_qb(v1_t, v1_t, v2_t);
3247 gen_store_gpr(v1_t, ret);
3253 switch (extract32(ctx->opcode, 10, 1)) {
3256 gen_helper_subq_ph(v1_t, v1_t, v2_t, cpu_env);
3257 gen_store_gpr(v1_t, ret);
3261 gen_helper_subq_s_ph(v1_t, v1_t, v2_t, cpu_env);
3262 gen_store_gpr(v1_t, ret);
3268 switch (extract32(ctx->opcode, 10, 1)) {
3271 gen_helper_subqh_ph(v1_t, v1_t, v2_t);
3272 gen_store_gpr(v1_t, ret);
3276 gen_helper_subqh_r_ph(v1_t, v1_t, v2_t);
3277 gen_store_gpr(v1_t, ret);
3283 switch (extract32(ctx->opcode, 10, 1)) {
3286 gen_helper_subqh_w(v1_t, v1_t, v2_t);
3287 gen_store_gpr(v1_t, ret);
3291 gen_helper_subqh_r_w(v1_t, v1_t, v2_t);
3292 gen_store_gpr(v1_t, ret);
3298 switch (extract32(ctx->opcode, 10, 1)) {
3301 gen_helper_subu_qb(v1_t, v1_t, v2_t, cpu_env);
3302 gen_store_gpr(v1_t, ret);
3306 gen_helper_subu_s_qb(v1_t, v1_t, v2_t, cpu_env);
3307 gen_store_gpr(v1_t, ret);
3313 switch (extract32(ctx->opcode, 10, 1)) {
3316 gen_helper_subu_ph(v1_t, v1_t, v2_t, cpu_env);
3317 gen_store_gpr(v1_t, ret);
3321 gen_helper_subu_s_ph(v1_t, v1_t, v2_t, cpu_env);
3322 gen_store_gpr(v1_t, ret);
3328 switch (extract32(ctx->opcode, 10, 1)) {
3331 gen_helper_subuh_qb(v1_t, v1_t, v2_t);
3332 gen_store_gpr(v1_t, ret);
3336 gen_helper_subuh_r_qb(v1_t, v1_t, v2_t);
3337 gen_store_gpr(v1_t, ret);
3343 switch (extract32(ctx->opcode, 10, 1)) {
3346 gen_helper_shll_ph(v1_t, v1_t, v2_t, cpu_env);
3347 gen_store_gpr(v1_t, ret);
3351 gen_helper_shll_s_ph(v1_t, v1_t, v2_t, cpu_env);
3352 gen_store_gpr(v1_t, ret);
3356 case NM_PRECR_SRA_R_PH_W:
3358 switch (extract32(ctx->opcode, 10, 1)) {
3360 /* PRECR_SRA_PH_W */
3362 TCGv_i32 sa_t = tcg_constant_i32(rd);
3363 gen_helper_precr_sra_ph_w(v1_t, sa_t, v1_t,
3365 gen_store_gpr(v1_t, rt);
3369 /* PRECR_SRA_R_PH_W */
3371 TCGv_i32 sa_t = tcg_constant_i32(rd);
3372 gen_helper_precr_sra_r_ph_w(v1_t, sa_t, v1_t,
3374 gen_store_gpr(v1_t, rt);
3379 case NM_MULEU_S_PH_QBL:
3381 gen_helper_muleu_s_ph_qbl(v1_t, v1_t, v2_t, cpu_env);
3382 gen_store_gpr(v1_t, ret);
3384 case NM_MULEU_S_PH_QBR:
3386 gen_helper_muleu_s_ph_qbr(v1_t, v1_t, v2_t, cpu_env);
3387 gen_store_gpr(v1_t, ret);
3391 gen_helper_mulq_rs_ph(v1_t, v1_t, v2_t, cpu_env);
3392 gen_store_gpr(v1_t, ret);
3396 gen_helper_mulq_s_ph(v1_t, v1_t, v2_t, cpu_env);
3397 gen_store_gpr(v1_t, ret);
3401 gen_helper_mulq_rs_w(v1_t, v1_t, v2_t, cpu_env);
3402 gen_store_gpr(v1_t, ret);
3406 gen_helper_mulq_s_w(v1_t, v1_t, v2_t, cpu_env);
3407 gen_store_gpr(v1_t, ret);
3411 gen_load_gpr(t0, rs);
3413 tcg_gen_deposit_tl(cpu_gpr[rt], t0, cpu_gpr[rt], rd, 32 - rd);
3415 tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
3419 gen_helper_modsub(v1_t, v1_t, v2_t);
3420 gen_store_gpr(v1_t, ret);
3424 gen_helper_shra_r_w(v1_t, v1_t, v2_t);
3425 gen_store_gpr(v1_t, ret);
3429 gen_helper_shrl_ph(v1_t, v1_t, v2_t);
3430 gen_store_gpr(v1_t, ret);
3434 gen_helper_shrl_qb(v1_t, v1_t, v2_t);
3435 gen_store_gpr(v1_t, ret);
3439 gen_helper_shll_qb(v1_t, v1_t, v2_t, cpu_env);
3440 gen_store_gpr(v1_t, ret);
3444 gen_helper_shll_s_w(v1_t, v1_t, v2_t, cpu_env);
3445 gen_store_gpr(v1_t, ret);
3450 TCGv tv0 = tcg_temp_new();
3451 TCGv tv1 = tcg_temp_new();
3452 int16_t imm = extract32(ctx->opcode, 16, 7);
3454 tcg_gen_movi_tl(tv0, rd >> 3);
3455 tcg_gen_movi_tl(tv1, imm);
3456 gen_helper_shilo(tv0, tv1, cpu_env);
3459 case NM_MULEQ_S_W_PHL:
3461 gen_helper_muleq_s_w_phl(v1_t, v1_t, v2_t, cpu_env);
3462 gen_store_gpr(v1_t, ret);
3464 case NM_MULEQ_S_W_PHR:
3466 gen_helper_muleq_s_w_phr(v1_t, v1_t, v2_t, cpu_env);
3467 gen_store_gpr(v1_t, ret);
3471 switch (extract32(ctx->opcode, 10, 1)) {
3474 gen_helper_mul_ph(v1_t, v1_t, v2_t, cpu_env);
3475 gen_store_gpr(v1_t, ret);
3479 gen_helper_mul_s_ph(v1_t, v1_t, v2_t, cpu_env);
3480 gen_store_gpr(v1_t, ret);
3484 case NM_PRECR_QB_PH:
3486 gen_helper_precr_qb_ph(v1_t, v1_t, v2_t);
3487 gen_store_gpr(v1_t, ret);
3489 case NM_PRECRQ_QB_PH:
3491 gen_helper_precrq_qb_ph(v1_t, v1_t, v2_t);
3492 gen_store_gpr(v1_t, ret);
3494 case NM_PRECRQ_PH_W:
3496 gen_helper_precrq_ph_w(v1_t, v1_t, v2_t);
3497 gen_store_gpr(v1_t, ret);
3499 case NM_PRECRQ_RS_PH_W:
3501 gen_helper_precrq_rs_ph_w(v1_t, v1_t, v2_t, cpu_env);
3502 gen_store_gpr(v1_t, ret);
3504 case NM_PRECRQU_S_QB_PH:
3506 gen_helper_precrqu_s_qb_ph(v1_t, v1_t, v2_t, cpu_env);
3507 gen_store_gpr(v1_t, ret);
3511 tcg_gen_movi_tl(t0, rd);
3512 gen_helper_shra_r_w(v1_t, t0, v1_t);
3513 gen_store_gpr(v1_t, rt);
3517 tcg_gen_movi_tl(t0, rd >> 1);
3518 switch (extract32(ctx->opcode, 10, 1)) {
3521 gen_helper_shra_ph(v1_t, t0, v1_t);
3522 gen_store_gpr(v1_t, rt);
3526 gen_helper_shra_r_ph(v1_t, t0, v1_t);
3527 gen_store_gpr(v1_t, rt);
3533 tcg_gen_movi_tl(t0, rd >> 1);
3534 switch (extract32(ctx->opcode, 10, 2)) {
3537 gen_helper_shll_ph(v1_t, t0, v1_t, cpu_env);
3538 gen_store_gpr(v1_t, rt);
3542 gen_helper_shll_s_ph(v1_t, t0, v1_t, cpu_env);
3543 gen_store_gpr(v1_t, rt);
3546 gen_reserved_instruction(ctx);
3552 tcg_gen_movi_tl(t0, rd);
3553 gen_helper_shll_s_w(v1_t, t0, v1_t, cpu_env);
3554 gen_store_gpr(v1_t, rt);
3560 imm = sextract32(ctx->opcode, 11, 11);
3561 imm = (int16_t)(imm << 6) >> 6;
3563 tcg_gen_movi_tl(cpu_gpr[rt], dup_const(MO_16, imm));
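        /*
         * dup_const(MO_16, imm) replicates the sign-extended 16-bit
         * immediate into every halfword lane, e.g. imm = 0x01ff yields
         * 0x01ff01ff in a 32-bit rt (REPL.PH).
         */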
3568 gen_reserved_instruction(ctx);
3573 static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
3581 insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
3582 ctx->opcode = (ctx->opcode << 16) | insn;
3584 rt = extract32(ctx->opcode, 21, 5);
3585 rs = extract32(ctx->opcode, 16, 5);
3586 rd = extract32(ctx->opcode, 11, 5);
3588 op = extract32(ctx->opcode, 26, 6);
3593 switch (extract32(ctx->opcode, 19, 2)) {
3596 gen_reserved_instruction(ctx);
3599 if ((extract32(ctx->opcode, 18, 1)) == NM_SYSCALL) {
3600 generate_exception_end(ctx, EXCP_SYSCALL);
3602 gen_reserved_instruction(ctx);
3606 generate_exception_end(ctx, EXCP_BREAK);
3609 if (is_uhi(ctx, extract32(ctx->opcode, 0, 19))) {
3610 ctx->base.is_jmp = DISAS_SEMIHOST;
3612 if (ctx->hflags & MIPS_HFLAG_SBRI) {
3613 gen_reserved_instruction(ctx);
3615 generate_exception_end(ctx, EXCP_DBp);
3622 imm = extract32(ctx->opcode, 0, 16);
3624 tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], imm);
3626 tcg_gen_movi_tl(cpu_gpr[rt], imm);
3628 tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
3633 offset = sextract32(ctx->opcode, 0, 1) << 21 |
3634 extract32(ctx->opcode, 1, 20) << 1;
3635 target_long addr = addr_add(ctx, ctx->base.pc_next + 4, offset);
3636 tcg_gen_movi_tl(cpu_gpr[rt], addr);
3640 switch (ctx->opcode & 0x07) {
3642 gen_pool32a0_nanomips_insn(env, ctx);
3646 int32_t op1 = extract32(ctx->opcode, 3, 7);
3647 gen_pool32a5_nanomips_insn(ctx, op1, rd, rs, rt);
3651 switch (extract32(ctx->opcode, 3, 3)) {
3653 gen_p_lsx(ctx, rd, rs, rt);
            /*
             * In nanoMIPS, the shift field directly encodes the shift
             * amount, meaning that the supported shift values are in
             * the range 0 to 3 (instead of 1 to 4 in MIPSR6).
             */
3661 gen_lsa(ctx, rd, rt, rs, extract32(ctx->opcode, 9, 2) - 1);
3664 gen_ext(ctx, 32, rd, rs, rt, extract32(ctx->opcode, 6, 5));
3667 gen_pool32axf_nanomips_insn(env, ctx);
3670 gen_reserved_instruction(ctx);
3675 gen_reserved_instruction(ctx);
3680 switch (ctx->opcode & 0x03) {
3683 offset = extract32(ctx->opcode, 0, 21);
3684 gen_op_addr_addi(ctx, cpu_gpr[rt], cpu_gpr[28], offset);
3688 gen_ld(ctx, OPC_LW, rt, 28, extract32(ctx->opcode, 2, 19) << 2);
3691 gen_st(ctx, OPC_SW, rt, 28, extract32(ctx->opcode, 2, 19) << 2);
3694 gen_reserved_instruction(ctx);
3700 insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 4);
3701 target_long addr_off = extract32(ctx->opcode, 0, 16) | insn << 16;
3702 switch (extract32(ctx->opcode, 16, 5)) {
3706 tcg_gen_movi_tl(cpu_gpr[rt], addr_off);
3712 tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rt], addr_off);
3713 tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
3719 gen_op_addr_addi(ctx, cpu_gpr[rt], cpu_gpr[28], addr_off);
3725 target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
3728 tcg_gen_movi_tl(cpu_gpr[rt], addr);
3735 t0 = tcg_temp_new();
3737 target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
3740 tcg_gen_movi_tl(t0, addr);
3741 tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, MO_TESL);
3748 t0 = tcg_temp_new();
3749 t1 = tcg_temp_new();
3751 target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
3754 tcg_gen_movi_tl(t0, addr);
3755 gen_load_gpr(t1, rt);
3757 tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
3761 gen_reserved_instruction(ctx);
3767 switch (extract32(ctx->opcode, 12, 4)) {
3769 gen_logic_imm(ctx, OPC_ORI, rt, rs, extract32(ctx->opcode, 0, 12));
3772 gen_logic_imm(ctx, OPC_XORI, rt, rs, extract32(ctx->opcode, 0, 12));
3775 gen_logic_imm(ctx, OPC_ANDI, rt, rs, extract32(ctx->opcode, 0, 12));
3778 switch (extract32(ctx->opcode, 20, 1)) {
3780 switch (ctx->opcode & 3) {
3782 gen_save(ctx, rt, extract32(ctx->opcode, 16, 4),
3783 extract32(ctx->opcode, 2, 1),
3784 extract32(ctx->opcode, 3, 9) << 3);
3787 case NM_RESTORE_JRC:
3788 gen_restore(ctx, rt, extract32(ctx->opcode, 16, 4),
3789 extract32(ctx->opcode, 2, 1),
3790 extract32(ctx->opcode, 3, 9) << 3);
3791 if ((ctx->opcode & 3) == NM_RESTORE_JRC) {
3792 gen_compute_branch_nm(ctx, OPC_JR, 2, 31, 0, 0);
3796 gen_reserved_instruction(ctx);
3801 gen_reserved_instruction(ctx);
3806 gen_slt_imm(ctx, OPC_SLTI, rt, rs, extract32(ctx->opcode, 0, 12));
3809 gen_slt_imm(ctx, OPC_SLTIU, rt, rs, extract32(ctx->opcode, 0, 12));
3813 TCGv t0 = tcg_temp_new();
3815 imm = extract32(ctx->opcode, 0, 12);
3816 gen_load_gpr(t0, rs);
3817 tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, imm);
3818 gen_store_gpr(t0, rt);
3822 imm = (int16_t) extract32(ctx->opcode, 0, 12);
3823 gen_arith_imm(ctx, OPC_ADDIU, rt, rs, -imm);
3827 int shift = extract32(ctx->opcode, 0, 5);
3828 switch (extract32(ctx->opcode, 5, 4)) {
3830 if (rt == 0 && shift == 0) {
3832 } else if (rt == 0 && shift == 3) {
3833 /* EHB - treat as NOP */
3834 } else if (rt == 0 && shift == 5) {
3835 /* PAUSE - treat as NOP */
3836 } else if (rt == 0 && shift == 6) {
3838 gen_sync(extract32(ctx->opcode, 16, 5));
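/*
 * With rt == 0 the shift field selects a hint: 0 is NOP, 3 is EHB,
 * 5 is PAUSE (all no-ops under TCG) and 6 is SYNC, whose stype is
 * taken from opcode bits 16..20.
 */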
3841 gen_shift_imm(ctx, OPC_SLL, rt, rs,
3842 extract32(ctx->opcode, 0, 5));
3846 gen_shift_imm(ctx, OPC_SRL, rt, rs,
3847 extract32(ctx->opcode, 0, 5));
3850 gen_shift_imm(ctx, OPC_SRA, rt, rs,
3851 extract32(ctx->opcode, 0, 5));
3854 gen_shift_imm(ctx, OPC_ROTR, rt, rs,
3855 extract32(ctx->opcode, 0, 5));
3858 gen_reserved_instruction(ctx);
3866 TCGv t0 = tcg_temp_new();
3868 TCGv_i32 shift = tcg_constant_i32(extract32(ctx->opcode, 0, 5));
3870 TCGv_i32 shiftx = tcg_constant_i32(extract32(ctx->opcode, 7, 4) << 1);
3872 TCGv_i32 stripe = tcg_constant_i32(extract32(ctx->opcode, 6, 1));
3874 gen_load_gpr(t0, rs);
3875 gen_helper_rotx(cpu_gpr[rt], t0, shift, shiftx, stripe);
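/*
 * ROTX operands: shift from bits 0..4, shiftx from bits 7..10 (doubled)
 * and the stripe flag from bit 6; the bit permutation itself is done in
 * the rotx helper. The TCGv_i32 declarations above are reconstructed
 * from this helper call.
 */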
3879 switch (((ctx->opcode >> 10) & 2) |
3880 (extract32(ctx->opcode, 5, 1))) {
3883 gen_bitops(ctx, OPC_INS, rt, rs, extract32(ctx->opcode, 0, 5),
3884 extract32(ctx->opcode, 6, 5));
3887 gen_reserved_instruction(ctx);
3892 switch (((ctx->opcode >> 10) & 2) |
3893 (extract32(ctx->opcode, 5, 1))) {
3896 gen_bitops(ctx, OPC_EXT, rt, rs, extract32(ctx->opcode, 0, 5),
3897 extract32(ctx->opcode, 6, 5));
3900 gen_reserved_instruction(ctx);
3905 gen_reserved_instruction(ctx);
3910 gen_pool32f_nanomips_insn(ctx);
3915 switch (extract32(ctx->opcode, 1, 1)) {
3918 tcg_gen_movi_tl(cpu_gpr[rt],
3919 sextract32(ctx->opcode, 0, 1) << 31 |
3920 extract32(ctx->opcode, 2, 10) << 21 |
3921 extract32(ctx->opcode, 12, 9) << 12);
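/*
 * LUI: the scattered immediate fields are reassembled into bits 31..12
 * of rt.
 */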
3926 offset = sextract32(ctx->opcode, 0, 1) << 31 |
3927 extract32(ctx->opcode, 2, 10) << 21 |
3928 extract32(ctx->opcode, 12, 9) << 12;
3930 addr = ~0xFFF & addr_add(ctx, ctx->base.pc_next + 4, offset);
3931 tcg_gen_movi_tl(cpu_gpr[rt], addr);
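/*
 * ALUIPC-style form: a PC-relative address aligned to 4 KiB (the low
 * 12 bits of the sum are cleared).
 */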
3938 uint32_t u = extract32(ctx->opcode, 0, 18);
3940 switch (extract32(ctx->opcode, 18, 3)) {
3942 gen_ld(ctx, OPC_LB, rt, 28, u);
3945 gen_st(ctx, OPC_SB, rt, 28, u);
3948 gen_ld(ctx, OPC_LBU, rt, 28, u);
3952 gen_op_addr_addi(ctx, cpu_gpr[rt], cpu_gpr[28], u);
3957 switch (ctx->opcode & 1) {
3959 gen_ld(ctx, OPC_LH, rt, 28, u);
3962 gen_ld(ctx, OPC_LHU, rt, 28, u);
3968 switch (ctx->opcode & 1) {
3970 gen_st(ctx, OPC_SH, rt, 28, u);
3973 gen_reserved_instruction(ctx);
3979 switch (ctx->opcode & 0x3) {
3981 gen_cop1_ldst(ctx, OPC_LWC1, rt, 28, u);
3984 gen_cop1_ldst(ctx, OPC_LDC1, rt, 28, u);
3987 gen_cop1_ldst(ctx, OPC_SWC1, rt, 28, u);
3990 gen_cop1_ldst(ctx, OPC_SDC1, rt, 28, u);
3995 gen_reserved_instruction(ctx);
4002 uint32_t u = extract32(ctx->opcode, 0, 12);
4004 switch (extract32(ctx->opcode, 12, 4)) {
4009 * Break the TB to be able to sync copied instructions
4012 ctx->base.is_jmp = DISAS_STOP;
4019 gen_ld(ctx, OPC_LB, rt, rs, u);
4022 gen_ld(ctx, OPC_LH, rt, rs, u);
4025 gen_ld(ctx, OPC_LW, rt, rs, u);
4028 gen_ld(ctx, OPC_LBU, rt, rs, u);
4031 gen_ld(ctx, OPC_LHU, rt, rs, u);
4034 gen_st(ctx, OPC_SB, rt, rs, u);
4037 gen_st(ctx, OPC_SH, rt, rs, u);
4040 gen_st(ctx, OPC_SW, rt, rs, u);
4043 gen_cop1_ldst(ctx, OPC_LWC1, rt, rs, u);
4046 gen_cop1_ldst(ctx, OPC_LDC1, rt, rs, u);
4049 gen_cop1_ldst(ctx, OPC_SWC1, rt, rs, u);
4052 gen_cop1_ldst(ctx, OPC_SDC1, rt, rs, u);
4055 gen_reserved_instruction(ctx);
4062 int32_t s = (sextract32(ctx->opcode, 15, 1) << 8) |
4063 extract32(ctx->opcode, 0, 8);
4065 switch (extract32(ctx->opcode, 8, 3)) {
4067 switch (extract32(ctx->opcode, 11, 4)) {
4069 gen_ld(ctx, OPC_LB, rt, rs, s);
4072 gen_ld(ctx, OPC_LH, rt, rs, s);
4075 gen_ld(ctx, OPC_LW, rt, rs, s);
4078 gen_ld(ctx, OPC_LBU, rt, rs, s);
4081 gen_ld(ctx, OPC_LHU, rt, rs, s);
4084 gen_st(ctx, OPC_SB, rt, rs, s);
4087 gen_st(ctx, OPC_SH, rt, rs, s);
4090 gen_st(ctx, OPC_SW, rt, rs, s);
4093 gen_cop1_ldst(ctx, OPC_LWC1, rt, rs, s);
4096 gen_cop1_ldst(ctx, OPC_LDC1, rt, rs, s);
4099 gen_cop1_ldst(ctx, OPC_SWC1, rt, rs, s);
4102 gen_cop1_ldst(ctx, OPC_SDC1, rt, rs, s);
4108 * Break the TB to be able to sync copied instructions
4111 ctx->base.is_jmp = DISAS_STOP;
4118 gen_reserved_instruction(ctx);
4123 switch (extract32(ctx->opcode, 11, 4)) {
4128 TCGv t0 = tcg_temp_new();
4129 TCGv t1 = tcg_temp_new();
4131 gen_base_offset_addr(ctx, t0, rs, s);
4133 switch (extract32(ctx->opcode, 11, 4)) {
4135 tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW | MO_UNALN);
4137 gen_store_gpr(t0, rt);
4140 gen_load_gpr(t1, rt);
4141 tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW | MO_UNALN);
4148 switch (ctx->opcode & 0x03) {
4150 gen_ld(ctx, OPC_LL, rt, rs, s);
4154 gen_llwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5));
4157 gen_reserved_instruction(ctx);
4162 switch (ctx->opcode & 0x03) {
4164 gen_st_cond(ctx, rt, rs, s, MO_TESL, false);
4168 gen_scwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5),
4172 gen_reserved_instruction(ctx);
4177 check_cp0_enabled(ctx);
4178 if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
4179 gen_cache_operation(ctx, rt, rs, s);
4183 gen_reserved_instruction(ctx);
4188 switch (extract32(ctx->opcode, 11, 4)) {
4191 check_cp0_enabled(ctx);
4192 gen_ld(ctx, OPC_LBE, rt, rs, s);
4196 check_cp0_enabled(ctx);
4197 gen_st(ctx, OPC_SBE, rt, rs, s);
4201 check_cp0_enabled(ctx);
4202 gen_ld(ctx, OPC_LBUE, rt, rs, s);
4206 /* case NM_SYNCIE */
4208 check_cp0_enabled(ctx);
4210 * Break the TB to be able to sync copied instructions
4213 ctx->base.is_jmp = DISAS_STOP;
4217 check_cp0_enabled(ctx);
4223 check_cp0_enabled(ctx);
4224 gen_ld(ctx, OPC_LHE, rt, rs, s);
4228 check_cp0_enabled(ctx);
4229 gen_st(ctx, OPC_SHE, rt, rs, s);
4233 check_cp0_enabled(ctx);
4234 gen_ld(ctx, OPC_LHUE, rt, rs, s);
4238 check_cp0_enabled(ctx);
4239 check_nms_dl_il_sl_tl_l2c(ctx);
4240 gen_cache_operation(ctx, rt, rs, s);
4244 check_cp0_enabled(ctx);
4245 gen_ld(ctx, OPC_LWE, rt, rs, s);
4249 check_cp0_enabled(ctx);
4250 gen_st(ctx, OPC_SWE, rt, rs, s);
4253 switch (extract32(ctx->opcode, 2, 2)) {
4257 check_cp0_enabled(ctx);
4258 gen_ld(ctx, OPC_LLE, rt, rs, s);
4263 check_cp0_enabled(ctx);
4264 gen_llwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5));
4267 gen_reserved_instruction(ctx);
4272 switch (extract32(ctx->opcode, 2, 2)) {
4276 check_cp0_enabled(ctx);
4277 gen_st_cond(ctx, rt, rs, s, MO_TESL, true);
4282 check_cp0_enabled(ctx);
4283 gen_scwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5),
4287 gen_reserved_instruction(ctx);
4292 gen_reserved_instruction(ctx);
4300 int count = extract32(ctx->opcode, 12, 3);
4303 offset = sextract32(ctx->opcode, 15, 1) << 8 |
4304 extract32(ctx->opcode, 0, 8);
4305 TCGv va = tcg_temp_new();
4306 TCGv t1 = tcg_temp_new();
4307 MemOp memop = (extract32(ctx->opcode, 8, 3)) ==
4308 NM_P_LS_UAWM ? MO_UNALN : 0;
4310 count = (count == 0) ? 8 : count;
4311 while (counter != count) {
4312 int this_rt = ((rt + counter) & 0x1f) | (rt & 0x10);
4313 int this_offset = offset + (counter << 2);
4315 gen_base_offset_addr(ctx, va, rs, this_offset);
4317 switch (extract32(ctx->opcode, 11, 1)) {
4319 tcg_gen_qemu_ld_tl(t1, va, ctx->mem_idx,
4321 gen_store_gpr(t1, this_rt);
4322 if ((this_rt == rs) &&
4323 (counter != (count - 1))) {
4328 this_rt = (rt == 0) ? 0 : this_rt;
4329 gen_load_gpr(t1, this_rt);
4330 tcg_gen_qemu_st_tl(t1, va, ctx->mem_idx,
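/*
 * Register-list transfer: a count field of 0 encodes 8 registers and
 * the offset advances by 4 per register; the this_rt == rs test covers
 * a load that would overwrite the base register before the last
 * transfer, which the ISA leaves unpredictable.
 */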
4339 gen_reserved_instruction(ctx);
4347 TCGv t0 = tcg_temp_new();
4348 int32_t s = sextract32(ctx->opcode, 0, 1) << 21 |
4349 extract32(ctx->opcode, 1, 20) << 1;
4350 rd = (extract32(ctx->opcode, 24, 1)) == 0 ? 4 : 5;
4351 rt = decode_gpr_gpr4_zero(extract32(ctx->opcode, 25, 1) << 3 |
4352 extract32(ctx->opcode, 21, 3));
4353 gen_load_gpr(t0, rt);
4354 tcg_gen_mov_tl(cpu_gpr[rd], t0);
4355 gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s);
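/*
 * MOVE.BALC-style sequence: the 4-bit-encoded source register is copied
 * into $a0 or $a1 (selected by opcode bit 24) before the branch-and-link.
 */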
4360 int32_t s = sextract32(ctx->opcode, 0, 1) << 25 |
4361 extract32(ctx->opcode, 1, 24) << 1;
4363 if ((extract32(ctx->opcode, 25, 1)) == 0) {
4365 gen_compute_branch_nm(ctx, OPC_BEQ, 4, 0, 0, s);
4368 gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s);
4373 switch (extract32(ctx->opcode, 12, 4)) {
4376 gen_compute_branch_nm(ctx, OPC_JALR, 4, rs, rt, 0);
4379 gen_compute_nanomips_pbalrsc_branch(ctx, rs, rt);
4382 gen_reserved_instruction(ctx);
4388 int32_t s = sextract32(ctx->opcode, 0, 1) << 14 |
4389 extract32(ctx->opcode, 1, 13) << 1;
4390 switch (extract32(ctx->opcode, 14, 2)) {
4393 gen_compute_branch_nm(ctx, OPC_BEQ, 4, rs, rt, s);
4396 s = sextract32(ctx->opcode, 0, 1) << 14 |
4397 extract32(ctx->opcode, 1, 13) << 1;
4398 switch (extract32(ctx->opcode, 16, 5)) {
4400 check_cp1_enabled(ctx);
4401 gen_compute_branch_cp1_nm(ctx, OPC_BC1EQZ, rt, s);
4404 check_cp1_enabled(ctx);
4405 gen_compute_branch_cp1_nm(ctx, OPC_BC1NEZ, rt, s);
4410 int32_t imm = extract32(ctx->opcode, 1, 13) |
4411 extract32(ctx->opcode, 0, 1) << 13;
4413 gen_compute_branch_nm(ctx, OPC_BPOSGE32, 4, -1, -2,
4418 gen_reserved_instruction(ctx);
4424 gen_compute_compact_branch_nm(ctx, OPC_BC, rs, rt, s);
4426 gen_compute_compact_branch_nm(ctx, OPC_BGEC, rs, rt, s);
4430 if (rs == rt || rt == 0) {
4431 gen_compute_compact_branch_nm(ctx, OPC_BC, 0, 0, s);
4432 } else if (rs == 0) {
4433 gen_compute_compact_branch_nm(ctx, OPC_BEQZC, rt, 0, s);
4435 gen_compute_compact_branch_nm(ctx, OPC_BGEUC, rs, rt, s);
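/*
 * BGEUC special cases: rs == rt or rt == $zero is always taken and is
 * emitted as an unconditional BC; rs == $zero reduces to BEQZC on rt;
 * only the general case needs the unsigned comparison.
 */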
4443 int32_t s = sextract32(ctx->opcode, 0, 1) << 14 |
4444 extract32(ctx->opcode, 1, 13) << 1;
4445 switch (extract32(ctx->opcode, 14, 2)) {
4450 ctx->hflags |= MIPS_HFLAG_FBNSLOT;
4452 gen_compute_branch_nm(ctx, OPC_BNE, 4, rs, rt, s);
4456 if (rs != 0 && rt != 0 && rs == rt) {
4458 ctx->hflags |= MIPS_HFLAG_FBNSLOT;
4460 gen_compute_compact_branch_nm(ctx, OPC_BLTC, rs, rt, s);
4464 if (rs == 0 || rs == rt) {
4466 ctx->hflags |= MIPS_HFLAG_FBNSLOT;
4468 gen_compute_compact_branch_nm(ctx, OPC_BLTUC, rs, rt, s);
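/*
 * For the rs == $zero and rs == rt forms no branch is emitted; only the
 * forbidden slot is recorded via MIPS_HFLAG_FBNSLOT.
 */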
4472 gen_reserved_instruction(ctx);
4479 int32_t s = sextract32(ctx->opcode, 0, 1) << 11 |
4480 extract32(ctx->opcode, 1, 10) << 1;
4481 uint32_t u = extract32(ctx->opcode, 11, 7);
4483 gen_compute_imm_branch(ctx, extract32(ctx->opcode, 18, 3),
4488 gen_reserved_instruction(ctx);
4494 static int decode_isa_nanomips(CPUMIPSState *env, DisasContext *ctx)
4497 int rt = decode_gpr_gpr3(NANOMIPS_EXTRACT_RT3(ctx->opcode));
4498 int rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode));
4499 int rd = decode_gpr_gpr3(NANOMIPS_EXTRACT_RD3(ctx->opcode));
4503 /* make sure instructions are on a halfword boundary */
4504 if (ctx->base.pc_next & 0x1) {
4505 TCGv tmp = tcg_constant_tl(ctx->base.pc_next);
4506 tcg_gen_st_tl(tmp, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
4507 generate_exception_end(ctx, EXCP_AdEL);
4511 op = extract32(ctx->opcode, 10, 6);
4514 rt = NANOMIPS_EXTRACT_RD5(ctx->opcode);
4517 rs = NANOMIPS_EXTRACT_RS5(ctx->opcode);
4518 gen_arith(ctx, OPC_ADDU, rt, rs, 0);
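/* 16-bit register move, implemented as ADDU rt, rs, $zero */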
4521 switch (extract32(ctx->opcode, 3, 2)) {
4522 case NM_P16_SYSCALL:
4523 if (extract32(ctx->opcode, 2, 1) == 0) {
4524 generate_exception_end(ctx, EXCP_SYSCALL);
4526 gen_reserved_instruction(ctx);
4530 generate_exception_end(ctx, EXCP_BREAK);
4533 if (is_uhi(ctx, extract32(ctx->opcode, 0, 3))) {
4534 ctx->base.is_jmp = DISAS_SEMIHOST;
4536 if (ctx->hflags & MIPS_HFLAG_SBRI) {
4537 gen_reserved_instruction(ctx);
4539 generate_exception_end(ctx, EXCP_DBp);
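/*
 * SDBBP handling: recognised UHI codes are routed to semihosting;
 * otherwise the instruction is reserved when SBRI is in effect, or it
 * raises a debug breakpoint exception (EXCP_DBp).
 */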
4544 gen_reserved_instruction(ctx);
4551 int shift = extract32(ctx->opcode, 0, 3);
4553 shift = (shift == 0) ? 8 : shift;
4555 switch (extract32(ctx->opcode, 3, 1)) {
4563 gen_shift_imm(ctx, opc, rt, rs, shift);
4567 switch (ctx->opcode & 1) {
4569 gen_pool16c_nanomips_insn(ctx);
4572 gen_ldxs(ctx, rt, rs, rd);
4577 switch (extract32(ctx->opcode, 6, 1)) {
4579 imm = extract32(ctx->opcode, 0, 6) << 2;
4580 gen_arith_imm(ctx, OPC_ADDIU, rt, 29, imm);
4583 gen_reserved_instruction(ctx);
4588 switch (extract32(ctx->opcode, 3, 1)) {
4590 imm = extract32(ctx->opcode, 0, 3) << 2;
4591 gen_arith_imm(ctx, OPC_ADDIU, rt, rs, imm);
4594 rt = extract32(ctx->opcode, 5, 5);
4596 /* imm = sign_extend(s[3] . s[2:0], from_nbits = 4) */
4597 imm = (sextract32(ctx->opcode, 4, 1) << 3) |
4598 (extract32(ctx->opcode, 0, 3));
4599 gen_arith_imm(ctx, OPC_ADDIU, rt, rt, imm);
4605 switch (ctx->opcode & 0x1) {
4607 gen_arith(ctx, OPC_ADDU, rd, rs, rt);
4610 gen_arith(ctx, OPC_SUBU, rd, rs, rt);
4615 rt = (extract32(ctx->opcode, 9, 1) << 3) |
4616 extract32(ctx->opcode, 5, 3);
4617 rs = (extract32(ctx->opcode, 4, 1) << 3) |
4618 extract32(ctx->opcode, 0, 3);
4619 rt = decode_gpr_gpr4(rt);
4620 rs = decode_gpr_gpr4(rs);
4621 switch ((extract32(ctx->opcode, 7, 2) & 0x2) |
4622 (extract32(ctx->opcode, 3, 1))) {
4625 gen_arith(ctx, OPC_ADDU, rt, rs, rt);
4629 gen_r6_muldiv(ctx, R6_OPC_MUL, rt, rs, rt);
4632 gen_reserved_instruction(ctx);
4638 int imm = extract32(ctx->opcode, 0, 7);
4639 imm = (imm == 0x7f ? -1 : imm);
4641 tcg_gen_movi_tl(cpu_gpr[rt], imm);
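/* 16-bit load-immediate: a 7-bit value, with 0x7f encoding -1 */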
4647 uint32_t u = extract32(ctx->opcode, 0, 4);
4648 u = (u == 12) ? 0xff :
4649 (u == 13) ? 0xffff : u;
4650 gen_logic_imm(ctx, OPC_ANDI, rt, rs, u);
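/*
 * ANDI16: the 4-bit field is the mask itself, except values 12 and 13
 * which stand for 0xff and 0xffff.
 */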
4654 offset = extract32(ctx->opcode, 0, 2);
4655 switch (extract32(ctx->opcode, 2, 2)) {
4657 gen_ld(ctx, OPC_LB, rt, rs, offset);
4660 rt = decode_gpr_gpr3_src_store(
4661 NANOMIPS_EXTRACT_RT3(ctx->opcode));
4662 gen_st(ctx, OPC_SB, rt, rs, offset);
4665 gen_ld(ctx, OPC_LBU, rt, rs, offset);
4668 gen_reserved_instruction(ctx);
4673 offset = extract32(ctx->opcode, 1, 2) << 1;
4674 switch ((extract32(ctx->opcode, 3, 1) << 1) | (ctx->opcode & 1)) {
4676 gen_ld(ctx, OPC_LH, rt, rs, offset);
4679 rt = decode_gpr_gpr3_src_store(
4680 NANOMIPS_EXTRACT_RT3(ctx->opcode));
4681 gen_st(ctx, OPC_SH, rt, rs, offset);
4684 gen_ld(ctx, OPC_LHU, rt, rs, offset);
4687 gen_reserved_instruction(ctx);
4692 offset = extract32(ctx->opcode, 0, 4) << 2;
4693 gen_ld(ctx, OPC_LW, rt, rs, offset);
4696 rt = NANOMIPS_EXTRACT_RD5(ctx->opcode);
4697 offset = extract32(ctx->opcode, 0, 5) << 2;
4698 gen_ld(ctx, OPC_LW, rt, 29, offset);
4702 rt = (extract32(ctx->opcode, 9, 1) << 3) |
4703 extract32(ctx->opcode, 5, 3);
4704 rs = (extract32(ctx->opcode, 4, 1) << 3) |
4705 extract32(ctx->opcode, 0, 3);
4706 offset = (extract32(ctx->opcode, 3, 1) << 3) |
4707 (extract32(ctx->opcode, 8, 1) << 2);
4708 rt = decode_gpr_gpr4(rt);
4709 rs = decode_gpr_gpr4(rs);
4710 gen_ld(ctx, OPC_LW, rt, rs, offset);
4714 rt = (extract32(ctx->opcode, 9, 1) << 3) |
4715 extract32(ctx->opcode, 5, 3);
4716 rs = (extract32(ctx->opcode, 4, 1) << 3) |
4717 extract32(ctx->opcode, 0, 3);
4718 offset = (extract32(ctx->opcode, 3, 1) << 3) |
4719 (extract32(ctx->opcode, 8, 1) << 2);
4720 rt = decode_gpr_gpr4_zero(rt);
4721 rs = decode_gpr_gpr4(rs);
4722 gen_st(ctx, OPC_SW, rt, rs, offset);
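/*
 * 4x4 LW/SW forms: both registers come from the 4-bit GPR mapping (the
 * store source additionally allows $zero) and the offset is assembled
 * from opcode bits 3 and 8.
 */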
4725 offset = extract32(ctx->opcode, 0, 7) << 2;
4726 gen_ld(ctx, OPC_LW, rt, 28, offset);
4729 rt = NANOMIPS_EXTRACT_RD5(ctx->opcode);
4730 offset = extract32(ctx->opcode, 0, 5) << 2;
4731 gen_st(ctx, OPC_SW, rt, 29, offset);
4734 rt = decode_gpr_gpr3_src_store(
4735 NANOMIPS_EXTRACT_RT3(ctx->opcode));
4736 rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode));
4737 offset = extract32(ctx->opcode, 0, 4) << 2;
4738 gen_st(ctx, OPC_SW, rt, rs, offset);
4741 rt = decode_gpr_gpr3_src_store(
4742 NANOMIPS_EXTRACT_RT3(ctx->opcode));
4743 offset = extract32(ctx->opcode, 0, 7) << 2;
4744 gen_st(ctx, OPC_SW, rt, 28, offset);
4747 gen_compute_branch_nm(ctx, OPC_BEQ, 2, 0, 0,
4748 (sextract32(ctx->opcode, 0, 1) << 10) |
4749 (extract32(ctx->opcode, 1, 9) << 1));
4752 gen_compute_branch_nm(ctx, OPC_BGEZAL, 2, 0, 0,
4753 (sextract32(ctx->opcode, 0, 1) << 10) |
4754 (extract32(ctx->opcode, 1, 9) << 1));
4757 gen_compute_branch_nm(ctx, OPC_BEQ, 2, rt, 0,
4758 (sextract32(ctx->opcode, 0, 1) << 7) |
4759 (extract32(ctx->opcode, 1, 6) << 1));
4762 gen_compute_branch_nm(ctx, OPC_BNE, 2, rt, 0,
4763 (sextract32(ctx->opcode, 0, 1) << 7) |
4764 (extract32(ctx->opcode, 1, 6) << 1));
4767 switch (ctx->opcode & 0xf) {
4770 switch (extract32(ctx->opcode, 4, 1)) {
4772 gen_compute_branch_nm(ctx, OPC_JR, 2,
4773 extract32(ctx->opcode, 5, 5), 0, 0);
4776 gen_compute_branch_nm(ctx, OPC_JALR, 2,
4777 extract32(ctx->opcode, 5, 5), 31, 0);
4784 uint32_t opc = extract32(ctx->opcode, 4, 3) <
4785 extract32(ctx->opcode, 7, 3) ? OPC_BEQ : OPC_BNE;
4786 gen_compute_branch_nm(ctx, opc, 2, rs, rt,
4787 extract32(ctx->opcode, 0, 4) << 1);
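/*
 * BEQC16 and BNEC16 share this encoding: when the 3-bit field at bit 4
 * is numerically smaller than the one at bit 7 the instruction is
 * BEQC16, otherwise BNEC16.
 */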
4794 int count = extract32(ctx->opcode, 0, 4);
4795 int u = extract32(ctx->opcode, 4, 4) << 4;
4797 rt = 30 + extract32(ctx->opcode, 9, 1);
4798 switch (extract32(ctx->opcode, 8, 1)) {
4800 gen_save(ctx, rt, count, 0, u);
4802 case NM_RESTORE_JRC16:
4803 gen_restore(ctx, rt, count, 0, u);
4804 gen_compute_branch_nm(ctx, OPC_JR, 2, 31, 0, 0);
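/*
 * SAVE16/RESTORE.JRC16: rt selects $fp or $ra (30 + opcode bit 9);
 * RESTORE.JRC16 then returns via jr $ra.
 */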
4813 static const int gpr2reg1[] = {4, 5, 6, 7};
4814 static const int gpr2reg2[] = {5, 6, 7, 8};
4816 int rd2 = extract32(ctx->opcode, 3, 1) << 1 |
4817 extract32(ctx->opcode, 8, 1);
4818 int r1 = gpr2reg1[rd2];
4819 int r2 = gpr2reg2[rd2];
4820 int r3 = extract32(ctx->opcode, 4, 1) << 3 |
4821 extract32(ctx->opcode, 0, 3);
4822 int r4 = extract32(ctx->opcode, 9, 1) << 3 |
4823 extract32(ctx->opcode, 5, 3);
4824 TCGv t0 = tcg_temp_new();
4825 TCGv t1 = tcg_temp_new();
4826 if (op == NM_MOVEP) {
4829 rs = decode_gpr_gpr4_zero(r3);
4830 rt = decode_gpr_gpr4_zero(r4);
4832 rd = decode_gpr_gpr4(r3);
4833 re = decode_gpr_gpr4(r4);
4837 gen_load_gpr(t0, rs);
4838 gen_load_gpr(t1, rt);
4839 tcg_gen_mov_tl(cpu_gpr[rd], t0);
4840 tcg_gen_mov_tl(cpu_gpr[re], t1);
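/*
 * MOVEP/MOVEP[REV]: both sources are read into temporaries before
 * either destination is written, so overlapping register pairs behave
 * as a parallel move.
 */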
4844 return decode_nanomips_32_48_opc(env, ctx);