/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"
#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
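/*
 * Worked example of the macro above: EXTRACT_FIELD(0xabcd1234, 8, 15)
 * expands to ((0xabcd1234 >> 8) & ((1 << 8) - 1)) == 0x12, i.e. the
 * field occupying bits [15:8] inclusive of the source word.
 */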
/* is_jmp field values */
#define DISAS_JUMP        DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT        DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT   DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP   DISAS_TARGET_3
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;
/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder.  */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);
    fn(rd, ra, imm);
    return true;
}
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}
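/*
 * Worked example of the two add2 steps above: with ina = 0xffffffff,
 * inb = 0 and an incoming carry of 1, the first add2 yields tmp = 0
 * with msr_c = 1; the second computes out = tmp + inb = 0 and keeps
 * the final carry at 1, matching the 33-bit sum 0xffffffff + 0 + 1.
 */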
/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
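/*
 * Note on the result encoding used by cmp/cmpu above: the destination
 * holds (inb - ina) with bit 31 overwritten by the signed (resp.
 * unsigned) "inb < ina" flag, so the register is negative exactly when
 * inb < ina and a following blt/bge on it tests the relation directly.
 */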
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)
ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
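/*
 * Example of the imm-prefix mechanism: "imm 0x1234; addik r5, r0, 0x5678"
 * makes typeb_imm() return deposit32(0x12340000, 0, 16, 0x5678), i.e.
 * the full 32-bit constant 0x12345678, while IMM_FLAG stays live for
 * exactly one following instruction.
 */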
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}
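/*
 * Note on the setcond above: MicroBlaze defines carry for rsub as
 * "not borrow", so C = 1 exactly when inb >= ina as unsigned values,
 * which is precisely what TCG_COND_GEU computes.
 */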
/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}
/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}
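/*
 * gen_src shifts right through carry: extract2 concatenates the old
 * carry (tmp) above ina and extracts 32 bits starting at offset 1, so
 * e.g. ina = 0x00000001 with C = 1 yields out = 0x80000000 and a new
 * C of 1 (the bit shifted out).
 */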
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)
static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}
DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}
#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
#endif
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
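/*
 * Example of the reverse-access fixup above: a 16-bit lhur at address
 * 0x1000 xors the address with 3 - MO_16 = 2, accessing 0x1002 with
 * the opposite byte order (mop ^ MO_BSWAP); byte accesses only flip
 * the address (xor with 3), since a single byte has no lanes to swap.
 */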
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
*dc
, arg_typea
*arg
)
781 TCGv addr
= compute_ldst_addr_typea(dc
, arg
->ra
, arg
->rb
);
782 return do_load(dc
, arg
->rd
, addr
, MO_TEUW
, dc
->mem_index
, false);
785 static bool trans_lhur(DisasContext
*dc
, arg_typea
*arg
)
787 TCGv addr
= compute_ldst_addr_typea(dc
, arg
->ra
, arg
->rb
);
788 return do_load(dc
, arg
->rd
, addr
, MO_TEUW
, dc
->mem_index
, true);
791 static bool trans_lhuea(DisasContext
*dc
, arg_typea
*arg
)
793 if (trap_userspace(dc
, true)) {
796 #ifdef CONFIG_USER_ONLY
799 TCGv addr
= compute_ldst_addr_ea(dc
, arg
->ra
, arg
->rb
);
800 return do_load(dc
, arg
->rd
, addr
, MO_TEUW
, MMU_NOMMU_IDX
, false);
804 static bool trans_lhui(DisasContext
*dc
, arg_typeb
*arg
)
806 TCGv addr
= compute_ldst_addr_typeb(dc
, arg
->ra
, arg
->imm
);
807 return do_load(dc
, arg
->rd
, addr
, MO_TEUW
, dc
->mem_index
, false);
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
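/*
 * Note: the lwx/swx pair emulates load-locked/store-conditional with a
 * compare-and-swap.  swx succeeds only if the reserved word still holds
 * the value observed by lwx, so an intervening A-B-A change goes
 * undetected; C is cleared on success and set on failure.
 */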
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg) \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg) \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
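/*
 * Concrete flow for the movcond above: for "beqid r3, label" the taken
 * target is computed into btarget first; the movcond then replaces it
 * with the fall-through pc (the branch address + 8, skipping the delay
 * slot) when r3 != 0, so btarget always ends up holding the real
 * successor and the jump itself can be treated as unconditional.
 */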
#define DO_BCC(NAME, COND) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); } \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); } \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); } \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
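/*
 * The muli above fans the 0/1 value in cpu_msr_c out to both MSR views
 * at once: multiplying by (MSR_C | MSR_CC) yields either 0 or the mask
 * itself, i.e. the architectural C bit and its CC carry-copy together.
 */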
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
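/*
 * Common pattern in the three helpers above: MSR keeps shadow copies
 * UMS/VMS one bit above UM/VM.  Shifting msr right by one moves the
 * saved bits into the UM/VM positions, which are then masked back in,
 * while the per-return status bits are updated (IE set for rtid, BIP
 * cleared for rtbd, EE set and EIP cleared for rted).
 */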
/* Insns connected to FSL or AXI stream attached devices.  */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = translator_ldl(cpu_env(cs), &dc->base, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}