/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define EXTRACT_FIELD(src, start, end) \
    (((src) >> start) & ((1 << (end - start + 1)) - 1))
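/*
 * E.g. EXTRACT_FIELD(0xabcd, 4, 7) yields 0xc: shift right by 4,
 * then mask with (1 << 4) - 1 to keep bits [7:4].
 */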
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start. */
    TCGOp *insn_start;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder. */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
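/*
 * E.g. after "imm 0x1234" has set ext_imm = 0x12340000, a type-B insn
 * carrying the 16-bit immediate 0x5678 sees typeb_imm() == 0x12345678.
 */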
/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}
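/*
 * tcg_gen_add2_i32() adds two 64-bit values given as (low, high) i32
 * pairs.  With both high halves zero, out receives ina + inb and
 * cpu_msr_c receives the carry-out (0 or 1).
 */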
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}
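/*
 * The carry chain above runs in two steps: first ina + C_in, then
 * + inb, with the first step's carry fed into the second add2 as the
 * high half of its first operand.  The two step carries cannot both
 * be set, so cpu_msr_c ends up as a clean 0/1 carry-out.
 */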
/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
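/*
 * E.g. bsefi with s=4, w=8 extracts bits [11:4] of ra into the low
 * bits of rd, zero-extended.
 */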
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}
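/*
 * The mul*2 ops produce the full 64-bit product as a (low, high) pair
 * of i32s; the low half goes to the discarded tmp and the high half
 * to out, which is exactly what mulh/mulhu/mulhsu return.
 */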
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
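/*
 * With an extended (> 32-bit) address space, the EA forms compose the
 * address as (ra:rb): ra supplies bits [63:32] and rb bits [31:0],
 * after which the result is masked down to cfg->addr_size bits.
 */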
#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
#endif

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
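/*
 * E.g. for a reversed halfword access (size == MO_16 == 1), the
 * address is xored with 2, selecting the opposite halfword within the
 * 32-bit word, while MO_BSWAP swaps the bytes of the data itself.
 */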
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
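/*
 * lwx/swx together implement a load-linked/store-conditional pair:
 * lwx records the address and loaded value in cpu_res_addr/cpu_res_val,
 * and swx stores only if the address still matches and the cmpxchg
 * still observes the recorded value, reporting failure in MSR[C].
 */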
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
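/*
 * The movcond selects the final btarget up front: the taken
 * destination if (ra cond 0) holds, otherwise the fall-through
 * address pc_next + (delay + 1) * 4, which skips the delay slot
 * when one is present.
 */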
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }

    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
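/*
 * cpu_msr_c holds 0 or 1, so multiplying it by (MSR_C | MSR_CC)
 * either clears or sets both the carry bit and its architectural
 * copy in a single operation.
 */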
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
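/*
 * The saved copies MSR_UMS/MSR_VMS sit one bit above MSR_UM/MSR_VM
 * (cf. the "<< 1" in trans_brki), so shifting MSR right by one lines
 * the saved bits up with the live ones when restoring on return.
 */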
/* Insns connected to FSL or AXI stream attached devices.  */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
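/*
 * The bound limits translation to the current guest page: since
 * TARGET_PAGE_MASK is negative, -(pc_first | TARGET_PAGE_MASK) is the
 * number of bytes left on the page, so e.g. a pc_first ending in
 * 0xff8 on a 4KiB page leaves room for two 4-byte insns.
 */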
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}