/*
 *  LatticeMico32 main translation routines.
 *
 *  Copyright (c) 2010 Michael Walle <michael@walle.cc>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "hw/lm32_pic.h"

#define DISAS_LM32 1
#if DISAS_LM32
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

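/*
 * EXTRACT_FIELD above returns the inclusive bit range [start, end] of an
 * instruction word, shifted down to bit 0.  For example, decode() below
 * uses EXTRACT_FIELD(ir, 26, 31) for the 6-bit opcode and
 * EXTRACT_FIELD(ir, 0, 15) for the 16-bit immediate.
 */
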
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_pc;
static TCGv cpu_ie;
static TCGv cpu_icc;
static TCGv cpu_dcc;
static TCGv cpu_cc;
static TCGv cpu_cfg;
static TCGv cpu_eba;
static TCGv cpu_dc;
static TCGv cpu_deba;
static TCGv cpu_bp[4];
static TCGv cpu_wp[4];

#include "gen-icount.h"

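/*
 * The TCGv globals above shadow fields of CPULM32State; they are registered
 * with the TCG backend in lm32_translate_init() at the end of this file.
 */
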
/* This is the state at translation time.  */
typedef struct DisasContext {
    uint8_t r0, r1, r2, csr;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
    "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
    "wp1", "wp2", "wp3"
};

static inline int zero_extend(unsigned int val, int width)
{
    return val & ((1 << width) - 1);
}

static inline int sign_extend(unsigned int val, int width)
{
    int sval;

    /* LSL.  */
    val <<= 32 - width;
    sval = val;

    /* ASR.  */
    sval >>= 32 - width;
    return sval;
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

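/*
 * Emit a jump to 'dest'.  When the destination is on the same guest page as
 * this TB and we are not single-stepping, the generated code chains directly
 * into the next TB; otherwise it writes cpu_pc and exits to the main loop
 * (raising EXCP_DEBUG first if single-stepping).
 */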
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
            likely(!dc->singlestep_enabled)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
    }
}

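/*
 * The dec_* functions below each translate one instruction.  For
 * register-immediate (OP_FMT_RI) forms the result goes to r1 and the third
 * operand is the sign- or zero-extended 16-bit immediate; for
 * register-register (OP_FMT_RR) forms the result goes to r2.
 */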
static void dec_add(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        if (dc->r0 == R_R0) {
            if (dc->r1 == R_R0 && dc->imm16 == 0) {
                LOG_DIS("nop\n");
            } else {
                LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16));
            }
        } else {
            LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0,
                    sign_extend(dc->imm16, 16));
        }
    } else {
        LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_and(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
            tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
            gen_helper_hlt(cpu_env);
        } else {
            tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
        }
    }
}

static void dec_andhi(DisasContext *dc)
{
    LOG_DIS("andhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);

    tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}

static void dec_b(DisasContext *dc)
{
    if (dc->r0 == R_RA) {
        LOG_DIS("ret\n");
    } else if (dc->r0 == R_EA) {
        LOG_DIS("eret\n");
    } else if (dc->r0 == R_BA) {
        LOG_DIS("bret\n");
    } else {
        LOG_DIS("b r%d\n", dc->r0);
    }

    /* restore IE.IE in case of an eret */
    if (dc->r0 == R_EA) {
        TCGv t0 = tcg_temp_new();
        int l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    } else if (dc->r0 == R_BA) {
        TCGv t0 = tcg_temp_new();
        int l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    }

    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

static void dec_bi(DisasContext *dc)
{
    LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26));

    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}

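/*
 * Shared helper for the conditional branches: if the comparison of r0 and r1
 * holds, branch to the label and chain to pc + offset (TB slot 1); otherwise
 * fall through and chain to pc + 4 (TB slot 0).
 */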
static inline void gen_cond_branch(DisasContext *dc, int cond)
{
    int l1;

    l1 = gen_new_label();
    tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
    gen_goto_tb(dc, 0, dc->pc + 4);
    gen_set_label(l1);
    gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16 << 2, 16)));
    dc->is_jmp = DISAS_TB_JUMP;
}

static void dec_be(DisasContext *dc)
{
    LOG_DIS("be r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_EQ);
}

static void dec_bg(DisasContext *dc)
{
    LOG_DIS("bg r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GT);
}

static void dec_bge(DisasContext *dc)
{
    LOG_DIS("bge r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GE);
}

static void dec_bgeu(DisasContext *dc)
{
    LOG_DIS("bgeu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GEU);
}

static void dec_bgu(DisasContext *dc)
{
    LOG_DIS("bgu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GTU);
}

static void dec_bne(DisasContext *dc)
{
    LOG_DIS("bne r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_NE);
}

static void dec_call(DisasContext *dc)
{
    LOG_DIS("call r%d\n", dc->r0);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

static void dec_calli(DisasContext *dc)
{
    LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}

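/*
 * Shared helper for the compare instructions: the destination is r2 for the
 * RR format and r1 for the RI format; the second operand is either register
 * r1 or the sign-extended 16-bit immediate.
 */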
static inline void gen_compare(DisasContext *dc, int cond)
{
    int rX = (dc->format == OP_FMT_RR) ? dc->r2 : dc->r1;
    int rY = (dc->format == OP_FMT_RR) ? dc->r0 : dc->r0;
    int rZ = (dc->format == OP_FMT_RR) ? dc->r1 : -1;

    if (dc->format == OP_FMT_RI) {
        tcg_gen_setcondi_tl(cond, cpu_R[rX], cpu_R[rY],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_setcond_tl(cond, cpu_R[rX], cpu_R[rY], cpu_R[rZ]);
    }
}

static void dec_cmpe(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpei r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_EQ);
}

static void dec_cmpg(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GT);
}

static void dec_cmpge(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GE);
}

static void dec_cmpgeu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GEU);
}

static void dec_cmpgu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GTU);
}

static void dec_cmpne(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_NE);
}

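/*
 * divu/modu require the optional hardware divider.  The generated code also
 * tests the divisor at run time and raises EXCP_DIVIDE_BY_ZERO instead of
 * executing the TCG division when it is zero.
 */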
static void dec_divu(DisasContext *dc)
{
    int l1;

    LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->env->features & LM32_FEATURE_DIVIDE)) {
        cpu_abort(dc->env, "hardware divider is not available\n");
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_lb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lbu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lhu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_modu(DisasContext *dc)
{
    int l1;

    LOG_DIS("modu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->env->features & LM32_FEATURE_DIVIDE)) {
        cpu_abort(dc->env, "hardware divider is not available\n");
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_mul(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("muli r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_MULTIPLY)) {
        cpu_abort(dc->env, "hardware multiplier is not available\n");
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_nor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("nori r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16));
        tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    } else {
        tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_or(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_orhi(DisasContext *dc)
{
    if (dc->r0 == R_R0) {
        LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16);
    } else {
        LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
    }

    tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}

static void dec_scall(DisasContext *dc)
{
    if (dc->imm5 == 7) {
        LOG_DIS("scall\n");
    } else if (dc->imm5 == 2) {
        LOG_DIS("break\n");
    } else {
        cpu_abort(dc->env, "invalid opcode\n");
    }

    if (dc->imm5 == 7) {
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
    } else {
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_BREAKPOINT);
    }
}

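/*
 * CSR reads: most CSRs are copied straight out of their TCG globals, while
 * im, ip, jtx and jrx go through helpers because their values are maintained
 * outside of the translator state.  CSRs that cannot be read (icc, dcc,
 * breakpoints, watchpoints) abort.
 */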
static void dec_rcsr(DisasContext *dc)
{
    LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
        break;
    case CSR_IM:
        gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_IP:
        gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_CC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
        break;
    case CSR_CFG:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
        break;
    case CSR_JTX:
        gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_JRX:
        gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_ICC:
    case CSR_DCC:
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        cpu_abort(dc->env, "invalid read access csr=%x\n", dc->csr);
        break;
    default:
        cpu_abort(dc->env, "read_csr: unknown csr=%x\n", dc->csr);
        break;
    }
}

static void dec_sb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_sextb(DisasContext *dc)
{
    LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->env->features & LM32_FEATURE_SIGN_EXTEND)) {
        cpu_abort(dc->env, "hardware sign extender is not available\n");
    }

    tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

static void dec_sexth(DisasContext *dc)
{
    LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->env->features & LM32_FEATURE_SIGN_EXTEND)) {
        cpu_abort(dc->env, "hardware sign extender is not available\n");
    }

    tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

static void dec_sh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_sl(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
        cpu_abort(dc->env, "hardware shifter is not available\n");
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

static void dec_sr(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
        if (dc->format == OP_FMT_RI) {
            /* TODO: check r1 == 1 during runtime */
        } else {
            cpu_abort(dc->env, "hardware shifter is not available\n");
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

static void dec_sru(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
        if (dc->format == OP_FMT_RI) {
            /* TODO: check r1 == 1 during runtime */
        } else {
            cpu_abort(dc->env, "hardware shifter is not available\n");
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

static void dec_sub(DisasContext *dc)
{
    LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_sw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_user(DisasContext *dc)
{
    cpu_abort(dc->env, "user insn undefined\n");
}

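/*
 * CSR writes: writes to im and ip can raise or clear interrupts, so they are
 * bracketed with gen_io_start()/gen_io_end() when icount is in use; ie, eba,
 * deba, dc and the breakpoint/watchpoint registers are written directly to
 * their TCG globals, and read-only CSRs abort.
 */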
static void dec_wcsr(DisasContext *dc)
{
    int no;

    LOG_DIS("wcsr r%d, %d\n", dc->r1, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IM:
        /* mark as an io operation because it could cause an interrupt */
        if (use_icount) {
            gen_io_start();
        }
        gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (use_icount) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IP:
        /* mark as an io operation because it could cause an interrupt */
        if (use_icount) {
            gen_io_start();
        }
        gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (use_icount) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
        break;
    case CSR_JTX:
        gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_JRX:
        gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_dc, cpu_R[dc->r1]);
        break;
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
        no = dc->csr - CSR_BP0;
        if (dc->env->num_bps <= no) {
            cpu_abort(dc->env, "breakpoint #%i is not available\n", no);
        }
        tcg_gen_mov_tl(cpu_bp[no], cpu_R[dc->r1]);
        break;
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        no = dc->csr - CSR_WP0;
        if (dc->env->num_wps <= no) {
            cpu_abort(dc->env, "watchpoint #%i is not available\n", no);
        }
        tcg_gen_mov_tl(cpu_wp[no], cpu_R[dc->r1]);
        break;
    case CSR_CC:
    case CSR_CFG:
        cpu_abort(dc->env, "invalid write access csr=%x\n", dc->csr);
        break;
    default:
        cpu_abort(dc->env, "write_csr unknown csr=%x\n", dc->csr);
        break;
    }
}

static void dec_xnor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xnori r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
        tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]);
    } else {
        tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_xor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xori r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_ill(DisasContext *dc)
{
    cpu_abort(dc->env, "unknown opcode 0x%02x\n", dc->opcode);
}

typedef void (*DecoderInfo)(DisasContext *dc);
static const DecoderInfo decinfo[] = {
    dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh,
    dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl,
    dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne,
    dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi,
    dec_cmpne,
    dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill,
    dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl,
    dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth,
    dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli,
    dec_cmpne
};

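/*
 * LM32 instruction words have fixed fields: bits 31:26 hold the opcode,
 * bits 25:21, 20:16 and 15:11 the register fields r0, r1 and r2, and the
 * low bits a 5-, 16- or 26-bit immediate depending on the format.  Bit 31
 * selects between the register-register and register-immediate formats.
 */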
static inline void decode(DisasContext *dc, uint32_t ir)
{
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    /* try guessing 'empty' instruction memory, although it may be a valid
     * instruction sequence (eg. srui r0, r0, 0) */
    if (dc->ir) {
        dc->nr_nops = 0;
    } else {
        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(dc->env, "fetching nop sequence\n");
        }
    }

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);

    dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
    dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
    dc->imm26 = EXTRACT_FIELD(ir, 0, 25);

    dc->csr = EXTRACT_FIELD(ir, 21, 25);
    dc->r0 = EXTRACT_FIELD(ir, 21, 25);
    dc->r1 = EXTRACT_FIELD(ir, 16, 20);
    dc->r2 = EXTRACT_FIELD(ir, 11, 15);

    /* bit 31 seems to indicate insn type.  */
    if (ir & (1 << 31)) {
        dc->format = OP_FMT_RR;
    } else {
        dc->format = OP_FMT_RI;
    }

    assert(ARRAY_SIZE(decinfo) == 64);
    assert(dc->opcode < 64);

    decinfo[dc->opcode](dc);
}

static void check_breakpoint(CPULM32State *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

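/*
 * The translation loop below decodes one instruction at a time and stops as
 * soon as a branch sets is_jmp, the TCG opcode buffer is nearly full, we are
 * single-stepping, the next guest page is reached, or max_insns instructions
 * have been translated.
 */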
/* generate intermediate code for basic block 'tb'.  */
static void gen_intermediate_code_internal(CPULM32State *env,
        TranslationBlock *tb, int search_pc)
{
    struct DisasContext ctx, *dc = &ctx;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    qemu_log_try_set_file(stderr);

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(env, "LM32: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("-----------------------------------------\n");
        log_cpu_state(env, 0);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();
    do {
        check_breakpoint(env, dc);

        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        decode(dc, cpu_ldl_code(env, dc->pc));
        dc->pc += 4;
        num_insns++;
    } while (!dc->is_jmp
         && gen_opc_ptr < gen_opc_end
         && !env->singlestep_enabled
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(env->singlestep_enabled)) {
        if (dc->is_jmp == DISAS_NEXT) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }
        t_gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j) {
            gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
        log_target_disas(pc_start, dc->pc - pc_start, 0);
        qemu_log("\nisize=%d osize=%td\n",
            dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
    }
}

void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPULM32State *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void cpu_dump_state(CPULM32State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->pc, lookup_symbol(env->pc));

    cpu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
             env->ie,
             (env->ie & IE_IE) ? 1 : 0,
             (env->ie & IE_EIE) ? 1 : 0,
             (env->ie & IE_BIE) ? 1 : 0,
             lm32_pic_get_im(env->pic_state),
             lm32_pic_get_ip(env->pic_state));
    cpu_fprintf(f, "eba=%8.8x deba=%8.8x\n",
             env->eba,
             env->deba);

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}

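/*
 * Register the TCG globals declared at the top of this file: the 32 general
 * purpose registers (named from regnames[]), the breakpoint and watchpoint
 * registers, and the individual control and status registers.
 */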
void lm32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPULM32State, regs[i]),
                          regnames[i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) {
        cpu_bp[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPULM32State, bp[i]),
                          regnames[32 + i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) {
        cpu_wp[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPULM32State, wp[i]),
                          regnames[36 + i]);
    }

    cpu_pc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, pc),
                    "pc");
    cpu_ie = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, ie),
                    "ie");
    cpu_icc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, icc),
                    "icc");
    cpu_dcc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, dcc),
                    "dcc");
    cpu_cc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, cc),
                    "cc");
    cpu_cfg = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, cfg),
                    "cfg");
    cpu_eba = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, eba),
                    "eba");
    cpu_dc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, dc),
                    "dc");
    cpu_deba = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, deba),
                    "deba");
}