/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#if defined(__ARM_ARCH_7__) || \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS
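/* Note: since the USE_ARMVx macros are consumed into compile-time constant
 * ints and then #undef'd, the rest of the file tests the architecture level
 * with plain "if (use_armv6_instructions)"; the compiler folds these tests
 * away, so they cost nothing compared to #ifdef blocks around every emitter.
 */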
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3",
    "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11",
    "%r12", "%r13", "%r14", "%pc",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};
static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
{
    *(uint32_t *) code_ptr = target;
}

static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
{
    uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);

    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
}

static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        reloc_abs32(code_ptr, value);
        break;

    case R_ARM_PC24:
        reloc_pc24(code_ptr, value);
        break;

    default:
        tcg_abort();
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0 and r1 will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if TARGET_LONG_BITS == 64
        /* If we're passing env to the helper as r0 and need a regpair
         * for the address then r2 will be overwritten as we're setting
         * up the args to the helper.
         */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#endif
#endif
        break;
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r1 is still needed to load data_reg or data_reg2,
           so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;

    /* qemu_st address & data_reg */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU) && (TARGET_LONG_BITS == 64)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        break;
    /* qemu_st64 data_reg2 */
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#ifdef CONFIG_SOFTMMU
        /* r2 is still needed to load data_reg, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value, right-rotated by an even amount between 0 and 30. */

/* Return the encoding rotation for an immediate, or -1 if it
   cannot be represented. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
        return 1;
    else
        return 0;
}
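/* Worked example (illustrative): for imm = 0x3fc00, ctz32(imm) & ~1 == 10 and
 * (imm >> 10) == 0xff, so encode_imm() returns 32 - 10 == 22.  A caller such
 * as the gen_arith case in tcg_out_op() then emits
 *     rotl(0x3fc00, 22) | (22 << 7) == 0xff | 0xb00,
 * i.e. operand2 "0xff ror 22", which the CPU rotates back to 0x3fc00.
 */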
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};

#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
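/* These macros build the shifter-operand field, bits [11:0] of an ARM
 * data-processing instruction.  Example (illustrative): SHIFT_IMM_LSR(16)
 * is (16 << 7) | 0x20 == 0x820, encoding "rm, lsr #16"; the SHIFT_REG_*
 * forms instead take the shift amount from the named register.
 */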
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2, /* Unsigned greater or equal */
    COND_CC = 0x3, /* Unsigned less than */
    COND_MI = 0x4, /* Negative */
    COND_PL = 0x5, /* Zero or greater */
    COND_VS = 0x6, /* Overflow */
    COND_VC = 0x7, /* No overflow */
    COND_HI = 0x8, /* Unsigned greater than */
    COND_LS = 0x9, /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes. This ensures that caches and memory are
       kept coherent during retranslation. */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
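/* The "- 8" in the branch emitters accounts for the ARM pipeline: at
 * execution time the PC reads as the instruction's address + 8, so a branch
 * whose target is the very next instruction (offset == 4) is encoded with
 * offset field ((4 - 8) >> 2) == -1, i.e. 0xffffff after masking.
 */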
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}
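/* TCG_REG_R8 is reserved as a scratch register (see tcg_target_init below),
 * which is why tcg_out_dat_reg2() and the multiply helpers can freely stage
 * results through it when a destination would otherwise clobber an operand
 * of the second instruction.
 */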
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}

static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, uint32_t arg)
{
    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */
    if ((int)arg < 0 && (int)arg >= -0x100) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
    } else if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
    } else {
        int opc = ARITH_MOV;
        int rn = 0;

        do {
            int i, rot;

            i = ctz32(arg) & ~1;
            rot = ((32 - i) << 7) & 0xf00;
            tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
            arg &= ~(0xff << i);

            opc = ARITH_ORR;
            rn = rd;
        } while (arg);
    }
}
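/* Example (illustrative): tcg_out_movi32(s, COND_AL, r0, 0x12345678) on an
 * ARMv7 host emits "movw r0, #0x5678" then "movt r0, #0x1234"; on older
 * cores it falls back to a mov of one rotated 8-bit chunk followed by orr
 * of each remaining chunk, up to four instructions in total.
 */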
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
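/* On pre-ARMv6 cores the destination of MUL/UMULL/SMULL must differ from
 * Rm, so the helpers above first try the operands as given, then swap
 * Rs and Rm (multiplication commutes), and only as a last resort stage an
 * operand or result through the reserved scratch register R8.
 */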
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
    }
}
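/* The pre-ARMv6 bswap32 is the classic 4-instruction sequence:
 *     eor r8, rn, rn, ror #16   @ r8 = x ^ ror(x, 16)
 *     bic r8, r8, #0xff0000     @ clear bits 23:16 of the mask
 *     mov rd, rn, ror #8        @ rd = ror(x, 8)
 *     eor rd, rd, r8, lsr #8    @ fix up the two middle bytes
 * (0xff | 0x800 is the rotated-immediate encoding of #0xff0000).
 */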
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* Register pre-increment with base writeback. */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
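/* Example (illustrative): tcg_out_ld32u(s, COND_AL, r0, r1, 0x2000) cannot
 * use the 12-bit immediate form, so it becomes movi32(r8, 0x2000) followed
 * by "ldr r0, [r1, r8]", whereas tcg_out_ld32u(s, COND_AL, r0, r1, 4) emits
 * a single "ldr r0, [r1, #4]".  The halfword/signed-byte forms only have an
 * 8-bit offset field, hence their tighter 0xff limit.
 */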
/* The _goto case is normally between TBs within the same code buffer,
 * and with the code buffer limited to 16MB we shouldn't need the long
 * case.
 *
 * .... except to the prologue that is in its own buffer.
 */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    if (addr & 1) {
        /* goto to a Thumb destination isn't supported */
        tcg_abort();
    }

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr);
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
    }
}
/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static inline void tcg_out_call(TCGContext *s, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
        if (addr & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, val);
        } else {
            tcg_out_bl(s, COND_AL, val);
        }
    } else {
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out32(s, addr);
    }
}
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
    if (use_armv5_instructions) {
        tcg_out_blx(s, cond, arg);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
    }
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers; 4 and up go on the stack.
 * When we reach the first stacked argument, we allocate space for it
 * and the following stacked arguments using "str r8, [sp, #-0x10]!".
 * Following arguments are filled in with "str r8, [sp, #0xNN]".
 * For more than 4 stacked arguments we'd need to know how much
 * space to allocate when we pushed the first stacked argument.
 * We don't need this, so don't implement it (and will assert if you try it.)
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGPARAM)                                 \
    static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGPARAM)             \
    {                                                                      \
        if (argreg < 4) {                                                  \
            TCG_OUT_ARG_GET_ARG(argreg);                                   \
        } else if (argreg == 4) {                                          \
            TCG_OUT_ARG_GET_ARG(TCG_REG_R8);                               \
            tcg_out32(s, (COND_AL << 28) | 0x052d8010);                    \
        } else {                                                           \
            assert(argreg < 8);                                            \
            TCG_OUT_ARG_GET_ARG(TCG_REG_R8);                               \
            tcg_out32(s, (COND_AL << 28) | 0x058d8000 | (argreg - 4) * 4); \
        }                                                                  \
        return argreg + 1;                                                 \
    }

#define TCG_OUT_ARG_GET_ARG(A) tcg_out_dat_imm(s, COND_AL, ARITH_MOV, A, 0, arg)
DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t arg)
#undef TCG_OUT_ARG_GET_ARG
#define TCG_OUT_ARG_GET_ARG(A) tcg_out_ext8u(s, COND_AL, A, arg)
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg arg)
#undef TCG_OUT_ARG_GET_ARG
#define TCG_OUT_ARG_GET_ARG(A) tcg_out_ext16u(s, COND_AL, A, arg)
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg arg)
#undef TCG_OUT_ARG_GET_ARG
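/* The two magic stores in the macro decode as (worked out from the
 * encodings, for reference):
 *     0x052d8010: str r8, [sp, #-0x10]!   @ open a 16-byte argument block
 *     0x058d8000: str r8, [sp, #(argreg - 4) * 4]   @ fill later slots
 * so at most four stacked words (argregs 4..7) are supported, hence the
 * assert.
 */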
/* We don't use the macro for this one to avoid an unnecessary reg-reg
 * move when storing to the stack.
 */
static TCGReg tcg_out_arg_reg32(TCGContext *s, TCGReg argreg, TCGReg arg)
{
    if (argreg < 4) {
        tcg_out_mov_reg(s, COND_AL, argreg, arg);
    } else if (argreg == 4) {
        /* str arg, [sp, #-0x10]! */
        tcg_out32(s, (COND_AL << 28) | 0x052d0010 | (arg << 12));
    } else {
        assert(argreg < 8);
        /* str arg, [sp, #0xNN] */
        tcg_out32(s, (COND_AL << 28) | 0x058d0000 |
                  (arg << 12) | (argreg - 4) * 4);
    }
    return argreg + 1;
}

static inline TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                       TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    argreg = tcg_out_arg_reg32(s, argreg, arglo);
    argreg = tcg_out_arg_reg32(s, argreg, arghi);
    return argreg;
}

static inline void tcg_out_arg_stacktidy(TCGContext *s, TCGReg argreg)
{
    /* Output any necessary post-call cleanup of the stack */
    if (argreg > TCG_REG_R4) {
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
    }
}

#endif /* SOFTMMU */

#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
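/* Usage sketch (illustrative): marshalling "helper(env, addr, mem_index)"
 * for a 32-bit guest address looks like
 *     argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
 *     argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
 *     argreg = tcg_out_arg_imm32(s, argreg, mem_index);
 *     tcg_out_call(s, (tcg_target_long) helper);
 *     tcg_out_arg_stacktidy(s, argreg);
 * which is exactly the pattern tcg_out_qemu_ld() follows below.
 */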
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
    TCGReg argreg;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
#  if CPU_TLB_BITS > 8
#   error
#  endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     * ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not to exceed them otherwise, so use an
     * add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUArchState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
#  if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUArchState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
#  endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUArchState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    /* Note that this code relies on the constraints we set in arm_op_defs[]
     * to ensure that later arguments are not passed to us in registers we
     * trash by moving the earlier arguments into them.
     */
    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
#if TARGET_LONG_BITS == 64
    argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
#else
    argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
#endif
    argreg = tcg_out_arg_imm32(s, argreg, mem_index);
    tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);
    tcg_out_arg_stacktidy(s, argreg);

    switch (opc) {
    case 0 | 4:
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 3:
        tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
        tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
        break;
    }

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i, rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
        }
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
        }
        break;
    }
#endif
}
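/* Note on the mem_index immediate used in the TLB lookups above and in
 * tcg_out_qemu_st() below: sizeof(tlb_table[0]) is
 * CPU_TLB_SIZE * sizeof(CPUTLBEntry) == 1 << TLB_SHIFT, so
 * mem_index * (1 << TLB_SHIFT) is expressed as the rotated immediate
 * (mem_index << (TLB_SHIFT & 1)) ror (32 - (TLB_SHIFT & ~1)): the odd bit
 * of the shift goes into the 8-bit payload and the even part into the
 * rotation field, ((16 - (TLB_SHIFT >> 1)) << 8).
 */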
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
    TCGReg argreg;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     * ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not to exceed them otherwise, so use an
     * add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUArchState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
#  if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUArchState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
#  endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUArchState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, 4);
        } else {
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    /* Note that this code relies on the constraints we set in arm_op_defs[]
     * to ensure that later arguments are not passed to us in registers we
     * trash by moving the earlier arguments into them.
     */
    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
#if TARGET_LONG_BITS == 64
    argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
#else
    argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
#endif

    switch (opc) {
    case 0:
        argreg = tcg_out_arg_reg8(s, argreg, data_reg);
        break;
    case 1:
        argreg = tcg_out_arg_reg16(s, argreg, data_reg);
        break;
    case 2:
        argreg = tcg_out_arg_reg32(s, argreg, data_reg);
        break;
    case 3:
        argreg = tcg_out_arg_reg64(s, argreg, data_reg, data_reg2);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, mem_index);
    tcg_out_call(s, (tcg_target_long) qemu_st_helpers[s_bits]);
    tcg_out_arg_stacktidy(s, argreg);

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i, rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
static uint8_t *tb_ret_addr;

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            uint8_t *ld_ptr = s->code_ptr;
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b_noaddr(s, COND_AL);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_andc_i32:
        c = ARITH_BIC;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[1], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_R8));
        }
        break;

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[0], rotl(args[1], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        }
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) || a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) || a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) || a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[1], rotl(args[2], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { -1 },
};
static void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr;
     * save also r12 to maintain stack 8-alignment.
     */

    /* stmdb sp!, { r4 - r12, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d5ff0);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r12, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd9ff0);
}