tcg-hppa: Constrain immediate inputs to and_i32, or_i32, andc_i32.
[qemu/mdroth.git] / tcg/hppa/tcg-target.c
blob c9410b20b15f6ad9a2a84f5527267c061696b386

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
#endif

/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16

#ifndef GUEST_BASE
#define GUEST_BASE 0
#endif

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,

    TCG_REG_R17,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,

    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,

    TCG_REG_RET0,
    TCG_REG_RET1,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};

/* True iff val fits a signed field of width BITS.  */
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

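/* Illustrative example (not from the original source): with bits = 11,
   values -1024..1023 fit, so check_fit_tl(1023, 11) is true, while
   check_fit_tl(1024, 11) is false -- 1024 shifts out and sign-extends
   back as -1024.  */
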
/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources.  */
static inline int or_mask_p(tcg_target_ulong mask)
{
    if (mask == 0 || mask == -1) {
        return 0;
    }
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}

/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources.  */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}

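/* Worked example of the trick above (added for illustration):
   mask = 0x0ff0.  mask & -mask = 0x0010, the lowest set bit, so
   mask += 0x0010 yields 0x1000.  That is a power of two, so
   (mask & (mask - 1)) == 0 and the run of ones was contiguous --
   exactly what a single depi can deposit.  */
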
static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}

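/* PA-RISC stores the sign bit of such immediates in the least
   significant bit of the field.  E.g. (illustrative) val = -1 with
   len = 5 encodes as 0x1f: the magnitude bits shifted left by one,
   plus the sign in bit 0.  */
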
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}

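/* The three helpers above scatter a contiguous immediate into the
   permuted bit fields used by PA-RISC branch and long-immediate
   formats; they appear to correspond to the assemble_12/17/21
   operations in the architecture manual.  */
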
/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler.  */
#define R_PARISC_PCREL12F R_PARISC_NONE

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop.  */
        assert(pcrel >= 0);
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_AND;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_OR;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    } else if (ct & TCG_CT_CONST_AND) {
        return and_mask_p(val);
    } else if (ct & TCG_CT_CONST_OR) {
        return or_mask_p(val);
    }
    return 0;
}

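/* Illustrative mapping of the constraint letters: 'I' accepts the
   signed 11-bit ADDI range (-1024..1023); 'K' accepts a value whose
   *negation* fits that range, so it can be applied with reversed
   operands; 'M' and 'O' accept only masks that tcg_out_andi and
   tcg_out_ori below can synthesize with a single extru/depi.  */
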
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

#define COND_NEVER 0
#define COND_EQ    1
#define COND_LT    2
#define COND_LE    3
#define COND_LTU   4
#define COND_LEU   5
#define COND_SV    6
#define COND_OD    7
#define COND_FALSE 8

#define INSN_ADD     (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC    (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI    (INSN_OP(0x2d))
#define INSN_ADDIL   (INSN_OP(0x0a))
#define INSN_ADDL    (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND     (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM   (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR  (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR (INSN_OP(0x24))
#define INSN_DEP     (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI    (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS   (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU   (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL    (INSN_OP(0x08))
#define INSN_LDO     (INSN_OP(0x0d))
#define INSN_MTCTL   (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR      (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD     (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB     (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB    (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI    (INSN_OP(0x25))
#define INSN_VEXTRS  (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU  (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD    (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR     (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP    (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP   (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL      (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N    (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR     (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV      (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N    (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4 (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB     (INSN_OP(0x10))
#define INSN_LDH     (INSN_OP(0x11))
#define INSN_LDW     (INSN_OP(0x12))
#define INSN_LDWM    (INSN_OP(0x13))
#define INSN_FLDDS   (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX    (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX    (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX    (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB     (INSN_OP(0x18))
#define INSN_STH     (INSN_OP(0x19))
#define INSN_STW     (INSN_OP(0x1a))
#define INSN_STWM    (INSN_OP(0x1b))
#define INSN_FSTDS   (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT   (INSN_OP(0x20))
#define INSN_COMBF   (INSN_OP(0x22))
#define INSN_COMIBT  (INSN_OP(0x21))
#define INSN_COMIBF  (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);

static void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         int ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}

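/* Sketch of the split above (added commentary): LDIL materializes the
   high 21 bits of the constant and LDO adds the low 11 bits, much like
   the assembler's L%/R% selectors; 21 + 11 covers the full 32-bit
   value.  */
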
static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}

/* This function is required by tcg.c.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}

static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}

static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}

static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}

static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}

/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0.  */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}

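/* Example of the bit renumbering (illustrative): tcg_out_extr(s, r, a,
   8, 16, 0) extracts little-endian bits 8..23, which is emitted as an
   EXTRU with PA bit position 31 - 8 = 23 and length 16.  */
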
/* Likewise with OFS interpreted little-endian.  */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}

static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}

static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int bs0, bs1;

    /* Note that the argument is constrained to match or_mask_p.  */
    for (bs0 = 0; bs0 < 32; bs0++) {
        if ((m & (1u << bs0)) != 0) {
            break;
        }
    }
    for (bs1 = bs0; bs1 < 32; bs1++) {
        if ((m & (1u << bs1)) == 0) {
            break;
        }
    }
    assert(bs1 == 32 || (1ul << bs1) > m);

    tcg_out_mov(s, ret, arg);
    tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(-1)
              | INSN_SHDEP_CP(31 - bs0) | INSN_DEP_LEN(bs1 - bs0));
}

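/* Illustrative trace: m = 0x00000f00 gives bs0 = 8 and bs1 = 12, so the
   DEPI above deposits four 1-bits (im5 = -1) at position 8 into a copy
   of arg.  */
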
static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int ls0, ls1, ms0;

    /* Note that the argument is constrained to match and_mask_p.  */
    for (ls0 = 0; ls0 < 32; ls0++) {
        if ((m & (1u << ls0)) == 0) {
            break;
        }
    }
    for (ls1 = ls0; ls1 < 32; ls1++) {
        if ((m & (1u << ls1)) != 0) {
            break;
        }
    }
    for (ms0 = ls1; ms0 < 32; ms0++) {
        if ((m & (1u << ms0)) == 0) {
            break;
        }
    }
    assert(ms0 == 32);

    if (ls1 == 32) {
        tcg_out_extr(s, ret, arg, 0, ls0, 0);
    } else {
        tcg_out_mov(s, ret, arg);
        tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(0)
                  | INSN_SHDEP_CP(31 - ls0) | INSN_DEP_LEN(ls1 - ls0));
    }
}

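/* Illustrative traces: m = 0x000000ff gives ls0 = 8 and ls1 = 32, taking
   the EXTRU path that keeps just the low 8 bits; m = 0xffff00ff gives
   ls0 = 8 and ls1 = 16, taking the DEPI path that deposits 8 zero-bits
   at position 8.  */
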
static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}

static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}

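/* All four rotates above are built on SHD, a double-register funnel
   shift applied to arg:arg; a rotate left by n is the same funnel
   shift by 32 - n.  */
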
static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, ret, arg);           /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);        /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign); /* ret =  ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                            /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);        /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);      /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);      /* ret =  DCBA */
}

static void tcg_out_call(TCGContext *s, void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
    }
}

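/* In the long-call path above, BLE leaves the return address in R31;
   the following copy moves it into RP, where the callee's return
   expects to find it.  */
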
static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU.  */
    tcg_out_ldst(s, arg1, TCG_REG_SP, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded.  */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack.  */
    /* fstds fr22,stack_temp(sp) */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested.  */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_SP, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_LDW);
    }
}

static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, destl, tmp);
}

static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, destl, tmp);
}

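/* Note on the above: INSN_SUBI computes imm - reg, hence the swapped
   operands in the alconst case; the ADDC/SUBB instructions then fold
   the carry or borrow from the low word into the high word.  */
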
static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op);
    }
}

static const uint8_t tcg_cond_to_cmp_cond[10] =
{
    [TCG_COND_EQ]  = COND_EQ,
    [TCG_COND_NE]  = COND_EQ | COND_FALSE,
    [TCG_COND_LT]  = COND_LT,
    [TCG_COND_GE]  = COND_LT | COND_FALSE,
    [TCG_COND_LE]  = COND_LE,
    [TCG_COND_GT]  = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};

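/* Entries with COND_FALSE set select the negated ("false") form of the
   comparison; the low three bits are the PA condition field proper.
   The users below split these back apart as (pacond & 7) and
   (pacond & COND_FALSE).  */
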
static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru.  */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want.  */
        tcg_out32(s, op | 2);
    }
}

static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}

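/* COMCLR/COMICLR compare, clear the target register, and nullify the
   following instruction when the condition holds; tcg_out_setcond
   below relies on this by following the comclr with an unconditional
   load of 1.  */
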
static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        break;

    default:
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}

static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}

static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    if (ret != al && ret != ah
        && (blconst || ret != bl)
        && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    default:
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;
    }

    tcg_out_mov(s, ret, scratch);
}

#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};

/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used.  */
static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
         r1 = addr_reg >> TARGET_PAGE_BITS;
         r1 &= CPU_TLB_SIZE - 1;
         r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows.  */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register.  */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* If not equal, jump to lab_miss.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}

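/* Rebasing example for the 14-bit check above (illustrative): with a
   table offset of 0x1234, ret = (0x1234 + 0x400) & ~0x7ff = 0x1000, so
   R1 is advanced by 0x1000 and the remaining displacement easily fits
   the 14-bit load/store field.  */
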
#endif

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, addr_reg2;
    int data_reg, data_reg2;
    int r0, r1, mem_index, s_bits, bswap;
    tcg_target_long offset;
#if defined(CONFIG_SOFTMMU)
    int lab1, lab2, argreg;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : TCG_REG_R0);
    addr_reg = *args++;
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_R26;
    r1 = TCG_REG_R25;

#if defined(CONFIG_SOFTMMU)
    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = tcg_out_tlb_read(s, r0, r1, addr_reg, addr_reg2, s_bits, lab1,
                              offsetof(CPUState,
                                       tlb_table[mem_index][0].addr_read));

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : r1),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    tcg_out_arith(s, r0, addr_reg, TCG_REG_R20, INSN_ADDL);
    offset = TCG_REG_R0;
#else
    r0 = addr_reg;
    offset = GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 0;
#else
    bswap = 1;
#endif
    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDBX);
        tcg_out_ext8s(s, data_reg, data_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, data_reg, data_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, data_reg, data_reg, 1);
        } else {
            tcg_out_ext16s(s, data_reg, data_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, data_reg, r0, offset, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = data_reg2;
            data_reg2 = data_reg;
            data_reg = t;
        }
        if (offset == TCG_REG_R0) {
            /* Make sure not to clobber the base register.  */
            if (data_reg2 == r0) {
                tcg_out_ldst(s, data_reg, r0, 4, INSN_LDW);
                tcg_out_ldst(s, data_reg2, r0, 0, INSN_LDW);
            } else {
                tcg_out_ldst(s, data_reg2, r0, 0, INSN_LDW);
                tcg_out_ldst(s, data_reg, r0, 4, INSN_LDW);
            }
        } else {
            tcg_out_addi2(s, TCG_REG_R20, r0, 4);
            tcg_out_ldst_index(s, data_reg2, r0, offset, INSN_LDWX);
            tcg_out_ldst_index(s, data_reg, TCG_REG_R20, offset, INSN_LDWX);
        }
        if (bswap) {
            tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
            tcg_out_bswap32(s, data_reg2, data_reg2, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, argreg--, addr_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, argreg--, addr_reg2);
    }
    tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);

    tcg_out_call(s, qemu_ld_helpers[s_bits]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, data_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, data_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, data_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, data_reg, TCG_REG_RET0);
        tcg_out_mov(s, data_reg2, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#endif
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, addr_reg2;
    int data_reg, data_reg2;
    int r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    tcg_target_long offset;
    int lab1, lab2, argreg;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : 0);
    addr_reg = *args++;
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    mem_index = *args;
    s_bits = opc;

    r0 = TCG_REG_R26;
    r1 = TCG_REG_R25;

#if defined(CONFIG_SOFTMMU)
    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = tcg_out_tlb_read(s, r0, r1, addr_reg, addr_reg2, s_bits, lab1,
                              offsetof(CPUState,
                                       tlb_table[mem_index][0].addr_write));

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : r1),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    tcg_out_arith(s, r0, addr_reg, TCG_REG_R20, INSN_ADDL);
#else
    /* There are no indexed stores, so if GUEST_BASE is set
       we must do the add explicitly.  Careful to avoid R20,
       which is used for the bswaps to follow.  */
    if (GUEST_BASE == 0) {
        r0 = addr_reg;
    } else {
        tcg_out_arith(s, TCG_REG_R31, addr_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
        r0 = TCG_REG_R31;
    }
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 0;
#else
    bswap = 1;
#endif
    switch (opc) {
    case 0:
        tcg_out_ldst(s, data_reg, r0, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, data_reg, 0);
            data_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, data_reg, r0, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
            data_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, data_reg, r0, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, data_reg2, TCG_REG_R23);
            data_reg2 = TCG_REG_R20;
            data_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, data_reg2, r0, 0, INSN_STW);
        tcg_out_ldst(s, data_reg, r0, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, argreg--, addr_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, argreg--, addr_reg2);
    }

    switch (opc) {
    case 0:
        tcg_out_andi(s, argreg--, data_reg, 0xff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 1:
        tcg_out_andi(s, argreg--, data_reg, 0xffff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 2:
        tcg_out_mov(s, argreg--, data_reg);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 3:
        /* Because of the alignment required by the 64-bit data argument,
           we will always use R23/R24.  Also, we will always run out of
           argument registers for storing mem_index, so that will have
           to go on the stack.  */
        if (mem_index == 0) {
            argreg = TCG_REG_R0;
        } else {
            argreg = TCG_REG_R20;
            tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        }
        tcg_out_mov(s, TCG_REG_R23, data_reg2);
        tcg_out_mov(s, TCG_REG_R24, data_reg);
        tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_SP,
                   TCG_TARGET_CALL_STACK_OFFSET - 4);
        break;
    default:
        tcg_abort();
    }

    tcg_out_call(s, qemu_st_helpers[s_bits]);

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#endif
}

static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}

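/* Both emission orders above place the final value setup after the BV,
   i.e. in the branch delay slot, so it still executes before control
   leaves the translation block.  */
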
static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine.  */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract.  */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

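    /* NOT is implemented with the reversed subtract below:
       -1 - arg is ~arg in two's complement.  */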
    case INDEX_op_not_i32:
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    /* These three correspond exactly to the fallback implementation.
       But by including them we reduce the number of TCG ops that
       need to be generated, and these opcodes are fairly common.  */
    case INDEX_op_neg_i32:
        tcg_out_arith(s, args[0], TCG_REG_R0, args[1], INSN_SUB);
        break;
    case INDEX_op_ext8u_i32:
        tcg_out_andi(s, args[0], args[1], 0xff);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_andi(s, args[0], args[1], 0xffff);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p.  */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame.  */
    /* R3, the frame pointer, is not currently modified.  */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    /* R17 is the global env, so no need to save.  */
    TCG_REG_R18
};

void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    /* Allocate space for the fixed frame marker.  */
    frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;

    /* Allocate space for the saved registers.  */
    frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;

    /* Align the allocated space.  */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN);

    /* The return address is stored in the caller's frame.  */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -20);

    /* Allocate stack frame, saving the first register at the same time.  */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, frame_size, INSN_STWM);

    /* Save all callee saved registers.  */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
    }

    /* Jump to TB, and adjust R18 to be the return address.  */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R26));
    tcg_out_mov(s, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return.  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, -frame_size, INSN_LDWM);
}

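/* Note on the prologue/epilogue above: STWM stores the register and
   then adds frame_size to SP, allocating the frame in one instruction;
   the closing LDWM with -frame_size reloads the register and
   deallocates the frame, executing in the BV delay slot.  */
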
void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
    if (GUEST_BASE != 0) {
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    tcg_add_target_add_op_defs(hppa_op_defs);
}