 * Tiny Code Generator for QEMU
 * Copyright (c) 2008 Fabrice Bellard
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#include "../tcg-pool.c.inc"
#include "../tcg-ldst.c.inc"
 * Standardize on the _CALL_FOO symbols used by GCC:
 * Apple XCode does not define _CALL_DARWIN.
 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
#if !defined(_CALL_SYSV) && \
    !defined(_CALL_DARWIN) && \
    !defined(_CALL_AIX) && \
# if defined(__APPLE__)
# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ALIGN_ARGS 1
/* For some memory operations, we need a scratch that isn't R0. For the AIX
   calling convention, we can re-use the TOC register since we'll be reloading
   it at every call. Otherwise R12 will do nicely as neither a call-saved
   register nor a parameter register. */
# define TCG_REG_TMP1 TCG_REG_R2
# define TCG_REG_TMP1 TCG_REG_R12
#define TCG_VEC_TMP1 TCG_REG_V0
#define TCG_VEC_TMP2 TCG_REG_V1
#define TCG_REG_TB TCG_REG_R31
#define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
/* Shorthand for size of a pointer. Avoid promotion to unsigned. */
#define SZP ((int)sizeof(void *))
/* Shorthand for size of a register. */
#define SZR (TCG_TARGET_REG_BITS / 8)
#define TCG_CT_CONST_S16 0x100
#define TCG_CT_CONST_U16 0x200
#define TCG_CT_CONST_S32 0x400
#define TCG_CT_CONST_U32 0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000
#define TCG_CT_CONST_WSZ 0x4000
#define ALL_GENERAL_REGS 0xffffffffu
#define ALL_VECTOR_REGS 0xffffffff00000000ull
#define ALL_QLOAD_REGS \
     ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5)))
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | \
                          (1 << TCG_REG_R5) | (1 << TCG_REG_R6)))
#define ALL_QLOAD_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R3))
#define ALL_QSTORE_REGS ALL_QLOAD_REGS
static bool have_isel;
#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG 30
#ifdef CONFIG_DEBUG_TCG
static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
    "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,  /* call saved registers */
    TCG_REG_R12,  /* call clobbered, non-arguments */
    TCG_REG_R10,  /* call clobbered, arguments */
    /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
    TCG_REG_V2,   /* call clobbered, vectors */
static const int tcg_target_call_iarg_regs[] = {
static const int tcg_target_call_oarg_regs[] = {
static const int tcg_target_callee_save_regs[] = {
    TCG_REG_R27, /* currently used for the global env */
static inline bool in_range_b(tcg_target_long target)
    return target == sextract64(target, 0, 26);
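
/*
 * A direct-branch displacement is a signed 26-bit byte offset (24-bit
 * field with the low two bits implied zero), i.e. roughly +/-32 MiB
 * from the branch insn; in_range_b() above checks exactly that.
 */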
static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
                               const tcg_insn_unit *target)
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(in_range_b(disp));
    return disp & 0x3fffffc;
static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
    if (in_range_b(disp)) {
        *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
                               const tcg_insn_unit *target)
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(disp == (int16_t) disp);
    return disp & 0xfffc;
static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
    if (disp == (int16_t) disp) {
        *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
    if (ct & TCG_CT_CONST) {
    /* The only 32-bit constraint we use aside from
       TCG_CT_CONST is TCG_CT_CONST_S16. */
    if (type == TCG_TYPE_I32) {
    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
    } else if ((ct & TCG_CT_CONST_WSZ)
               && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
#define OPCD(opc) ((opc)<<26)
#define XO19(opc) (OPCD(19)|((opc)<<1))
#define MD30(opc) (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc) (OPCD(31)|((opc)<<1))
#define XO58(opc) (OPCD(58)|(opc))
#define XO62(opc) (OPCD(62)|(opc))
#define VX4(opc) (OPCD(4)|(opc))
#define LBZ OPCD( 34)
#define LHZ OPCD( 40)
#define LHA OPCD( 42)
#define LWZ OPCD( 32)
#define LWZUX XO31( 55)
#define STB OPCD( 38)
#define STH OPCD( 44)
#define STW OPCD( 36)
#define STDU XO62( 1)
#define STDX XO31(149)
#define LDX XO31( 21)
#define LDUX XO31( 53)
#define LWAX XO31(341)
#define ADDIC OPCD( 12)
#define ADDI OPCD( 14)
#define ADDIS OPCD( 15)
#define ORI OPCD( 24)
#define ORIS OPCD( 25)
#define XORI OPCD( 26)
#define XORIS OPCD( 27)
#define ANDI OPCD( 28)
#define ANDIS OPCD( 29)
#define MULLI OPCD( 7)
#define CMPLI OPCD( 10)
#define CMPI OPCD( 11)
#define SUBFIC OPCD( 8)
#define LWZU OPCD( 33)
#define STWU OPCD( 37)
#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM OPCD( 23)
#define RLDICL MD30( 0)
#define RLDICR MD30( 1)
#define RLDIMI MD30( 3)
#define RLDCL MDS30( 8)
#define BCLR XO19( 16)
#define BCCTR XO19(528)
#define CRAND XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR XO19(449)
#define CRNOR XO19( 33)
#define EXTSB XO31(954)
#define EXTSH XO31(922)
#define EXTSW XO31(986)
#define ADD XO31(266)
#define ADDE XO31(138)
#define ADDME XO31(234)
#define ADDZE XO31(202)
#define ADDC XO31( 10)
#define AND XO31( 28)
#define SUBF XO31( 40)
#define SUBFC XO31( 8)
#define SUBFE XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)
#define XOR XO31(316)
#define MULLW XO31(235)
#define MULHW XO31( 75)
#define MULHWU XO31( 11)
#define DIVW XO31(491)
#define DIVWU XO31(459)
#define CMPL XO31( 32)
#define LHBRX XO31(790)
#define LWBRX XO31(534)
#define LDBRX XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR XO31(339)
#define MTSPR XO31(467)
#define SRAWI XO31(824)
#define NEG XO31(104)
#define MFCR XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define CNTTZW XO31(538)
#define CNTTZD XO31(570)
#define CNTPOPW XO31(378)
#define CNTPOPD XO31(506)
#define ANDC XO31( 60)
#define ORC XO31(412)
#define EQV XO31(284)
#define NAND XO31(476)
#define ISEL XO31( 15)
#define MULLD XO31(233)
#define MULHD XO31( 73)
#define MULHDU XO31( 9)
#define DIVD XO31(489)
#define DIVDU XO31(457)
#define LBZX XO31( 87)
#define LHZX XO31(279)
#define LHAX XO31(343)
#define LWZX XO31( 23)
#define STBX XO31(215)
#define STHX XO31(407)
#define STWX XO31(151)
#define EIEIO XO31(854)
#define HWSYNC XO31(598)
#define LWSYNC (HWSYNC | (1u << 21))
#define SPR(a, b) ((((a)<<5)|(b))<<11)
#define CTR SPR(9, 0)
#define SLW XO31( 24)
#define SRW XO31(536)
#define SRAW XO31(792)
#define SLD XO31( 27)
#define SRD XO31(539)
#define SRAD XO31(794)
#define SRADI XO31(413<<1)
#define BRH XO31(219)
#define BRW XO31(155)
#define BRD XO31(187)
#define TRAP (TW | TO(31))
#define NOP ORI /* ori 0,0,0 */
#define LVX XO31(103)
#define LVEBX XO31(7)
#define LVEHX XO31(39)
#define LVEWX XO31(71)
#define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */
#define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */
#define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */
#define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */
#define LXSD (OPCD(57) | 2) /* v3.00 */
#define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */
#define STVX XO31(231)
#define STVEWX XO31(199)
#define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */
#define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */
#define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
#define STXSD (OPCD(61) | 2) /* v3.00 */
#define VADDSBS VX4(768)
#define VADDUBS VX4(512)
#define VADDUBM VX4(0)
#define VADDSHS VX4(832)
#define VADDUHS VX4(576)
#define VADDUHM VX4(64)
#define VADDSWS VX4(896)
#define VADDUWS VX4(640)
#define VADDUWM VX4(128)
#define VADDUDM VX4(192) /* v2.07 */
#define VSUBSBS VX4(1792)
#define VSUBUBS VX4(1536)
#define VSUBUBM VX4(1024)
#define VSUBSHS VX4(1856)
#define VSUBUHS VX4(1600)
#define VSUBUHM VX4(1088)
#define VSUBSWS VX4(1920)
#define VSUBUWS VX4(1664)
#define VSUBUWM VX4(1152)
#define VSUBUDM VX4(1216) /* v2.07 */
#define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */
#define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */
#define VMAXSB VX4(258)
#define VMAXSH VX4(322)
#define VMAXSW VX4(386)
#define VMAXSD VX4(450) /* v2.07 */
#define VMAXUB VX4(2)
#define VMAXUH VX4(66)
#define VMAXUW VX4(130)
#define VMAXUD VX4(194) /* v2.07 */
#define VMINSB VX4(770)
#define VMINSH VX4(834)
#define VMINSW VX4(898)
#define VMINSD VX4(962) /* v2.07 */
#define VMINUB VX4(514)
#define VMINUH VX4(578)
#define VMINUW VX4(642)
#define VMINUD VX4(706) /* v2.07 */
#define VCMPEQUB VX4(6)
#define VCMPEQUH VX4(70)
#define VCMPEQUW VX4(134)
#define VCMPEQUD VX4(199) /* v2.07 */
#define VCMPGTSB VX4(774)
#define VCMPGTSH VX4(838)
#define VCMPGTSW VX4(902)
#define VCMPGTSD VX4(967) /* v2.07 */
#define VCMPGTUB VX4(518)
#define VCMPGTUH VX4(582)
#define VCMPGTUW VX4(646)
#define VCMPGTUD VX4(711) /* v2.07 */
#define VCMPNEB VX4(7) /* v3.00 */
#define VCMPNEH VX4(71) /* v3.00 */
#define VCMPNEW VX4(135) /* v3.00 */
#define VSLB VX4(260)
#define VSLH VX4(324)
#define VSLW VX4(388)
#define VSLD VX4(1476) /* v2.07 */
#define VSRB VX4(516)
#define VSRH VX4(580)
#define VSRW VX4(644)
#define VSRD VX4(1732) /* v2.07 */
#define VSRAB VX4(772)
#define VSRAH VX4(836)
#define VSRAW VX4(900)
#define VSRAD VX4(964) /* v2.07 */
#define VRLW VX4(132)
#define VRLD VX4(196) /* v2.07 */
#define VMULEUB VX4(520)
#define VMULEUH VX4(584)
#define VMULEUW VX4(648) /* v2.07 */
#define VMULOUB VX4(8)
#define VMULOUH VX4(72)
#define VMULOUW VX4(136) /* v2.07 */
#define VMULUWM VX4(137) /* v2.07 */
#define VMULLD VX4(457) /* v3.10 */
#define VMSUMUHM VX4(38)
#define VMRGHB VX4(12)
#define VMRGHH VX4(76)
#define VMRGHW VX4(140)
#define VMRGLB VX4(268)
#define VMRGLH VX4(332)
#define VMRGLW VX4(396)
#define VPKUHUM VX4(14)
#define VPKUWUM VX4(78)
#define VAND VX4(1028)
#define VANDC VX4(1092)
#define VNOR VX4(1284)
#define VOR VX4(1156)
#define VXOR VX4(1220)
#define VEQV VX4(1668) /* v2.07 */
#define VNAND VX4(1412) /* v2.07 */
#define VORC VX4(1348) /* v2.07 */
#define VSPLTB VX4(524)
#define VSPLTH VX4(588)
#define VSPLTW VX4(652)
#define VSPLTISB VX4(780)
#define VSPLTISH VX4(844)
#define VSPLTISW VX4(908)
#define VSLDOI VX4(44)
#define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */
#define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
#define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
#define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */
#define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */
#define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */
#define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */
#define MTVSRDD (XO31(435) | 1) /* v3.00, force tx=1 */
#define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */
#define RT(r) ((r)<<21)
#define RS(r) ((r)<<21)
#define RA(r) ((r)<<16)
#define RB(r) ((r)<<11)
#define TO(t) ((t)<<21)
#define SH(s) ((s)<<11)
#define MB(b) ((b)<<6)
#define ME(e) ((e)<<1)
#define BO(o) ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b) (1 << (19 - (b)))
#define VRT(r) (((r) & 31) << 21)
#define VRA(r) (((r) & 31) << 16)
#define VRB(r) (((r) & 31) << 11)
#define VRC(r) (((r) & 31) << 6)
#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
#define BF(n) ((n)<<23)
#define BI(n, c) (((c)+((n)*4))<<16)
#define BT(n, c) (((c)+((n)*4))<<21)
#define BA(n, c) (((c)+((n)*4))<<16)
#define BB(n, c) (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)
#define BO_COND_TRUE BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS BO(20)
static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
/* The low bit here is set if the RA and RB fields must be inverted. */
static const uint32_t tcg_to_isel[] = {
    [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
    [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
    [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
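
/*
 * For example, TCG_COND_NE tests the same CR bit as TCG_COND_EQ; the
 * low bit set in its entry tells the expanders below to swap or zero
 * the isel operands rather than use a different condition bit.
 */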
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
    const tcg_insn_unit *target;
    target = (const tcg_insn_unit *)value;
        return reloc_pc14(code_ptr, target);
        return reloc_pc24(code_ptr, target);
         * We are (slightly) abusing this relocation type. In particular,
         * assert that the low 2 bits are zero, and do not modify them.
         * That way we can use this with LD et al that have opcode bits
         * in the low 2 bits of the insn.
        if ((value & 3) || value != (int16_t)value) {
        *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
         * We are abusing this relocation type. Again, this points to
         * a pair of insns, lis + load. This is an absolute address
         * relocation for PPC32 so the lis cannot be removed.
        if (hi + lo != value) {
        code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
        code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
        g_assert_not_reached();
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        if (ret < TCG_REG_V0) {
            if (arg < TCG_REG_V0) {
                tcg_out32(s, OR | SAB(arg, ret, arg));
            } else if (have_isa_2_07) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
                          | VRT(arg) | RA(ret));
                /* Altivec does not support vector->integer moves. */
        } else if (arg < TCG_REG_V0) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
                          | VRT(ret) | RA(arg));
                /* Altivec does not support integer->vector moves. */
        tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
        tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
        g_assert_not_reached();
static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
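    /*
     * E.g. the ext32u idiom tcg_out_rld(s, RLDICL, dst, src, 0, 32)
     * encodes sh = 0 entirely within the SH field and mb = 32
     * (0b100000) as MB64(1), the sixth bit folded into the low
     * position as above.
     */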
static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
static inline void tcg_out_ext8s(TCGContext *s, TCGReg dst, TCGReg src)
    tcg_out32(s, EXTSB | RA(dst) | RS(src));
static inline void tcg_out_ext16s(TCGContext *s, TCGReg dst, TCGReg src)
    tcg_out32(s, EXTSH | RA(dst) | RS(src));
static inline void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
    tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
static inline void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
    tcg_out32(s, EXTSW | RA(dst) | RS(src));
static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
    /* Limit immediate shift count lest we create an illegal insn. */
    tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
    tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
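    /*
     * SRADI takes a 6-bit shift amount with sh[0..4] in the SH field
     * and sh[5] in insn bit 1, hence the ((c >> 4) & 2) above; e.g.
     * c = 63 encodes as SH(31) | 2.
     */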
static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
        tcg_out32(s, BRH | RA(dst) | RS(src));
        if (flags & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, dst, dst);
        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, dst, dst);
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     * Begin with: src = xxxxabcd
    /* tmp = rol32(src, 24) & 0x000000ff = 0000000c */
    tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
    /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00) = 000000dc */
    tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_ext16s(s, dst, tmp);
        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
        tcg_out32(s, BRW | RA(dst) | RS(src));
        if (flags & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, dst, dst);
        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, dst, dst);
     * Stolen from gcc's builtin_bswap32.
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     * Begin with: src = xxxxabcd
    /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */
    tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
    /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */
    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
    /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */
    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_ext32s(s, dst, tmp);
        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
    TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
    TCGReg t1 = dst == src ? dst : TCG_REG_R0;
        tcg_out32(s, BRD | RA(dst) | RS(src));
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     * Begin with: src = abcdefgh
    /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */
    tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
    /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */
    tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
    /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */
    tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
    /* t0 = rol64(t0, 32) = hgfe0000 */
    tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
    /* t1 = rol64(src, 32) = efghabcd */
    tcg_out_rld(s, RLDICL, t1, src, 32, 0);
    /* t0 = dep(t0, rol32(t1, 8), 0xffffffff) = hgfebcda */
    tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
    /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */
    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
    /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */
    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
    tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
/* Emit a move into ret of arg, if it can be done in one insn. */
static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
    if (arg == (int16_t)arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
    if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
    /* Load 16-bit immediates with one insn. */
    if (tcg_out_movi_one(s, ret, arg)) {
    /* Load addresses within the TB with one insn. */
    tb_diff = tcg_tbrel_diff(s, (void *)arg);
    if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
        tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
    /* Load 32-bit immediates with two insns. Note that we've already
       eliminated bare ADDIS, so we know both insns are required. */
    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        tcg_out32(s, ORI | SAI(ret, ret, arg));
    if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    /* Load masked 16-bit value. */
    if (arg > 0 && (arg & 0x8000)) {
        if ((tmp & (tmp + 1)) == 0) {
            int mb = clz64(tmp + 1) + 1;
            tcg_out32(s, ADDI | TAI(ret, 0, arg));
            tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
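            /*
             * E.g. arg = 0x00000000ffff8123: the ADDI sign-extends to
             * 0xffffffffffff8123 and the RLDICL with mb = 32 clears
             * the excess high ones, two insns in all.
             */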
    /* Load common masks with 2 insns. */
    if (tmp == (int16_t)tmp) {
        tcg_out32(s, ADDI | TAI(ret, 0, tmp));
        tcg_out_shli64(s, ret, ret, shift);
    if (tcg_out_movi_one(s, ret, arg << shift)) {
        tcg_out_shri64(s, ret, ret, shift);
    /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
    if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
        tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
    /* Use the constant pool, if possible. */
    if (!in_prologue && USE_REG_TB) {
        new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
    tmp = arg >> 31 >> 1;
    tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
        tcg_out_shli64(s, ret, ret, 32);
    if (arg & 0xffff0000) {
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        tcg_out32(s, ORI | SAI(ret, ret, arg));
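    /*
     * The final fallback thus costs at most five insns for an
     * arbitrary 64-bit constant, e.g. 0x123456789abcdef0 becomes
     * lis; ori; sldi 32; oris; ori.
     */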
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg ret, int64_t val)
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
        if (have_isa_3_00) {
            tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
     * Otherwise we must load the value from the constant pool.
        add = tcg_tbrel_diff(s, NULL);
        load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
        load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_label(s, val, rel, s->code_ptr, add);
            new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
        load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_l2(s, rel, s->code_ptr, add, val, val);
            new_pool_l4(s, rel, s->code_ptr, add,
                        val >> 32, val, val >> 32, val);
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
        load_insn |= RA(TCG_REG_TB);
        tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
    tcg_out32(s, load_insn);
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
        tcg_debug_assert(ret < TCG_REG_V0);
        tcg_out_movi_int(s, type, ret, arg, false);
        g_assert_not_reached();
static bool mask_operand(uint32_t c, int *mb, int *me)
    /* Accept a bit pattern like:
       Keep track of the transitions. */
    if (c == 0 || c == -1) {
    if (test & (test - 1)) {
    *mb = test ? clz32(test & -test) + 1 : 0;
static bool mask64_operand(uint64_t c, int *mb, int *me)
    /* Accept 1..10..0. */
    /* Accept 0..01..1. */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
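    /*
     * E.g. mask_operand(0x00ffff00) above yields mb = 8, me = 23 for
     * RLWINM, while the 64-bit masks accepted here must be anchored at
     * bit 0 or bit 63, since RLDICL/RLDICR can only clear from one end.
     */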
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
    if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    if (mask64_operand(c, &mb, &me)) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        tcg_out32(s, op_lo | SAI(src, dst, c));
static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_int_store = false;
    TCGReg rs = TCG_REG_TMP1;
        if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
    case STB: case STH: case STW:
        is_int_store = true;
    /* For unaligned, or very large offsets, use the indexed form. */
    if (offset & align || offset != (int32_t)offset || opi == 0) {
        tcg_debug_assert(!is_int_store || rs != rt);
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt & 31, base, rs));
    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;
    if (l1 < 0 && orig >= 0) {
        l1 = (int16_t)(offset - 0x4000);
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt & 31, base, l0));
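    /*
     * E.g. offset 0x12345678 splits into ADDIS rs,base,0x1234 plus a
     * D-form access at 0x5678(rs); the 0x4000 "extra" step above keeps
     * the high adjustment within a signed 16-bit ADDIS immediate when
     * l0's sign bit would otherwise push l1 out of range.
     */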
static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
                           TCGReg va, TCGReg vb, int shb)
    tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg base, intptr_t offset)
        if (ret < TCG_REG_V0) {
            tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
        shift = (offset - 4) & 0xc;
            tcg_out_vsldoi(s, ret, ret, ret, shift);
        if (ret < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, LD, LDX, ret, base, offset);
        tcg_debug_assert(ret >= TCG_REG_V0);
            tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
            tcg_out_vsldoi(s, ret, ret, ret, 8);
        tcg_debug_assert(ret >= TCG_REG_V0);
        tcg_debug_assert((offset & 15) == 0);
        tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
                         LVX, ret, base, offset);
        g_assert_not_reached();
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg base, intptr_t offset)
        if (arg < TCG_REG_V0) {
            tcg_out_mem_long(s, STW, STWX, arg, base, offset);
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
        tcg_debug_assert((offset & 3) == 0);
        shift = (offset - 4) & 0xc;
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        if (arg < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, STD, STDX, arg, base, offset);
        tcg_debug_assert(arg >= TCG_REG_V0);
            tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
                             STXSDX, arg, base, offset);
        tcg_debug_assert((offset & 7) == 0);
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
        tcg_debug_assert(arg >= TCG_REG_V0);
        tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
                         STVX, arg, base, offset);
        g_assert_not_reached();
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    /* Simplify the comparisons below wrt CMPI. */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
            if ((int16_t) arg2 == arg2) {
            } else if ((uint16_t) arg2 == arg2) {
            if ((int16_t) arg2 == arg2) {
            if ((uint16_t) arg2 == arg2) {
    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
        tcg_out32(s, op | RA(arg1) | RB(arg2));
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
    if (type == TCG_TYPE_I32) {
        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
        tcg_out_shri32(s, dst, dst, 5);
        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
        tcg_out_shri64(s, dst, dst, 6);
static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
    /* X != 0 implies X + -1 generates a carry. Extra addition
       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. */
        tcg_out32(s, ADDIC | TAI(dst, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, dst, src));
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
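    /*
     * E.g. X = 0: ADDIC leaves CA = 0 and SUBFE computes
     * ~(-1) + 0 + 0 = 0; any X != 0 sets CA and yields
     * ~(X-1) + X + 1 = 1.
     */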
static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
        if ((uint32_t)arg2 == arg2) {
            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    /* Ignore high bits of a potential constant arg2. */
    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    /* Handle common and trivial cases before handling anything else. */
            tcg_out_setcond_eq0(s, type, arg0, arg1);
            if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
                tcg_out_ext32u(s, TCG_REG_R0, arg1);
            tcg_out_setcond_ne0(s, arg0, arg1);
            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
            /* Extract the sign bit. */
            if (type == TCG_TYPE_I32) {
                tcg_out_shri32(s, arg0, arg1, 31);
                tcg_out_shri64(s, arg0, arg1, 63);
    /* If we have ISEL, we can implement everything with 3 or 4 insns.
       All other cases below are also at least 3 insns, so speed up the
       code generator by not considering them and always using ISEL. */
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
        isel = tcg_to_isel[cond];
        tcg_out_movi(s, type, arg0, 1);
            /* arg0 = (bc ? 0 : 1) */
            tab = TAB(arg0, 0, arg0);
            /* arg0 = (bc ? 1 : 0) */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            tab = TAB(arg0, arg0, TCG_REG_R0);
        tcg_out32(s, isel | tab);
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_eq0(s, type, arg0, arg1);
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        /* Discard the high bits only once, rather than both inputs. */
        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_R0, arg1);
        tcg_out_setcond_ne0(s, arg0, arg1);
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
        bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           TCGLabel *l, TCGType type)
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
    tcg_out_bc(s, tcg_to_bc[cond], l);
static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
                            TCGArg v2, bool const_c2)
    /* If for some reason both inputs are zero, don't produce bad code. */
    if (v1 == 0 && v2 == 0) {
        tcg_out_movi(s, type, dest, 0);
    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
        int isel = tcg_to_isel[cond];
        /* Swap the V operands if the operation indicates inversion. */
        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
        tcg_out32(s, isel | TAB(dest, v1, v2));
            cond = tcg_invert_cond(cond);
        } else if (dest != v1) {
                tcg_out_movi(s, type, dest, 0);
                tcg_out_mov(s, type, dest, v1);
        /* Branch forward over one insn */
        tcg_out32(s, tcg_to_bc[cond] | 8);
            tcg_out_movi(s, type, dest, 0);
            tcg_out_mov(s, type, dest, v2);
static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
                          TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
    if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
        tcg_out32(s, opc | RA(a0) | RS(a1));
        tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
        /* Note that the only other valid constant for a2 is 0. */
            tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
            tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
        } else if (!const_a2 && a0 == a2) {
            tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
            tcg_out32(s, opc | RA(a0) | RS(a1));
            tcg_out32(s, opc | RA(a0) | RS(a1));
            tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
                tcg_out_movi(s, type, a0, 0);
                tcg_out_mov(s, type, a0, a2);
static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                         const int *const_args)
    static const struct { uint8_t bit1, bit2; } bits[] = {
        [TCG_COND_LT ] = { CR_LT, CR_LT },
        [TCG_COND_LE ] = { CR_LT, CR_GT },
        [TCG_COND_GT ] = { CR_GT, CR_GT },
        [TCG_COND_GE ] = { CR_GT, CR_LT },
        [TCG_COND_LTU] = { CR_LT, CR_LT },
        [TCG_COND_LEU] = { CR_LT, CR_GT },
        [TCG_COND_GTU] = { CR_GT, CR_GT },
        [TCG_COND_GEU] = { CR_GT, CR_LT },
    TCGCond cond = args[4], cond2;
    TCGArg al, ah, bl, bh;
    int blconst, bhconst;
    blconst = const_args[2];
    bhconst = const_args[3];
        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
        bit1 = bits[cond].bit1;
        bit2 = bits[cond].bit2;
        op = (bit1 != bit2 ? CRANDC : CRAND);
        cond2 = tcg_unsigned_cond(cond);
        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
        tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
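        /*
         * E.g. for LT this computes (ah < bh) || (ah == bh && al <u bl):
         * CR6 holds the signed-or-unsigned high-part compare, CR7 the
         * always-unsigned low-part compare, and the CR ops above fold
         * the result into CR7[EQ].
         */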
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
    tcg_out_cmp2(s, args + 1, const_args + 1);
    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
                             const int *const_args)
    tcg_out_cmp2(s, args, const_args);
    tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
static void tcg_out_mb(TCGContext *s, TCGArg a0)
    uint32_t insn = HWSYNC;
    if (a0 == TCG_MO_LD_LD) {
    } else if (a0 == TCG_MO_ST_ST) {
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                              uintptr_t jmp_rw, uintptr_t addr)
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_insn_unit i1, i2;
        intptr_t tb_diff = addr - tc_ptr;
        intptr_t br_diff = addr - (jmp_rx + 4);
        /* The 16-bit check below does not exercise the branch range;
           it is what lets us reload the new TCG_REG_TB value with a
           single ADDI.  That small-diff case still happens quite often. */
        if (tb_diff == (int16_t)tb_diff) {
            i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
            i2 = B | (br_diff & 0x3fffffc);
            intptr_t lo = (int16_t)tb_diff;
            intptr_t hi = (int32_t)(tb_diff - lo);
            assert(tb_diff == hi + lo);
            i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
            i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
#ifdef HOST_WORDS_BIGENDIAN
        pair = (uint64_t)i1 << 32 | i2;
        pair = (uint64_t)i2 << 32 | i1;
        /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
           within qatomic_set that would fail to build a ppc32 host. */
        qatomic_set__nocheck((uint64_t *)jmp_rw, pair);
        flush_idcache_range(jmp_rx, jmp_rw, 8);
        intptr_t diff = addr - jmp_rx;
        tcg_debug_assert(in_range_b(diff));
        qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
        flush_idcache_range(jmp_rx, jmp_rw, 4);
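
/*
 * In both paths above the rewrite is a single atomic store (an 8-byte
 * insn pair on ppc64, one insn on ppc32), so a concurrently executing
 * thread never observes a half-patched goto_tb sequence.
 */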
static void tcg_out_call_int(TCGContext *s, int lk,
                             const tcg_insn_unit *target)
    /* Look through the function descriptor.  If the branch is in range
       and the TOC fits in a 32-bit immediate, use them directly;
       otherwise reload both through the descriptor in memory. */
    const void *tgt = ((const void * const *)target)[0];
    uintptr_t toc = ((const uintptr_t *)target)[1];
    intptr_t diff = tcg_pcrel_diff(s, tgt);
    if (in_range_b(diff) && toc == (uint32_t)toc) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
        tcg_out_b(s, lk, tgt);
        /* Fold the low bits of the constant into the addresses below. */
        intptr_t arg = (intptr_t)target;
        int ofs = (int16_t)arg;
        if (ofs + 8 < 0x8000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
#elif defined(_CALL_ELF) && _CALL_ELF == 2
    /* In the ELFv2 ABI, we have to set up r12 to contain the destination
       address, which the callee uses to compute its TOC address. */
    /* FIXME: when the branch is in range, we could avoid r12 load if we
       knew that the destination uses the same TOC, and what its local
       entry point offset is. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
    diff = tcg_pcrel_diff(s, target);
    if (in_range_b(diff)) {
        tcg_out_b(s, lk, target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
    tcg_out_b(s, lk, target);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
    tcg_out_call_int(s, LK, target);
static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
    [MO_BSWAP | MO_UB] = LBZX,
    [MO_BSWAP | MO_UW] = LHBRX,
    [MO_BSWAP | MO_UL] = LWBRX,
    [MO_BSWAP | MO_UQ] = LDBRX,
static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
    [MO_BSWAP | MO_UB] = STBX,
    [MO_BSWAP | MO_UW] = STHBRX,
    [MO_BSWAP | MO_UL] = STWBRX,
    [MO_BSWAP | MO_UQ] = STDBRX,
static const uint32_t qemu_exts_opc[4] = {
    EXTSB, EXTSH, EXTSW, 0
#if defined (CONFIG_SOFTMMU)
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
 *                                 int mmu_idx, uintptr_t ra)
static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB] = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEUQ] = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEUQ] = helper_be_ldq_mmu,
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
 *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB] = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEUQ] = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEUQ] = helper_be_stq_mmu,
/* We expect to use a 16-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
/* Perform the TLB load and compare. Places the result of the comparison
   in CR7, loads the addend of the TLB into R3, and returns the register
   containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
                               TCGReg addrlo, TCGReg addrhi,
                               int mem_index, bool is_read)
           ? offsetof(CPUTLBEntry, addr_read)
           : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
    /* Extract the page index, shifted into place for tlb index. */
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
    /* Load the TLB comparator. */
    if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
        uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
        tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
        tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
        if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
    /* Load the TLB addend for use on the fast path. Do this asap
       to minimize any load use delay. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
               offsetof(CPUTLBEntry, addend));
    /* Clear the non-page, non-alignment bits from the address */
    if (TCG_TARGET_REG_BITS == 32) {
        /* We don't support unaligned accesses on 32-bits.
         * Preserve the bottom bits and thus trigger a comparison
         * failure on unaligned accesses.
        if (a_bits < s_bits) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
        /* If the access is unaligned, we need to make sure we fail if we
         * cross a page boundary. The trick is to add the access size-1
         * to the address before masking the low bits. That will make the
         * address overflow to the next page if we cross a page boundary,
         * which will then force a mismatch of the TLB compare.
        if (a_bits < s_bits) {
            unsigned a_mask = (1 << a_bits) - 1;
            unsigned s_mask = (1 << s_bits) - 1;
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
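            /*
             * E.g. an 8-byte access with no alignment requirement adds
             * 7 here; the masked result below matches the TLB comparator
             * only when all 8 bytes lie within the same page.
             */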
        /* Mask the address for the requested alignment. */
        if (TARGET_LONG_BITS == 32) {
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
                        (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
            /* Zero-extend the address for use in the final address. */
            tcg_out_ext32u(s, TCG_REG_R4, addrlo);
            addrlo = TCG_REG_R4;
        } else if (a_bits == 0) {
            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
            tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_I32);
        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
        tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                TCGReg datalo_reg, TCGReg datahi_reg,
                                TCGReg addrlo_reg, TCGReg addrhi_reg,
                                tcg_insn_unit *raddr, tcg_insn_unit *lptr)
    TCGLabelQemuLdst *label = new_ldst_label(s);
    label->is_ld = is_ld;
    label->datalo_reg = datalo_reg;
    label->datahi_reg = datahi_reg;
    label->addrlo_reg = addrlo_reg;
    label->addrhi_reg = addrhi_reg;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = lptr;
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    TCGReg hi, lo, arg = TCG_REG_R3;
    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4. The only remaining case is 64-bit guest. */
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
    tcg_out32(s, MFSPR | RT(arg) | LR);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
        tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
        tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
    } else if (opc & MO_SIGN) {
        uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
        tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
        tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
    tcg_out_b(s, 0, lb->raddr);
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg hi, lo, arg = TCG_REG_R3;
    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4. The only remaining case is 64-bit guest. */
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
            tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
            tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
        if (s_bits == MO_64) {
            tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
            tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
    tcg_out32(s, MFSPR | RT(arg) | LR);
    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    tcg_out_b(s, 0, lb->raddr);
static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
                                   TCGReg addrhi, unsigned a_bits)
    unsigned a_mask = (1 << a_bits) - 1;
    TCGLabelQemuLdst *label = new_ldst_label(s);
    label->is_ld = is_ld;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    /* We are expecting a_bits to max out at 7, much lower than ANDI. */
    tcg_debug_assert(a_bits < 16);
    tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask));
    label->label_ptr[0] = s->code_ptr;
    tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
    label->raddr = tcg_splitwx_to_rx(s->code_ptr);
static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
    if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        TCGReg arg = TCG_REG_R4;
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
        if (l->addrlo_reg != arg) {
            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
        } else if (l->addrhi_reg != arg + 1) {
            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg);
            tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1);
            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0);
        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0);
    /* "Tail call" to the helper, with the return address back inline. */
    tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld
                                          : helper_unaligned_st));
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
    return tcg_out_fail_alignment(s, l);
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
    return tcg_out_fail_alignment(s, l);
#endif /* SOFTMMU */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi __attribute__((unused));
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;
    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    opc = get_memop(oi);
    s_bits = opc & MO_SIZE;
#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
    /* Record this insn for patching, and branch-and-link to the
       slow path if the TLB compare in CR7 failed. */
    label_ptr = s->code_ptr;
    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
#else /* !CONFIG_SOFTMMU */
    a_bits = get_alignment_bits(opc);
        tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
        addrlo = TCG_REG_TMP1;
    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
        if (opc & MO_BSWAP) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
        } else if (rbase != 0) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
            tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
        } else if (addrlo == datahi) {
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
        uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
        if (!have_isa_2_06 && insn == LDBRX) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
            tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
            insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
            insn = qemu_exts_opc[s_bits];
            tcg_out32(s, insn | RA(datalo) | RS(datalo));
#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi __attribute__((unused));
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;
    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    opc = get_memop(oi);
    s_bits = opc & MO_SIZE;
#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
    /* Record this insn for patching, and branch-and-link to the
       slow path if the TLB compare in CR7 failed. */
2409 label_ptr = s->code_ptr;
2410 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2413 #else /* !CONFIG_SOFTMMU */
2414 a_bits = get_alignment_bits(opc);
2416 tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
2418 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2419 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2420 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2421 addrlo = TCG_REG_TMP1;
2425 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2426 if (opc & MO_BSWAP) {
2427 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2428 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2429 tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
2430 } else if (rbase != 0) {
2431 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2432 tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
2433 tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
2435 tcg_out32(s, STW | TAI(datahi, addrlo, 0));
2436 tcg_out32(s, STW | TAI(datalo, addrlo, 4));
2439 uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2440 if (!have_isa_2_06 && insn == STDBRX) {
2441 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2442 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
2443 tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2444 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
2446 tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
2450 #ifdef CONFIG_SOFTMMU
2451 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
2452 s->code_ptr, label_ptr);
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;

    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}
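
/*
 * A sketch of the frame constructed below (offsets grow away from the
 * stack pointer; derived from the macro definitions that follow):
 *
 *   FRAME_SIZE   -> caller's frame; LR is saved there, at
 *                   FRAME_SIZE + LR_OFFSET from the new SP
 *   REG_SAVE_BOT -> callee-saved GPRs (REG_SAVE_SIZE bytes)
 *                   CPU_TEMP_BUF_SIZE bytes of TCG scratch
 *                   TCG_STATIC_CALL_ARGS_SIZE bytes for helper arguments
 *   SP           -> ABI link area (TCG_TARGET_CALL_STACK_OFFSET bytes,
 *                   including the AIX parameter save area when required)
 */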
/* Parameters for function call generation, used in tcg.c. */
#define TCG_TARGET_STACK_ALIGN       16
#define TCG_TARGET_EXTEND_ARGS       1

#ifdef _CALL_AIX
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)
#elif defined(_CALL_DARWIN)
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (2 * SZR)
#elif TCG_TARGET_REG_BITS == 64
# if defined(_CALL_ELF) && _CALL_ELF == 2
#  define LINK_AREA_SIZE               (4 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#else /* TCG_TARGET_REG_BITS == 32 */
# if defined(_CALL_SYSV)
#  define LINK_AREA_SIZE               (2 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#endif
#ifndef LR_OFFSET
# error "Unhandled abi"
#endif
#ifndef TCG_TARGET_CALL_STACK_OFFSET
# define TCG_TARGET_CALL_STACK_OFFSET  LINK_AREA_SIZE
#endif

#define CPU_TEMP_BUF_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define REG_SAVE_SIZE      ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)

#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
                     + TCG_STATIC_CALL_ARGS_SIZE    \
                     + CPU_TEMP_BUF_SIZE            \
                     + REG_SAVE_SIZE                \
                     + TCG_TARGET_STACK_ALIGN - 1)  \
                    & -TCG_TARGET_STACK_ALIGN)

#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
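
/*
 * Entry to and exit from generated code.  tcg_qemu_tb_exec(env, tb_ptr)
 * arrives with env and tb_ptr in the first two ABI argument registers;
 * env is moved to TCG_AREG0, tb_ptr to CTR (and to TCG_REG_TB when that
 * register is in use), and bcctr enters the translation block.
 */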
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

#ifdef _CALL_AIX
    const void **desc = (const void **)s->code_ptr;
    desc[0] = tcg_splitwx_to_rx(desc + 2);  /* entry point */
    desc[1] = 0;                            /* environment pointer */
    s->code_ptr = (void *)(desc + 2);       /* skip over descriptor */
#endif

    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
                  CPU_TEMP_BUF_SIZE);

    /* Prologue */
    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
    tcg_out32(s, (SZR == 8 ? STDU : STWU)
              | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);

#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
    if (USE_REG_TB) {
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
    }
    tcg_out32(s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
    tcg_out32(s, BCLR | BO_ALWAYS);
}
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
        tcg_out_b(s, 0, tcg_code_gen_epilogue);
        break;
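    /*
     * goto_tb: for a direct jump the pair emitted below (addis+addi on
     * 64-bit hosts, a single b otherwise) is rewritten when TBs are
     * linked; for the indirect case the target is reloaded from the
     * tb_jmp_target_addr table on every execution.
     */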
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* Direct jump. */
            if (TCG_TARGET_REG_BITS == 64) {
                /* Ensure the next insns are 8-byte aligned. */
                if ((uintptr_t)s->code_ptr & 7) {
                    tcg_out32(s, NOP);
                }
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
                tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
            } else {
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, B);
                s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
                break;
            }
        } else {
            /* Indirect jump. */
            tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
                       (intptr_t)(s->tb_jmp_target_addr + args[0]));
        }
        tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS);
        set_jmp_reset_offset(s, args[0]);
        if (USE_REG_TB) {
            /* For the unlinked case, need to reset TCG_REG_TB. */
            tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
                             -tcg_current_code_size(s));
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
        if (USE_REG_TB) {
            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
        }
        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
        tcg_out32(s, BCCTR | BO_ALWAYS);
        break;
    case INDEX_op_br:
        {
            TCGLabel *l = arg_label(args[0]);
            uint32_t insn = B;

            if (l->has_value) {
                insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
                                       l->u.value_ptr);
            } else {
                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
            }
            tcg_out32(s, insn);
        }
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
        break;
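
    /*
     * Three-operand arithmetic with one constant input.  Constant
     * addends go through tcg_out_mem_long (see the do_addi labels),
     * which splits a 32-bit immediate into addis+addi when it does
     * not fit the 16-bit D field.
     */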
    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_32:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_clz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i32:
        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_clz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i64:
        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari32(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I32);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I64);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_shl_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            tcg_out_sari64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;

    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_bswap16(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i64:
        tcg_out_bswap32(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0], args[1]);
        break;
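
    /*
     * Deposit maps onto rlwimi/rldimi: args[2] is rotated left by the
     * field position args[3] and inserted under a len=args[4] mask.
     * For example, pos 8 and len 16 on i32 yield MB = 32 - 8 - 16 = 8
     * and ME = 31 - 8 = 23, per the expressions below.
     */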
    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;

    case INDEX_op_extract_i32:
        tcg_out_rlw(s, RLWINM, args[0], args[1],
                    32 - args[2], 32 - args[3], 31);
        break;
    case INDEX_op_extract_i64:
        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add2_i64:
#else
    case INDEX_op_add2_i32:
#endif
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit. */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sub2_i64:
#else
    case INDEX_op_sub2_i32:
#endif
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
        }
        if (const_args[3]) {
            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

    case INDEX_op_muluh_i32:
        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i32:
        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call. */
    default:
        g_assert_not_reached();
    }
}
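
/*
 * Report vector opcode support to the middle-end: a positive return
 * means the opcode maps directly onto host instructions for this
 * element size, 0 means unsupported, and -1 means it can be
 * synthesized by tcg_expand_vec_op below.
 */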
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_not_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_nand_vec:
        return 1;
    case INDEX_op_orc_vec:
        return have_isa_2_07;
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
        return vece <= MO_32 || have_isa_2_07;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
        return vece <= MO_32;
    case INDEX_op_cmp_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
    case INDEX_op_neg_vec:
        return vece >= MO_32 && have_isa_3_00;
    case INDEX_op_mul_vec:
        switch (vece) {
        case MO_8:
        case MO_16:
            return -1;
        case MO_32:
            return have_isa_2_07 ? 1 : -1;
        case MO_64:
            return have_isa_3_10;
        }
        return 0;
    case INDEX_op_bitsel_vec:
        return have_vsx;
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    tcg_debug_assert(dst >= TCG_REG_V0);

    /* Splat from integer reg allowed via constraints for v3.00. */
    if (src < TCG_REG_V0) {
        tcg_debug_assert(have_isa_3_00);
        switch (vece) {
        case MO_64:
            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
            return true;
        case MO_32:
            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
            return true;
        default:
            /* Fail, so that we fall back on either dupm or mov+dup. */
            return false;
        }
    }

    /*
     * Recall we use (or emulate) VSX integer loads, so the integer is
     * right justified within the left (zero-index) double-word.
     */
    switch (vece) {
    case MO_8:
        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
        break;
    case MO_16:
        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
        break;
    case MO_32:
        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
            break;
        }
        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
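
/*
 * Duplicate a value from memory: load the element (or, pre-3.00, the
 * aligned quadword containing it) and broadcast it with vsplt*, using
 * the low offset bits as the element index.  On little-endian hosts
 * the index is mirrored to match the big-endian lane numbering of the
 * splat instructions.
 */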
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg out, TCGReg base, intptr_t offset)
{
    int elt;

    tcg_debug_assert(out >= TCG_REG_V0);
    switch (vece) {
    case MO_8:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
        }
        elt = extract32(offset, 0, 4);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 15;
#endif
        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_16:
        tcg_debug_assert((offset & 1) == 0);
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
        }
        elt = extract32(offset, 1, 3);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 7;
#endif
        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_32:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
        elt = extract32(offset, 2, 2);
#ifndef HOST_WORDS_BIGENDIAN
        elt ^= 3;
#endif
        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
        elt = extract32(offset, 3, 1);
#ifndef HOST_WORDS_BIGENDIAN
        elt = !elt;
#endif
        if (elt) {
            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
        } else {
            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    static const uint32_t
        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
        neg_op[4] = { 0, 0, VNEGW, VNEGD },
        eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
        ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };

    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
    uint32_t insn;

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;

    case INDEX_op_add_vec:
        insn = add_op[vece];
        break;
    case INDEX_op_sub_vec:
        insn = sub_op[vece];
        break;
    case INDEX_op_neg_vec:
        insn = neg_op[vece];
        a2 = a1;
        a1 = 0;
        break;
    case INDEX_op_mul_vec:
        insn = mul_op[vece];
        break;
    case INDEX_op_ssadd_vec:
        insn = ssadd_op[vece];
        break;
    case INDEX_op_sssub_vec:
        insn = sssub_op[vece];
        break;
    case INDEX_op_usadd_vec:
        insn = usadd_op[vece];
        break;
    case INDEX_op_ussub_vec:
        insn = ussub_op[vece];
        break;
    case INDEX_op_smin_vec:
        insn = smin_op[vece];
        break;
    case INDEX_op_umin_vec:
        insn = umin_op[vece];
        break;
    case INDEX_op_smax_vec:
        insn = smax_op[vece];
        break;
    case INDEX_op_umax_vec:
        insn = umax_op[vece];
        break;
    case INDEX_op_shlv_vec:
        insn = shlv_op[vece];
        break;
    case INDEX_op_shrv_vec:
        insn = shrv_op[vece];
        break;
    case INDEX_op_sarv_vec:
        insn = sarv_op[vece];
        break;
    case INDEX_op_and_vec:
        insn = VAND;
        break;
    case INDEX_op_or_vec:
        insn = VOR;
        break;
    case INDEX_op_xor_vec:
        insn = VXOR;
        break;
    case INDEX_op_andc_vec:
        insn = VANDC;
        break;
    case INDEX_op_not_vec:
        insn = VNOR;
        a2 = a1;
        break;
    case INDEX_op_orc_vec:
        insn = VORC;
        break;
    case INDEX_op_nand_vec:
        insn = VNAND;
        break;
    case INDEX_op_nor_vec:
        insn = VNOR;
        break;
    case INDEX_op_eqv_vec:
        insn = VEQV;
        break;

    case INDEX_op_cmp_vec:
        switch (args[3]) {
        case TCG_COND_EQ:
            insn = eq_op[vece];
            break;
        case TCG_COND_NE:
            insn = ne_op[vece];
            break;
        case TCG_COND_GT:
            insn = gts_op[vece];
            break;
        case TCG_COND_GTU:
            insn = gtu_op[vece];
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case INDEX_op_bitsel_vec:
        tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
        return;

    case INDEX_op_dup2_vec:
        assert(TCG_TARGET_REG_BITS == 32);
        /* With inputs a1 = xLxx, a2 = xHxx */
        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0  = xxHL */
        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0  = HLHL */
        return;

    case INDEX_op_ppc_mrgh_vec:
        insn = mrgh_op[vece];
        break;
    case INDEX_op_ppc_mrgl_vec:
        insn = mrgl_op[vece];
        break;
    case INDEX_op_ppc_muleu_vec:
        insn = muleu_op[vece];
        break;
    case INDEX_op_ppc_mulou_vec:
        insn = mulou_op[vece];
        break;
    case INDEX_op_ppc_pkum_vec:
        insn = pkum_op[vece];
        break;
    case INDEX_op_rotlv_vec:
        insn = rotl_op[vece];
        break;
    case INDEX_op_ppc_msum_vec:
        tcg_debug_assert(vece == MO_16);
        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }

    tcg_debug_assert(insn != 0);
    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
}
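
/*
 * Shift-by-immediate is expanded to the corresponding variable-shift
 * opcode with the count splatted into a vector constant; the hardware
 * uses only the low log2(element-size) bits of each lane's count.
 */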
static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
{
    TCGv_vec t1;

    if (vece == MO_32) {
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using negative numbers gets us the 4th bit easily.
         */
        imm = sextract32(imm, 0, 5);
    } else {
        imm &= (8 << vece) - 1;
    }

    /* Splat w/bytes for xxspltib when 2.07 allows MO_64. */
    t1 = tcg_constant_vec(type, MO_8, imm);
    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
}
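
/*
 * Altivec compares supply only EQ, signed GT and unsigned GT directly
 * (plus NE on ISA 3.00); every other condition is obtained by swapping
 * the operands, inverting the result, or both.
 */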
static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
        if (have_isa_3_00 && vece <= MO_32) {
            break;
        }
        /* fall through */
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    if (need_inv) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}
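
/*
 * There is no full element-wise multiply before ISA 2.07/3.10, so the
 * product is built from halfwidth multiplies.  For MO_8/MO_16 the even
 * and odd products are merged and packed back down; for MO_32,
 * vmsumuhm over a rotated operand sums the two 16-bit cross products,
 * which are shifted up and added to the low*low product from vmulouh.
 */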
static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2)
{
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec c0, c16;

    switch (vece) {
    case MO_8:
    case MO_16:
        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
        break;

    case MO_32:
        tcg_debug_assert(!have_isa_2_07);
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using -16 is a quick way to represent 16.
         */
        c16 = tcg_constant_vec(type, MO_8, -16);
        c0 = tcg_constant_vec(type, MO_8, 0);

        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
        tcg_gen_add_vec(MO_32, v0, t1, t2);
        break;

    default:
        g_assert_not_reached();
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t0;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);

    switch (opc) {
    case INDEX_op_shli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
        break;
    case INDEX_op_shri_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
        break;
    case INDEX_op_sari_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
        break;
    case INDEX_op_rotli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
        break;
    case INDEX_op_cmp_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;
    case INDEX_op_mul_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_mul(type, vece, v0, v1, v2);
        break;
    case INDEX_op_rotrv_vec:
        /* Rotate right is synthesized as rotate left by the negated count. */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;
    default:
        g_assert_not_reached();
    }
    va_end(va);
}
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ctpop_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ctpop_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_nand_i32:
    case INDEX_op_nor_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_sub_i32:
        return C_O1_I2(r, rI, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rT);
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rU);
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rI, rT);
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rZW);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, ri);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, ri, rZ, rZ);
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, 0, rZ);
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, ri, ri);
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, ri, ri);
    case INDEX_op_add2_i64:
    case INDEX_op_add2_i32:
        return C_O2_I4(r, r, r, r, rI, rZM);
    case INDEX_op_sub2_i64:
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rI, rZM, r, r);

    case INDEX_op_qemu_ld_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O1_I1(r, L)
                : C_O1_I2(r, L, L));

    case INDEX_op_qemu_st_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O0_I2(S, S)
                : C_O0_I3(S, S, S));

    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
                : TARGET_LONG_BITS == 32 ? C_O2_I1(L, L, L)
                : C_O2_I2(L, L, L, L));

    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(S, S)
                : TARGET_LONG_BITS == 32 ? C_O0_I3(S, S, S)
                : C_O0_I4(S, S, S, S));

    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_nand_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_ppc_mrgh_vec:
    case INDEX_op_ppc_mrgl_vec:
    case INDEX_op_ppc_muleu_vec:
    case INDEX_op_ppc_mulou_vec:
    case INDEX_op_ppc_pkum_vec:
    case INDEX_op_dup2_vec:
        return C_O1_I2(v, v, v);

    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
        return C_O1_I1(v, v);

    case INDEX_op_dup_vec:
        return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);

    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(v, r);

    case INDEX_op_st_vec:
        return C_O0_I2(v, r);

    case INDEX_op_bitsel_vec:
    case INDEX_op_ppc_msum_vec:
        return C_O1_I3(v, v, v, v);

    default:
        g_assert_not_reached();
    }
}
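
/*
 * Runtime feature probing: the base ISA level and Altivec/VSX support
 * are taken from the AT_HWCAP/AT_HWCAP2 auxv entries, where available;
 * absent bits simply leave the corresponding feature disabled.
 */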
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);

    have_isa = tcg_isa_base;
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa = tcg_isa_2_06;
    }
#ifdef PPC_FEATURE2_ARCH_2_07
    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
        have_isa = tcg_isa_2_07;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_00
    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
        have_isa = tcg_isa_3_00;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_10
    if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
        have_isa = tcg_isa_3_10;
    }
#endif

#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer explicit instruction from the kernel. */
    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
#else
    /* Fall back to knowing Power7 (2.06) has ISEL. */
    have_isel = have_isa_2_06;
#endif

    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
        have_altivec = true;
        /* We only care about the portion of VSX that overlaps Altivec. */
        if (hwcap & PPC_FEATURE_HAS_VSX) {
            have_vsx = true;
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    if (have_altivec) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
    }
}
#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),         /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member. */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};
void tcg_register_jit(const void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */