Correct typos and spelling mistakes
[valgrind.git] / VEX / priv / guest_ppc_toIR.c
blob18716dd04c4154fca704672c4cda29109ce009ca
2 /*--------------------------------------------------------------------*/
3 /*--- begin guest_ppc_toIR.c ---*/
4 /*--------------------------------------------------------------------*/
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
10 Copyright (C) 2004-2017 OpenWorks LLP
11 info@open-works.net
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, see <http://www.gnu.org/licenses/>.
26 The GNU General Public License is contained in the file COPYING.
28 Neither the names of the U.S. Department of Energy nor the
29 University of California nor the names of its contributors may be
30 used to endorse or promote products derived from this software
31 without prior written permission.
34 /* TODO 18/Nov/05:
36 Spot rld... cases which are simply left/right shifts and emit
37 Shl64/Shr64 accordingly.
39 Altivec
40 - datastream insns
41 - lvxl,stvxl: load/store with 'least recently used' hint
42 - vexptefp, vlogefp
44 LIMITATIONS:
46 Various, including:
48 - Some invalid forms of lswi and lswx are accepted when they should
49 not be.
51 - Floating Point:
52 - All exceptions disabled in FPSCR
53 - condition codes not set in FPSCR
55 - Altivec floating point:
56 - vmaddfp, vnmsubfp
57 Because we're using Java/IEEE mode (FPSCR[NJ]), rather than the
58 system default of Non-Java mode, we get some small errors
59 (lowest bit only).
60 This is because Non-Java mode brutally hacks denormalised results
61 to zero, whereas we keep maximum accuracy. However, using
62 Non-Java mode would give us more inaccuracy, as our intermediate
63 results would then be zeroed, too.
65 - AbiHints for the stack red zone are only emitted for
66 unconditional calls and returns (bl, blr). They should also be
67 emitted for conditional calls and returns, but we don't have a
68 way to express that right now. Ah well.
70 - Uses of Iop_{Add,Sub,Mul}32Fx4: the backend (host_ppc_isel.c)
71 ignores the rounding mode, and generates code that assumes
72 round-to-nearest. This means V will compute incorrect results
73 for uses of these IROps when the rounding mode (first) arg is
74 not mkU32(Irrm_NEAREST).
77 /* "Special" instructions.
79 This instruction decoder can decode four special instructions
80 which mean nothing natively (are no-ops as far as regs/mem are
81 concerned) but have meaning for supporting Valgrind. A special
82 instruction is flagged by a 16-byte preamble:
84 32-bit mode: 5400183E 5400683E 5400E83E 5400983E
85 (rlwinm 0,0,3,0,31; rlwinm 0,0,13,0,31;
86 rlwinm 0,0,29,0,31; rlwinm 0,0,19,0,31)
88 64-bit mode: 78001800 78006800 7800E802 78009802
89 (rotldi 0,0,3; rotldi 0,0,13;
90 rotldi 0,0,61; rotldi 0,0,51)
92 Following that, one of the following 3 are allowed
93 (standard interpretation in parentheses):
95 7C210B78 (or 1,1,1) %R3 = client_request ( %R4 )
96 7C421378 (or 2,2,2) %R3 = guest_NRADDR
97 7C631B78 (or 3,3,3) branch-and-link-to-noredir %R11 Big endian
98 7C631B78 (or 3,3,3) branch-and-link-to-noredir %R12 Little endian
99 7C842378 (or 4,4,4) %R3 = guest_NRADDR_GPR2
100 7CA52B78 (or 5,5,5) IR injection
102 Any other bytes following the 16-byte preamble are illegal and
103 constitute a failure in instruction decoding. This all assumes
104 that the preamble will never occur except in specific code
105 fragments designed for Valgrind to catch.
108 /* Little Endian notes */
110 * Vector operations in little Endian mode behave in non-obvious ways at times.
111 * Below is an attempt at explaining this.
113 * LE/BE vector example
114 * With a vector of unsigned ints declared as follows:
115 * vector unsigned int vec_inA =
116 { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
117 * The '0x11111111' word is word zero in both LE and BE format. But the
118 * loaded vector register will have word zero on the far left in BE mode and
119 * on the far right in LE mode. The lvx and stvx instructions work naturally
120 * for whatever endianness is in effect. For example, in LE mode, the stvx
121 * stores word zero (far right word) of the vector at the lowest memory
122 * address of the EA; in BE mode, stvx still stores word zero at the lowest
123 * memory address, but with word zero interpreted as the one at the far left
124 * of the register.
126 * The lxvd2x and stxvd2x instructions are not so well suited for LE mode.
127 * When the compiler generates an lxvd2x instruction to load the
128 * above-declared vector of unsigned integers, it loads the vector as two
129 * double words, but they are in BE word-wise format. To put the vector in
130 * the right order for LE, the compiler also generates an xxswapd after the
131 * load, which puts it in proper LE format. Similarly, the stxvd2x
132 * instruction has a BE bias, storing the vector in BE word-wise format. But
133 * the compiler also generates an xxswapd prior to the store, thus ensuring
134 * the vector is stored in memory in the correct LE order.
136 * Vector-flavored Iops, such as Iop_V128HIto64, reference the hi and lo parts
137 * of double words and words within a vector. Because of the reverse order
138 * of numbering for LE as described above, the high part refers to word 1 in
139 * LE format. When input data is saved to a guest state vector register
140 * (e.g., via Iop_64HLtoV128), it is first saved to memory and then the
141 * register is loaded via PPCInstr_AvLdSt, which does an lvx instruction.
142 * The saving of the data to memory must be done in proper LE order. For the
143 * inverse operation of extracting data from a vector register (e.g.,
144 * Iop_V128Hito64), the register is first saved (by PPCInstr_AvLdSt resulting
145 * in stvx), and then integer registers are loaded from the memory location
146 * from where the vector register was saved. Again, this must be done in
147 * proper LE order. So for these various vector Iops, we have LE-specific
148 * code in host_ppc_isel.c
150 * Another unique behavior of vectors in LE mode is with the vector scalar
151 * (VSX) operations that operate on "double word 0" of the source register,
152 * storing the result in "double word 0" of the output vector register. For
153 * these operations, "double word 0" is interpreted as "high half of the
154 * register" (i.e., the part on the left side).
158 /* Notes on handling subnormal results:
160 * The various vector floating point instructions:
161 * vmaddfp, vaddfp, vsubfp, vmaxfp, vminfp, vrefp, vexptefp,
162 * vlogefp, vcmpeqfp, vcmpgefp, vcmpgtfp, vcmpbfp, vrfin, vrfiz,
163 * vrfip, vrfim
164 * generate subnormal results that are controlled by the VSCR[NJ] bit setting.
166 * The following describes how the host and guest are set up so that the function
167 * dnorm_adj_Vector() can properly handle the results of the Iops in the guest
168 * state.
170 * At startup, on all host variants, we set VSCR[NJ].host = 0 (don't flush to
171 * zero). It stays at 0 permanently.
173 * At startup, we set VSCR[NJ].guest = (if BE then 1 else 0)
175 * When running, guest insns can set/clear/query VSCR[NJ].guest as they
176 * like.
178 * When running, any (guest) insn whose result depends on VSCR[NJ] will query
179 * VSCR[NJ].guest and the results will be truncated accordingly, by
180 * dnorm_adj_Vector(). Because VSCR[NJ].host is always 0, we will always
181 * be able to provide correct guest results for either value of
182 * VSCR[NJ].guest.
186 /* Translates PPC32/64 code to IR. */
188 /* References
190 #define PPC32
191 "PowerPC Microprocessor Family:
192 The Programming Environments Manual for 32-Bit Microprocessors"
193 02/21/2000
194 http://www-3.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600719DF2
196 #define PPC64
197 "PowerPC Microprocessor Family:
198 Programming Environments Manual for 64-Bit Microprocessors"
199 06/10/2003
200 http://www-3.ibm.com/chips/techlib/techlib.nsf/techdocs/F7E732FF811F783187256FDD004D3797
202 #define AV
203 "PowerPC Microprocessor Family:
204 AltiVec(TM) Technology Programming Environments Manual"
205 07/10/2003
206 http://www-3.ibm.com/chips/techlib/techlib.nsf/techdocs/FBFA164F824370F987256D6A006F424D
209 #include "libvex_basictypes.h"
210 #include "libvex_ir.h"
211 #include "libvex.h"
212 #include "libvex_emnote.h"
213 #include "libvex_guest_ppc32.h"
214 #include "libvex_guest_ppc64.h"
216 #include "main_util.h"
217 #include "main_globals.h"
218 #include "guest_generic_bb_to_IR.h"
219 #include "guest_ppc_defs.h"
221 /*------------------------------------------------------------*/
222 /*--- Globals ---*/
223 /*------------------------------------------------------------*/
225 /* These are set at the start of the translation of an insn, right
226 down in disInstr_PPC, so that we don't have to pass them around
227 endlessly. They are all constant during the translation of any
228 given insn. */
230 /* We need to know this to do sub-register accesses correctly. */
231 static VexEndness host_endness;
233 /* Pointer to the guest code area. */
234 static const UChar* guest_code;
236 /* The guest address corresponding to guest_code[0]. */
237 static Addr64 guest_CIA_bbstart;
239 /* The guest address for the instruction currently being
240 translated. */
241 static Addr64 guest_CIA_curr_instr;
243 /* The IRSB* into which we're generating code. */
244 static IRSB* irsb;
246 /* Is our guest binary 32 or 64bit? Set at each call to
247 disInstr_PPC below. */
248 static Bool mode64 = False;
250 // Given a pointer to a function as obtained by "& functionname" in C,
251 // produce a pointer to the actual entry point for the function. For
252 // most platforms it's the identity function. Unfortunately, on
253 // ppc64-linux it isn't (sigh)
254 static void* fnptr_to_fnentry( const VexAbiInfo* vbi, void* f )
256 if (vbi->host_ppc_calls_use_fndescrs) {
257 /* f is a pointer to a 3-word function descriptor, of which the
258 first word is the entry address. */
259 /* note, this is correct even with cross-jitting, since this is
260 purely a host issue, not a guest one. */
261 HWord* fdescr = (HWord*)f;
262 return (void*)(fdescr[0]);
263 } else {
264 /* Simple; "& f" points directly at the code for f. */
265 return f;
269 /* The OV32 and CA32 bits were added with ISA3.0 */
270 static Bool OV32_CA32_supported = False;
272 #define SIGN_BIT 0x8000000000000000ULL
273 #define SIGN_MASK 0x7fffffffffffffffULL
274 #define SIGN_BIT32 0x80000000
275 #define SIGN_MASK32 0x7fffffff
277 /* The instruction size can be either 4 byte (word instruction) or 8 bytes
278 (prefix instruction) starting with ISA 3.1 */
279 #define WORD_INST_SIZE 4
280 #define PREFIX_INST_SIZE 8
282 /*------------------------------------------------------------*/
283 /*--- Debugging output ---*/
284 /*------------------------------------------------------------*/
286 /* Pre DIP macro for prefix instruction printing. */
287 #define pDIP(flag,format, args...) \
288 if (vex_traceflags & VEX_TRACE_FE){ \
289 if (flag) {vex_printf("p"); vex_printf(format, ## args);} \
290 else {vex_printf(format, ## args); vex_printf("\n");}}
292 /* Post DIP macro to print additional args for prefix instruction printing. */
293 #define DIPp(flag,format, args...) \
294 if (vex_traceflags & VEX_TRACE_FE) { \
295 if (flag) {vex_printf(format, ## args); vex_printf("\n");}}
297 /* Post DIP macro with no additional args for prefix instruction printing. */
298 #define DIPn(flag) \
299 if (vex_traceflags & VEX_TRACE_FE) {if (flag) vex_printf("\n");}
301 #define DIP(format, args...) \
302 if (vex_traceflags & VEX_TRACE_FE) \
303 vex_printf(format, ## args)
305 #define DIS(buf, format, args...) \
306 if (vex_traceflags & VEX_TRACE_FE) \
307 vex_sprintf(buf, format, ## args)
310 /*------------------------------------------------------------*/
311 /*--- Offsets of various parts of the ppc32/64 guest state ---*/
312 /*------------------------------------------------------------*/
314 #define offsetofPPCGuestState(_x) \
315 (mode64 ? offsetof(VexGuestPPC64State, _x) : \
316 offsetof(VexGuestPPC32State, _x))
318 #define OFFB_CIA offsetofPPCGuestState(guest_CIA)
319 #define OFFB_IP_AT_SYSCALL offsetofPPCGuestState(guest_IP_AT_SYSCALL)
320 #define OFFB_SPRG3_RO offsetofPPCGuestState(guest_SPRG3_RO)
321 #define OFFB_LR offsetofPPCGuestState(guest_LR)
322 #define OFFB_CTR offsetofPPCGuestState(guest_CTR)
323 #define OFFB_XER_SO offsetofPPCGuestState(guest_XER_SO)
324 #define OFFB_XER_OV offsetofPPCGuestState(guest_XER_OV)
325 #define OFFB_XER_OV32 offsetofPPCGuestState(guest_XER_OV32)
326 #define OFFB_XER_CA offsetofPPCGuestState(guest_XER_CA)
327 #define OFFB_XER_CA32 offsetofPPCGuestState(guest_XER_CA32)
328 #define OFFB_XER_BC offsetofPPCGuestState(guest_XER_BC)
329 #define OFFB_FPROUND offsetofPPCGuestState(guest_FPROUND)
330 #define OFFB_DFPROUND offsetofPPCGuestState(guest_DFPROUND)
331 #define OFFB_C_FPCC offsetofPPCGuestState(guest_C_FPCC)
332 #define OFFB_VRSAVE offsetofPPCGuestState(guest_VRSAVE)
333 #define OFFB_VSCR offsetofPPCGuestState(guest_VSCR)
334 #define OFFB_EMNOTE offsetofPPCGuestState(guest_EMNOTE)
335 #define OFFB_CMSTART offsetofPPCGuestState(guest_CMSTART)
336 #define OFFB_CMLEN offsetofPPCGuestState(guest_CMLEN)
337 #define OFFB_NRADDR offsetofPPCGuestState(guest_NRADDR)
338 #define OFFB_NRADDR_GPR2 offsetofPPCGuestState(guest_NRADDR_GPR2)
339 #define OFFB_TFHAR offsetofPPCGuestState(guest_TFHAR)
340 #define OFFB_TEXASR offsetofPPCGuestState(guest_TEXASR)
341 #define OFFB_TEXASRU offsetofPPCGuestState(guest_TEXASRU)
342 #define OFFB_TFIAR offsetofPPCGuestState(guest_TFIAR)
343 #define OFFB_PPR offsetofPPCGuestState(guest_PPR)
344 #define OFFB_PSPB offsetofPPCGuestState(guest_PSPB)
345 #define OFFB_DSCR offsetofPPCGuestState(guest_DSCR)
346 /* Note the offset for the various ACC entries are calculated based on
347 the OFFB_ACC_0_r0 value. */
348 #define OFFB_ACC_0_r0 offsetofPPCGuestState(guest_ACC_0_r0)
349 #define OFFB_syscall_flag offsetofPPCGuestState(guest_syscall_flag)
352 /*------------------------------------------------------------*/
353 /*--- Extract instruction fields --- */
354 /*------------------------------------------------------------*/
/* Extract a field from insn, given idx (zero = lsb) and field length.
   All macro arguments are parenthesised so that compound expressions
   (e.g. "IFIELD(x, a+b, 4)") expand correctly. */
#define IFIELD( insn, idx, len ) (((insn) >> (idx)) & ((1 << (len)) - 1))
359 /* Extract primary opcode, instr[31:26] */
360 static UChar ifieldOPC( UInt instr ) {
361 return toUChar( IFIELD( instr, 26, 6 ) );
364 /* Extract 11-bit secondary opcode, instr[10:0] */
365 static UInt ifieldOPClo11 ( UInt instr) {
366 return IFIELD( instr, 0, 11 );
369 /* Extract 10-bit secondary opcode, instr[10:1] */
370 static UInt ifieldOPClo10 ( UInt instr) {
371 return IFIELD( instr, 1, 10 );
374 /* Extract 9-bit secondary opcode, instr[9:1] */
375 static UInt ifieldOPClo9 ( UInt instr) {
376 return IFIELD( instr, 1, 9 );
379 /* Extract 8-bit secondary opcode, instr[8:1] */
380 static UInt ifieldOPClo8 ( UInt instr) {
381 return IFIELD( instr, 1, 8 );
384 /* Extract 4-bit secondary opcode, instr[5:1] */
385 static UInt ifieldOPClo4 ( UInt instr) {
386 return IFIELD( instr, 0, 4 );
389 /* Extract 5-bit secondary opcode, instr[5:1] */
390 static UInt ifieldOPClo5 ( UInt instr) {
391 return IFIELD( instr, 1, 5 );
394 /* Extract 2-bit secondary opcode, instr[1:0] */
395 static UInt ifieldOPC0o2 ( UInt instr) {
396 return IFIELD( instr, 0, 2 );
399 /* Extract RD (destination register) field, instr[25:21] */
400 static UChar ifieldRegDS( UInt instr ) {
401 return toUChar( IFIELD( instr, 21, 5 ) );
404 /* Extract XTp (destination register) field, instr[25:22, 21] */
405 static UChar ifieldRegXTp ( UInt instr )
407 UChar TX = toUChar (IFIELD (instr, 21, 1));
408 UChar Tp = toUChar (IFIELD (instr, 22, 4));
409 /* XTp = 32 * TX + 2* Tp; Only even values of XTp can be encoded. */
410 return (TX << 5) | (Tp << 1);
413 /* Extract XT (destination register) field, instr[0,25:21] */
414 static UChar ifieldRegXT ( UInt instr )
416 UChar upper_bit = toUChar (IFIELD (instr, 0, 1));
417 UChar lower_bits = toUChar (IFIELD (instr, 21, 5));
418 return (upper_bit << 5) | lower_bits;
421 /* Extract XS (store source register) field, instr[0,25:21] */
422 static inline UChar ifieldRegXS ( UInt instr )
424 return ifieldRegXT ( instr );
427 /* Extract RA (1st source register) field, instr[20:16] */
428 static UChar ifieldRegA ( UInt instr ) {
429 return toUChar( IFIELD( instr, 16, 5 ) );
432 /* Extract XA (1st source register) field, instr[2,20:16] */
433 static UChar ifieldRegXA ( UInt instr )
435 UChar upper_bit = toUChar (IFIELD (instr, 2, 1));
436 UChar lower_bits = toUChar (IFIELD (instr, 16, 5));
437 return (upper_bit << 5) | lower_bits;
440 /* Extract RB (2nd source register) field, instr[15:11] */
441 static UChar ifieldRegB ( UInt instr ) {
442 return toUChar( IFIELD( instr, 11, 5 ) );
445 /* Extract XB (2nd source register) field, instr[1,15:11] */
446 static UChar ifieldRegXB ( UInt instr )
448 UChar upper_bit = toUChar (IFIELD (instr, 1, 1));
449 UChar lower_bits = toUChar (IFIELD (instr, 11, 5));
450 return (upper_bit << 5) | lower_bits;
453 /* Extract RC (3rd source register) field, instr[10:6] */
454 static UChar ifieldRegC ( UInt instr ) {
455 return toUChar( IFIELD( instr, 6, 5 ) );
458 /* Extract XC (3rd source register) field, instr[3,10:6] */
459 static UChar ifieldRegXC ( UInt instr )
461 UChar upper_bit = toUChar (IFIELD (instr, 3, 1));
462 UChar lower_bits = toUChar (IFIELD (instr, 6, 5));
463 return (upper_bit << 5) | lower_bits;
466 /* Extract bit 10, instr[10] */
467 static UChar ifieldBIT10 ( UInt instr ) {
468 return toUChar( IFIELD( instr, 10, 1 ) );
471 /* Extract 2nd lowest bit, instr[1] */
472 static UChar ifieldBIT1 ( UInt instr ) {
473 return toUChar( IFIELD( instr, 1, 1 ) );
476 /* Extract lowest bit, instr[0] */
477 static UChar ifieldBIT0 ( UInt instr ) {
478 return toUChar( instr & 0x1 );
481 /* Extract unsigned bottom half, instr[15:0] */
482 static UInt ifieldUIMM16 ( UInt instr ) {
483 return instr & 0xFFFF;
486 /* Extract unsigned bottom 26 bits, instr[25:0] */
487 static UInt ifieldUIMM26 ( UInt instr ) {
488 return instr & 0x3FFFFFF;
491 /* Extract DM field, instr[9:8] */
492 static UChar ifieldDM ( UInt instr ) {
493 return toUChar( IFIELD( instr, 8, 2 ) );
496 /* Extract SHW field, instr[9:8] */
497 static inline UChar ifieldSHW ( UInt instr )
499 return ifieldDM ( instr );
502 /* Extract AT field from theInstr 8LS:D form */
503 static UChar ifieldAT ( UInt instr ) {
504 return toUChar( IFIELD( instr, 23, 3 ) );
507 /*------------------------------------------------------------*/
508 /*--- Guest-state identifiers ---*/
509 /*------------------------------------------------------------*/
511 typedef enum {
512 PPC_GST_CIA, // Current Instruction Address
513 PPC_GST_LR, // Link Register
514 PPC_GST_CTR, // Count Register
515 PPC_GST_XER, // Overflow, carry flags, byte count
516 PPC_GST_CR, // Condition Register
517 PPC_GST_FPSCR, // Floating Point Status/Control Register
518 PPC_GST_VRSAVE, // Vector Save/Restore Register
519 PPC_GST_VSCR, // Vector Status and Control Register
520 PPC_GST_EMWARN, // Emulation warnings
521 PPC_GST_CMSTART,// For icbi: start of area to invalidate
522 PPC_GST_CMLEN, // For icbi: length of area to invalidate
523 PPC_GST_IP_AT_SYSCALL, // the CIA of the most recently executed SC insn
524 PPC_GST_SPRG3_RO, // SPRG3
525 PPC_GST_TFHAR, // Transactional Failure Handler Address Register
526 PPC_GST_TFIAR, // Transactional Failure Instruction Address Register
527 PPC_GST_TEXASR, // Transactional EXception And Summary Register
528 PPC_GST_TEXASRU, // Transactional EXception And Summary Register Upper
529 PPC_GST_PPR, // Program Priority register
530 PPC_GST_PPR32, // Upper 32-bits of Program Priority register
531 PPC_GST_PSPB, /* Problem State Priority Boost register, Note, the
532 * register is initialized to a non-zero value. Currently
533 * Valgrind is not supporting the register value to
534 * automatically decrement. Could be added later if
535 * needed.
537 PPC_GST_DSCR, // Data Stream Control Register
538 PPC_GST_ACC_0_r0, /* Accumulator register file. Eight accumulators each
539 * with four 128-bit registers.
541 PPC_GST_ACC_0_r1,
542 PPC_GST_ACC_0_r2,
543 PPC_GST_ACC_0_r3,
544 PPC_GST_ACC_1_r0,
545 PPC_GST_ACC_1_r1,
546 PPC_GST_ACC_1_r2,
547 PPC_GST_ACC_1_r3,
548 PPC_GST_ACC_2_r0,
549 PPC_GST_ACC_2_r1,
550 PPC_GST_ACC_2_r2,
551 PPC_GST_ACC_2_r3,
552 PPC_GST_ACC_3_r0,
553 PPC_GST_ACC_3_r1,
554 PPC_GST_ACC_3_r2,
555 PPC_GST_ACC_3_r3,
556 PPC_GST_ACC_4_r0,
557 PPC_GST_ACC_4_r1,
558 PPC_GST_ACC_4_r2,
559 PPC_GST_ACC_4_r3,
560 PPC_GST_ACC_5_r0,
561 PPC_GST_ACC_5_r1,
562 PPC_GST_ACC_5_r2,
563 PPC_GST_ACC_5_r3,
564 PPC_GST_ACC_6_r0,
565 PPC_GST_ACC_6_r1,
566 PPC_GST_ACC_6_r2,
567 PPC_GST_ACC_6_r3,
568 PPC_GST_ACC_7_r0,
569 PPC_GST_ACC_7_r1,
570 PPC_GST_ACC_7_r2,
571 PPC_GST_ACC_7_r3,
572 PPC_GST_MAX
573 } PPC_GST;
575 #define MASK_FPSCR_RN 0x3ULL // Binary floating point rounding mode
576 #define MASK_FPSCR_DRN 0x700000000ULL // Decimal floating point rounding mode
577 #define MASK_FPSCR_C_FPCC 0x1F000ULL // Floating-Point Condition code FPCC
579 #define MASK_VSCR_VALID 0x00010001
582 /*------------------------------------------------------------*/
583 /*--- Misc Helpers ---*/
584 /*------------------------------------------------------------*/
586 static void Get_lmd( IRTemp * lmd, IRExpr * gfield_0_4 );
588 /* Generate mask with 1's from 'begin' through 'end',
589 wrapping if begin > end.
590 begin->end works from right to left, 0=lsb
592 static UInt MASK32( UInt begin, UInt end )
594 UInt m1, m2, mask;
595 vassert(begin < 32);
596 vassert(end < 32);
597 m1 = ((UInt)(-1)) << begin;
598 m2 = ((UInt)(-1)) << end << 1;
599 mask = m1 ^ m2;
600 if (begin > end) mask = ~mask; // wrap mask
601 return mask;
604 static ULong MASK64( UInt begin, UInt end )
606 ULong m1, m2, mask;
607 vassert(begin < 64);
608 vassert(end < 64);
609 m1 = ((ULong)(-1)) << begin;
610 m2 = ((ULong)(-1)) << end << 1;
611 mask = m1 ^ m2;
612 if (begin > end) mask = ~mask; // wrap mask
613 return mask;
616 static Addr64 nextInsnAddr( void )
618 /* Note in the case of a prefix instruction, delta has already been
619 incremented by WORD_INST_SIZE to move past the prefix part of the
620 instruction. So only need to increment by WORD_INST_SIZE to get to
621 the start of the next instruction. */
622 return guest_CIA_curr_instr + WORD_INST_SIZE;
626 /*------------------------------------------------------------*/
627 /*--- Helper bits and pieces for deconstructing the ---*/
628 /*--- ppc32/64 insn stream. ---*/
629 /*------------------------------------------------------------*/
631 /* Add a statement to the list held by "irsb". */
632 static void stmt ( IRStmt* st )
634 addStmtToIRSB( irsb, st );
637 /* Generate a new temporary of the given type. */
638 static IRTemp newTemp ( IRType ty )
640 vassert(isPlausibleIRType(ty));
641 return newIRTemp( irsb->tyenv, ty );
644 /* Various simple conversions */
646 static UChar extend_s_5to8 ( UChar x )
648 return toUChar((((Int)x) << 27) >> 27);
651 static UInt extend_s_8to32( UChar x )
653 return (UInt)((((Int)x) << 24) >> 24);
656 static UInt extend_s_16to32 ( UInt x )
658 return (UInt)((((Int)x) << 16) >> 16);
661 static ULong extend_s_16to64 ( UInt x )
663 return (ULong)((((Long)x) << 48) >> 48);
666 static ULong extend_s_26to64 ( UInt x )
668 return (ULong)((((Long)x) << 38) >> 38);
671 static ULong extend_s_32to64 ( UInt x )
673 return (ULong)((((Long)x) << 32) >> 32);
676 /* Do a proper-endian load of a 32-bit word, regardless of the endianness
677 of the underlying host. */
678 static UInt getUIntPPCendianly ( const UChar* p )
680 UInt w = 0;
681 if (host_endness == VexEndnessBE) {
682 w = (w << 8) | p[0];
683 w = (w << 8) | p[1];
684 w = (w << 8) | p[2];
685 w = (w << 8) | p[3];
686 } else {
687 w = (w << 8) | p[3];
688 w = (w << 8) | p[2];
689 w = (w << 8) | p[1];
690 w = (w << 8) | p[0];
692 return w;
696 /*------------------------------------------------------------*/
697 /*--- Helpers for constructing IR. ---*/
698 /*------------------------------------------------------------*/
700 static void assign ( IRTemp dst, IRExpr* e )
702 stmt( IRStmt_WrTmp(dst, e) );
705 /* This generates a normal (non store-conditional) store. */
706 static void store ( IRExpr* addr, IRExpr* data )
708 IRType tyA = typeOfIRExpr(irsb->tyenv, addr);
709 vassert(tyA == Ity_I32 || tyA == Ity_I64);
711 if (host_endness == VexEndnessBE)
712 stmt( IRStmt_Store(Iend_BE, addr, data) );
713 else
714 stmt( IRStmt_Store(Iend_LE, addr, data) );
717 static IRExpr* unop ( IROp op, IRExpr* a )
719 return IRExpr_Unop(op, a);
722 static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
724 return IRExpr_Binop(op, a1, a2);
727 static IRExpr* triop ( IROp op, IRExpr* a1, IRExpr* a2, IRExpr* a3 )
729 return IRExpr_Triop(op, a1, a2, a3);
732 static IRExpr* qop ( IROp op, IRExpr* a1, IRExpr* a2,
733 IRExpr* a3, IRExpr* a4 )
735 return IRExpr_Qop(op, a1, a2, a3, a4);
738 static IRExpr* mkexpr ( IRTemp tmp )
740 return IRExpr_RdTmp(tmp);
743 #define mkU1(_n) IRExpr_Const(IRConst_U1(_n))
745 static IRExpr* mkU8 ( UChar i )
747 return IRExpr_Const(IRConst_U8(i));
750 static IRExpr* mkU16 ( UInt i )
752 return IRExpr_Const(IRConst_U16(i));
755 static IRExpr* mkU32 ( UInt i )
757 return IRExpr_Const(IRConst_U32(i));
760 static IRExpr* mkU64 ( ULong i )
762 return IRExpr_Const(IRConst_U64(i));
765 static IRExpr* mkV128 ( UShort i )
767 vassert(i == 0 || i == 0xffff);
768 return IRExpr_Const(IRConst_V128(i));
771 /* This generates a normal (non load-linked) load. */
772 static IRExpr* load ( IRType ty, IRExpr* addr )
774 if (host_endness == VexEndnessBE)
775 return IRExpr_Load(Iend_BE, ty, addr);
776 else
777 return IRExpr_Load(Iend_LE, ty, addr);
780 static IRStmt* stmt_load ( IRTemp result,
781 IRExpr* addr, IRExpr* storedata )
783 if (host_endness == VexEndnessBE)
784 return IRStmt_LLSC(Iend_BE, result, addr, storedata);
785 else
786 return IRStmt_LLSC(Iend_LE, result, addr, storedata);
789 static IRExpr* mkOR1 ( IRExpr* arg1, IRExpr* arg2 )
791 vassert(typeOfIRExpr(irsb->tyenv, arg1) == Ity_I1);
792 vassert(typeOfIRExpr(irsb->tyenv, arg2) == Ity_I1);
793 return unop(Iop_32to1, binop(Iop_Or32, unop(Iop_1Uto32, arg1),
794 unop(Iop_1Uto32, arg2)));
797 static IRExpr* mkAND1 ( IRExpr* arg1, IRExpr* arg2 )
799 vassert(typeOfIRExpr(irsb->tyenv, arg1) == Ity_I1);
800 vassert(typeOfIRExpr(irsb->tyenv, arg2) == Ity_I1);
801 return unop(Iop_32to1, binop(Iop_And32, unop(Iop_1Uto32, arg1),
802 unop(Iop_1Uto32, arg2)));
805 static inline IRExpr* mkXOr4_32( IRTemp t0, IRTemp t1, IRTemp t2,
806 IRTemp t3 )
808 return binop( Iop_Xor32,
809 binop( Iop_Xor32, mkexpr( t0 ), mkexpr( t1 ) ),
810 binop( Iop_Xor32, mkexpr( t2 ), mkexpr( t3 ) ) );
813 static inline IRExpr* mkOr3_V128( IRTemp t0, IRTemp t1, IRTemp t2 )
815 return binop( Iop_OrV128,
816 mkexpr( t0 ),
817 binop( Iop_OrV128, mkexpr( t1 ), mkexpr( t2 ) ) );
820 static inline IRExpr* mkOr4_V128( IRTemp t0, IRTemp t1, IRTemp t2,
821 IRTemp t3 )
823 return binop( Iop_OrV128,
824 binop( Iop_OrV128, mkexpr( t0 ), mkexpr( t1 ) ),
825 binop( Iop_OrV128, mkexpr( t2 ), mkexpr( t3 ) ) );
828 static inline IRExpr* mkOr4_V128_expr( IRExpr* t0, IRExpr* t1, IRExpr* t2,
829 IRExpr* t3 )
831 /* arguments are already expressions */
832 return binop( Iop_OrV128,
833 binop( Iop_OrV128, ( t0 ), ( t1 ) ),
834 binop( Iop_OrV128, ( t2 ), ( t3 ) ) );
837 static IRExpr* mkNOT1 ( IRExpr* arg1 )
839 vassert(typeOfIRExpr(irsb->tyenv, arg1) == Ity_I1);
840 return unop(Iop_32to1, unop(Iop_Not32, unop(Iop_1Uto32, arg1) ) );
843 /* expand V128_8Ux16 to 2x V128_16Ux8's */
844 static void expand8Ux16( IRExpr* vIn,
845 /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
847 IRTemp ones8x16 = newTemp(Ity_V128);
849 vassert(typeOfIRExpr(irsb->tyenv, vIn) == Ity_V128);
850 vassert(vEvn && *vEvn == IRTemp_INVALID);
851 vassert(vOdd && *vOdd == IRTemp_INVALID);
852 *vEvn = newTemp(Ity_V128);
853 *vOdd = newTemp(Ity_V128);
855 assign( ones8x16, unop(Iop_Dup8x16, mkU8(0x1)) );
856 assign( *vOdd, binop(Iop_MullEven8Ux16, mkexpr(ones8x16), vIn) );
857 assign( *vEvn, binop(Iop_MullEven8Ux16, mkexpr(ones8x16),
858 binop(Iop_ShrV128, vIn, mkU8(8))) );
861 /* expand V128_8Sx16 to 2x V128_16Sx8's */
862 static void expand8Sx16( IRExpr* vIn,
863 /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
865 IRTemp ones8x16 = newTemp(Ity_V128);
867 vassert(typeOfIRExpr(irsb->tyenv, vIn) == Ity_V128);
868 vassert(vEvn && *vEvn == IRTemp_INVALID);
869 vassert(vOdd && *vOdd == IRTemp_INVALID);
870 *vEvn = newTemp(Ity_V128);
871 *vOdd = newTemp(Ity_V128);
873 assign( ones8x16, unop(Iop_Dup8x16, mkU8(0x1)) );
874 assign( *vOdd, binop(Iop_MullEven8Sx16, mkexpr(ones8x16), vIn) );
875 assign( *vEvn, binop(Iop_MullEven8Sx16, mkexpr(ones8x16),
876 binop(Iop_ShrV128, vIn, mkU8(8))) );
879 /* expand V128_16Uto8 to 2x V128_32Ux4's */
880 static void expand16Ux8( IRExpr* vIn,
881 /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
883 IRTemp ones16x8 = newTemp(Ity_V128);
885 vassert(typeOfIRExpr(irsb->tyenv, vIn) == Ity_V128);
886 vassert(vEvn && *vEvn == IRTemp_INVALID);
887 vassert(vOdd && *vOdd == IRTemp_INVALID);
888 *vEvn = newTemp(Ity_V128);
889 *vOdd = newTemp(Ity_V128);
891 assign( ones16x8, unop(Iop_Dup16x8, mkU16(0x1)) );
892 assign( *vOdd, binop(Iop_MullEven16Ux8, mkexpr(ones16x8), vIn) );
893 assign( *vEvn, binop(Iop_MullEven16Ux8, mkexpr(ones16x8),
894 binop(Iop_ShrV128, vIn, mkU8(16))) );
897 /* expand V128_16Sto8 to 2x V128_32Sx4's */
898 static void expand16Sx8( IRExpr* vIn,
899 /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
901 IRTemp ones16x8 = newTemp(Ity_V128);
903 vassert(typeOfIRExpr(irsb->tyenv, vIn) == Ity_V128);
904 vassert(vEvn && *vEvn == IRTemp_INVALID);
905 vassert(vOdd && *vOdd == IRTemp_INVALID);
906 *vEvn = newTemp(Ity_V128);
907 *vOdd = newTemp(Ity_V128);
909 assign( ones16x8, unop(Iop_Dup16x8, mkU16(0x1)) );
910 assign( *vOdd, binop(Iop_MullEven16Sx8, mkexpr(ones16x8), vIn) );
911 assign( *vEvn, binop(Iop_MullEven16Sx8, mkexpr(ones16x8),
912 binop(Iop_ShrV128, vIn, mkU8(16))) );
915 /* break V128 to 4xF64's*/
916 static void breakV128to4xF64( IRExpr* t128,
917 /*OUTs*/
918 IRTemp* t3, IRTemp* t2,
919 IRTemp* t1, IRTemp* t0 )
921 IRTemp hi64 = newTemp(Ity_I64);
922 IRTemp lo64 = newTemp(Ity_I64);
924 vassert(typeOfIRExpr(irsb->tyenv, t128) == Ity_V128);
925 vassert(t0 && *t0 == IRTemp_INVALID);
926 vassert(t1 && *t1 == IRTemp_INVALID);
927 vassert(t2 && *t2 == IRTemp_INVALID);
928 vassert(t3 && *t3 == IRTemp_INVALID);
929 *t0 = newTemp(Ity_F64);
930 *t1 = newTemp(Ity_F64);
931 *t2 = newTemp(Ity_F64);
932 *t3 = newTemp(Ity_F64);
934 assign( hi64, unop(Iop_V128HIto64, t128) );
935 assign( lo64, unop(Iop_V128to64, t128) );
936 assign( *t3,
937 unop( Iop_F32toF64,
938 unop( Iop_ReinterpI32asF32,
939 unop( Iop_64HIto32, mkexpr( hi64 ) ) ) ) );
940 assign( *t2,
941 unop( Iop_F32toF64,
942 unop( Iop_ReinterpI32asF32, unop( Iop_64to32, mkexpr( hi64 ) ) ) ) );
943 assign( *t1,
944 unop( Iop_F32toF64,
945 unop( Iop_ReinterpI32asF32,
946 unop( Iop_64HIto32, mkexpr( lo64 ) ) ) ) );
947 assign( *t0,
948 unop( Iop_F32toF64,
949 unop( Iop_ReinterpI32asF32, unop( Iop_64to32, mkexpr( lo64 ) ) ) ) );
953 /* break V128 to 4xI32's, then sign-extend to I64's */
954 static void breakV128to4x64S( IRExpr* t128,
955 /*OUTs*/
956 IRTemp* t3, IRTemp* t2,
957 IRTemp* t1, IRTemp* t0 )
959 IRTemp hi64 = newTemp(Ity_I64);
960 IRTemp lo64 = newTemp(Ity_I64);
962 vassert(typeOfIRExpr(irsb->tyenv, t128) == Ity_V128);
963 vassert(t0 && *t0 == IRTemp_INVALID);
964 vassert(t1 && *t1 == IRTemp_INVALID);
965 vassert(t2 && *t2 == IRTemp_INVALID);
966 vassert(t3 && *t3 == IRTemp_INVALID);
967 *t0 = newTemp(Ity_I64);
968 *t1 = newTemp(Ity_I64);
969 *t2 = newTemp(Ity_I64);
970 *t3 = newTemp(Ity_I64);
   /* t3 gets the most significant 32-bit lane, t0 the least. */
972 assign( hi64, unop(Iop_V128HIto64, t128) );
973 assign( lo64, unop(Iop_V128to64, t128) );
974 assign( *t3, unop(Iop_32Sto64, unop(Iop_64HIto32, mkexpr(hi64))) );
975 assign( *t2, unop(Iop_32Sto64, unop(Iop_64to32, mkexpr(hi64))) );
976 assign( *t1, unop(Iop_32Sto64, unop(Iop_64HIto32, mkexpr(lo64))) );
977 assign( *t0, unop(Iop_32Sto64, unop(Iop_64to32, mkexpr(lo64))) );
980 /* break V128 to 4xI32's, then zero-extend to I64's */
981 static void breakV128to4x64U ( IRExpr* t128,
982 /*OUTs*/
983 IRTemp* t3, IRTemp* t2,
984 IRTemp* t1, IRTemp* t0 )
986 IRTemp hi64 = newTemp(Ity_I64);
987 IRTemp lo64 = newTemp(Ity_I64);
989 vassert(typeOfIRExpr(irsb->tyenv, t128) == Ity_V128);
990 vassert(t0 && *t0 == IRTemp_INVALID);
991 vassert(t1 && *t1 == IRTemp_INVALID);
992 vassert(t2 && *t2 == IRTemp_INVALID);
993 vassert(t3 && *t3 == IRTemp_INVALID);
994 *t0 = newTemp(Ity_I64);
995 *t1 = newTemp(Ity_I64);
996 *t2 = newTemp(Ity_I64);
997 *t3 = newTemp(Ity_I64);
   /* t3 gets the most significant 32-bit lane, t0 the least. */
999 assign( hi64, unop(Iop_V128HIto64, t128) );
1000 assign( lo64, unop(Iop_V128to64, t128) );
1001 assign( *t3, unop(Iop_32Uto64, unop(Iop_64HIto32, mkexpr(hi64))) );
1002 assign( *t2, unop(Iop_32Uto64, unop(Iop_64to32, mkexpr(hi64))) );
1003 assign( *t1, unop(Iop_32Uto64, unop(Iop_64HIto32, mkexpr(lo64))) );
1004 assign( *t0, unop(Iop_32Uto64, unop(Iop_64to32, mkexpr(lo64))) );
   /* Break a V128 into its four I32 lanes; t3 is the most significant
      lane, t0 the least. */
1007 static void breakV128to4x32( IRExpr* t128,
1008 /*OUTs*/
1009 IRTemp* t3, IRTemp* t2,
1010 IRTemp* t1, IRTemp* t0 )
1012 IRTemp hi64 = newTemp(Ity_I64);
1013 IRTemp lo64 = newTemp(Ity_I64);
1015 vassert(typeOfIRExpr(irsb->tyenv, t128) == Ity_V128);
1016 vassert(t0 && *t0 == IRTemp_INVALID);
1017 vassert(t1 && *t1 == IRTemp_INVALID);
1018 vassert(t2 && *t2 == IRTemp_INVALID);
1019 vassert(t3 && *t3 == IRTemp_INVALID);
1020 *t0 = newTemp(Ity_I32);
1021 *t1 = newTemp(Ity_I32);
1022 *t2 = newTemp(Ity_I32);
1023 *t3 = newTemp(Ity_I32);
1025 assign( hi64, unop(Iop_V128HIto64, t128) );
1026 assign( lo64, unop(Iop_V128to64, t128) );
1027 assign( *t3, unop(Iop_64HIto32, mkexpr(hi64)) );
1028 assign( *t2, unop(Iop_64to32, mkexpr(hi64)) );
1029 assign( *t1, unop(Iop_64HIto32, mkexpr(lo64)) );
1030 assign( *t0, unop(Iop_64to32, mkexpr(lo64)) );
   /* Reassemble four I32 temps into a V128; t3 becomes the most
      significant lane, t0 the least (inverse of breakV128to4x32). */
1033 static IRExpr* mkV128from32( IRTemp t3, IRTemp t2,
1034 IRTemp t1, IRTemp t0 )
1036 return
1037 binop( Iop_64HLtoV128,
1038 binop(Iop_32HLto64, mkexpr(t3), mkexpr(t2)),
1039 binop(Iop_32HLto64, mkexpr(t1), mkexpr(t0))
   /* Extract a field from vector vB: shift right so the field starting at
      byte 'index' lands at bit 0, then mask it down to the field width.
      The result is returned in the low 64 bits. */
1043 static IRExpr* extract_field_from_vector( IRTemp vB, IRExpr* index, UInt mask)
1045 /* vB is a vector, extract bits starting at index to size of mask */
1046 return unop( Iop_V128to64,
1047 binop( Iop_AndV128,
1048 binop( Iop_ShrV128,
1049 mkexpr( vB ),
1050 unop( Iop_64to8,
                      /* index is a byte index, hence * 8 to get bits */
1051 binop( Iop_Mul64, index,
1052 mkU64( 8 ) ) ) ),
1053 binop( Iop_64HLtoV128,
1054 mkU64( 0x0 ),
1055 mkU64( mask ) ) ) );
   /* Insert 'bits' (masked by 'mask') into vSrc at byte position 'index',
      leaving the other bytes of vSrc unchanged. */
1058 static IRExpr* insert_field_into_vector( IRTemp vSrc, IRExpr* index,
1059 IRExpr* bits, IRExpr* mask)
1061 /* vSrc is a vector v128, index is I64 between 0 and 15 bytes, bits is I64,
1062 mask is I64. Indexing is based on the least significant byte being
1063 index 0. Insert bits starting at index to size of mask */
1064 IRTemp shift = newTemp(Ity_I8);
1065 IRTemp tmp_mask = newTemp(Ity_V128);
1066 IRTemp tmp_not_mask = newTemp(Ity_V128);
1067 UInt index_mask = 0xF; //Index is only 4-bits wide
   /* shift = (index & 0xF) * 8, i.e. the bit position of the field */
1069 assign( shift, unop( Iop_64to8,
1070 binop( Iop_Mul64,
1071 binop( Iop_And64,
1072 index,
1073 mkU64( index_mask ) ),
1074 mkU64( 8 ) ) ) );
   /* Build the positioned mask and its complement ... */
1075 assign( tmp_mask, binop( Iop_ShlV128,
1076 binop( Iop_64HLtoV128,
1077 mkU64( 0x0 ),
1078 mask ),
1079 mkexpr( shift) ) );
1080 assign( tmp_not_mask, unop( Iop_NotV128, mkexpr( tmp_mask ) ) );
   /* ... then merge: (vSrc & ~mask) | ((bits << shift) & mask) */
1081 return binop( Iop_OrV128,
1082 binop( Iop_AndV128,
1083 mkexpr( vSrc ),
1084 mkexpr( tmp_not_mask ) ),
1085 binop( Iop_AndV128,
1086 binop( Iop_ShlV128,
1087 binop( Iop_64HLtoV128,
1088 mkU64( 0x0 ),
1089 bits ),
1090 mkexpr( shift) ),
1091 mkexpr( tmp_mask ) ) );
   /* Extract one byte from the 256-bit value formed by the pair (vA, vB).
      NOTE(review): byte_index is declared UInt but is used via mkexpr(),
      so it is presumably an IRTemp (itself a UInt typedef) holding an I64
      index -- confirm against callers. */
1094 static IRExpr* extractBytefromV256( IRTemp vA, IRTemp vB, UInt byte_index)
1096 UInt byte_mask = 0xFF;
1097 UInt byte_size = 8; // size in bits
1098 IRTemp shift = newTemp(Ity_I8);
1099 IRTemp select_tmp = newTemp(Ity_I64);
1100 IRTemp reg_select = newTemp(Ity_V128);
1101 IRTemp src_tmp = newTemp(Ity_V128);
1103 /* The byte numbering is right to left: byte_n-1, byte_n-2, ...., byte0.
1104 The byte-index is between 0 and 31. */
   /* shift = (byte_index mod 16) * 8 bits, the position within one V128 */
1105 assign( shift, unop( Iop_64to8,
1106 binop( Iop_Mul64,
1107 binop( Iop_And64,
1108 mkU64( 0xF ),
1109 mkexpr( byte_index ) ),
1110 mkU64( byte_size ) ) ) );
1112 /* Create mask to select byte from srcA if byte_index > 16 or
1113 from srcB. Use byte_index[4] to select srcA or srcB. */
1114 assign( select_tmp, unop( Iop_1Sto64,
1115 unop( Iop_64to1,
1116 binop( Iop_Shr64,
1117 mkexpr( byte_index ),
1118 mkU8( 4 ) ) ) ) );
1120 assign( reg_select, binop( Iop_64HLtoV128,
1121 mkexpr( select_tmp ),
1122 mkexpr( select_tmp ) ) );
   /* Shift the chosen source register right so the wanted byte is at
      bit 0, selecting between vA and vB with reg_select. */
1124 assign( src_tmp,
1125 binop( Iop_OrV128,
1126 binop( Iop_AndV128,
1127 mkexpr( reg_select ),
1128 binop( Iop_ShrV128,
1129 mkexpr( vA ),
1130 mkexpr( shift ) ) ),
1131 binop( Iop_AndV128,
1132 unop( Iop_NotV128, mkexpr( reg_select ) ),
1133 binop( Iop_ShrV128,
1134 mkexpr( vB ),
1135 mkexpr( shift ) ) ) ) );
1137 /* Mask off element */
1138 return binop( Iop_And64,
1139 unop( Iop_V128to64, mkexpr( src_tmp ) ),
1140 mkU64( byte_mask ) );
1143 /* Signed saturating narrow 64S to 32 */
1144 static IRExpr* mkQNarrow64Sto32 ( IRExpr* t64 )
1146 IRTemp hi32 = newTemp(Ity_I32);
1147 IRTemp lo32 = newTemp(Ity_I32);
1149 vassert(typeOfIRExpr(irsb->tyenv, t64) == Ity_I64);
1151 assign( hi32, unop(Iop_64HIto32, t64));
1152 assign( lo32, unop(Iop_64to32, t64));
1154 return IRExpr_ITE(
1155 /* if (hi32 == (lo32 >>s 31)) */
1156 binop(Iop_CmpEQ32, mkexpr(hi32),
1157 binop( Iop_Sar32, mkexpr(lo32), mkU8(31))),
1158 /* then: within signed-32 range: lo half good enough */
1159 mkexpr(lo32),
1160 /* else: sign dep saturate: 1->0x80000000, 0->0x7FFFFFFF */
1161 binop(Iop_Add32, mkU32(0x7FFFFFFF),
1162 binop(Iop_Shr32, mkexpr(hi32), mkU8(31))));
1165 /* Unsigned saturating narrow 64U to 32 */
1166 static IRExpr* mkQNarrow64Uto32 ( IRExpr* t64 )
1168 IRTemp hi32 = newTemp(Ity_I32);
1169 IRTemp lo32 = newTemp(Ity_I32);
1171 vassert(typeOfIRExpr(irsb->tyenv, t64) == Ity_I64);
1173 assign( hi32, unop(Iop_64HIto32, t64));
1174 assign( lo32, unop(Iop_64to32, t64));
1176 return IRExpr_ITE(
1177 /* if (top 32 bits of t64 are 0) */
1178 binop(Iop_CmpEQ32, mkexpr(hi32), mkU32(0)),
1179 /* then: within unsigned-32 range: lo half good enough */
1180 mkexpr(lo32),
1181 /* else: positive saturate -> 0xFFFFFFFF */
1182 mkU32(0xFFFFFFFF));
1185 /* Signed saturate narrow 64->32, combining to V128 */
1186 static IRExpr* mkV128from4x64S ( IRExpr* t3, IRExpr* t2,
1187 IRExpr* t1, IRExpr* t0 )
1189 vassert(typeOfIRExpr(irsb->tyenv, t3) == Ity_I64);
1190 vassert(typeOfIRExpr(irsb->tyenv, t2) == Ity_I64);
1191 vassert(typeOfIRExpr(irsb->tyenv, t1) == Ity_I64);
1192 vassert(typeOfIRExpr(irsb->tyenv, t0) == Ity_I64);
   /* t3 becomes the most significant 32-bit lane, t0 the least. */
1193 return binop(Iop_64HLtoV128,
1194 binop(Iop_32HLto64,
1195 mkQNarrow64Sto32( t3 ),
1196 mkQNarrow64Sto32( t2 )),
1197 binop(Iop_32HLto64,
1198 mkQNarrow64Sto32( t1 ),
1199 mkQNarrow64Sto32( t0 )));
1202 /* Unsigned saturate narrow 64->32, combining to V128 */
1203 static IRExpr* mkV128from4x64U ( IRExpr* t3, IRExpr* t2,
1204 IRExpr* t1, IRExpr* t0 )
1206 vassert(typeOfIRExpr(irsb->tyenv, t3) == Ity_I64);
1207 vassert(typeOfIRExpr(irsb->tyenv, t2) == Ity_I64);
1208 vassert(typeOfIRExpr(irsb->tyenv, t1) == Ity_I64);
1209 vassert(typeOfIRExpr(irsb->tyenv, t0) == Ity_I64);
   /* t3 becomes the most significant 32-bit lane, t0 the least. */
1210 return binop(Iop_64HLtoV128,
1211 binop(Iop_32HLto64,
1212 mkQNarrow64Uto32( t3 ),
1213 mkQNarrow64Uto32( t2 )),
1214 binop(Iop_32HLto64,
1215 mkQNarrow64Uto32( t1 ),
1216 mkQNarrow64Uto32( t0 )));
1219 /* Simulate irops Iop_MullOdd*, since we don't have them */
   /* Shifting the whole vector right by one lane width and then applying
      the corresponding MullEven op yields the odd-lane products. */
1220 #define MK_Iop_MullOdd8Ux16( expr_vA, expr_vB ) \
1221 binop(Iop_MullEven8Ux16, \
1222 binop(Iop_ShrV128, expr_vA, mkU8(8)), \
1223 binop(Iop_ShrV128, expr_vB, mkU8(8)))
1225 #define MK_Iop_MullOdd8Sx16( expr_vA, expr_vB ) \
1226 binop(Iop_MullEven8Sx16, \
1227 binop(Iop_ShrV128, expr_vA, mkU8(8)), \
1228 binop(Iop_ShrV128, expr_vB, mkU8(8)))
1230 #define MK_Iop_MullOdd16Ux8( expr_vA, expr_vB ) \
1231 binop(Iop_MullEven16Ux8, \
1232 binop(Iop_ShrV128, expr_vA, mkU8(16)), \
1233 binop(Iop_ShrV128, expr_vB, mkU8(16)))
1235 #define MK_Iop_MullOdd32Ux4( expr_vA, expr_vB ) \
1236 binop(Iop_MullEven32Ux4, \
1237 binop(Iop_ShrV128, expr_vA, mkU8(32)), \
1238 binop(Iop_ShrV128, expr_vB, mkU8(32)))
1240 #define MK_Iop_MullOdd16Sx8( expr_vA, expr_vB ) \
1241 binop(Iop_MullEven16Sx8, \
1242 binop(Iop_ShrV128, expr_vA, mkU8(16)), \
1243 binop(Iop_ShrV128, expr_vB, mkU8(16)))
1245 #define MK_Iop_MullOdd32Sx4( expr_vA, expr_vB ) \
1246 binop(Iop_MullEven32Sx4, \
1247 binop(Iop_ShrV128, expr_vA, mkU8(32)), \
1248 binop(Iop_ShrV128, expr_vB, mkU8(32)))
   /* Sign-extend the low 32 bits of an I64 back to a full I64. */
1251 static IRExpr* /* :: Ity_I64 */ mk64lo32Sto64 ( IRExpr* src )
1253 vassert(typeOfIRExpr(irsb->tyenv, src) == Ity_I64);
1254 return unop(Iop_32Sto64, unop(Iop_64to32, src));
   /* Zero-extend the low 32 bits of an I64 back to a full I64. */
1257 static IRExpr* /* :: Ity_I64 */ mk64lo32Uto64 ( IRExpr* src )
1259 vassert(typeOfIRExpr(irsb->tyenv, src) == Ity_I64);
1260 return unop(Iop_32Uto64, unop(Iop_64to32, src));
   /* Given an 8-bit base op, return the variant of the same op for the
      given integer type.  NOTE(review): relies on the IROp enumeration
      placing the 8/16/32/64-bit variants of each op consecutively --
      confirm against libvex_ir.h. */
1263 static IROp mkSzOp ( IRType ty, IROp op8 )
1265 Int adj;
1266 vassert(ty == Ity_I8 || ty == Ity_I16 ||
1267 ty == Ity_I32 || ty == Ity_I64);
1268 vassert(op8 == Iop_Add8 || op8 == Iop_Sub8 || op8 == Iop_Mul8 ||
1269 op8 == Iop_Or8 || op8 == Iop_And8 || op8 == Iop_Xor8 ||
1270 op8 == Iop_Shl8 || op8 == Iop_Shr8 || op8 == Iop_Sar8 ||
1271 op8 == Iop_CmpEQ8 || op8 == Iop_CmpNE8 ||
1272 op8 == Iop_Not8 );
1273 adj = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : (ty==Ity_I32 ? 2 : 3));
1274 return adj + op8;
1277 /* Make sure we get valid 32 and 64bit addresses */
1278 static Addr64 mkSzAddr ( IRType ty, Addr64 addr )
1280 vassert(ty == Ity_I32 || ty == Ity_I64);
   /* In 32-bit mode, sign-extend the low word so the address is
      canonical as a 64-bit value. */
1281 return ( ty == Ity_I64 ?
1282 (Addr64)addr :
1283 (Addr64)extend_s_32to64( toUInt(addr) ) );
1286 /* sz, ULong -> IRExpr */
   /* Build a constant expression of the guest word size. */
1287 static IRExpr* mkSzImm ( IRType ty, ULong imm64 )
1289 vassert(ty == Ity_I32 || ty == Ity_I64);
1290 return ty == Ity_I64 ? mkU64(imm64) : mkU32((UInt)imm64);
1293 /* sz, ULong -> IRConst */
   /* Build an IRConst of the guest word size. */
1294 static IRConst* mkSzConst ( IRType ty, ULong imm64 )
1296 vassert(ty == Ity_I32 || ty == Ity_I64);
1297 return ( ty == Ity_I64 ?
1298 IRConst_U64(imm64) :
1299 IRConst_U32((UInt)imm64) );
1302 /* Sign extend imm16 -> IRExpr* */
1303 static IRExpr* mkSzExtendS16 ( IRType ty, UInt imm16 )
1305 vassert(ty == Ity_I32 || ty == Ity_I64);
1306 return ( ty == Ity_I64 ?
1307 mkU64(extend_s_16to64(imm16)) :
1308 mkU32(extend_s_16to32(imm16)) );
1311 /* Sign extend imm32 -> IRExpr* */
1312 static IRExpr* mkSzExtendS32 ( IRType ty, UInt imm32 )
1314 vassert(ty == Ity_I32 || ty == Ity_I64);
1315 return ( ty == Ity_I64 ?
1316 mkU64(extend_s_32to64(imm32)) :
1317 mkU32(imm32) );
1320 /* IR narrows I32/I64 -> I8/I16/I32 */
1321 static IRExpr* mkNarrowTo8 ( IRType ty, IRExpr* src )
1323 vassert(ty == Ity_I32 || ty == Ity_I64);
1324 return ty == Ity_I64 ? unop(Iop_64to8, src) : unop(Iop_32to8, src);
   /* Narrow a guest-word-sized value to I16. */
1327 static IRExpr* mkNarrowTo16 ( IRType ty, IRExpr* src )
1329 vassert(ty == Ity_I32 || ty == Ity_I64);
1330 return ty == Ity_I64 ? unop(Iop_64to16, src) : unop(Iop_32to16, src);
   /* Narrow a guest-word-sized value to I32 (no-op in 32-bit mode). */
1333 static IRExpr* mkNarrowTo32 ( IRType ty, IRExpr* src )
1335 vassert(ty == Ity_I32 || ty == Ity_I64);
1336 return ty == Ity_I64 ? unop(Iop_64to32, src) : src;
1339 /* Signed/Unsigned IR widens I8/I16/I32 -> I32/I64 */
1340 static IRExpr* mkWidenFrom8 ( IRType ty, IRExpr* src, Bool sined )
1342 IROp op;
1343 vassert(ty == Ity_I32 || ty == Ity_I64);
1344 if (sined) op = (ty==Ity_I32) ? Iop_8Sto32 : Iop_8Sto64;
1345 else op = (ty==Ity_I32) ? Iop_8Uto32 : Iop_8Uto64;
1346 return unop(op, src);
   /* Widen an I16 to the guest word size, signed or unsigned. */
1349 static IRExpr* mkWidenFrom16 ( IRType ty, IRExpr* src, Bool sined )
1351 IROp op;
1352 vassert(ty == Ity_I32 || ty == Ity_I64);
1353 if (sined) op = (ty==Ity_I32) ? Iop_16Sto32 : Iop_16Sto64;
1354 else op = (ty==Ity_I32) ? Iop_16Uto32 : Iop_16Uto64;
1355 return unop(op, src);
   /* Widen an I32 to the guest word size (no-op in 32-bit mode). */
1358 static IRExpr* mkWidenFrom32 ( IRType ty, IRExpr* src, Bool sined )
1360 vassert(ty == Ity_I32 || ty == Ity_I64);
1361 if (ty == Ity_I32)
1362 return src;
1363 return (sined) ? unop(Iop_32Sto64, src) : unop(Iop_32Uto64, src);
   /* Return the guest-state offset of integer register GPR[archreg]. */
1367 static Int integerGuestRegOffset ( UInt archreg )
1369 vassert(archreg < 32);
1371 // jrs: probably not necessary; only matters if we reference sub-parts
1372 // of the ppc registers, but that isn't the case
1373 // later: this might affect Altivec though?
1375 switch (archreg) {
1376 case 0: return offsetofPPCGuestState(guest_GPR0);
1377 case 1: return offsetofPPCGuestState(guest_GPR1);
1378 case 2: return offsetofPPCGuestState(guest_GPR2);
1379 case 3: return offsetofPPCGuestState(guest_GPR3);
1380 case 4: return offsetofPPCGuestState(guest_GPR4);
1381 case 5: return offsetofPPCGuestState(guest_GPR5);
1382 case 6: return offsetofPPCGuestState(guest_GPR6);
1383 case 7: return offsetofPPCGuestState(guest_GPR7);
1384 case 8: return offsetofPPCGuestState(guest_GPR8);
1385 case 9: return offsetofPPCGuestState(guest_GPR9);
1386 case 10: return offsetofPPCGuestState(guest_GPR10);
1387 case 11: return offsetofPPCGuestState(guest_GPR11);
1388 case 12: return offsetofPPCGuestState(guest_GPR12);
1389 case 13: return offsetofPPCGuestState(guest_GPR13);
1390 case 14: return offsetofPPCGuestState(guest_GPR14);
1391 case 15: return offsetofPPCGuestState(guest_GPR15);
1392 case 16: return offsetofPPCGuestState(guest_GPR16);
1393 case 17: return offsetofPPCGuestState(guest_GPR17);
1394 case 18: return offsetofPPCGuestState(guest_GPR18);
1395 case 19: return offsetofPPCGuestState(guest_GPR19);
1396 case 20: return offsetofPPCGuestState(guest_GPR20);
1397 case 21: return offsetofPPCGuestState(guest_GPR21);
1398 case 22: return offsetofPPCGuestState(guest_GPR22);
1399 case 23: return offsetofPPCGuestState(guest_GPR23);
1400 case 24: return offsetofPPCGuestState(guest_GPR24);
1401 case 25: return offsetofPPCGuestState(guest_GPR25);
1402 case 26: return offsetofPPCGuestState(guest_GPR26);
1403 case 27: return offsetofPPCGuestState(guest_GPR27);
1404 case 28: return offsetofPPCGuestState(guest_GPR28);
1405 case 29: return offsetofPPCGuestState(guest_GPR29);
1406 case 30: return offsetofPPCGuestState(guest_GPR30);
1407 case 31: return offsetofPPCGuestState(guest_GPR31);
1408 default: break;
1410 vpanic("integerGuestRegOffset(ppc,be)"); /*notreached*/
   /* Read integer register GPR[archreg] at the guest word size. */
1413 static IRExpr* getIReg ( UInt archreg )
1415 IRType ty = mode64 ? Ity_I64 : Ity_I32;
1416 vassert(archreg < 32);
1417 return IRExpr_Get( integerGuestRegOffset(archreg), ty );
1420 /* Ditto, but write to a reg instead. */
1421 static void putIReg ( UInt archreg, IRExpr* e )
1423 IRType ty = mode64 ? Ity_I64 : Ity_I32;
1424 vassert(archreg < 32);
1425 vassert(typeOfIRExpr(irsb->tyenv, e) == ty );
1426 stmt( IRStmt_Put(integerGuestRegOffset(archreg), e) );
1430 /* Floating point registers are mapped to VSX registers[0..31]. */
   /* On little-endian hosts the F64 is offset 8 bytes into the 16-byte
      VSR slot; on big-endian hosts it sits at offset 0. */
1431 static Int floatGuestRegOffset ( UInt archreg )
1433 vassert(archreg < 32);
1435 if (host_endness == VexEndnessLE) {
1436 switch (archreg) {
1437 case 0: return offsetofPPCGuestState(guest_VSR0) + 8;
1438 case 1: return offsetofPPCGuestState(guest_VSR1) + 8;
1439 case 2: return offsetofPPCGuestState(guest_VSR2) + 8;
1440 case 3: return offsetofPPCGuestState(guest_VSR3) + 8;
1441 case 4: return offsetofPPCGuestState(guest_VSR4) + 8;
1442 case 5: return offsetofPPCGuestState(guest_VSR5) + 8;
1443 case 6: return offsetofPPCGuestState(guest_VSR6) + 8;
1444 case 7: return offsetofPPCGuestState(guest_VSR7) + 8;
1445 case 8: return offsetofPPCGuestState(guest_VSR8) + 8;
1446 case 9: return offsetofPPCGuestState(guest_VSR9) + 8;
1447 case 10: return offsetofPPCGuestState(guest_VSR10) + 8;
1448 case 11: return offsetofPPCGuestState(guest_VSR11) + 8;
1449 case 12: return offsetofPPCGuestState(guest_VSR12) + 8;
1450 case 13: return offsetofPPCGuestState(guest_VSR13) + 8;
1451 case 14: return offsetofPPCGuestState(guest_VSR14) + 8;
1452 case 15: return offsetofPPCGuestState(guest_VSR15) + 8;
1453 case 16: return offsetofPPCGuestState(guest_VSR16) + 8;
1454 case 17: return offsetofPPCGuestState(guest_VSR17) + 8;
1455 case 18: return offsetofPPCGuestState(guest_VSR18) + 8;
1456 case 19: return offsetofPPCGuestState(guest_VSR19) + 8;
1457 case 20: return offsetofPPCGuestState(guest_VSR20) + 8;
1458 case 21: return offsetofPPCGuestState(guest_VSR21) + 8;
1459 case 22: return offsetofPPCGuestState(guest_VSR22) + 8;
1460 case 23: return offsetofPPCGuestState(guest_VSR23) + 8;
1461 case 24: return offsetofPPCGuestState(guest_VSR24) + 8;
1462 case 25: return offsetofPPCGuestState(guest_VSR25) + 8;
1463 case 26: return offsetofPPCGuestState(guest_VSR26) + 8;
1464 case 27: return offsetofPPCGuestState(guest_VSR27) + 8;
1465 case 28: return offsetofPPCGuestState(guest_VSR28) + 8;
1466 case 29: return offsetofPPCGuestState(guest_VSR29) + 8;
1467 case 30: return offsetofPPCGuestState(guest_VSR30) + 8;
1468 case 31: return offsetofPPCGuestState(guest_VSR31) + 8;
1469 default: break;
1471 } else {
1472 switch (archreg) {
1473 case 0: return offsetofPPCGuestState(guest_VSR0);
1474 case 1: return offsetofPPCGuestState(guest_VSR1);
1475 case 2: return offsetofPPCGuestState(guest_VSR2);
1476 case 3: return offsetofPPCGuestState(guest_VSR3);
1477 case 4: return offsetofPPCGuestState(guest_VSR4);
1478 case 5: return offsetofPPCGuestState(guest_VSR5);
1479 case 6: return offsetofPPCGuestState(guest_VSR6);
1480 case 7: return offsetofPPCGuestState(guest_VSR7);
1481 case 8: return offsetofPPCGuestState(guest_VSR8);
1482 case 9: return offsetofPPCGuestState(guest_VSR9);
1483 case 10: return offsetofPPCGuestState(guest_VSR10);
1484 case 11: return offsetofPPCGuestState(guest_VSR11);
1485 case 12: return offsetofPPCGuestState(guest_VSR12);
1486 case 13: return offsetofPPCGuestState(guest_VSR13);
1487 case 14: return offsetofPPCGuestState(guest_VSR14);
1488 case 15: return offsetofPPCGuestState(guest_VSR15);
1489 case 16: return offsetofPPCGuestState(guest_VSR16);
1490 case 17: return offsetofPPCGuestState(guest_VSR17);
1491 case 18: return offsetofPPCGuestState(guest_VSR18);
1492 case 19: return offsetofPPCGuestState(guest_VSR19);
1493 case 20: return offsetofPPCGuestState(guest_VSR20);
1494 case 21: return offsetofPPCGuestState(guest_VSR21);
1495 case 22: return offsetofPPCGuestState(guest_VSR22);
1496 case 23: return offsetofPPCGuestState(guest_VSR23);
1497 case 24: return offsetofPPCGuestState(guest_VSR24);
1498 case 25: return offsetofPPCGuestState(guest_VSR25);
1499 case 26: return offsetofPPCGuestState(guest_VSR26);
1500 case 27: return offsetofPPCGuestState(guest_VSR27);
1501 case 28: return offsetofPPCGuestState(guest_VSR28);
1502 case 29: return offsetofPPCGuestState(guest_VSR29);
1503 case 30: return offsetofPPCGuestState(guest_VSR30);
1504 case 31: return offsetofPPCGuestState(guest_VSR31);
1505 default: break;
1508 vpanic("floatGuestRegOffset(ppc)"); /*notreached*/
   /* Read floating point register FPR[archreg] as an F64. */
1511 static IRExpr* getFReg ( UInt archreg )
1513 vassert(archreg < 32);
1514 return IRExpr_Get( floatGuestRegOffset(archreg), Ity_F64 );
1517 /* Ditto, but write to a reg instead. */
1518 static void putFReg ( UInt archreg, IRExpr* e )
1520 vassert(archreg < 32);
1521 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64);
1522 stmt( IRStmt_Put(floatGuestRegOffset(archreg), e) );
1525 /* get Decimal float value. Note, they share floating point register file. */
1526 static IRExpr* getDReg(UInt archreg) {
1527 IRExpr *e;
1528 vassert( archreg < 32 );
1529 e = IRExpr_Get( floatGuestRegOffset( archreg ), Ity_D64 );
1530 return e;
   /* Read a 32-bit decimal float from the shared FP register file. */
1532 static IRExpr* getDReg32(UInt archreg) {
1533 IRExpr *e;
1534 vassert( archreg < 32 );
1535 e = IRExpr_Get( floatGuestRegOffset( archreg ), Ity_D32 );
1536 return e;
1539 /* Read a floating point register pair and combine their contents into a
1540 128-bit value */
1541 static IRExpr *getDReg_pair(UInt archreg) {
1542 IRExpr *high = getDReg( archreg );
1543 IRExpr *low = getDReg( archreg + 1 );
1545 return binop( Iop_D64HLtoD128, high, low );
1548 /* Ditto, but write to a reg instead. */
1549 static void putDReg32(UInt archreg, IRExpr* e) {
1550 vassert( archreg < 32 );
1551 vassert( typeOfIRExpr(irsb->tyenv, e) == Ity_D32 );
1552 stmt( IRStmt_Put( floatGuestRegOffset( archreg ), e ) );
   /* Write a 64-bit decimal float to the shared FP register file. */
1555 static void putDReg(UInt archreg, IRExpr* e) {
1556 vassert( archreg < 32 );
1557 vassert( typeOfIRExpr(irsb->tyenv, e) == Ity_D64 );
1558 stmt( IRStmt_Put( floatGuestRegOffset( archreg ), e ) );
1561 /* Write a 128-bit floating point value into a register pair. */
1562 static void putDReg_pair(UInt archreg, IRExpr *e) {
1563 IRTemp low = newTemp( Ity_D64 );
1564 IRTemp high = newTemp( Ity_D64 );
1566 vassert( archreg < 32 );
1567 vassert( typeOfIRExpr(irsb->tyenv, e) == Ity_D128 );
1569 assign( low, unop( Iop_D128LOtoD64, e ) );
1570 assign( high, unop( Iop_D128HItoD64, e ) );
   /* High half goes to archreg, low half to archreg+1 (inverse of
      getDReg_pair). */
1572 stmt( IRStmt_Put( floatGuestRegOffset( archreg ), mkexpr( high ) ) );
1573 stmt( IRStmt_Put( floatGuestRegOffset( archreg + 1 ), mkexpr( low ) ) );
   /* Return the guest-state offset of VSX register VSR[archreg], 0..63. */
1576 static Int vsxGuestRegOffset ( UInt archreg )
1578 vassert(archreg < 64);
1579 switch (archreg) {
1580 case 0: return offsetofPPCGuestState(guest_VSR0);
1581 case 1: return offsetofPPCGuestState(guest_VSR1);
1582 case 2: return offsetofPPCGuestState(guest_VSR2);
1583 case 3: return offsetofPPCGuestState(guest_VSR3);
1584 case 4: return offsetofPPCGuestState(guest_VSR4);
1585 case 5: return offsetofPPCGuestState(guest_VSR5);
1586 case 6: return offsetofPPCGuestState(guest_VSR6);
1587 case 7: return offsetofPPCGuestState(guest_VSR7);
1588 case 8: return offsetofPPCGuestState(guest_VSR8);
1589 case 9: return offsetofPPCGuestState(guest_VSR9);
1590 case 10: return offsetofPPCGuestState(guest_VSR10);
1591 case 11: return offsetofPPCGuestState(guest_VSR11);
1592 case 12: return offsetofPPCGuestState(guest_VSR12);
1593 case 13: return offsetofPPCGuestState(guest_VSR13);
1594 case 14: return offsetofPPCGuestState(guest_VSR14);
1595 case 15: return offsetofPPCGuestState(guest_VSR15);
1596 case 16: return offsetofPPCGuestState(guest_VSR16);
1597 case 17: return offsetofPPCGuestState(guest_VSR17);
1598 case 18: return offsetofPPCGuestState(guest_VSR18);
1599 case 19: return offsetofPPCGuestState(guest_VSR19);
1600 case 20: return offsetofPPCGuestState(guest_VSR20);
1601 case 21: return offsetofPPCGuestState(guest_VSR21);
1602 case 22: return offsetofPPCGuestState(guest_VSR22);
1603 case 23: return offsetofPPCGuestState(guest_VSR23);
1604 case 24: return offsetofPPCGuestState(guest_VSR24);
1605 case 25: return offsetofPPCGuestState(guest_VSR25);
1606 case 26: return offsetofPPCGuestState(guest_VSR26);
1607 case 27: return offsetofPPCGuestState(guest_VSR27);
1608 case 28: return offsetofPPCGuestState(guest_VSR28);
1609 case 29: return offsetofPPCGuestState(guest_VSR29);
1610 case 30: return offsetofPPCGuestState(guest_VSR30);
1611 case 31: return offsetofPPCGuestState(guest_VSR31);
1612 case 32: return offsetofPPCGuestState(guest_VSR32);
1613 case 33: return offsetofPPCGuestState(guest_VSR33);
1614 case 34: return offsetofPPCGuestState(guest_VSR34);
1615 case 35: return offsetofPPCGuestState(guest_VSR35);
1616 case 36: return offsetofPPCGuestState(guest_VSR36);
1617 case 37: return offsetofPPCGuestState(guest_VSR37);
1618 case 38: return offsetofPPCGuestState(guest_VSR38);
1619 case 39: return offsetofPPCGuestState(guest_VSR39);
1620 case 40: return offsetofPPCGuestState(guest_VSR40);
1621 case 41: return offsetofPPCGuestState(guest_VSR41);
1622 case 42: return offsetofPPCGuestState(guest_VSR42);
1623 case 43: return offsetofPPCGuestState(guest_VSR43);
1624 case 44: return offsetofPPCGuestState(guest_VSR44);
1625 case 45: return offsetofPPCGuestState(guest_VSR45);
1626 case 46: return offsetofPPCGuestState(guest_VSR46);
1627 case 47: return offsetofPPCGuestState(guest_VSR47);
1628 case 48: return offsetofPPCGuestState(guest_VSR48);
1629 case 49: return offsetofPPCGuestState(guest_VSR49);
1630 case 50: return offsetofPPCGuestState(guest_VSR50);
1631 case 51: return offsetofPPCGuestState(guest_VSR51);
1632 case 52: return offsetofPPCGuestState(guest_VSR52);
1633 case 53: return offsetofPPCGuestState(guest_VSR53);
1634 case 54: return offsetofPPCGuestState(guest_VSR54);
1635 case 55: return offsetofPPCGuestState(guest_VSR55);
1636 case 56: return offsetofPPCGuestState(guest_VSR56);
1637 case 57: return offsetofPPCGuestState(guest_VSR57);
1638 case 58: return offsetofPPCGuestState(guest_VSR58);
1639 case 59: return offsetofPPCGuestState(guest_VSR59);
1640 case 60: return offsetofPPCGuestState(guest_VSR60);
1641 case 61: return offsetofPPCGuestState(guest_VSR61);
1642 case 62: return offsetofPPCGuestState(guest_VSR62);
1643 case 63: return offsetofPPCGuestState(guest_VSR63);
1644 default: break;
1646 vpanic("vsxGuestRegOffset(ppc)"); /*notreached*/
1649 /* Vector registers are mapped to VSX registers[32..63]. */
   /* Return the guest-state offset of Altivec register VR[archreg],
      i.e. VSR[archreg + 32]. */
1650 static Int vectorGuestRegOffset ( UInt archreg )
1652 vassert(archreg < 32);
1654 switch (archreg) {
1655 case 0: return offsetofPPCGuestState(guest_VSR32);
1656 case 1: return offsetofPPCGuestState(guest_VSR33);
1657 case 2: return offsetofPPCGuestState(guest_VSR34);
1658 case 3: return offsetofPPCGuestState(guest_VSR35);
1659 case 4: return offsetofPPCGuestState(guest_VSR36);
1660 case 5: return offsetofPPCGuestState(guest_VSR37);
1661 case 6: return offsetofPPCGuestState(guest_VSR38);
1662 case 7: return offsetofPPCGuestState(guest_VSR39);
1663 case 8: return offsetofPPCGuestState(guest_VSR40);
1664 case 9: return offsetofPPCGuestState(guest_VSR41);
1665 case 10: return offsetofPPCGuestState(guest_VSR42);
1666 case 11: return offsetofPPCGuestState(guest_VSR43);
1667 case 12: return offsetofPPCGuestState(guest_VSR44);
1668 case 13: return offsetofPPCGuestState(guest_VSR45);
1669 case 14: return offsetofPPCGuestState(guest_VSR46);
1670 case 15: return offsetofPPCGuestState(guest_VSR47);
1671 case 16: return offsetofPPCGuestState(guest_VSR48);
1672 case 17: return offsetofPPCGuestState(guest_VSR49);
1673 case 18: return offsetofPPCGuestState(guest_VSR50);
1674 case 19: return offsetofPPCGuestState(guest_VSR51);
1675 case 20: return offsetofPPCGuestState(guest_VSR52);
1676 case 21: return offsetofPPCGuestState(guest_VSR53);
1677 case 22: return offsetofPPCGuestState(guest_VSR54);
1678 case 23: return offsetofPPCGuestState(guest_VSR55);
1679 case 24: return offsetofPPCGuestState(guest_VSR56);
1680 case 25: return offsetofPPCGuestState(guest_VSR57);
1681 case 26: return offsetofPPCGuestState(guest_VSR58);
1682 case 27: return offsetofPPCGuestState(guest_VSR59);
1683 case 28: return offsetofPPCGuestState(guest_VSR60);
1684 case 29: return offsetofPPCGuestState(guest_VSR61);
1685 case 30: return offsetofPPCGuestState(guest_VSR62);
1686 case 31: return offsetofPPCGuestState(guest_VSR63);
1687 default: break;
   /* Fix spelling: the panic message previously said
      "vextorGuestRegOffset", which does not match this function's name
      and would mislead anyone grepping for the source of the panic. */
1689 vpanic("vectorGuestRegOffset(ppc)"); /*notreached*/
   /* Read Altivec register VR[archreg] as a V128. */
1692 static IRExpr* getVReg ( UInt archreg )
1694 vassert(archreg < 32);
1695 return IRExpr_Get( vectorGuestRegOffset(archreg), Ity_V128 );
1698 /* Get contents of 128-bit reg guest register */
   /* NOTE(review): the vassert allows archreg < 64, but
      vectorGuestRegOffset itself asserts archreg < 32 -- confirm the
      intended range against callers. */
1699 static IRExpr* getF128Reg ( UInt archreg )
1701 vassert(archreg < 64);
1702 return IRExpr_Get( vectorGuestRegOffset(archreg), Ity_F128 );
1705 /* Ditto, but write to a reg instead. */
   /* NOTE(review): as with getF128Reg, the vassert range (< 64) is wider
      than vectorGuestRegOffset's own check (< 32) -- confirm. */
1706 static void putF128Reg ( UInt archreg, IRExpr* e )
1708 vassert(archreg < 64);
1709 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F128);
1710 stmt( IRStmt_Put(vectorGuestRegOffset(archreg), e) );
1713 /* Ditto, but write to a reg instead. */
1714 static void putVReg ( UInt archreg, IRExpr* e )
1716 vassert(archreg < 32);
1717 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_V128);
1718 stmt( IRStmt_Put(vectorGuestRegOffset(archreg), e) );
1721 /* Get contents of VSX guest register */
1722 static IRExpr* getVSReg ( UInt archreg )
1724 vassert(archreg < 64);
1725 return IRExpr_Get( vsxGuestRegOffset(archreg), Ity_V128 );
1728 /* Ditto, but write to a VSX reg instead. */
1729 static void putVSReg ( UInt archreg, IRExpr* e )
1731 vassert(archreg < 64);
1732 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_V128);
1733 stmt( IRStmt_Put(vsxGuestRegOffset(archreg), e) );
   /* Guest-state offset of the '321' (upper three bits) part of CR
      field cr. */
1737 static Int guestCR321offset ( UInt cr )
1739 switch (cr) {
1740 case 0: return offsetofPPCGuestState(guest_CR0_321 );
1741 case 1: return offsetofPPCGuestState(guest_CR1_321 );
1742 case 2: return offsetofPPCGuestState(guest_CR2_321 );
1743 case 3: return offsetofPPCGuestState(guest_CR3_321 );
1744 case 4: return offsetofPPCGuestState(guest_CR4_321 );
1745 case 5: return offsetofPPCGuestState(guest_CR5_321 );
1746 case 6: return offsetofPPCGuestState(guest_CR6_321 );
1747 case 7: return offsetofPPCGuestState(guest_CR7_321 );
1748 default: vpanic("guestCR321offset(ppc)");
1752 static Int guestCR0offset ( UInt cr )
1754 switch (cr) {
1755 case 0: return offsetofPPCGuestState(guest_CR0_0 );
1756 case 1: return offsetofPPCGuestState(guest_CR1_0 );
1757 case 2: return offsetofPPCGuestState(guest_CR2_0 );
1758 case 3: return offsetofPPCGuestState(guest_CR3_0 );
1759 case 4: return offsetofPPCGuestState(guest_CR4_0 );
1760 case 5: return offsetofPPCGuestState(guest_CR5_0 );
1761 case 6: return offsetofPPCGuestState(guest_CR6_0 );
1762 case 7: return offsetofPPCGuestState(guest_CR7_0 );
1763 default: vpanic("guestCR3offset(ppc)");
   /* Chunk sizes for gen_POPCOUNT.  The enumerator values double as the
      log2 of the chunk width in bits: BYTE=3 (8 bits) ... DWORD=6
      (64 bits); the three placeholders pad the values 0..2. */
1767 typedef enum {
1768 _placeholder0,
1769 _placeholder1,
1770 _placeholder2,
1771 BYTE,
1772 HWORD,
1773 WORD,
1774 DWORD
1775 } _popcount_data_type;
1777 /*-----------------------------------------------------------*/
1778 /*--- IR popcount helpers ---*/
1779 /*-----------------------------------------------------------*/
1780 /* Generate an IR sequence to do a popcount operation on the supplied
1781 IRTemp, and return a new IRTemp holding the result. 'ty' may be
1782 Ity_I32 or Ity_I64 only. */
1783 static IRTemp gen_POPCOUNT ( IRType ty, IRTemp src,
1784 _popcount_data_type data_type )
1786 /* Do count across 2^data_type bits,
1787 byte: data_type = 3
1788 half word: data_type = 4
1789 word: data_type = 5
1790 double word: data_type = 6 (not supported for 32-bit type)
1792 Int shift[6];
1793 _popcount_data_type idx, i;
1794 IRTemp mask[6];
1795 IRTemp old = IRTemp_INVALID;
1796 IRTemp nyu = IRTemp_INVALID;
1798 vassert(ty == Ity_I64 || ty == Ity_I32);
1800 // Use a single IROp in cases where we can.
1802 if (ty == Ity_I64 && data_type == DWORD) {
1803 IRTemp res = newTemp(Ity_I64);
1804 assign(res, unop(Iop_PopCount64, mkexpr(src)));
1805 return res;
1808 if (ty == Ity_I32 && data_type == WORD) {
1809 IRTemp res = newTemp(Ity_I32);
1810 assign(res, unop(Iop_PopCount32, mkexpr(src)));
1811 return res;
1814 // For the rest, we have to do it the slow way.
   // Classic parallel bit-count: repeatedly add adjacent groups of
   // 2^i bits, doubling the group width each round; data_type bounds
   // the number of rounds, giving per-chunk counts of that width.
1816 if (ty == Ity_I32) {
1818 for (idx = 0; idx < WORD; idx++) {
1819 mask[idx] = newTemp(ty);
1820 shift[idx] = 1 << idx;
1822 assign(mask[0], mkU32(0x55555555));
1823 assign(mask[1], mkU32(0x33333333));
1824 assign(mask[2], mkU32(0x0F0F0F0F));
1825 assign(mask[3], mkU32(0x00FF00FF));
1826 assign(mask[4], mkU32(0x0000FFFF));
1827 old = src;
1828 for (i = 0; i < data_type; i++) {
1829 nyu = newTemp(ty);
1830 assign(nyu,
1831 binop(Iop_Add32,
1832 binop(Iop_And32,
1833 mkexpr(old),
1834 mkexpr(mask[i])),
1835 binop(Iop_And32,
1836 binop(Iop_Shr32, mkexpr(old), mkU8(shift[i])),
1837 mkexpr(mask[i]))));
1838 old = nyu;
1840 return nyu;
1843 // else, ty == Ity_I64
1844 vassert(mode64);
1846 for (i = 0; i < DWORD; i++) {
1847 mask[i] = newTemp( Ity_I64 );
1848 shift[i] = 1 << i;
1850 assign( mask[0], mkU64( 0x5555555555555555ULL ) );
1851 assign( mask[1], mkU64( 0x3333333333333333ULL ) );
1852 assign( mask[2], mkU64( 0x0F0F0F0F0F0F0F0FULL ) );
1853 assign( mask[3], mkU64( 0x00FF00FF00FF00FFULL ) );
1854 assign( mask[4], mkU64( 0x0000FFFF0000FFFFULL ) );
1855 assign( mask[5], mkU64( 0x00000000FFFFFFFFULL ) );
1856 old = src;
1857 for (i = 0; i < data_type; i++) {
1858 nyu = newTemp( Ity_I64 );
1859 assign( nyu,
1860 binop( Iop_Add64,
1861 binop( Iop_And64, mkexpr( old ), mkexpr( mask[i] ) ),
1862 binop( Iop_And64,
1863 binop( Iop_Shr64, mkexpr( old ), mkU8( shift[i] ) ),
1864 mkexpr( mask[i] ) ) ) );
1865 old = nyu;
1867 return nyu;
1870 /* Special purpose population count function for
1871 * vpopcntd in 32-bit mode.
1873 static IRTemp gen_vpopcntd_mode32 ( IRTemp src1, IRTemp src2 )
1875 IRTemp retval = newTemp(Ity_I64);
1877 vassert(!mode64);
1879 assign(retval,
1880 unop(Iop_32Uto64,
1881 binop(Iop_Add32,
1882 unop(Iop_PopCount32, mkexpr(src1)),
1883 unop(Iop_PopCount32, mkexpr(src2)))));
1884 return retval;
1888 // ROTL(src32/64, rot_amt5/6)
1889 static IRExpr* /* :: Ity_I32/64 */ ROTL ( IRExpr* src,
1890 IRExpr* rot_amt )
1892 IRExpr *mask, *rot;
1893 vassert(typeOfIRExpr(irsb->tyenv,rot_amt) == Ity_I8);
1895 if (typeOfIRExpr(irsb->tyenv,src) == Ity_I64) {
1896 // rot = (src << rot_amt) | (src >> (64-rot_amt))
1897 mask = binop(Iop_And8, rot_amt, mkU8(63));
1898 rot = binop(Iop_Or64,
1899 binop(Iop_Shl64, src, mask),
1900 binop(Iop_Shr64, src, binop(Iop_Sub8, mkU8(64), mask)));
1901 } else {
1902 // rot = (src << rot_amt) | (src >> (32-rot_amt))
1903 mask = binop(Iop_And8, rot_amt, mkU8(31));
1904 rot = binop(Iop_Or32,
1905 binop(Iop_Shl32, src, mask),
1906 binop(Iop_Shr32, src, binop(Iop_Sub8, mkU8(32), mask)));
1908 /* Note: the ITE not merely an optimisation; it's needed
1909 because otherwise the Shr is a shift by the word size when
1910 mask denotes zero. For rotates by immediates, a lot of
1911 this junk gets folded out. */
1912 return IRExpr_ITE( binop(Iop_CmpNE8, mask, mkU8(0)),
1913 /* non-zero rotate */ rot,
1914 /* zero rotate */ src);
1917 /* Standard effective address calc: (rA + rB) */
1918 static IRExpr* ea_rA_idxd ( UInt rA, UInt rB )
1920 IRType ty = mode64 ? Ity_I64 : Ity_I32;
1921 vassert(rA < 32);
1922 vassert(rB < 32);
1923 return binop(mkSzOp(ty, Iop_Add8), getIReg(rA), getIReg(rB));
1926 /* Standard effective address calc: (rA + simm) */
1927 static IRExpr* ea_rA_simm ( UInt rA, UInt simm16 )
1929 IRType ty = mode64 ? Ity_I64 : Ity_I32;
1930 vassert(rA < 32);
1931 return binop(mkSzOp(ty, Iop_Add8), getIReg(rA),
1932 mkSzExtendS16(ty, simm16));
1935 /* Standard effective address calc: (rA|0) */
1936 static IRExpr* ea_rAor0 ( UInt rA )
1938 IRType ty = mode64 ? Ity_I64 : Ity_I32;
1939 vassert(rA < 32);
1940 if (rA == 0) {
1941 return mkSzImm(ty, 0);
1942 } else {
1943 return getIReg(rA);
1947 /* Standard effective address calc: (rA|0) + rB */
1948 static IRExpr* ea_rAor0_idxd ( UInt rA, UInt rB )
1950 vassert(rA < 32);
1951 vassert(rB < 32);
1952 return (rA == 0) ? getIReg(rB) : ea_rA_idxd( rA, rB );
1955 /* Standard effective address calc: (rA|0) + simm16 */
1956 static IRExpr* ea_rAor0_simm ( UInt rA, UInt simm16 )
1958 IRType ty = mode64 ? Ity_I64 : Ity_I32;
1959 vassert(rA < 32);
1960 if (rA == 0) {
1961 return mkSzExtendS16(ty, simm16);
1962 } else {
1963 return ea_rA_simm( rA, simm16 );
1968 /* Align effective address */
1969 static IRExpr* addr_align( IRExpr* addr, UChar align )
1971 IRType ty = mode64 ? Ity_I64 : Ity_I32;
1972 ULong mask;
1973 switch (align) {
1974 case 1: return addr; // byte aligned
1975 case 2: mask = ~0ULL << 1; break; // half-word aligned
1976 case 4: mask = ~0ULL << 2; break; // word aligned
1977 case 16: mask = ~0ULL << 4; break; // quad-word aligned
1978 default:
1979 vex_printf("addr_align: align = %u\n", align);
1980 vpanic("addr_align(ppc)");
1983 vassert(typeOfIRExpr(irsb->tyenv,addr) == ty);
1984 return binop( mkSzOp(ty, Iop_And8), addr, mkSzImm(ty, mask) );
1988 /* Exit the trace if ADDR (intended to be a guest memory address) is
1989 not ALIGN-aligned, generating a request for a SIGBUS followed by a
1990 restart of the current insn. */
1991 static void gen_SIGBUS_if_misaligned ( IRTemp addr, UChar align )
1993 vassert(align == 2 || align == 4 || align == 8 || align == 16);
1994 if (mode64) {
1995 vassert(typeOfIRTemp(irsb->tyenv, addr) == Ity_I64);
1996 stmt(
1997 IRStmt_Exit(
1998 binop(Iop_CmpNE64,
1999 binop(Iop_And64, mkexpr(addr), mkU64(align-1)),
2000 mkU64(0)),
2001 Ijk_SigBUS,
2002 IRConst_U64( guest_CIA_curr_instr ), OFFB_CIA
2005 } else {
2006 vassert(typeOfIRTemp(irsb->tyenv, addr) == Ity_I32);
2007 stmt(
2008 IRStmt_Exit(
2009 binop(Iop_CmpNE32,
2010 binop(Iop_And32, mkexpr(addr), mkU32(align-1)),
2011 mkU32(0)),
2012 Ijk_SigBUS,
2013 IRConst_U32( guest_CIA_curr_instr ), OFFB_CIA
2020 /* Generate AbiHints which mark points at which the ELF or PowerOpen
2021 ABIs say that the stack red zone (viz, -N(r1) .. -1(r1), for some
2022 N) becomes undefined. That is at function calls and returns. ELF
2023 ppc32 doesn't have this "feature" (how fortunate for it). nia is
2024 the address of the next instruction to be executed.
2026 static void make_redzone_AbiHint ( const VexAbiInfo* vbi,
2027 IRTemp nia, const HChar* who )
2029 Int szB = vbi->guest_stack_redzone_size;
2030 if (0) vex_printf("AbiHint: %s\n", who);
2031 vassert(szB >= 0);
2032 if (szB > 0) {
2033 if (mode64) {
2034 vassert(typeOfIRTemp(irsb->tyenv, nia) == Ity_I64);
2035 stmt( IRStmt_AbiHint(
2036 binop(Iop_Sub64, getIReg(1), mkU64(szB)),
2037 szB,
2038 mkexpr(nia)
2040 } else {
2041 vassert(typeOfIRTemp(irsb->tyenv, nia) == Ity_I32);
2042 stmt( IRStmt_AbiHint(
2043 binop(Iop_Sub32, getIReg(1), mkU32(szB)),
2044 szB,
2045 mkexpr(nia)
2052 /*------------------------------------------------------------*/
2053 /*--- Helpers for condition codes. ---*/
2054 /*------------------------------------------------------------*/
2056 /* Condition register layout.
2058 In the hardware, CR is laid out like this. The leftmost end is the
2059 most significant bit in the register; however the IBM documentation
2060 numbers the bits backwards for some reason.
2062 CR0 CR1 .......... CR6 CR7
2063 0 .. 3 ....................... 28 .. 31 (IBM bit numbering)
2064 31 28 3 0 (normal bit numbering)
2066 Each CR field is 4 bits: [<,>,==,SO]
2068 Hence in IBM's notation, BI=0 is CR7[SO], BI=1 is CR7[==], etc.
2070 Indexing from BI to guest state:
2072 let n = BI / 4
2073 off = BI % 4
2074 this references CR n:
2076 off==0 -> guest_CRn_321 >> 3
2077 off==1 -> guest_CRn_321 >> 2
2078 off==2 -> guest_CRn_321 >> 1
2079 off==3 -> guest_CRn_SO
2081 Bear in mind the only significant bit in guest_CRn_SO is bit 0
2082 (normal notation) and in guest_CRn_321 the significant bits are
2083 3, 2 and 1 (normal notation).
2086 static void putCR321 ( UInt cr, IRExpr* e )
2088 vassert(cr < 8);
2089 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
2090 stmt( IRStmt_Put(guestCR321offset(cr), e) );
2093 static void putCR0 ( UInt cr, IRExpr* e )
2095 vassert(cr < 8);
2096 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
2097 stmt( IRStmt_Put(guestCR0offset(cr), e) );
2100 static void putC ( IRExpr* e )
2102 /* The assumption is that the value of the Floating-Point Result
2103 * Class Descriptor bit (C) is passed in the lower four bits of a
2104 * 32 bit value.
2106 * Note, the C and FPCC bits which are fields in the FPSCR
2107 * register are stored in their own memory location of
2108 * memory. The FPCC bits are in the lower 4 bits. The C bit needs
2109 * to be shifted to bit 4 in the memory location that holds C and FPCC.
2110 * Note not all of the FPSCR register bits are supported. We are
2111 * only writing C bit.
2113 IRExpr* tmp;
2115 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
2117 /* Get the FPCC bit field */
2118 tmp = binop( Iop_And32,
2119 mkU32( 0xF ),
2120 unop( Iop_8Uto32, IRExpr_Get( OFFB_C_FPCC, Ity_I8 ) ) );
2122 stmt( IRStmt_Put( OFFB_C_FPCC,
2123 unop( Iop_32to8,
2124 binop( Iop_Or32, tmp,
2125 binop( Iop_Shl32,
2126 binop( Iop_And32, mkU32( 0x1 ), e ),
2127 mkU8( 4 ) ) ) ) ) );
2130 static IRExpr* /* :: Ity_I8 */ getCR0 ( UInt cr )
2132 vassert(cr < 8);
2133 return IRExpr_Get(guestCR0offset(cr), Ity_I8);
2136 static IRExpr* /* :: Ity_I8 */ getCR321 ( UInt cr )
2138 vassert(cr < 8);
2139 return IRExpr_Get(guestCR321offset(cr), Ity_I8);
2142 /* Fetch the specified CR bit (as per IBM/hardware notation) and
2143 return it at the bottom of an I32; the top 31 bits are guaranteed
2144 to be zero. */
2145 static IRExpr* /* :: Ity_I32 */ getCRbit ( UInt bi )
2147 UInt n = bi / 4;
2148 UInt off = bi % 4;
2149 vassert(bi < 32);
2150 if (off == 3) {
2151 /* Fetch the SO bit for this CR field */
2152 /* Note: And32 is redundant paranoia iff guest state only has 0
2153 or 1 in that slot. */
2154 return binop(Iop_And32, unop(Iop_8Uto32, getCR0(n)), mkU32(1));
2155 } else {
2156 /* Fetch the <, > or == bit for this CR field */
2157 return binop( Iop_And32,
2158 binop( Iop_Shr32,
2159 unop(Iop_8Uto32, getCR321(n)),
2160 mkU8(toUChar(3-off)) ),
2161 mkU32(1) );
2165 /* Dually, write the least significant bit of BIT to the specified CR
2166 bit. Indexing as per getCRbit. */
2167 static void putCRbit ( UInt bi, IRExpr* bit )
2169 UInt n, off;
2170 IRExpr* safe;
2171 vassert(typeOfIRExpr(irsb->tyenv,bit) == Ity_I32);
2172 safe = binop(Iop_And32, bit, mkU32(1));
2173 n = bi / 4;
2174 off = bi % 4;
2175 vassert(bi < 32);
2176 if (off == 3) {
2177 /* This is the SO bit for this CR field */
2178 putCR0(n, unop(Iop_32to8, safe));
2179 } else {
2180 off = 3 - off;
2181 vassert(off == 1 || off == 2 || off == 3);
2182 putCR321(
2184 unop( Iop_32to8,
2185 binop( Iop_Or32,
2186 /* old value with field masked out */
2187 binop(Iop_And32, unop(Iop_8Uto32, getCR321(n)),
2188 mkU32(~(1 << off))),
2189 /* new value in the right place */
2190 binop(Iop_Shl32, safe, mkU8(toUChar(off)))
2197 /* Fetch the specified CR bit (as per IBM/hardware notation) and
2198 return it somewhere in an I32; it does not matter where, but
2199 whichever bit it is, all other bits are guaranteed to be zero. In
2200 other words, the I32-typed expression will be zero if the bit is
2201 zero and nonzero if the bit is 1. Write into *where the index
2202 of where the bit will be. */
2204 static
2205 IRExpr* /* :: Ity_I32 */ getCRbit_anywhere ( UInt bi, Int* where )
2207 UInt n = bi / 4;
2208 UInt off = bi % 4;
2209 vassert(bi < 32);
2210 if (off == 3) {
2211 /* Fetch the SO bit for this CR field */
2212 /* Note: And32 is redundant paranoia iff guest state only has 0
2213 or 1 in that slot. */
2214 *where = 0;
2215 return binop(Iop_And32, unop(Iop_8Uto32, getCR0(n)), mkU32(1));
2216 } else {
2217 /* Fetch the <, > or == bit for this CR field */
2218 *where = 3-off;
2219 return binop( Iop_And32,
2220 unop(Iop_8Uto32, getCR321(n)),
2221 mkU32(1 << (3-off)) );
2225 /* Set the CR0 flags following an arithmetic operation.
2226 (Condition Register CR0 Field Definition, PPC32 p60)
2228 static IRExpr* getXER_SO ( void );
2229 static void set_CR0 ( IRExpr* result )
2231 vassert(typeOfIRExpr(irsb->tyenv,result) == Ity_I32 ||
2232 typeOfIRExpr(irsb->tyenv,result) == Ity_I64);
2233 if (mode64) {
2234 putCR321( 0, unop(Iop_64to8,
2235 binop(Iop_CmpORD64S, result, mkU64(0))) );
2236 } else {
2237 putCR321( 0, unop(Iop_32to8,
2238 binop(Iop_CmpORD32S, result, mkU32(0))) );
2240 putCR0( 0, getXER_SO() );
2244 /* Set the CR6 flags following an AltiVec compare operation.
2245 * NOTE: This also works for VSX single-precision compares.
2246 * */
2247 static void set_AV_CR6 ( IRExpr* result, Bool test_all_ones )
2249 /* CR6[0:3] = {all_ones, 0, all_zeros, 0}
2250 32 bit: all_zeros = (v[0] || v[1] || v[2] || v[3]) == 0x0000'0000
2251 all_ones = ~(v[0] && v[1] && v[2] && v[3]) == 0x0000'0000
2252 where v[] denotes 32-bit lanes
2254 64 bit: all_zeros = (v[0] || v[1]) == 0x0000'0000'0000'0000
2255 all_ones = ~(v[0] && v[1]) == 0x0000'0000'0000'0000
2256 where v[] denotes 64-bit lanes
2258 The 32- and 64-bit versions compute the same thing, but the 64-bit one
2259 tries to be a bit more efficient.
2261 vassert(typeOfIRExpr(irsb->tyenv,result) == Ity_V128);
2263 IRTemp overlappedOred = newTemp(Ity_V128);
2264 IRTemp overlappedAnded = newTemp(Ity_V128);
2266 if (mode64) {
2267 IRTemp v0 = newTemp(Ity_V128);
2268 IRTemp v1 = newTemp(Ity_V128);
2269 assign( v0, result );
2270 assign( v1, binop(Iop_ShrV128, result, mkU8(64)) );
2271 assign(overlappedOred,
2272 binop(Iop_OrV128, mkexpr(v0), mkexpr(v1)));
2273 assign(overlappedAnded,
2274 binop(Iop_AndV128, mkexpr(v0), mkexpr(v1)));
2275 } else {
2276 IRTemp v0 = newTemp(Ity_V128);
2277 IRTemp v1 = newTemp(Ity_V128);
2278 IRTemp v2 = newTemp(Ity_V128);
2279 IRTemp v3 = newTemp(Ity_V128);
2280 assign( v0, result );
2281 assign( v1, binop(Iop_ShrV128, result, mkU8(32)) );
2282 assign( v2, binop(Iop_ShrV128, result, mkU8(64)) );
2283 assign( v3, binop(Iop_ShrV128, result, mkU8(96)) );
2284 assign(overlappedOred,
2285 binop(Iop_OrV128,
2286 binop(Iop_OrV128, mkexpr(v0), mkexpr(v1)),
2287 binop(Iop_OrV128, mkexpr(v2), mkexpr(v3))));
2288 assign(overlappedAnded,
2289 binop(Iop_AndV128,
2290 binop(Iop_AndV128, mkexpr(v0), mkexpr(v1)),
2291 binop(Iop_AndV128, mkexpr(v2), mkexpr(v3))));
2294 IRTemp rOnes = newTemp(Ity_I8);
2295 IRTemp rZeroes = newTemp(Ity_I8);
2297 if (mode64) {
2298 assign(rZeroes,
2299 unop(Iop_1Uto8,
2300 binop(Iop_CmpEQ64,
2301 mkU64(0),
2302 unop(Iop_V128to64, mkexpr(overlappedOred)))));
2303 assign(rOnes,
2304 unop(Iop_1Uto8,
2305 binop(Iop_CmpEQ64,
2306 mkU64(0),
2307 unop(Iop_Not64,
2308 unop(Iop_V128to64, mkexpr(overlappedAnded))))));
2309 } else {
2310 assign(rZeroes,
2311 unop(Iop_1Uto8,
2312 binop(Iop_CmpEQ32,
2313 mkU32(0),
2314 unop(Iop_V128to32, mkexpr(overlappedOred)))));
2315 assign(rOnes,
2316 unop(Iop_1Uto8,
2317 binop(Iop_CmpEQ32,
2318 mkU32(0),
2319 unop(Iop_Not32,
2320 unop(Iop_V128to32, mkexpr(overlappedAnded))))));
2323 // rOnes might not be used below. But iropt will remove it, so there's no
2324 // inefficiency as a result.
2326 if (test_all_ones) {
2327 putCR321( 6, binop(Iop_Or8,
2328 binop(Iop_Shl8, mkexpr(rOnes), mkU8(3)),
2329 binop(Iop_Shl8, mkexpr(rZeroes), mkU8(1))) );
2330 } else {
2331 putCR321( 6, binop(Iop_Shl8, mkexpr(rZeroes), mkU8(1)) );
2333 putCR0( 6, mkU8(0) );
2337 static IRExpr * create_DCM ( IRType size, IRTemp NaN, IRTemp inf, IRTemp zero,
2338 IRTemp dnorm, IRTemp pos)
2340 /* This is a general function for creating the DCM for a 32-bit or
2341 64-bit expression based on the passes size.
2343 IRTemp neg;
2344 IROp opAND, opOR, opSHL, opXto1, op1UtoX;
2346 vassert( ( size == Ity_I32 ) || ( size == Ity_I64 ) );
2348 if ( size == Ity_I32 ) {
2349 opSHL = Iop_Shl32;
2350 opAND = Iop_And32;
2351 opOR = Iop_Or32;
2352 opXto1 = Iop_32to1;
2353 op1UtoX = Iop_1Uto32;
2354 neg = newTemp( Ity_I32 );
2356 } else {
2357 opSHL = Iop_Shl64;
2358 opAND = Iop_And64;
2359 opOR = Iop_Or64;
2360 opXto1 = Iop_64to1;
2361 op1UtoX = Iop_1Uto64;
2362 neg = newTemp( Ity_I64 );
2365 assign( neg, unop( op1UtoX, mkNOT1( unop( opXto1,
2366 mkexpr ( pos ) ) ) ) );
2368 return binop( opOR,
2369 binop( opSHL, mkexpr( NaN ), mkU8( 6 ) ),
2370 binop( opOR,
2371 binop( opOR,
2372 binop( opOR,
2373 binop( opSHL,
2374 binop( opAND,
2375 mkexpr( pos ),
2376 mkexpr( inf ) ),
2377 mkU8( 5 ) ),
2378 binop( opSHL,
2379 binop( opAND,
2380 mkexpr( neg ),
2381 mkexpr( inf ) ),
2382 mkU8( 4 ) ) ),
2383 binop( opOR,
2384 binop( opSHL,
2385 binop( opAND,
2386 mkexpr( pos ),
2387 mkexpr( zero ) ),
2388 mkU8( 3 ) ),
2389 binop( opSHL,
2390 binop( opAND,
2391 mkexpr( neg ),
2392 mkexpr( zero ) ),
2393 mkU8( 2 ) ) ) ),
2394 binop( opOR,
2395 binop( opSHL,
2396 binop( opAND,
2397 mkexpr( pos ),
2398 mkexpr( dnorm ) ),
2399 mkU8( 1 ) ),
2400 binop( opAND,
2401 mkexpr( neg ),
2402 mkexpr( dnorm ) ) ) ) );
2405 /*------------------------------------------------------------*/
2406 /*--- Helpers for XER flags. ---*/
2407 /*------------------------------------------------------------*/
2409 static void putXER_SO ( IRExpr* e )
2411 IRExpr* so;
2412 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
2413 so = binop(Iop_And8, e, mkU8(1));
2414 stmt( IRStmt_Put( OFFB_XER_SO, so ) );
2417 static void putXER_OV ( IRExpr* e )
2419 /* Interface to write XER[OV] */
2420 IRExpr* ov;
2421 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
2422 ov = binop(Iop_And8, e, mkU8(1));
2423 stmt( IRStmt_Put( OFFB_XER_OV, ov ) );
2426 static void putXER_OV32 ( IRExpr* e )
2428 /*Interface to write XER[OV32] */
2429 IRExpr* ov;
2430 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
2431 ov = binop(Iop_And8, e, mkU8(1));
2433 /* The OV32 bit was added to XER in ISA 3.0. Do not write unless we
2434 * ISA 3.0 or beyond is supported. */
2435 if( OV32_CA32_supported )
2436 stmt( IRStmt_Put( OFFB_XER_OV32, ov ) );
2439 static void putXER_CA ( IRExpr* e )
2441 /* Interface to write XER[CA] */
2442 IRExpr* ca;
2443 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
2444 ca = binop(Iop_And8, e, mkU8(1));
2445 stmt( IRStmt_Put( OFFB_XER_CA, ca ) );
2448 static void putXER_CA32 ( IRExpr* e )
2450 /* Interface to write XER[CA32] */
2451 IRExpr* ca;
2452 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
2453 ca = binop(Iop_And8, e, mkU8(1));
2455 /* The CA32 bit was added to XER in ISA 3.0. Do not write unless we
2456 * ISA 3.0 or beyond is supported. */
2457 if( OV32_CA32_supported )
2458 stmt( IRStmt_Put( OFFB_XER_CA32, ca ) );
2461 static void putXER_BC ( IRExpr* e )
2463 IRExpr* bc;
2464 vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
2465 bc = binop(Iop_And8, e, mkU8(0x7F));
2466 stmt( IRStmt_Put( OFFB_XER_BC, bc ) );
2469 static IRExpr* /* :: Ity_I8 */ getXER_SO ( void )
2471 return IRExpr_Get( OFFB_XER_SO, Ity_I8 );
2474 static IRExpr* /* :: Ity_I32 */ getXER_SO_32 ( void )
2476 return binop( Iop_And32, unop(Iop_8Uto32, getXER_SO()), mkU32(1) );
2479 static IRExpr* /* :: Ity_I8 */ getXER_OV ( void )
2481 return IRExpr_Get( OFFB_XER_OV, Ity_I8 );
2484 static IRExpr* /* :: Ity_I8 */ getXER_OV32 ( void )
2486 return IRExpr_Get( OFFB_XER_OV32, Ity_I8 );
2489 static IRExpr* /* :: Ity_I32 */ getXER_OV_32 ( void )
2491 /* get XER[OV], 32-bit interface */
2492 return binop( Iop_And32, unop(Iop_8Uto32, getXER_OV()), mkU32(1) );
2495 static IRExpr* /* :: Ity_I32 */ getXER_OV32_32 ( void )
2497 /* get XER[OV32], 32-bit interface */
2498 return binop( Iop_And32, unop(Iop_8Uto32, getXER_OV32()), mkU32(1) );
2501 static IRExpr* /* :: Ity_I32 */ getXER_CA_32 ( void )
2503 /* get XER[CA], 32-bit interface */
2504 IRExpr* ca = IRExpr_Get( OFFB_XER_CA, Ity_I8 );
2505 return binop( Iop_And32, unop(Iop_8Uto32, ca ), mkU32(1) );
2508 static IRExpr* /* :: Ity_I32 */ getXER_CA32_32 ( void )
2510 /* get XER[CA32], 32-bit interface */
2511 IRExpr* ca = IRExpr_Get( OFFB_XER_CA32, Ity_I8 );
2512 return binop( Iop_And32, unop(Iop_8Uto32, ca ), mkU32(1) );
2515 static IRExpr* /* :: Ity_I8 */ getXER_BC ( void )
2517 return IRExpr_Get( OFFB_XER_BC, Ity_I8 );
2520 static IRExpr* /* :: Ity_I32 */ getXER_BC_32 ( void )
2522 IRExpr* bc = IRExpr_Get( OFFB_XER_BC, Ity_I8 );
2523 return binop( Iop_And32, unop(Iop_8Uto32, bc), mkU32(0x7F) );
2527 /* RES is the result of doing OP on ARGL and ARGR. Set %XER.OV and
2528 %XER.SO accordingly. */
2530 static IRExpr* calculate_XER_OV_32( UInt op, IRExpr* res,
2531 IRExpr* argL, IRExpr* argR )
2533 IRTemp t64;
2534 IRExpr* xer_ov;
2536 # define INT32_MIN 0x80000000
2538 # define XOR2(_aa,_bb) \
2539 binop(Iop_Xor32,(_aa),(_bb))
2541 # define XOR3(_cc,_dd,_ee) \
2542 binop(Iop_Xor32,binop(Iop_Xor32,(_cc),(_dd)),(_ee))
2544 # define AND3(_ff,_gg,_hh) \
2545 binop(Iop_And32,binop(Iop_And32,(_ff),(_gg)),(_hh))
2547 #define NOT(_jj) \
2548 unop(Iop_Not32, (_jj))
2550 switch (op) {
2551 case /* 0 */ PPCG_FLAG_OP_ADD:
2552 case /* 1 */ PPCG_FLAG_OP_ADDE:
2553 /* (argL^argR^-1) & (argL^res) & (1<<31) ?1:0 */
2554 // i.e. ((both_same_sign) & (sign_changed) & (sign_mask))
2555 xer_ov
2556 = AND3( XOR3(argL,argR,mkU32(-1)),
2557 XOR2(argL,res),
2558 mkU32(INT32_MIN) );
2559 /* xer_ov can only be 0 or 1<<31 */
2560 xer_ov
2561 = binop(Iop_Shr32, xer_ov, mkU8(31) );
2562 break;
2564 case /* 2 */ PPCG_FLAG_OP_DIVW:
2565 /* (argL == INT32_MIN && argR == -1) || argR == 0 */
2566 xer_ov
2567 = mkOR1(
2568 mkAND1(
2569 binop(Iop_CmpEQ32, argL, mkU32(INT32_MIN)),
2570 binop(Iop_CmpEQ32, argR, mkU32(-1))
2572 binop(Iop_CmpEQ32, argR, mkU32(0) )
2574 xer_ov
2575 = unop(Iop_1Uto32, xer_ov);
2576 break;
2578 case /* 3 */ PPCG_FLAG_OP_DIVWU:
2579 /* argR == 0 */
2580 xer_ov
2581 = unop(Iop_1Uto32, binop(Iop_CmpEQ32, argR, mkU32(0)));
2582 break;
2584 case /* 4 */ PPCG_FLAG_OP_MULLW:
2585 /* OV true if result can't be represented in 32 bits
2586 i.e sHi != sign extension of sLo */
2587 t64 = newTemp(Ity_I64);
2588 assign( t64, binop(Iop_MullS32, argL, argR) );
2589 xer_ov
2590 = binop( Iop_CmpNE32,
2591 unop(Iop_64HIto32, mkexpr(t64)),
2592 binop( Iop_Sar32,
2593 unop(Iop_64to32, mkexpr(t64)),
2594 mkU8(31))
2596 xer_ov
2597 = unop(Iop_1Uto32, xer_ov);
2598 break;
2600 case /* 5 */ PPCG_FLAG_OP_NEG:
2601 /* argL == INT32_MIN */
2602 xer_ov
2603 = unop( Iop_1Uto32,
2604 binop(Iop_CmpEQ32, argL, mkU32(INT32_MIN)) );
2605 break;
2607 case /* 6 */ PPCG_FLAG_OP_SUBF:
2608 case /* 7 */ PPCG_FLAG_OP_SUBFC:
2609 case /* 8 */ PPCG_FLAG_OP_SUBFE:
2610 /* ((~argL)^argR^-1) & ((~argL)^res) & (1<<31) ?1:0; */
2611 xer_ov
2612 = AND3( XOR3(NOT(argL),argR,mkU32(-1)),
2613 XOR2(NOT(argL),res),
2614 mkU32(INT32_MIN) );
2615 /* xer_ov can only be 0 or 1<<31 */
2616 xer_ov
2617 = binop(Iop_Shr32, xer_ov, mkU8(31) );
2618 break;
2620 case PPCG_FLAG_OP_DIVWEU:
2621 xer_ov
2622 = binop( Iop_Or32,
2623 unop( Iop_1Uto32, binop( Iop_CmpEQ32, argR, mkU32( 0 ) ) ),
2624 unop( Iop_1Uto32, binop( Iop_CmpLT32U, argR, argL ) ) );
2625 break;
2627 case PPCG_FLAG_OP_DIVWE:
2629 /* If argR == 0 of if the result cannot fit in the 32-bit destination register,
2630 * then OV <- 1. If dest reg is 0 AND both dividend and divisor are non-zero,
2631 * an overflow is implied.
2633 xer_ov = binop( Iop_Or32,
2634 unop( Iop_1Uto32, binop( Iop_CmpEQ32, argR, mkU32( 0 ) ) ),
2635 unop( Iop_1Uto32, mkAND1( binop( Iop_CmpEQ32, res, mkU32( 0 ) ),
2636 mkAND1( binop( Iop_CmpNE32, argL, mkU32( 0 ) ),
2637 binop( Iop_CmpNE32, argR, mkU32( 0 ) ) ) ) ) );
2638 break;
2642 default:
2643 vex_printf("calculate_XER_OV_32: op = %u\n", op);
2644 vpanic("calculate_XER_OV_32(ppc)");
2647 return xer_ov;
2649 # undef INT32_MIN
2650 # undef AND3
2651 # undef XOR3
2652 # undef XOR2
2653 # undef NOT
2656 static void set_XER_OV_OV32_32( UInt op, IRExpr* res,
2657 IRExpr* argL, IRExpr* argR )
2659 IRExpr* xer_ov;
2661 vassert(op < PPCG_FLAG_OP_NUMBER);
2662 vassert(typeOfIRExpr(irsb->tyenv,res) == Ity_I32);
2663 vassert(typeOfIRExpr(irsb->tyenv,argL) == Ity_I32);
2664 vassert(typeOfIRExpr(irsb->tyenv,argR) == Ity_I32);
2666 xer_ov = calculate_XER_OV_32( op, res, argL, argR );
2668 /* xer_ov MUST denote either 0 or 1, no other value allowed */
2669 putXER_OV( unop(Iop_32to8, xer_ov) );
2670 putXER_OV32( unop(Iop_32to8, xer_ov) );
2673 static IRExpr* calculate_XER_OV_64( UInt op, IRExpr* res,
2674 IRExpr* argL, IRExpr* argR )
2676 IRExpr* xer_ov;
2678 # define INT64_MIN 0x8000000000000000ULL
2680 # define XOR2(_aa,_bb) \
2681 binop(Iop_Xor64,(_aa),(_bb))
2683 # define XOR3(_cc,_dd,_ee) \
2684 binop(Iop_Xor64,binop(Iop_Xor64,(_cc),(_dd)),(_ee))
2686 # define AND3(_ff,_gg,_hh) \
2687 binop(Iop_And64,binop(Iop_And64,(_ff),(_gg)),(_hh))
2689 #define NOT(_jj) \
2690 unop(Iop_Not64, (_jj))
2692 switch (op) {
2693 case /* 0 */ PPCG_FLAG_OP_ADD:
2694 case /* 1 */ PPCG_FLAG_OP_ADDE:
2695 /* (argL^argR^-1) & (argL^res) & (1<<63) ? 1:0 */
2696 // i.e. ((both_same_sign) & (sign_changed) & (sign_mask))
2697 xer_ov
2698 = AND3( XOR3(argL,argR,mkU64(-1)),
2699 XOR2(argL,res),
2700 mkU64(INT64_MIN) );
2701 /* xer_ov can only be 0 or 1<<63 */
2702 xer_ov
2703 = unop(Iop_64to1, binop(Iop_Shr64, xer_ov, mkU8(63)));
2704 break;
2706 case /* 2 */ PPCG_FLAG_OP_DIVW:
2707 /* (argL == INT64_MIN && argR == -1) || argR == 0 */
2708 xer_ov
2709 = mkOR1(
2710 mkAND1(
2711 binop(Iop_CmpEQ64, argL, mkU64(INT64_MIN)),
2712 binop(Iop_CmpEQ64, argR, mkU64(-1))
2714 binop(Iop_CmpEQ64, argR, mkU64(0) )
2716 break;
2718 case /* 3 */ PPCG_FLAG_OP_DIVWU:
2719 /* argR == 0 */
2720 xer_ov
2721 = binop(Iop_CmpEQ64, argR, mkU64(0));
2722 break;
2724 case /* 4 */ PPCG_FLAG_OP_MULLW: {
2725 /* OV true if result can't be represented in 64 bits
2726 i.e sHi != sign extension of sLo */
2727 xer_ov
2728 = binop( Iop_CmpNE32,
2729 unop(Iop_64HIto32, res),
2730 binop( Iop_Sar32,
2731 unop(Iop_64to32, res),
2732 mkU8(31))
2734 break;
2737 case /* 5 */ PPCG_FLAG_OP_NEG:
2738 /* argL == INT64_MIN */
2739 xer_ov
2740 = binop(Iop_CmpEQ64, argL, mkU64(INT64_MIN));
2741 break;
2743 case /* 6 */ PPCG_FLAG_OP_SUBF:
2744 case /* 7 */ PPCG_FLAG_OP_SUBFC:
2745 case /* 8 */ PPCG_FLAG_OP_SUBFE:
2746 /* ((~argL)^argR^-1) & ((~argL)^res) & (1<<63) ?1:0; */
2747 xer_ov
2748 = AND3( XOR3(NOT(argL),argR,mkU64(-1)),
2749 XOR2(NOT(argL),res),
2750 mkU64(INT64_MIN) );
2751 /* xer_ov can only be 0 or 1<<63 */
2752 xer_ov
2753 = unop(Iop_64to1, binop(Iop_Shr64, xer_ov, mkU8(63)));
2754 break;
2756 case /* 14 */ PPCG_FLAG_OP_DIVDE:
2758 /* If argR == 0, we must set the OV bit. But there's another condition
2759 * where we can get overflow set for divde . . . when the
2760 * result cannot fit in the 64-bit destination register. If dest reg is 0 AND
2761 * both dividend and divisor are non-zero, it implies an overflow.
2763 xer_ov
2764 = mkOR1( binop( Iop_CmpEQ64, argR, mkU64( 0 ) ),
2765 mkAND1( binop( Iop_CmpEQ64, res, mkU64( 0 ) ),
2766 mkAND1( binop( Iop_CmpNE64, argL, mkU64( 0 ) ),
2767 binop( Iop_CmpNE64, argR, mkU64( 0 ) ) ) ) );
2768 break;
2770 case /* 17 */ PPCG_FLAG_OP_DIVDEU:
2771 /* If argR == 0 or if argL >= argR, set OV. */
2772 xer_ov = mkOR1( binop( Iop_CmpEQ64, argR, mkU64( 0 ) ),
2773 binop( Iop_CmpLE64U, argR, argL ) );
2774 break;
2776 case /* 18 */ PPCG_FLAG_OP_MULLD: {
2777 IRTemp t128;
2778 /* OV true if result can't be represented in 64 bits
2779 i.e sHi != sign extension of sLo */
2780 t128 = newTemp(Ity_I128);
2781 assign( t128, binop(Iop_MullS64, argL, argR) );
2782 xer_ov
2783 = binop( Iop_CmpNE64,
2784 unop(Iop_128HIto64, mkexpr(t128)),
2785 binop( Iop_Sar64,
2786 unop(Iop_128to64, mkexpr(t128)),
2787 mkU8(63))
2789 break;
2792 default:
2793 vex_printf("calculate_XER_OV_64: op = %u\n", op);
2794 vpanic("calculate_XER_OV_64(ppc64)");
2797 return xer_ov;
2799 # undef INT64_MIN
2800 # undef AND3
2801 # undef XOR3
2802 # undef XOR2
2803 # undef NOT
2806 static void set_XER_OV_64( UInt op, IRExpr* res,
2807 IRExpr* argL, IRExpr* argR )
2809 IRExpr* xer_ov;
2810 vassert(op < PPCG_FLAG_OP_NUMBER);
2811 vassert(typeOfIRExpr(irsb->tyenv,res) == Ity_I64);
2812 vassert(typeOfIRExpr(irsb->tyenv,argL) == Ity_I64);
2813 vassert(typeOfIRExpr(irsb->tyenv,argR) == Ity_I64);
2815 /* xer_ov MUST denote either 0 or 1, no other value allowed */
2816 xer_ov = calculate_XER_OV_64( op, res, argL, argR);
2817 putXER_OV( unop(Iop_1Uto8, xer_ov) );
2819 /* Update the summary overflow */
2820 putXER_SO( binop(Iop_Or8, getXER_SO(), getXER_OV()) );
2823 static void update_SO( void ) {
2824 /* Update the summary overflow bit */
2825 putXER_SO( binop(Iop_Or8, getXER_SO(), getXER_OV()) );
/* Make XER[OV32] track the current XER[OV]. */
static void copy_OV_to_OV32( void ) {
   putXER_OV32( getXER_OV() );
}
2833 static void set_XER_OV_OV32_SO ( IRType ty, UInt op, IRExpr* res,
2834 IRExpr* argL, IRExpr* argR )
2836 if (ty == Ity_I32) {
2837 set_XER_OV_OV32_32( op, res, argL, argR );
2838 } else {
2839 IRExpr* xer_ov_32;
2840 set_XER_OV_64( op, res, argL, argR );
2841 xer_ov_32 = calculate_XER_OV_32( op, unop(Iop_64to32, res),
2842 unop(Iop_64to32, argL),
2843 unop(Iop_64to32, argR));
2844 putXER_OV32( unop(Iop_32to8, xer_ov_32) );
2846 update_SO();
2851 /* RES is the result of doing OP on ARGL and ARGR with the old %XER.CA
2852 value being OLDCA. Set %XER.CA accordingly. */
2854 static IRExpr* calculate_XER_CA_32 ( UInt op, IRExpr* res,
2855 IRExpr* argL, IRExpr* argR, IRExpr* oldca )
2857 IRExpr* xer_ca;
2859 switch (op) {
2860 case /* 0 */ PPCG_FLAG_OP_ADD:
2861 /* res <u argL */
2862 xer_ca
2863 = unop(Iop_1Uto32, binop(Iop_CmpLT32U, res, argL));
2864 break;
2866 case /* 1 */ PPCG_FLAG_OP_ADDE:
2867 /* res <u argL || (old_ca==1 && res==argL) */
2868 xer_ca
2869 = mkOR1(
2870 binop(Iop_CmpLT32U, res, argL),
2871 mkAND1(
2872 binop(Iop_CmpEQ32, oldca, mkU32(1)),
2873 binop(Iop_CmpEQ32, res, argL)
2876 xer_ca
2877 = unop(Iop_1Uto32, xer_ca);
2878 break;
2880 case /* 8 */ PPCG_FLAG_OP_SUBFE:
2881 /* res <u argR || (old_ca==1 && res==argR) */
2882 xer_ca
2883 = mkOR1(
2884 binop(Iop_CmpLT32U, res, argR),
2885 mkAND1(
2886 binop(Iop_CmpEQ32, oldca, mkU32(1)),
2887 binop(Iop_CmpEQ32, res, argR)
2890 xer_ca
2891 = unop(Iop_1Uto32, xer_ca);
2892 break;
2894 case /* 7 */ PPCG_FLAG_OP_SUBFC:
2895 case /* 9 */ PPCG_FLAG_OP_SUBFI:
2896 /* res <=u argR */
2897 xer_ca
2898 = unop(Iop_1Uto32, binop(Iop_CmpLE32U, res, argR));
2899 break;
2901 case /* 10 */ PPCG_FLAG_OP_SRAW:
2902 /* The shift amount is guaranteed to be in 0 .. 63 inclusive.
2903 If it is <= 31, behave like SRAWI; else XER.CA is the sign
2904 bit of argL. */
2905 /* This term valid for shift amount < 32 only */
2906 xer_ca
2907 = binop(
2908 Iop_And32,
2909 binop(Iop_Sar32, argL, mkU8(31)),
2910 binop( Iop_And32,
2911 argL,
2912 binop( Iop_Sub32,
2913 binop(Iop_Shl32, mkU32(1),
2914 unop(Iop_32to8,argR)),
2915 mkU32(1) )
2918 xer_ca
2919 = IRExpr_ITE(
2920 /* shift amt > 31 ? */
2921 binop(Iop_CmpLT32U, mkU32(31), argR),
2922 /* yes -- get sign bit of argL */
2923 binop(Iop_Shr32, argL, mkU8(31)),
2924 /* no -- be like srawi */
2925 unop(Iop_1Uto32, binop(Iop_CmpNE32, xer_ca, mkU32(0)))
2927 break;
2929 case /* 11 */ PPCG_FLAG_OP_SRAWI:
2930 /* xer_ca is 1 iff src was negative and bits_shifted_out !=
2931 0. Since the shift amount is known to be in the range
2932 0 .. 31 inclusive the following seems viable:
2933 xer.ca == 1 iff the following is nonzero:
2934 (argL >>s 31) -- either all 0s or all 1s
2935 & (argL & (1<<argR)-1) -- the stuff shifted out */
2936 xer_ca
2937 = binop(
2938 Iop_And32,
2939 binop(Iop_Sar32, argL, mkU8(31)),
2940 binop( Iop_And32,
2941 argL,
2942 binop( Iop_Sub32,
2943 binop(Iop_Shl32, mkU32(1),
2944 unop(Iop_32to8,argR)),
2945 mkU32(1) )
2948 xer_ca
2949 = unop(Iop_1Uto32, binop(Iop_CmpNE32, xer_ca, mkU32(0)));
2950 break;
2952 default:
2953 vex_printf("set_XER_CA: op = %u\n", op);
2954 vpanic("set_XER_CA(ppc)");
2957 return xer_ca;
2960 static void set_XER_CA_32 ( UInt op, IRExpr* res,
2961 IRExpr* argL, IRExpr* argR, IRExpr* oldca )
2963 IRExpr* xer_ca;
2964 vassert(op < PPCG_FLAG_OP_NUMBER);
2965 vassert(typeOfIRExpr(irsb->tyenv,res) == Ity_I32);
2966 vassert(typeOfIRExpr(irsb->tyenv,argL) == Ity_I32);
2967 vassert(typeOfIRExpr(irsb->tyenv,argR) == Ity_I32);
2968 vassert(typeOfIRExpr(irsb->tyenv,oldca) == Ity_I32);
2970 /* Incoming oldca is assumed to hold the values 0 or 1 only. This
2971 seems reasonable given that it's always generated by
2972 getXER_CA_32(), which masks it accordingly. In any case it being
2973 0 or 1 is an invariant of the ppc guest state representation;
2974 if it has any other value, that invariant has been violated. */
2976 xer_ca = calculate_XER_CA_32( op, res, argL, argR, oldca);
2978 /* xer_ca MUST denote either 0 or 1, no other value allowed */
2979 putXER_CA( unop(Iop_32to8, xer_ca) );
/* Build an Ity_I32 IRExpr holding the new XER.CA value (0 or 1) for
   64-bit mode, for flag-op |op| applied to result |res|, operands
   |argL|/|argR| and incoming carry |oldca| (all Ity_I64).
   NOTE(review): several closing parens/braces appear to be missing in
   this extract (original line numbers skip) — compare with upstream. */
2982 static IRExpr* calculate_XER_CA_64 ( UInt op, IRExpr* res,
2983 IRExpr* argL, IRExpr* argR, IRExpr* oldca )
2985 IRExpr* xer_ca;
2987 switch (op) {
2988 case /* 0 */ PPCG_FLAG_OP_ADD:
2989 /* res <u argL */
2990 xer_ca
2991 = unop(Iop_1Uto32, binop(Iop_CmpLT64U, res, argL));
2992 break;
2994 case /* 1 */ PPCG_FLAG_OP_ADDE:
2995 /* res <u argL || (old_ca==1 && res==argL) */
2996 xer_ca
2997 = mkOR1(
2998 binop(Iop_CmpLT64U, res, argL),
2999 mkAND1(
3000 binop(Iop_CmpEQ64, oldca, mkU64(1)),
3001 binop(Iop_CmpEQ64, res, argL)
3004 xer_ca
3005 = unop(Iop_1Uto32, xer_ca);
3006 break;
3008 case /* 8 */ PPCG_FLAG_OP_SUBFE:
3009 /* res <u argR || (old_ca==1 && res==argR) */
3010 xer_ca
3011 = mkOR1(
3012 binop(Iop_CmpLT64U, res, argR),
3013 mkAND1(
3014 binop(Iop_CmpEQ64, oldca, mkU64(1)),
3015 binop(Iop_CmpEQ64, res, argR)
3018 xer_ca
3019 = unop(Iop_1Uto32, xer_ca);
3020 break;
3022 case /* 7 */ PPCG_FLAG_OP_SUBFC:
3023 case /* 9 */ PPCG_FLAG_OP_SUBFI:
3024 /* res <=u argR */
3025 xer_ca
3026 = unop(Iop_1Uto32, binop(Iop_CmpLE64U, res, argR));
3027 break;
3030 case /* 10 */ PPCG_FLAG_OP_SRAW:
3031 /* The shift amount is guaranteed to be in 0 .. 31 inclusive.
3032 If it is <= 31, behave like SRAWI; else XER.CA is the sign
3033 bit of argL. */
3034 /* This term valid for shift amount < 31 only */
3036 xer_ca
3037 = binop(
3038 Iop_And64,
3039 binop(Iop_Sar64, argL, mkU8(31)),
3040 binop( Iop_And64,
3041 argL,
3042 binop( Iop_Sub64,
3043 binop(Iop_Shl64, mkU64(1),
3044 unop(Iop_64to8,argR)),
3045 mkU64(1) )
3048 xer_ca
3049 = IRExpr_ITE(
3050 /* shift amt > 31 ? */
3051 binop(Iop_CmpLT64U, mkU64(31), argR),
3052 /* yes -- get sign bit of argL */
3053 unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))),
3054 /* no -- be like srawi */
3055 unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)))
3057 break;
3059 case /* 11 */ PPCG_FLAG_OP_SRAWI:
3060 /* xer_ca is 1 iff src was negative and bits_shifted_out != 0.
3061 Since the shift amount is known to be in the range 0 .. 31
3062 inclusive the following seems viable:
3063 xer.ca == 1 iff the following is nonzero:
3064 (argL >>s 31) -- either all 0s or all 1s
3065 & (argL & (1<<argR)-1) -- the stuff shifted out */
3067 xer_ca
3068 = binop(
3069 Iop_And64,
3070 binop(Iop_Sar64, argL, mkU8(31)),
3071 binop( Iop_And64,
3072 argL,
3073 binop( Iop_Sub64,
3074 binop(Iop_Shl64, mkU64(1),
3075 unop(Iop_64to8,argR)),
3076 mkU64(1) )
3079 xer_ca
3080 = unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)));
3081 break;
3084 case /* 12 */ PPCG_FLAG_OP_SRAD:
3085 /* The shift amount is guaranteed to be in 0 .. 63 inclusive.
3086 If it is <= 63, behave like SRADI; else XER.CA is the sign
3087 bit of argL. */
3088 /* This term valid for shift amount < 63 only */
3090 xer_ca
3091 = binop(
3092 Iop_And64,
3093 binop(Iop_Sar64, argL, mkU8(63)),
3094 binop( Iop_And64,
3095 argL,
3096 binop( Iop_Sub64,
3097 binop(Iop_Shl64, mkU64(1),
3098 unop(Iop_64to8,argR)),
3099 mkU64(1) )
3102 xer_ca
3103 = IRExpr_ITE(
3104 /* shift amt > 63 ? */
3105 binop(Iop_CmpLT64U, mkU64(63), argR),
3106 /* yes -- get sign bit of argL */
3107 unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))),
3108 /* no -- be like sradi */
3109 unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)))
3111 break;
3114 case /* 13 */ PPCG_FLAG_OP_SRADI:
3115 /* xer_ca is 1 iff src was negative and bits_shifted_out != 0.
3116 Since the shift amount is known to be in the range 0 .. 63
3117 inclusive, the following seems viable:
3118 xer.ca == 1 iff the following is nonzero:
3119 (argL >>s 63) -- either all 0s or all 1s
3120 & (argL & (1<<argR)-1) -- the stuff shifted out */
3122 xer_ca
3123 = binop(
3124 Iop_And64,
3125 binop(Iop_Sar64, argL, mkU8(63)),
3126 binop( Iop_And64,
3127 argL,
3128 binop( Iop_Sub64,
3129 binop(Iop_Shl64, mkU64(1),
3130 unop(Iop_64to8,argR)),
3131 mkU64(1) )
3134 xer_ca
3135 = unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)));
3136 break;
3138 default:
3139 vex_printf("set_XER_CA: op = %u\n", op);
3140 vpanic("set_XER_CA(ppc64)");
3143 return xer_ca;
3146 static void set_XER_CA_64 ( UInt op, IRExpr* res,
3147 IRExpr* argL, IRExpr* argR, IRExpr* oldca )
3149 IRExpr* xer_ca;
3150 vassert(op < PPCG_FLAG_OP_NUMBER);
3151 vassert(typeOfIRExpr(irsb->tyenv,res) == Ity_I64);
3152 vassert(typeOfIRExpr(irsb->tyenv,argL) == Ity_I64);
3153 vassert(typeOfIRExpr(irsb->tyenv,argR) == Ity_I64);
3154 vassert(typeOfIRExpr(irsb->tyenv,oldca) == Ity_I64);
3156 /* Incoming oldca is assumed to hold the values 0 or 1 only. This
3157 seems reasonable given that it's always generated by
3158 getXER_CA_32(), which masks it accordingly. In any case it being
3159 0 or 1 is an invariant of the ppc guest state representation;
3160 if it has any other value, that invariant has been violated. */
3162 xer_ca = calculate_XER_CA_64( op, res, argL, argR, oldca );
3164 /* xer_ca MUST denote either 0 or 1, no other value allowed */
3165 putXER_CA( unop(Iop_32to8, xer_ca) );
3168 static void set_XER_CA_CA32 ( IRType ty, UInt op, IRExpr* res,
3169 IRExpr* argL, IRExpr* argR, IRExpr* oldca )
3171 if (ty == Ity_I32) {
3172 set_XER_CA_32( op, res, argL, argR, oldca );
3173 } else {
3174 set_XER_CA_64( op, res, argL, argR, oldca );
3178 /* Used only by addex instruction, which uses and sets OV as carry.
     Computes the carry-out of an ADDE-style addition and stores it in
     both XER.OV and XER.OV32. */
3179 static void set_XER_OV_OV32_ADDEX ( IRType ty, IRExpr* res,
3180 IRExpr* argL, IRExpr* argR,
3181 IRExpr* old_ov )
3183 if (ty == Ity_I32) {
3184 IRTemp xer_ov = newTemp(Ity_I32);
     /* NOTE(review): xer_ov is Ity_I32 but is assigned an Iop_32to8
        result below — looks type-inconsistent; confirm against the
        upstream source of this function. */
3185 assign ( xer_ov, unop(Iop_32to8,
3186 calculate_XER_CA_32( PPCG_FLAG_OP_ADDE,
3187 res, argL, argR, old_ov ) ) );
3188 putXER_OV( mkexpr (xer_ov) );
3189 putXER_OV32( mkexpr (xer_ov) );
3190 } else {
3191 IRExpr *xer_ov;
3192 IRExpr* xer_ov_32;
3193 xer_ov = calculate_XER_CA_64( PPCG_FLAG_OP_ADDE,
3194 res, argL, argR, old_ov );
3195 putXER_OV( unop(Iop_32to8, xer_ov) );
     /* OV32 is the carry of the low 32-bit halves of the operands. */
3196 xer_ov_32 = calculate_XER_CA_32( PPCG_FLAG_OP_ADDE,
3197 unop(Iop_64to32, res),
3198 unop(Iop_64to32, argL),
3199 unop(Iop_64to32, argR),
3200 unop(Iop_64to32, old_ov) );
3201 putXER_OV32( unop(Iop_32to8, xer_ov_32) );
3205 static IRExpr * absI64( IRTemp src )
3207 IRTemp sign_mask;
3208 IRTemp twos_comp;
3209 sign_mask = newTemp( Ity_I64 );
3210 twos_comp = newTemp( Ity_I64 );
3212 assign( sign_mask, unop( Iop_1Sto64, unop( Iop_64to1, binop( Iop_Shr64,
3213 mkexpr( src ), mkU8( 63 ) ) ) ) );
3214 assign( twos_comp, binop( Iop_Add64, unop( Iop_Not64, mkexpr( src ) ), mkU64( 1 ) ) );
3216 return binop( Iop_Or64,
3217 binop( Iop_And64, mkexpr ( src ), unop( Iop_Not64, mkexpr( sign_mask ) ) ),
3218 binop( Iop_And64, mkexpr( twos_comp ), mkexpr( sign_mask ) ) );
/* Search a 128-bit vector for an element equal to |value|; see the
   comment below for the exact return convention. */
3221 static IRExpr * locate_vector_ele_eq ( IRTemp src, IRExpr *value,
3222 UInt dir, IRType size )
3224 #define MAX_ELE 16
3225 /* Find the index, 0 to max-1, of the element in the 128-bit vector that
3226 matches value.  The returned value is index+1, as an Ity_I8.  If no
3227 match is found, the returned value is equal to the number
3228 of elements in the vector plus one. The argument dir specifies match from
3229 left (dir = 0) or from the right (dir != 0). */
3230 UInt i, num_bytes;
3231 UInt max = 0; /* number of vector elements */
3232 UInt mask = 0;
3233 IRTemp cnt[MAX_ELE+1];
3234 IRTemp flag[MAX_ELE+1];
3235 IRTemp cmp_result[MAX_ELE];
3236 UInt byte_index;
3238 vassert(size == Ity_I8 || size == Ity_I16);
3240 if (size == Ity_I8) {
3241 mask = 0xFF;
3242 max = 128/8;
3243 num_bytes = 1;
3244 } else {
3245 mask = 0xFFFF;
3246 max = 128/16;
3247 num_bytes = 2; // num bytes in half word
     /* cnt counts 1 + number of non-matching elements seen so far;
        flag stays 1 until the first match, then forces cnt to stop. */
3250 cnt[0] = newTemp(Ity_I8);
3251 assign( cnt[0], mkU8( 1 ) );
3252 flag[0] = newTemp(Ity_I8);
3253 assign( flag[0], mkU8( 1 ) );
3255 for (i = 0; i < max; i++) {
3256 if (dir == 0) {
3257 byte_index = (max - 1 - i)*num_bytes;
3258 } else {
3259 byte_index = i*num_bytes;
3262 cnt[i+1] = newTemp(Ity_I8);
3263 cmp_result[i] = newTemp(Ity_I8);
3264 flag[i+1] = newTemp(Ity_I8);
3266 assign( cmp_result[i],
3267 unop( Iop_1Uto8,
3268 binop( Iop_CmpEQ64,
3269 binop( Iop_And64,
3270 mkU64( mask ),
3271 value ),
3272 extract_field_from_vector( src,
3273 mkU64( byte_index ),
3274 mask ) ) ) );
3276 assign( flag[i+1], binop( Iop_And8,
3277 mkexpr( flag[i] ),
3278 unop( Iop_Not8,
3279 mkexpr( cmp_result[i] ) ) ) );
3281 // Once flag[i] becomes zero, it forces the increment to zero
3282 assign( cnt[i+1],
3283 binop( Iop_Add8,
3284 binop( Iop_And8, mkexpr( flag[i+1] ), mkU8( 1 ) ),
3285 mkexpr( cnt[i] ) ) );
3287 return mkexpr( cnt[max] );
3288 #undef MAX_ELE
3291 /*-----------------------------------------------------------*/
3292 /*--- Prefix instruction helpers ---*/
3293 /*-----------------------------------------------------------*/
/* Immediate-field masks applied to the suffix word for the various
   prefixed-instruction forms. */
3294 #define DFORM_IMMASK 0xffffffff
3295 #define DSFORM_IMMASK 0xfffffffc
3296 #define DQFORM_IMMASK 0xfffffff0
3297 #define DA8LSFORM_IMMASK 0x3fffffff // Algebraic 8LS Dform
3299 #define ISA_3_1_PREFIX_CHECK if (prefix) {if (!allow_isa_3_1) goto decode_noIsa3_1;}
3301 /* ENABLE_PREFIX_CHECK is for development purposes. Turn off for production
3302 releases to improve performance. */
3303 #define ENABLE_PREFIX_CHECK 0
3305 #if ENABLE_PREFIX_CHECK
3306 #define PREFIX_CHECK { vassert( !prefix_instruction( prefix ) ); }
3307 #else
3308 #define PREFIX_CHECK { }
3309 #endif
3311 /* Bits 0:5 of all prefix instructions are assigned the primary opcode
3312 value 0b000001. 0b000001 is not available for use as a primary opcode for
3313 either word instructions or suffixes of prefixed instructions. */
3315 #define PREFIX_INST 0x1
3316 #define PREFIX_NOP_INVALID -1
/* CONCAT(a,b,c): concatenate a above the c-bit-wide field b. */
3318 #define CONCAT(_aa,_bb,_cc) ((_aa) << (_cc) | (_bb))
3320 /* The codes for the prefix types */
3321 #define pType0 0 /* Eight-Byte Load/Store Instructions */
3322 #define pType1 1 /* Eight-Byte Register-to-Register Instructions */
3323 #define pType2 2 /* Modified Load/Store Instructions */
3324 #define pType3 3 /* Modified Register-to-Register Instructions */
3326 /* Extract unsigned from prefix instr[17:0] */
3327 static UInt ifieldUIMM18 ( UInt instr ) {
3328 return instr & 0x3FFFF;
3331 static ULong extend_s_34to64 ( ULong x )
3333 return (ULong)((((Long)x) << 30) >> 30);
3336 static UChar PrefixType( UInt instr ) {
3337 return toUChar( IFIELD( instr, 24, 2 ) );
3340 /* Extract XT 8RR D-form (destination register) field, instr[38:42] | [47] */
/* NOTE(review): the comment above says T spans instr[38:42] (5 bits)
   but the IFIELD width used below is 4, which would extract only
   instr[39:42] and drop a bit between TX and T in the composed
   register number.  Confirm against the ISA 3.1 8RR D-form layout. */
3341 static UChar ifieldRegXT_8RR_D ( UInt instr )
3343 UChar TX = toUChar (IFIELD (instr, (63 - 47), 1));
3344 UChar T = toUChar (IFIELD (instr, (63 - 42), 4));
3345 return (TX << 5) | T;
3348 /* Extract immediate 8RR D-form prefix[16:31] or inst[48:63] */
3349 static UInt ifield_imm_8RR_D ( UInt instr )
3351 return IFIELD( instr, 0, 16 );
3354 static UChar ifieldR( UInt instr ) {
3355 return toUChar( IFIELD( instr, 20, 1 ) );
3358 /* Sign extend imm34 -> IRExpr* */
3359 static IRExpr* mkSzExtendS34 ( ULong imm64 )
3361 return ( mkU64(extend_s_34to64(imm64)));
3364 /* Prefix instruction effective address calc: (rA + simm) */
3365 static IRExpr* ea_rA_simm34 ( UInt rA, ULong simm34 )
3367 vassert(rA < 32);
3368 vassert(mode64);
3369 return binop(Iop_Add64, getIReg(rA), mkSzExtendS34(simm34));
3372 /* Standard prefix instruction effective address calc: (rA|0) + simm16 */
3373 static IRExpr* ea_rAor0_simm34 ( UInt rA, ULong simm34 )
3375 vassert(rA < 32);
3376 vassert(mode64);
3377 if (rA == 0) {
3378 return mkSzExtendS34(simm34);
3379 } else {
3380 return ea_rA_simm34( rA, simm34 );
3384 static int prefix_instruction ( UInt instr )
3386 /* Format of first 4 bytes of prefix instruction
3387 bits [0:5] - must be 0x1 identifying this as a prefix inst
3388 bits [6:7] - prefix instruction type. */
3389 UChar opcode = IFIELD( instr, 26, 6);
3391 if (opcode == PREFIX_INST) return True;
3392 return False;
3395 /* Standard effective-address calculation for a (possibly prefixed)
     load/store.  Combines the prefix and suffix immediates, returns
     the EA expression, and passes back the sign-extended immediate
     and the prefix R bit through the out-parameters. */
3396 static IRExpr* calculate_prefix_EA ( UInt prefix, UInt suffixInstr,
3397 UChar rA_addr, UInt ptype,
3398 UInt immediate_mask,
3399 ULong *immediate_val,
3400 UInt *R )
3402 IRType ty = Ity_I64;
3403 ULong d0 = ifieldUIMM18(prefix); // Will be zero for word inst
3404 ULong d1 = ifieldUIMM16(suffixInstr) & immediate_mask;
3405 ULong D = CONCAT( d0, d1, 16 ); // result is 34 bits wide
3406 Bool is_prefix = prefix_instruction( prefix );
3407 IRTemp tmp = newTemp(ty);
3409 if ( !is_prefix ) {
3410 *immediate_val = extend_s_16to32( d1 );
3411 assign( tmp, ea_rAor0_simm( rA_addr, d1 ) );
3412 *R = 0;
3414 } else {
3415 vassert( ty == Ity_I64 ); // prefix instructions must be 64-bit
3416 vassert( (ptype == pType0) || (ptype == pType2) );
3417 *R = ifieldR( prefix );
3418 *immediate_val = extend_s_34to64( D ); // D is 34 bits wide
3419 assign( tmp, ea_rAor0_simm34( rA_addr, D ) );
3422 /* Get the EA */
3423 if ( *R == 0 )
3424 return mkexpr ( tmp );
3426 /* Add immediate value from instruction to the current instruction
3427 address. guest_CIA_curr_instr is pointing at the prefix, use address
3428 of the instruction prefix. */
3429 return binop( Iop_Add64,
3430 mkexpr ( tmp ),
3431 mkU64( guest_CIA_curr_instr ) );
3434 /* Extract prefix intruction register fields 8RR:XX4-form */
3435 static UChar ifieldRegXT_8RR_XX4 ( UInt instr ) {
3436 return toUChar( ( IFIELD( instr, (63-63), 1) << 5)
3437 | ( IFIELD( instr, (63-42), 5 ) ) ); // instr[38:42] | [63]
3440 static UChar ifieldRegXA_8RR_XX4 ( UInt instr ) {
3441 return toUChar( ( IFIELD( instr, (63-61), 1) << 5)
3442 | ( IFIELD( instr, (63-47), 5 ) ) ); // instr[43:47] | [61]
3445 static UChar ifieldRegXB_8RR_XX4 ( UInt instr ) {
3446 return toUChar( ( IFIELD( instr, (63-62), 1) << 5)
3447 | ( IFIELD( instr, (63-52), 5 ) ) ); // instr[48:52] | [62]
3450 static UChar ifieldRegXC_8RR_XX4 ( UInt instr ) {
3451 return toUChar( ( IFIELD( instr, (63-60), 1) << 5)
3452 | ( IFIELD( instr, (63-57), 5 ) ) ); // instr[53:57] | [60]
3455 /*------------------------------------------------------------*/
3456 /*--- Read/write to guest-state --- */
3457 /*------------------------------------------------------------*/
/* Read the given guest special register as an Ity_I32/I64 expression
   (word size follows mode64 for the full-width registers). */
3459 static IRExpr* /* :: Ity_I32/64 */ getGST ( PPC_GST reg )
3461 IRType ty = mode64 ? Ity_I64 : Ity_I32;
3462 switch (reg) {
3463 case PPC_GST_SPRG3_RO:
3464 return IRExpr_Get( OFFB_SPRG3_RO, ty );
3466 case PPC_GST_CIA:
3467 return IRExpr_Get( OFFB_CIA, ty );
3469 case PPC_GST_LR:
3470 return IRExpr_Get( OFFB_LR, ty );
3472 case PPC_GST_CTR:
3473 return IRExpr_Get( OFFB_CTR, ty );
3475 case PPC_GST_VRSAVE:
3476 return IRExpr_Get( OFFB_VRSAVE, Ity_I32 );
3478 case PPC_GST_VSCR:
3479 return binop(Iop_And32, IRExpr_Get( OFFB_VSCR,Ity_I32 ),
3480 mkU32(MASK_VSCR_VALID));
3482 case PPC_GST_CR: {
3483 /* Synthesise the entire CR into a single word. Expensive. */
3484 # define FIELD(_n) \
3485 binop(Iop_Shl32, \
3486 unop(Iop_8Uto32, \
3487 binop(Iop_Or8, \
3488 binop(Iop_And8, getCR321(_n), mkU8(7<<1)), \
3489 binop(Iop_And8, getCR0(_n), mkU8(1)) \
3491 ), \
3492 mkU8(4 * (7-(_n))) \
3494 return binop(Iop_Or32,
3495 binop(Iop_Or32,
3496 binop(Iop_Or32, FIELD(0), FIELD(1)),
3497 binop(Iop_Or32, FIELD(2), FIELD(3))
3499 binop(Iop_Or32,
3500 binop(Iop_Or32, FIELD(4), FIELD(5)),
3501 binop(Iop_Or32, FIELD(6), FIELD(7))
3504 # undef FIELD
     /* XER is kept as separate SO/OV/CA/OV32/CA32/BC pieces; rebuild
        the architected 32-bit layout here. */
3507 case PPC_GST_XER:
3508 return binop(Iop_Or32,
3509 binop(Iop_Or32,
3510 binop(Iop_Or32,
3511 binop( Iop_Shl32, getXER_SO_32(), mkU8(31)),
3512 binop( Iop_Shl32, getXER_OV_32(), mkU8(30))),
3513 binop(Iop_Or32,
3514 binop( Iop_Shl32, getXER_CA_32(), mkU8(29)),
3515 getXER_BC_32())),
3516 binop(Iop_Or32,
3517 binop( Iop_Shl32, getXER_OV32_32(), mkU8(19)),
3518 binop( Iop_Shl32, getXER_CA32_32(), mkU8(18))));
3520 case PPC_GST_TFHAR:
3521 return IRExpr_Get( OFFB_TFHAR, ty );
3523 case PPC_GST_TEXASR:
3524 return IRExpr_Get( OFFB_TEXASR, ty );
3526 case PPC_GST_TEXASRU:
3527 return IRExpr_Get( OFFB_TEXASRU, ty );
3529 case PPC_GST_TFIAR:
3530 return IRExpr_Get( OFFB_TFIAR, ty );
3532 case PPC_GST_PPR:
3533 return IRExpr_Get( OFFB_PPR, ty );
3535 case PPC_GST_PPR32:
3536 return unop( Iop_64HIto32, IRExpr_Get( OFFB_PPR, ty ) );
3538 case PPC_GST_PSPB:
3539 return IRExpr_Get( OFFB_PSPB, ty );
3541 case PPC_GST_DSCR:
3542 return IRExpr_Get( OFFB_DSCR, ty );
3544 default:
3545 vex_printf("getGST(ppc): reg = %u", reg);
3546 vpanic("getGST(ppc)");
3550 /* Get a masked word from the given reg (lower 32 bits only). */
3551 static IRExpr* /* ::Ity_I32 */ getGST_masked ( PPC_GST reg, ULong mask )
3553 IRTemp val = newTemp(Ity_I32);
3554 vassert( reg < PPC_GST_MAX );
3556 switch (reg) {
3558 case PPC_GST_FPSCR: {
3559 /* Vex-generated code expects the FPSCR to be set as follows:
3560 all exceptions masked, round-to-nearest.
3561 This corresponds to a FPSCR value of 0x0. */
3563 /* In the lower 32 bits of FPSCR, we're keeping track of the binary
3564 * floating point rounding mode and Floating-point Condition code, so
3565 * if the mask isn't asking for either of these, just return 0x0.
3567 if ( mask & ( MASK_FPSCR_C_FPCC | MASK_FPSCR_RN ) ) {
3568 assign( val, binop( Iop_Or32,
3569 unop( Iop_8Uto32, IRExpr_Get( OFFB_FPROUND, Ity_I8 ) ),
3570 binop( Iop_Shl32,
3571 unop( Iop_8Uto32,
3572 IRExpr_Get( OFFB_C_FPCC, Ity_I8 ) ),
3573 mkU8( 12 ) ) ) );
3574 } else {
3575 assign( val, mkU32(0x0) );
3578 break;
3581 default:
3582 vex_printf("getGST_masked(ppc): reg = %u", reg);
3583 vpanic("getGST_masked(ppc)");
3586 if ( mask != 0xFFFFFFFF ) {
3587 return binop(Iop_And32, mkexpr(val), mkU32(mask));
3588 } else {
3589 return mkexpr(val);
3593 /* Get a masked word from the upper 32 bits of the given reg. */
3594 static IRExpr* /* ::Ity_I32 */getGST_masked_upper(PPC_GST reg, ULong mask) {
3595 IRExpr * val;
3596 vassert( reg < PPC_GST_MAX );
3598 switch (reg) {
3600 case PPC_GST_FPSCR: {
3601 /* In the upper 32 bits of FPSCR, we're only keeping track
3602 * of the decimal floating point rounding mode, so if the mask
3603 * isn't asking for this, just return 0x0.
3605 if (mask & MASK_FPSCR_DRN) {
3606 val = binop( Iop_And32,
3607 unop( Iop_8Uto32, IRExpr_Get( OFFB_DFPROUND, Ity_I8 ) ),
3608 unop( Iop_64HIto32, mkU64( mask ) ) );
3609 } else {
3610 val = mkU32( 0x0ULL );
3612 break;
3615 default:
3616 vex_printf( "getGST_masked_upper(ppc): reg = %u", reg );
3617 vpanic( "getGST_masked_upper(ppc)" );
3619 return val;
3623 /* Fetch the specified REG[FLD] nibble (as per IBM/hardware notation)
3624 and return it at the bottom of an I32; the top bits above the
3625 nibble are guaranteed to be zero.
     NOTE(review): the original comment said "top 27 bits"; a 4-bit
     nibble leaves the top 28 bits zero — confirm which was meant. */
3626 static IRExpr* /* ::Ity_I32 */ getGST_field ( PPC_GST reg, UInt fld )
3628 UInt shft, mask;
3630 vassert( fld < 8 );
3631 vassert( reg < PPC_GST_MAX );
3633 shft = 4*(7-fld);
3634 mask = 0xF<<shft;
3636 switch (reg) {
3637 case PPC_GST_XER:
3638 vassert(fld ==7);
3639 return binop(Iop_Or32,
3640 binop(Iop_Or32,
3641 binop(Iop_Shl32, getXER_SO_32(), mkU8(3)),
3642 binop(Iop_Shl32, getXER_OV_32(), mkU8(2))),
3643 binop( Iop_Shl32, getXER_CA_32(), mkU8(1)));
3644 break;
3646 default:
3647 if (shft == 0)
3648 return getGST_masked( reg, mask );
3649 else
3650 return binop(Iop_Shr32,
3651 getGST_masked( reg, mask ),
3652 mkU8(toUChar( shft )));
/* Write |src| to the given guest special register, asserting that the
   source expression's type matches the register's width. */
3656 static void putGST ( PPC_GST reg, IRExpr* src )
3658 IRType ty = mode64 ? Ity_I64 : Ity_I32;
3659 IRType ty_src = typeOfIRExpr(irsb->tyenv,src );
3660 vassert( reg < PPC_GST_MAX );
3661 switch (reg) {
3662 case PPC_GST_IP_AT_SYSCALL:
3663 vassert( ty_src == ty );
3664 stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL, src ) );
3665 break;
3666 case PPC_GST_CIA:
3667 vassert( ty_src == ty );
3668 stmt( IRStmt_Put( OFFB_CIA, src ) );
3669 break;
3670 case PPC_GST_LR:
3671 vassert( ty_src == ty );
3672 stmt( IRStmt_Put( OFFB_LR, src ) );
3673 break;
3674 case PPC_GST_CTR:
3675 vassert( ty_src == ty );
3676 stmt( IRStmt_Put( OFFB_CTR, src ) );
3677 break;
3678 case PPC_GST_VRSAVE:
3679 vassert( ty_src == Ity_I32 );
3680 stmt( IRStmt_Put( OFFB_VRSAVE,src));
3681 break;
3682 case PPC_GST_VSCR:
3683 vassert( ty_src == Ity_I32 );
3684 stmt( IRStmt_Put( OFFB_VSCR,
3685 binop(Iop_And32, src,
3686 mkU32(MASK_VSCR_VALID)) ) );
3687 break;
     /* XER is stored as separate SO/OV/CA/OV32/CA32/BC pieces; split
        the architected 32-bit value into those pieces. */
3688 case PPC_GST_XER:
3689 vassert( ty_src == Ity_I32 );
3690 putXER_SO( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(31))) );
3691 putXER_OV( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(30))) );
3692 putXER_CA( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(29))) );
3693 putXER_OV32( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(19))) );
3694 putXER_CA32( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(18))) );
3695 putXER_BC( unop(Iop_32to8, src) );
3696 break;
3698 case PPC_GST_EMWARN:
3699 vassert( ty_src == Ity_I32 );
3700 stmt( IRStmt_Put( OFFB_EMNOTE,src) );
3701 break;
3703 case PPC_GST_CMSTART:
3704 vassert( ty_src == ty );
3705 stmt( IRStmt_Put( OFFB_CMSTART, src) );
3706 break;
3708 case PPC_GST_CMLEN:
3709 vassert( ty_src == ty );
3710 stmt( IRStmt_Put( OFFB_CMLEN, src) );
3711 break;
3713 case PPC_GST_TEXASR:
3714 vassert( ty_src == Ity_I64 );
3715 stmt( IRStmt_Put( OFFB_TEXASR, src ) );
3716 break;
3718 case PPC_GST_TEXASRU:
3719 vassert( ty_src == Ity_I32 );
3720 stmt( IRStmt_Put( OFFB_TEXASRU, src ) );
3721 break;
3723 case PPC_GST_TFIAR:
3724 vassert( ty_src == Ity_I64 );
3725 stmt( IRStmt_Put( OFFB_TFIAR, src ) );
3726 break;
3727 case PPC_GST_TFHAR:
3728 vassert( ty_src == Ity_I64 );
3729 stmt( IRStmt_Put( OFFB_TFHAR, src ) );
3730 break;
3732 case PPC_GST_PPR32:
3733 case PPC_GST_PPR:
3735 /* The Program Priority Register (PPR) stores the priority in
3736 * bits [52:50]. The user-settable priorities are:
3738 * 001 very low
3739 * 010 low
3740 * 011 medium low
3741 * 100 medium
3742 * 101 medium high
3744 * If the argument is not between 0b001 and 0b100 the priority is set
3745 * to 0b100. The priority can only be set to 0b101 if the Problem
3746 * State Boost Register is non-zero. The value of the PPR is not
3747 * changed if the input is not valid.
3750 IRTemp not_valid = newTemp(Ity_I64);
3751 IRTemp has_perm = newTemp(Ity_I64);
3752 IRTemp new_src = newTemp(Ity_I64);
3753 IRTemp PSPB_val = newTemp(Ity_I64);
3754 IRTemp value = newTemp(Ity_I64);
3756 vassert(( ty_src == Ity_I64 ) || ( ty_src == Ity_I32 ));
3757 assign( PSPB_val, binop( Iop_32HLto64,
3758 mkU32( 0 ),
3759 IRExpr_Get( OFFB_PSPB, Ity_I32 ) ) );
3760 if( reg == PPC_GST_PPR32 ) {
3761 vassert( ty_src == Ity_I32 );
3762 assign( value, binop( Iop_32HLto64,
3763 mkU32(0),
3764 binop( Iop_And32,
3765 binop( Iop_Shr32, src, mkU8( 18 ) ),
3766 mkU32( 0x7 ) ) ) );
3767 } else {
3768 vassert( ty_src == Ity_I64 );
3769 assign( value, binop( Iop_And64,
3770 binop( Iop_Shr64, src, mkU8( 50 ) ),
3771 mkU64( 0x7 ) ) );
     /* has_perm: all-ones iff priority 0b101 requested and PSPB==0.
        NOTE(review): permission for 0b101 when PSPB == 0 looks
        inverted w.r.t. the comment above ("non-zero") — confirm. */
3773 assign( has_perm,
3774 binop( Iop_And64,
3775 unop( Iop_1Sto64,
3776 binop( Iop_CmpEQ64,
3777 mkexpr( PSPB_val ),
3778 mkU64( 0 ) ) ),
3779 unop( Iop_1Sto64,
3780 binop( Iop_CmpEQ64,
3781 mkU64( 0x5 ),
3782 mkexpr( value ) ) ) ) );
     /* not_valid: all-ones iff requested priority is 0 or > 5. */
3783 assign( not_valid,
3784 binop( Iop_Or64,
3785 unop( Iop_1Sto64,
3786 binop( Iop_CmpEQ64,
3787 mkexpr( value ),
3788 mkU64( 0 ) ) ),
3789 unop( Iop_1Sto64,
3790 binop( Iop_CmpLT64U,
3791 mkU64( 0x5 ),
3792 mkexpr( value ) ) ) ) );
3793 assign( new_src,
3794 binop( Iop_Or64,
3795 binop( Iop_And64,
3796 unop( Iop_Not64,
3797 mkexpr( not_valid ) ),
3798 src ),
3799 binop( Iop_And64,
3800 mkexpr( not_valid ),
3801 binop( Iop_Or64,
3802 binop( Iop_And64,
3803 mkexpr( has_perm),
3804 binop( Iop_Shl64,
3805 mkexpr( value ),
3806 mkU8( 50 ) ) ),
3807 binop( Iop_And64,
3808 IRExpr_Get( OFFB_PPR, ty ),
3809 unop( Iop_Not64,
3810 mkexpr( has_perm )
3811 ) ) ) ) ) );
3813 /* make sure we only set the valid bit field [52:50] */
3814 stmt( IRStmt_Put( OFFB_PPR,
3815 binop( Iop_And64,
3816 mkexpr( new_src ),
3817 mkU64( 0x1C000000000000) ) ) );
3818 break;
3820 case PPC_GST_DSCR:
3821 vassert( ty_src == Ity_I64 );
3822 stmt( IRStmt_Put( OFFB_DSCR, src ) );
3823 break;
3825 default:
3826 vex_printf("putGST(ppc): reg = %u", reg);
3827 vpanic("putGST(ppc)");
3831 /* Write masked src to the given reg.  Only the bits selected by
     |mask| are updated; unselected bits keep their current value. */
3832 static void putGST_masked ( PPC_GST reg, IRExpr* src, ULong mask )
3834 IRType ty = mode64 ? Ity_I64 : Ity_I32;
3835 vassert( reg < PPC_GST_MAX );
3836 vassert( typeOfIRExpr( irsb->tyenv,src ) == Ity_I64 );
3838 switch (reg) {
3839 case PPC_GST_FPSCR: {
3840 /* Allow writes to either binary or decimal floating point
3841 Rounding Mode.
3843 /* If any part of |mask| covers FPSCR.RN, update the bits of
3844 FPSCR.RN by copying in |src| for locations where the
3845 corresponding bit in |mask| is 1, and leaving it unchanged
3846 for corresponding |mask| zero bits. */
3847 if (mask & MASK_FPSCR_RN) {
3848 stmt(
3849 IRStmt_Put(
3850 OFFB_FPROUND,
3851 unop(
3852 Iop_32to8,
3853 binop(
3854 Iop_Or32,
3855 binop(
3856 Iop_And32,
3857 unop(Iop_64to32, src),
3858 mkU32(MASK_FPSCR_RN & mask)
3860 binop(
3861 Iop_And32,
3862 unop(Iop_8Uto32, IRExpr_Get(OFFB_FPROUND,Ity_I8)),
3863 mkU32(MASK_FPSCR_RN & ~mask)
3871 if (mask & MASK_FPSCR_C_FPCC) {
3872 /* FPCC bits are in [47:51] */
3873 stmt(
3874 IRStmt_Put(
3875 OFFB_C_FPCC,
3876 unop(
3877 Iop_32to8,
3878 binop(Iop_Shr32,
3879 binop(
3880 Iop_Or32,
3881 binop(
3882 Iop_And32,
3883 unop(Iop_64to32, src),
3884 mkU32(MASK_FPSCR_C_FPCC & mask) ),
3885 binop(
3886 Iop_And32,
3887 unop(Iop_8Uto32,
3888 IRExpr_Get(OFFB_C_FPCC,Ity_I8)),
3889 mkU32(MASK_FPSCR_C_FPCC & ~mask)
3890 ) ),
3891 mkU8( 12 ) )
3892 ) ) );
3895 /* Similarly, update FPSCR.DRN if any bits of |mask|
3896 corresponding to FPSCR.DRN are set. */
3897 if (mask & MASK_FPSCR_DRN) {
3898 stmt(
3899 IRStmt_Put(
3900 OFFB_DFPROUND,
3901 unop(
3902 Iop_32to8,
3903 binop(
3904 Iop_Or32,
3905 binop(
3906 Iop_And32,
3907 unop(Iop_64HIto32, src),
3908 mkU32((MASK_FPSCR_DRN & mask) >> 32)
3910 binop(
3911 Iop_And32,
3912 unop(Iop_8Uto32, IRExpr_Get(OFFB_DFPROUND,Ity_I8)),
3913 mkU32((MASK_FPSCR_DRN & ~mask) >> 32)
3921 /* Give EmNote for attempted writes to:
3922 - Exception Controls
3923 - Non-IEEE Mode
3925 if (mask & 0xFC) { // Exception Control, Non-IEEE mode
3926 VexEmNote ew = EmWarn_PPCexns;
3928 /* If any of the src::exception_control bits are actually set,
3929 side-exit to the next insn, reporting the warning,
3930 so that Valgrind's dispatcher sees the warning. */
3931 putGST( PPC_GST_EMWARN, mkU32(ew) );
3932 stmt(
3933 IRStmt_Exit(
3934 binop(Iop_CmpNE32, mkU32(ew), mkU32(EmNote_NONE)),
3935 Ijk_EmWarn,
3936 mkSzConst( ty, nextInsnAddr()), OFFB_CIA ));
3939 /* Ignore all other writes */
3940 break;
3943 default:
3944 vex_printf("putGST_masked(ppc): reg = %u", reg);
3945 vpanic("putGST_masked(ppc)");
3949 /* Write the least significant nibble of src to the specified
3950 REG[FLD] (as per IBM/hardware notation). */
3951 static void putGST_field ( PPC_GST reg, IRExpr* src, UInt fld )
3953 UInt shft;
3954 ULong mask;
3956 vassert( typeOfIRExpr(irsb->tyenv,src ) == Ity_I32 );
3957 vassert( fld < 16 );
3958 vassert( reg < PPC_GST_MAX );
     /* Fields 0..7 address the low 32-bit word, 8..15 the upper word
        (for 64-bit registers like FPSCR). */
3960 if (fld < 8)
3961 shft = 4*(7-fld);
3962 else
3963 shft = 4*(15-fld);
3964 mask = 0xF;
3965 mask = mask << shft;
3967 switch (reg) {
3968 case PPC_GST_CR:
3969 putCR0 (fld, binop(Iop_And8, mkU8(1 ), unop(Iop_32to8, src)));
3970 putCR321(fld, binop(Iop_And8, mkU8(7<<1), unop(Iop_32to8, src)));
3971 break;
3973 default:
3975 IRExpr * src64 = unop( Iop_32Uto64, src );
3977 if (shft == 0) {
3978 putGST_masked( reg, src64, mask );
3979 } else {
3980 putGST_masked( reg,
3981 binop( Iop_Shl64, src64, mkU8( toUChar( shft ) ) ),
3982 mask );
3988 static void putFPCC ( IRExpr* e )
3990 /* The assumption is that the value of the FPCC are passed in the lower
3991 * four bits of a 32 bit value.
3993 * Note, the C and FPCC bits which are a field of the FPSCR
3994 * register are stored in their own "register" in
3995 * memory. The FPCC bits are in the lower 4 bits. We don't need to
3996 * shift it to the bits to their location in the FPSCR register. Note,
3997 * not all of the FPSCR register bits are supported. We are writing all
3998 * of the bits in the FPCC field but not the C field.
4000 IRExpr* tmp;
4002 vassert( typeOfIRExpr( irsb->tyenv, e ) == Ity_I32 );
4003 /* Get the C bit field */
4004 tmp = binop( Iop_And32,
4005 mkU32( 0x10 ),
4006 unop( Iop_8Uto32, IRExpr_Get( OFFB_C_FPCC, Ity_I8 ) ) );
4008 stmt( IRStmt_Put( OFFB_C_FPCC,
4009 unop( Iop_32to8,
4010 binop( Iop_Or32, tmp,
4011 binop( Iop_And32, mkU32( 0xF ), e ) ) ) ) );
4015 static IRExpr* /* ::Ity_I32 */ getC ( void )
4017 /* Note, the Floating-Point Result Class Descriptor (C) bit is a field of
4018 * the FPSCR registered are stored in its own "register" in guest state
4019 * with the FPCC bit field. C | FPCC
4021 IRTemp val = newTemp(Ity_I32);
4023 assign( val, binop( Iop_Shr32,
4024 unop( Iop_8Uto32, IRExpr_Get( OFFB_C_FPCC, Ity_I8 ) ),
4025 mkU8( 4 ) ) );
4026 return mkexpr(val);
4029 static IRExpr* /* ::Ity_I32 */ getFPCC ( void )
4031 /* Note, the FPCC bits are a field of the FPSCR
4032 * register are stored in their own "register" in
4033 * guest state with the C bit field. C | FPCC
4035 IRTemp val = newTemp( Ity_I32 );
4037 assign( val, binop( Iop_And32, unop( Iop_8Uto32,
4038 IRExpr_Get( OFFB_C_FPCC, Ity_I8 ) ),
4039 mkU32( 0xF ) ));
4040 return mkexpr(val);
4043 static void put_syscall_flag( IRExpr* src )
4045 /* Need to pass a flag indicating if the system call is using the sc or
4046 scv instructions. Because Valgrind does an end-of-block after the
4047 system call, the contents of a gpr can not be saved and restored after
4048 the system call. A custom guest state register guest_syscall_flag is
4049 used to pass the flag so the guest state is not disturbed. */
4051 stmt( IRStmt_Put( offsetofPPCGuestState(guest_syscall_flag), src ) );
4055 /*-----------------------------------------------------------*/
4056 /* Helpers to access VSX Accumulator register file
4057 *-----------------------------------------------------------*/
/* Byte offset of row |reg| of ACC entry |index| from the ACC base. */
4058 static UInt ACC_offset( UInt index, UInt reg )
4060 #define SizeofACC_row 16 /* size of ACC row in bytes */
4061 #define ACC_row_per_entry 4
4062 #define ACC_num_entries 8
4064 vassert(index < ACC_num_entries);
4065 vassert(reg < ACC_row_per_entry);
4066 return index * ACC_row_per_entry * SizeofACC_row + reg * SizeofACC_row;
4069 static UInt base_acc_addr( Bool ACC_mapped_on_VSR )
4071 /* Return base ACC address if ACC mapped over vsrs or as a separate
4072 register file. */
4073 if ( ACC_mapped_on_VSR ) /* ISA 3.1 implementation */
4074 return offsetofPPCGuestState( guest_VSR0 );
4075 else
4076 return offsetofPPCGuestState( guest_ACC_0_r0 );
4079 static void putACC( UInt index, UInt reg, IRExpr* src, Bool ACC_mapped_on_VSR)
4082 stmt( IRStmt_Put( base_acc_addr( ACC_mapped_on_VSR )
4083 + ACC_offset( index, reg), src ) );
4086 static IRExpr* /* :: Ity_V128 */ getACC ( UInt index, UInt reg,
4087 Bool ACC_mapped_on_VSR)
4089 vassert(index < 8);
4090 vassert(reg < 4);
4092 return IRExpr_Get( base_acc_addr( ACC_mapped_on_VSR )
4093 + ACC_offset( index, reg), Ity_V128 );
4097 /*------------------------------------------------------------*/
4098 /* Helpers for VSX instructions that do floating point
4099 * operations and need to determine if a src contains a
4100 * special FP value.
4102 *------------------------------------------------------------*/
/* Fraction (mantissa) extraction for 64- and 32-bit FP values. */
4104 #define NONZERO_FRAC_MASK 0x000fffffffffffffULL
4105 #define FP_FRAC_PART(x) binop( Iop_And64, \
4106 mkexpr( x ), \
4107 mkU64( NONZERO_FRAC_MASK ) )
4109 #define NONZERO_FRAC_MASK32 0x007fffffULL
4110 #define FP_FRAC_PART32(x) binop( Iop_And32, \
4111 mkexpr( x ), \
4112 mkU32( NONZERO_FRAC_MASK32 ) )
4114 // Returns exponent part of floating point src as I32
4115 static IRExpr * fp_exp_part( IRType size, IRTemp src )
4117 IRExpr *shift_by, *mask, *tsrc;
4119 vassert( ( size == Ity_I16 ) || ( size == Ity_I32 )
4120 || ( size == Ity_I64 ) );
4122 if( size == Ity_I16 ) {
4123 /* The 16-bit floating point value is in the lower 16-bits
4124 * of the 32-bit input value.
4126 tsrc = mkexpr( src );
4127 mask = mkU32( 0x1F );
4128 shift_by = mkU8( 10 );
4130 } else if( size == Ity_I32 ) {
4131 tsrc = mkexpr( src );
4132 mask = mkU32( 0xFF );
4133 shift_by = mkU8( 23 );
4135 } else if( size == Ity_I64 ) {
4136 tsrc = unop( Iop_64HIto32, mkexpr( src ) );
4137 mask = mkU32( 0x7FF );
4138 shift_by = mkU8( 52 - 32 );
4140 } else {
4141 /*NOTREACHED*/
4142 vassert(0); // Stops gcc complaining at "-Og"
4145 return binop( Iop_And32, binop( Iop_Shr32, tsrc, shift_by ), mask );
4148 /* The following functions check the floating point value to see if it
4149 is zero, infinity, NaN, Normalized, Denormalized.
4151 /* 16-bit floating point number is stored in the lower 16-bits of 32-bit value */
4152 #define I16_EXP_MASK 0x7C00
4153 #define I16_FRACTION_MASK 0x03FF
4154 #define I16_MSB_FRACTION_MASK 0x0200
4155 #define I32_EXP_MASK 0x7F800000
4156 #define I32_FRACTION_MASK 0x007FFFFF
4157 #define I32_MSB_FRACTION_MASK 0x00400000
4158 #define I32_SIGN_MASK 0x80000000
4159 #define I64_EXP_MASK 0x7FF0000000000000ULL
4160 #define I64_FRACTION_MASK 0x000FFFFFFFFFFFFFULL
4161 #define I64_MSB_FRACTION_MASK 0x0008000000000000ULL
4162 #define V128_EXP_MASK 0x7FFF000000000000ULL
4163 #define V128_FRACTION_MASK 0x0000FFFFFFFFFFFFULL /* upper 64-bit fractional mask */
4164 #define V128_MSB_FRACTION_MASK 0x0000800000000000ULL /* upper 64-bit fractional mask */
/* Allocate and initialize the exponent mask, fraction mask, MSB-of-fraction
   mask and zero constant temps appropriate for the given floating point
   layout (Ity_I16/I32/I64/V128).  The V128 case uses 64-bit temps since the
   128-bit value is processed as upper/lower 64-bit halves. */
4166 void setup_value_check_args( IRType size, IRTemp *exp_mask, IRTemp *frac_mask,
4167 IRTemp *msb_frac_mask, IRTemp *zero );
4169 void setup_value_check_args( IRType size, IRTemp *exp_mask, IRTemp *frac_mask,
4170 IRTemp *msb_frac_mask, IRTemp *zero ) {
4172 vassert( ( size == Ity_I16 ) || ( size == Ity_I32 )
4173 || ( size == Ity_I64 ) || ( size == Ity_V128 ) );
4175 if( size == Ity_I16 ) {
4176 /* The 16-bit floating point value is in the lower 16-bits of
4177 the 32-bit input value */
4178 *frac_mask = newTemp( Ity_I32 );
4179 *msb_frac_mask = newTemp( Ity_I32 );
4180 *exp_mask = newTemp( Ity_I32 );
4181 *zero = newTemp( Ity_I32 );
4182 assign( *exp_mask, mkU32( I16_EXP_MASK ) );
4183 assign( *frac_mask, mkU32( I16_FRACTION_MASK ) );
4184 assign( *msb_frac_mask, mkU32( I16_MSB_FRACTION_MASK ) );
4185 assign( *zero, mkU32( 0 ) );
4187 } else if( size == Ity_I32 ) {
4188 *frac_mask = newTemp( Ity_I32 );
4189 *msb_frac_mask = newTemp( Ity_I32 );
4190 *exp_mask = newTemp( Ity_I32 );
4191 *zero = newTemp( Ity_I32 );
4192 assign( *exp_mask, mkU32( I32_EXP_MASK ) );
4193 assign( *frac_mask, mkU32( I32_FRACTION_MASK ) );
4194 assign( *msb_frac_mask, mkU32( I32_MSB_FRACTION_MASK ) );
4195 assign( *zero, mkU32( 0 ) );
4197 } else if( size == Ity_I64 ) {
4198 *frac_mask = newTemp( Ity_I64 );
4199 *msb_frac_mask = newTemp( Ity_I64 );
4200 *exp_mask = newTemp( Ity_I64 );
4201 *zero = newTemp( Ity_I64 );
4202 assign( *exp_mask, mkU64( I64_EXP_MASK ) );
4203 assign( *frac_mask, mkU64( I64_FRACTION_MASK ) );
4204 assign( *msb_frac_mask, mkU64( I64_MSB_FRACTION_MASK ) );
4205 assign( *zero, mkU64( 0 ) );
4207 } else {
4208 /* V128 is converted to upper and lower 64 bit values, */
4209 /* uses 64-bit operators and temps */
4210 *frac_mask = newTemp( Ity_I64 );
4211 *msb_frac_mask = newTemp( Ity_I64 );
4212 *exp_mask = newTemp( Ity_I64 );
4213 *zero = newTemp( Ity_I64 );
4214 assign( *exp_mask, mkU64( V128_EXP_MASK ) );
4215 /* upper 64-bit fractional mask */
4216 assign( *frac_mask, mkU64( V128_FRACTION_MASK ) );
4217 assign( *msb_frac_mask, mkU64( V128_MSB_FRACTION_MASK ) );
4218 assign( *zero, mkU64( 0 ) );
4222 /* Helper function for the various functions which check the value of
4223 the floating point value.
Returns an Ity_I1 which is true iff (src AND exp_mask) == exp_val. */
4225 static IRExpr * exponent_compare( IRType size, IRTemp src,
4226 IRTemp exp_mask, IRExpr *exp_val )
4228 IROp opAND, opCmpEQ;
4230 if( ( size == Ity_I16 ) || ( size == Ity_I32 ) ) {
4231 /* The 16-bit floating point value is in the lower 16-bits of
4232 the 32-bit input value */
4233 opAND = Iop_And32;
4234 opCmpEQ = Iop_CmpEQ32;
4236 } else {
4237 opAND = Iop_And64;
4238 opCmpEQ = Iop_CmpEQ64;
4241 if( size == Ity_V128 ) {
/* Quad precision: sign/exp/fraction-MSBs live in the upper 64 bits. */
4242 return binop( opCmpEQ,
4243 binop ( opAND,
4244 unop( Iop_V128HIto64, mkexpr( src ) ),
4245 mkexpr( exp_mask ) ),
4246 exp_val );
4248 } else if( ( size == Ity_I16 ) || ( size == Ity_I32 ) ) {
4249 return binop( opCmpEQ,
4250 binop ( opAND, mkexpr( src ), mkexpr( exp_mask ) ),
4251 exp_val );
4252 } else {
4253 /* 64-bit operands */
4255 if (mode64) {
4256 return binop( opCmpEQ,
4257 binop ( opAND, mkexpr( src ), mkexpr( exp_mask ) ),
4258 exp_val );
4259 } else {
4260 /* No support for 64-bit compares in 32-bit mode, need to do upper
4261 * and lower parts using 32-bit compare operators.
4263 return
4264 mkAND1( binop( Iop_CmpEQ32,
4265 binop ( Iop_And32,
4266 unop(Iop_64HIto32, mkexpr( src ) ),
4267 unop(Iop_64HIto32, mkexpr( exp_mask ) ) ),
4268 unop(Iop_64HIto32, exp_val ) ),
4269 binop( Iop_CmpEQ32,
4270 binop ( Iop_And32,
4271 unop(Iop_64to32, mkexpr( src ) ),
4272 unop(Iop_64to32, mkexpr( exp_mask ) ) ),
4273 unop(Iop_64to32, exp_val ) ) );
/* Returns an Ity_I1 which is true iff (src AND frac_mask) == zero,
   i.e. the (masked) fraction field of src is zero. */
4278 static IRExpr *fractional_part_compare( IRType size, IRTemp src,
4279 IRTemp frac_mask, IRExpr *zero )
4281 IROp opAND, opCmpEQ;
4283 if( ( size == Ity_I16 ) || ( size == Ity_I32 ) ) {
4284 /* The 16-bit floating point value is in the lower 16-bits of
4285 the 32-bit input value */
4286 opAND = Iop_And32;
4287 opCmpEQ = Iop_CmpEQ32;
4289 } else {
4290 opAND = Iop_And64;
4291 opCmpEQ = Iop_CmpEQ64;
4294 if( size == Ity_V128 ) {
4295 /* 128-bit, note we only care if the fractional part is zero so take upper
4296 52-bits of fractional part and lower 64-bits and OR them together and test
4297 for zero. This keeps the temp variables and operators all 64-bit.
4299 return binop( opCmpEQ,
4300 binop( Iop_Or64,
4301 binop( opAND,
4302 unop( Iop_V128HIto64, mkexpr( src ) ),
4303 mkexpr( frac_mask ) ),
4304 unop( Iop_V128to64, mkexpr( src ) ) ),
4305 zero );
4307 } else if( ( size == Ity_I16 ) || ( size == Ity_I32 ) ) {
4308 return binop( opCmpEQ,
4309 binop( opAND, mkexpr( src ), mkexpr( frac_mask ) ),
4310 zero );
4311 } else {
4312 if (mode64) {
4313 return binop( opCmpEQ,
4314 binop( opAND, mkexpr( src ), mkexpr( frac_mask ) ),
4315 zero );
4316 } else {
4317 /* No support for 64-bit compares in 32-bit mode, need to do upper
4318 * and lower parts using 32-bit compare operators.
4320 return
4321 mkAND1( binop( Iop_CmpEQ32,
4322 binop ( Iop_And32,
4323 unop(Iop_64HIto32, mkexpr( src ) ),
4324 unop(Iop_64HIto32, mkexpr( frac_mask ) ) ),
4325 mkU32 ( 0 ) ),
4326 binop( Iop_CmpEQ32,
4327 binop ( Iop_And32,
4328 unop(Iop_64to32, mkexpr( src ) ),
4329 unop(Iop_64to32, mkexpr( frac_mask ) ) ),
4330 mkU32 ( 0 ) ) );
4335 // Infinity: exp has all bits set, and fraction is zero; s = 0/1
// Returns Ity_I1, true iff src (of the given IEEE layout) is +/- infinity.
4336 static IRExpr * is_Inf( IRType size, IRTemp src )
4338 IRExpr *max_exp, *zero_frac;
4339 IRTemp exp_mask, frac_mask, msb_frac_mask, zero;
4341 setup_value_check_args( size, &exp_mask, &frac_mask, &msb_frac_mask,
4342 &zero );
4344 /* check exponent is all ones, i.e. (exp AND exp_mask) = exp_mask */
4345 max_exp = exponent_compare( size, src, exp_mask, mkexpr( exp_mask ) );
4347 /* check fractional part is all zeros */
4348 zero_frac = fractional_part_compare( size, src, frac_mask, mkexpr( zero ) );
4350 return mkAND1( max_exp, zero_frac );
4353 // Zero: exp is zero and fraction is zero; s = 0/1
// Returns Ity_I1, true iff src (of the given IEEE layout) is +/- zero.
4354 static IRExpr * is_Zero( IRType size, IRTemp src )
4356 IRExpr *zero_exp, *zero_frac;
4357 IRTemp exp_mask, frac_mask, msb_frac_mask, zero;
4359 setup_value_check_args( size, &exp_mask, &frac_mask, &msb_frac_mask,
4360 &zero );
4362 /* check the exponent is all zeros, i.e. (exp AND exp_mask) = zero */
4363 zero_exp = exponent_compare( size, src, exp_mask, mkexpr( zero ) );
4365 /* check fractional part is all zeros */
4366 zero_frac = fractional_part_compare( size, src, frac_mask, mkexpr( zero ) );
4368 return mkAND1( zero_exp, zero_frac );
4371 /* SNAN: s = 1/0; exp all 1's; fraction is nonzero, with highest bit '1'
4372 * QNAN: s = 1/0; exp all 1's; fraction is nonzero, with highest bit '0'
Returns Ity_I1, true iff src is a NaN (signalling or quiet). */
4374 static IRExpr * is_NaN( IRType size, IRTemp src )
4376 IRExpr *max_exp, *not_zero_frac;
4377 IRTemp exp_mask, frac_mask, msb_frac_mask, zero;
4379 setup_value_check_args( size, &exp_mask, &frac_mask, &msb_frac_mask,
4380 &zero );
4382 /* check exponent is all ones, i.e. (exp AND exp_mask) = exp_mask */
4383 max_exp = exponent_compare( size, src, exp_mask, mkexpr( exp_mask ) );
4385 /* check fractional part is not zero */
4386 not_zero_frac = unop( Iop_Not1,
4387 fractional_part_compare( size, src, frac_mask,
4388 mkexpr( zero ) ) );
4390 return mkAND1( max_exp, not_zero_frac );
/* Returns Ity_I1, true iff src is a signalling NaN: max exponent,
   non-zero fraction with the most significant fraction bit clear.
   NOTE(review): this matches PPC convention (MSB of fraction = quiet bit). */
4393 static IRExpr * is_sNaN( IRType size, IRTemp src )
4395 IRExpr *max_exp, *not_zero_frac, *msb_zero;
4396 IRTemp exp_mask, frac_mask, msb_frac_mask, zero;
4398 setup_value_check_args( size, &exp_mask, &frac_mask, &msb_frac_mask,
4399 &zero );
4401 /* check exponent is all ones, i.e. (exp AND exp_mask) = exp_mask */
4402 max_exp = exponent_compare( size, src, exp_mask, mkexpr( exp_mask ) );
4404 /* Most significant fractional bit is zero for sNaN */
4405 msb_zero = fractional_part_compare ( size, src, msb_frac_mask,
4406 mkexpr( zero ) );
4408 /* check fractional part is not zero */
4409 not_zero_frac = unop( Iop_Not1,
4410 fractional_part_compare( size, src, frac_mask,
4411 mkexpr( zero ) ) );
4413 return mkAND1( msb_zero, mkAND1( max_exp, not_zero_frac ) );
4416 /* Denormalized number has a zero exponent and non-zero fraction. */
4417 static IRExpr * is_Denorm( IRType size, IRTemp src )
4419 IRExpr *zero_exp, *not_zero_frac;
4420 IRTemp exp_mask, frac_mask, msb_frac_mask, zero;
4422 setup_value_check_args( size, &exp_mask, &frac_mask, &msb_frac_mask,
4423 &zero );
4425 /* check exponent is all zeros */
4426 zero_exp = exponent_compare( size, src, exp_mask, mkexpr( zero ) );
4428 /* check fractional part is not zero */
4429 not_zero_frac = unop( Iop_Not1,
4430 fractional_part_compare( size, src, frac_mask,
4431 mkexpr( zero ) ) );
4433 return mkAND1( zero_exp, not_zero_frac );
4436 static IRExpr * is_Zero_Vector( IRType element_size, IRExpr *src )
4438 /* Check whether the elements of a 128-bit floating point vector, with the
4439 given element size, are zero. Return 1's in the elements of the result
vector which are zero values.  Only Ity_I32 elements are supported. */
4440 IRTemp exp_maskV128 = newTemp( Ity_V128 );
4441 IRTemp exp_zeroV128 = newTemp( Ity_V128 );
4442 IRTemp frac_maskV128 = newTemp( Ity_V128 );
4443 IRTemp frac_zeroV128 = newTemp( Ity_V128 );
4444 IRTemp zeroV128 = newTemp( Ity_V128 );
4446 assign( zeroV128, mkV128( 0 ) );
4448 if ( element_size == Ity_I32 ) {
4449 assign( exp_maskV128, unop( Iop_Dup32x4, mkU32( I32_EXP_MASK ) ) );
4450 assign( frac_maskV128, unop( Iop_Dup32x4, mkU32( I32_FRACTION_MASK ) ) );
4452 } else
4453 vpanic("ERROR, is_Zero_Vector: Unknown input size");
4455 /* CmpEQ32x4 returns all 1's in elements where comparison is true */
4456 assign( exp_zeroV128,
4457 binop( Iop_CmpEQ32x4,
4458 binop( Iop_AndV128,
4459 mkexpr( exp_maskV128 ), src ),
4460 mkexpr( zeroV128 ) ) );
4462 assign( frac_zeroV128,
4463 binop( Iop_CmpEQ32x4,
4464 binop( Iop_AndV128,
4465 mkexpr( frac_maskV128 ), src ),
4466 mkexpr( zeroV128 ) ) );
4468 return binop( Iop_AndV128, mkexpr( exp_zeroV128 ),
4469 mkexpr( frac_zeroV128 ) );
4472 static IRExpr * Abs_Zero_Vector( IRType element_size, IRExpr *src )
4473 /* Vector of four 32-bit elements, convert any negative zeros to
4474 positive zeros.  Non-zero elements pass through unchanged. */
4476 IRTemp result = newTemp( Ity_V128 );
4478 if ( element_size == Ity_I32 ) {
/* is_Zero_Vector gives all-1 lanes for zero elements; ANDing with its
   complement... NOTE(review): this clears ALL bits of zero lanes, which
   maps both +0 and -0 to +0 as intended. */
4479 assign( result, binop( Iop_AndV128,
4480 src,
4481 unop( Iop_NotV128,
4482 is_Zero_Vector( element_size, src) ) ) );
4483 } else
4484 vex_printf("ERROR, Abs_Zero_Vector: Unknown input size\n");
4486 return mkexpr( result );
4489 static IRExpr * is_Denorm_Vector( IRType element_size, IRExpr *src )
4491 /* Check elements of a 128-bit floating point vector, with element size
4492 element_size, are Denorm. Return 1's in the elements of the vector
4493 which are denormalized values.  Only Ity_I32 elements are supported. */
4494 IRTemp exp_maskV128 = newTemp( Ity_V128 );
4495 IRTemp exp_zeroV128 = newTemp( Ity_V128 );
4496 IRTemp frac_maskV128 = newTemp( Ity_V128 );
4497 IRTemp frac_nonzeroV128 = newTemp( Ity_V128 );
4498 IRTemp zeroV128 = newTemp( Ity_V128 );
4500 assign( zeroV128, mkV128(0 ) );
4502 if ( element_size == Ity_I32 ) {
4503 assign( exp_maskV128, unop( Iop_Dup32x4, mkU32( I32_EXP_MASK ) ) );
4504 assign( frac_maskV128, unop( Iop_Dup32x4, mkU32( I32_FRACTION_MASK ) ) );
4506 } else
4507 vpanic("ERROR, is_Denorm_Vector: Unknown input size");
4509 /* CmpEQ32x4 returns all 1's in elements where comparison is true */
4510 assign( exp_zeroV128,
4511 binop( Iop_CmpEQ32x4,
4512 binop( Iop_AndV128,
4513 mkexpr( exp_maskV128 ), src ),
4514 mkexpr( zeroV128 ) ) );
4516 assign( frac_nonzeroV128,
4517 unop( Iop_NotV128,
4518 binop( Iop_CmpEQ32x4,
4519 binop( Iop_AndV128,
4520 mkexpr( frac_maskV128 ), src ),
4521 mkexpr( zeroV128 ) ) ) );
4523 return binop( Iop_AndV128, mkexpr( exp_zeroV128 ),
4524 mkexpr( frac_nonzeroV128 ) );
/* Per-element NaN test of a 128-bit vector of floats; returns all-1 lanes
   where the element is a NaN.  Only Ity_I32 elements are supported. */
4527 static IRExpr * is_NaN_Vector( IRType element_size, IRExpr *src )
4529 IRTemp max_expV128 = newTemp( Ity_V128 );
4530 IRTemp not_zero_fracV128 = newTemp( Ity_V128 );
4531 IRTemp zeroV128 = newTemp( Ity_V128 );
4532 IRTemp exp_maskV128 = newTemp( Ity_V128 );
4533 IRTemp frac_maskV128 = newTemp( Ity_V128 );
4534 IROp opCmpEQ = Iop_INVALID;
4536 assign( zeroV128, mkV128( 0 ) );
4538 if ( element_size == Ity_I32 ) {
4539 assign( exp_maskV128, unop( Iop_Dup32x4, mkU32( I32_EXP_MASK ) ) );
4540 assign( frac_maskV128, unop( Iop_Dup32x4, mkU32( I32_FRACTION_MASK ) ) );
4541 opCmpEQ = Iop_CmpEQ32x4;
4543 } else
4544 vpanic("ERROR, is_NaN_Vector: Unknown input size");
4546 /* check exponent is all ones, i.e. (exp AND exp_mask) = exp_mask */
4547 assign( max_expV128,
4548 binop( opCmpEQ,
4549 binop( Iop_AndV128, src, mkexpr( exp_maskV128 ) ),
4550 mkexpr( exp_maskV128 ) ) );
4552 /* check fractional part is not zero */
4553 assign( not_zero_fracV128,
4554 unop( Iop_NotV128,
4555 binop( opCmpEQ,
4556 binop( Iop_AndV128, src, mkexpr( frac_maskV128 ) ),
4557 mkexpr( zeroV128 ) ) ) );
4559 return binop( Iop_AndV128, mkexpr( max_expV128 ),
4560 mkexpr( not_zero_fracV128 ) );
4563 #if 0
4564 /* Normalized number has exponent between 1 and max_exp - 1, or in other words
4565 the exponent is not zero and not equal to the max exponent value. */
4566 Currently not needed since generate_C_FPCC is now done with a C helper.
4567 Keep it around, might be useful in the future.
4568 static IRExpr * is_Norm( IRType size, IRTemp src )
4570 IRExpr *not_zero_exp, *not_max_exp;
4571 IRTemp exp_mask, zero;
4573 vassert( ( size == Ity_I16 ) || ( size == Ity_I32 )
4574 || ( size == Ity_I64 ) || ( size == Ity_V128 ) );
4576 if( size == Ity_I16 ) {
4577 /* The 16-bit floating point value is in the lower 16-bits of
4578 the 32-bit input value */
4579 exp_mask = newTemp( Ity_I32 );
4580 zero = newTemp( Ity_I32 );
4581 assign( exp_mask, mkU32( I16_EXP_MASK ) );
4582 assign( zero, mkU32( 0 ) );
4584 } else if( size == Ity_I32 ) {
4585 exp_mask = newTemp( Ity_I32 );
4586 zero = newTemp( Ity_I32 );
4587 assign( exp_mask, mkU32( I32_EXP_MASK ) );
4588 assign( zero, mkU32( 0 ) );
4590 } else if( size == Ity_I64 ) {
4591 exp_mask = newTemp( Ity_I64 );
4592 zero = newTemp( Ity_I64 );
4593 assign( exp_mask, mkU64( I64_EXP_MASK ) );
4594 assign( zero, mkU64( 0 ) );
4596 } else {
4597 /* V128 is converted to upper and lower 64 bit values, */
4598 /* uses 64-bit operators and temps */
4599 exp_mask = newTemp( Ity_I64 );
4600 zero = newTemp( Ity_I64 );
4601 assign( exp_mask, mkU64( V128_EXP_MASK ) );
4602 assign( zero, mkU64( 0 ) );
4605 not_zero_exp = unop( Iop_Not1,
4606 exponent_compare( size, src,
4607 exp_mask, mkexpr( zero ) ) );
4608 not_max_exp = unop( Iop_Not1,
4609 exponent_compare( size, src,
4610 exp_mask, mkexpr( exp_mask ) ) );
4612 return mkAND1( not_zero_exp, not_max_exp );
4614 #endif
/* Compute and store the FPSCR C and FPCC fields for 'src' via a clean
   helper call.  NOTE(review): the Ity_I16/I32/I64 branches emit identical
   calls; the helper distinguishes the cases by the 'size' argument. */
4616 static void generate_store_FPRF( IRType size, IRTemp src,
4617 const VexAbiInfo* vbi )
4620 /* This function was originally written using IR code. It has been
4621 * replaced with a clean helper due to the large amount of IR code
4622 * needed by this function.
4625 IRTemp tmp = newTemp( Ity_I64 );
4626 vassert( ( size == Ity_I16 ) || ( size == Ity_I32 )
4627 || ( size == Ity_I64 ) || ( size == Ity_F128 ) );
4629 vassert( ( typeOfIRExpr(irsb->tyenv, mkexpr( src ) ) == Ity_I32 )
4630 || ( typeOfIRExpr(irsb->tyenv, mkexpr( src ) ) == Ity_I64 )
4631 || ( typeOfIRExpr(irsb->tyenv, mkexpr( src ) ) == Ity_F128 ) );
4633 if( size == Ity_I16 ) {
4634 assign( tmp,
4635 mkIRExprCCall( Ity_I64, 0 /*regparms*/,
4636 "generate_store_C_FPCC_helper",
4637 fnptr_to_fnentry( vbi, &generate_C_FPCC_helper ),
4638 mkIRExprVec_3( mkU64( size ), mkU64( 0 ),
4639 mkexpr( src ) ) ) );
4640 } else if( size == Ity_I32 ) {
4641 assign( tmp,
4642 mkIRExprCCall( Ity_I64, 0 /*regparms*/,
4643 "generate_store_C_FPCC_helper",
4644 fnptr_to_fnentry( vbi, &generate_C_FPCC_helper ),
4645 mkIRExprVec_3( mkU64( size ), mkU64( 0 ),
4646 mkexpr( src ) ) ) );
4647 } else if( size == Ity_I64 ) {
4648 assign( tmp,
4649 mkIRExprCCall( Ity_I64, 0 /*regparms*/,
4650 "generate_store_C_FPCC_helper",
4651 fnptr_to_fnentry( vbi, &generate_C_FPCC_helper ),
4652 mkIRExprVec_3( mkU64( size ), mkU64( 0 ),
4653 mkexpr( src ) ) ) );
4654 } else if( size == Ity_F128 ) {
4655 assign( tmp,
4656 mkIRExprCCall( Ity_I64, 0 /*regparms*/,
4657 "generate_store_C_FPCC_helper",
4658 fnptr_to_fnentry( vbi, &generate_C_FPCC_helper ),
4659 mkIRExprVec_3( mkU64( size ),
4660 unop( Iop_ReinterpF64asI64,
4661 unop( Iop_F128HItoF64,
4662 mkexpr( src ) ) ),
4663 unop( Iop_ReinterpF64asI64,
4664 unop( Iop_F128LOtoF64,
4665 mkexpr( src ) ) ) ) ) );
4668 /* C is in the upper 32-bits, FPCC is in the lower 32-bits of the
4669 * value returned by the helper function
4671 putC( unop( Iop_64HIto32, mkexpr( tmp) ) );
4672 putFPCC( unop( Iop_64to32, mkexpr( tmp) ) );
4675 /* This function takes an Ity_I32 input argument interpreted
4676 as a single-precision floating point value. If src is a
4677 SNaN, it is changed to a QNaN and returned; otherwise,
4678 the original value is returned. */
4679 static IRExpr * handle_SNaN_to_QNaN_32(IRExpr * src)
4681 #define SNAN_MASK32 0x00400000
4682 IRTemp tmp = newTemp(Ity_I32);
4683 IRTemp mask = newTemp(Ity_I32);
4684 IRTemp is_SNAN = newTemp(Ity_I1);
4686 vassert( typeOfIRExpr(irsb->tyenv, src ) == Ity_I32 );
4687 assign(tmp, src);
4689 /* check if input is SNaN, if it is convert to QNaN */
4690 assign( is_SNAN,
4691 mkAND1( is_NaN( Ity_I32, tmp ),
4692 binop( Iop_CmpEQ32,
4693 binop( Iop_And32, mkexpr( tmp ),
4694 mkU32( SNAN_MASK32 ) ),
4695 mkU32( 0 ) ) ) );
4696 /* create mask with QNaN bit set to make it a QNaN if tmp is SNaN */
4697 assign ( mask, binop( Iop_And32,
4698 unop( Iop_1Sto32, mkexpr( is_SNAN ) ),
4699 mkU32( SNAN_MASK32 ) ) );
4700 return binop( Iop_Or32, mkexpr( mask ), mkexpr( tmp) );
4704 /* This helper function performs the negation part of operations of the form:
4705 * "Negate Multiply-<op>"
4706 * where "<op>" is either "Add" or "Sub".
4708 * This function takes one argument -- the floating point intermediate result (converted to
4709 * Ity_I64 via Iop_ReinterpF64asI64) that was obtained from the "Multiply-<op>" part of
4710 * the operation described above.
4712 static IRTemp getNegatedResult(IRTemp intermediateResult)
4714 ULong signbit_mask = 0x8000000000000000ULL;
4715 IRTemp signbit_32 = newTemp(Ity_I32);
4716 IRTemp resultantSignbit = newTemp(Ity_I1);
4717 IRTemp negatedResult = newTemp(Ity_I64);
4718 assign( signbit_32, binop( Iop_Shr32,
4719 unop( Iop_64HIto32,
4720 binop( Iop_And64, mkexpr( intermediateResult ),
4721 mkU64( signbit_mask ) ) ),
4722 mkU8( 31 ) ) );
4723 /* We negate the signbit if and only if the intermediate result from the
4724 * multiply-<op> was NOT a NaN. This is an XNOR predicate.
4726 assign( resultantSignbit,
4727 unop( Iop_Not1,
4728 binop( Iop_CmpEQ32,
4729 binop( Iop_Xor32,
4730 mkexpr( signbit_32 ),
4731 unop( Iop_1Uto32, is_NaN( Ity_I64,
4732 intermediateResult ) ) ),
4733 mkU32( 1 ) ) ) );
4735 assign( negatedResult,
4736 binop( Iop_Or64,
4737 binop( Iop_And64,
4738 mkexpr( intermediateResult ),
4739 mkU64( ~signbit_mask ) ),
4740 binop( Iop_32HLto64,
4741 binop( Iop_Shl32,
4742 unop( Iop_1Uto32, mkexpr( resultantSignbit ) ),
4743 mkU8( 31 ) ),
4744 mkU32( 0 ) ) ) );
4746 return negatedResult;
4749 /* This helper function performs the negation part of operations of the form:
4750 * "Negate Multiply-<op>"
4751 * where "<op>" is either "Add" or "Sub".
4753 * This function takes one argument -- the floating point intermediate result (converted to
4754 * Ity_I32 via Iop_ReinterpF32asI32) that was obtained from the "Multiply-<op>" part of
4755 * the operation described above.
4757 static IRTemp getNegatedResult_32(IRTemp intermediateResult)
4759 UInt signbit_mask = 0x80000000;
4760 IRTemp signbit_32 = newTemp(Ity_I32);
4761 IRTemp resultantSignbit = newTemp(Ity_I1);
4762 IRTemp negatedResult = newTemp(Ity_I32);
4763 assign( signbit_32, binop( Iop_Shr32,
4764 binop( Iop_And32, mkexpr( intermediateResult ),
4765 mkU32( signbit_mask ) ),
4766 mkU8( 31 ) ) );
4767 /* We negate the signbit if and only if the intermediate result from the
4768 * multiply-<op> was NOT a NaN. This is an XNOR predicate.
4770 assign( resultantSignbit,
4771 unop( Iop_Not1,
4772 binop( Iop_CmpEQ32,
4773 binop( Iop_Xor32,
4774 mkexpr( signbit_32 ),
4775 unop( Iop_1Uto32, is_NaN( Ity_I32,
4776 intermediateResult ) ) ),
4777 mkU32( 1 ) ) ) );
4779 assign( negatedResult,
4780 binop( Iop_Or32,
4781 binop( Iop_And32,
4782 mkexpr( intermediateResult ),
4783 mkU32( ~signbit_mask ) ),
4784 binop( Iop_Shl32,
4785 unop( Iop_1Uto32, mkexpr( resultantSignbit ) ),
4786 mkU8( 31 ) ) ) );
4788 return negatedResult;
4791 static IRExpr* negate_Vector ( IRType element_size, IRExpr* value )
4793 /* This function takes a vector of floats. If the value is
4794 not a NaN, the value is negated. */
4796 IRTemp not_nan_mask = newTemp( Ity_V128 );
4797 IRTemp sign_maskV128 = newTemp( Ity_V128 );
4799 if ( element_size == Ity_I32 ) {
4800 assign( sign_maskV128, unop( Iop_Dup32x4, mkU32( I32_SIGN_MASK ) ) );
4802 } else
4803 vpanic("ERROR, negate_Vector: Unknown input size");
4805 /* Determine if vector elements are not a NaN, negate sign bit
4806 for non NaN elements */
4807 assign ( not_nan_mask,
4808 unop( Iop_NotV128, is_NaN_Vector( element_size, value ) ) );
4810 return binop( Iop_XorV128,
4811 binop( Iop_AndV128,
4812 mkexpr( sign_maskV128 ), mkexpr( not_nan_mask ) ),
4813 value );
4816 /* This function takes two quad_precision unsigned/signed integers of type
4817 V128 and return 1 (Ity_Bit) if src_A = src_B, 0 otherwise. */
4818 static IRExpr * Quad_precision_int_eq ( IRTemp src_A, IRTemp src_B )
/* Equal iff both the upper and lower 64-bit halves are equal. */
4820 return mkAND1( binop( Iop_CmpEQ64,
4821 unop( Iop_V128HIto64, mkexpr( src_A ) ),
4822 unop( Iop_V128HIto64, mkexpr( src_B ) ) ),
4823 binop( Iop_CmpEQ64,
4824 unop( Iop_V128to64, mkexpr( src_A ) ),
4825 unop( Iop_V128to64, mkexpr( src_B ) ) ) );
4828 /* This function takes two quad_precision unsigned integers of type
4829 V128 and return 1 if src_A > src_B, 0 otherwise. */
4830 static IRExpr * Quad_precision_uint_gt ( IRTemp src_A, IRTemp src_B )
4832 IRExpr * hi_eq = binop( Iop_CmpEQ64,
4833 unop( Iop_V128HIto64, mkexpr( src_A ) ),
4834 unop( Iop_V128HIto64, mkexpr( src_B ) ) );
4836 IRExpr * hi_gt = binop( Iop_CmpLT64U,
4837 unop( Iop_V128HIto64, mkexpr( src_B ) ),
4838 unop( Iop_V128HIto64, mkexpr( src_A ) ) );
4840 IRExpr * lo_gt = binop( Iop_CmpLT64U,
4841 unop( Iop_V128to64, mkexpr( src_B ) ),
4842 unop( Iop_V128to64, mkexpr( src_A ) ) );
/* A > B iff hi(A) > hi(B), or hi halves equal and lo(A) > lo(B). */
4844 return mkOR1( hi_gt, mkAND1( hi_eq, lo_gt ) );
4847 /* This function takes two quad_precision signed integers of type
4848 V128 and return 1 if src_A > src_B, 0 otherwise. */
4849 static IRExpr * Quad_precision_sint_gt ( IRTemp src_A, IRTemp src_B )
4852 IRExpr * hi_eq = binop( Iop_CmpEQ64,
4853 unop( Iop_V128HIto64, mkexpr( src_A ) ),
4854 unop( Iop_V128HIto64, mkexpr( src_B ) ) );
4856 IRExpr * lo_eq = binop( Iop_CmpEQ64,
4857 unop( Iop_V128to64, mkexpr( src_A ) ),
4858 unop( Iop_V128to64, mkexpr( src_B ) ) );
4860 IRExpr * hi_gt = binop( Iop_CmpLT64S,
4861 unop( Iop_V128HIto64, mkexpr( src_B ) ),
4862 unop( Iop_V128HIto64, mkexpr( src_A ) ) );
4864 /* If srcA and srcB are positive and srcA > srcB then lo_gteq = 1.
4865 If srcA and srcB are negative and srcA > srcB, then the unsigned value
4866 of the lower 64-bits are 2's complemented values means lower bits of srcB
4867 must be less than the lower bits of srcA.
4869 srcA = 8000012380000123 7000000080000000 => (smaller/less negative)
4870 - 7FFFFEDC7FFFFEDD 8FFFFFFF7FFFFFFF
4871 srcB = 8000012380000123 8000012380000123 =>
4872 - 7FFFFEDC7FFFFEDD 7FFFFEDC7FFFFEDD
4874 IRExpr * lo_gteq = binop( Iop_CmpLT64U,
4875 unop( Iop_V128to64, mkexpr( src_B ) ),
4876 unop( Iop_V128to64, mkexpr( src_A ) ) );
4878 /* If hi is eq, then lower must be GT and not equal. */
4879 return mkOR1( hi_gt, mkAND1( hi_eq, mkAND1( lo_gteq, mkNOT1 ( lo_eq ) ) ) );
4882 /* This function takes two quad_precision floating point numbers of type
4883 V128 and return 1 if src_A > src_B, 0 otherwise. */
4884 static IRExpr * Quad_precision_gt ( IRTemp src_A, IRTemp src_B )
4886 #define FRAC_MASK64Hi 0x0000ffffffffffffULL
4887 #define MASK 0x7FFFFFFFFFFFFFFFULL /* exclude sign bit in upper 64 bits */
4888 #define EXP_MASK 0x7fff
4890 IRType ty = Ity_I64;
4891 IRTemp sign_A = newTemp( ty );
4892 IRTemp sign_B = newTemp( ty );
4893 IRTemp exp_A = newTemp( ty );
4894 IRTemp exp_B = newTemp( ty );
4895 IRTemp frac_A_hi = newTemp( ty );
4896 IRTemp frac_B_hi = newTemp( ty );
4897 IRTemp frac_A_lo = newTemp( ty );
4898 IRTemp frac_B_lo = newTemp( ty );
4901 /* extract signs, exponents, and fractional parts so they can be compared */
4902 assign( sign_A, binop( Iop_Shr64,
4903 unop( Iop_V128HIto64, mkexpr( src_A ) ),
4904 mkU8( 63 ) ) );
4905 assign( sign_B, binop( Iop_Shr64,
4906 unop( Iop_V128HIto64, mkexpr( src_B ) ),
4907 mkU8( 63 ) ) );
4908 assign( exp_A, binop( Iop_And64,
4909 binop( Iop_Shr64,
4910 unop( Iop_V128HIto64, mkexpr( src_A ) ),
4911 mkU8( 48 ) ),
4912 mkU64( EXP_MASK ) ) );
4913 assign( exp_B, binop( Iop_And64,
4914 binop( Iop_Shr64,
4915 unop( Iop_V128HIto64, mkexpr( src_B ) ),
4916 mkU8( 48 ) ),
4917 mkU64( EXP_MASK ) ) );
4918 assign( frac_A_hi, binop( Iop_And64,
4919 unop( Iop_V128HIto64, mkexpr( src_A ) ),
4920 mkU64( FRAC_MASK64Hi ) ) );
4921 assign( frac_B_hi, binop( Iop_And64,
4922 unop( Iop_V128HIto64, mkexpr( src_B ) ),
4923 mkU64( FRAC_MASK64Hi ) ) );
4924 assign( frac_A_lo, unop( Iop_V128to64, mkexpr( src_A ) ) );
4925 assign( frac_B_lo, unop( Iop_V128to64, mkexpr( src_B ) ) );
/* +0 and -0 compare equal; detect zero operands (sign bit masked off). */
4927 IRExpr * A_zero = mkAND1( binop( Iop_CmpEQ64,
4928 binop( Iop_And64,
4929 unop( Iop_V128HIto64,
4930 mkexpr( src_A ) ),
4931 mkU64( MASK ) ),
4932 mkU64( 0 ) ),
4933 binop( Iop_CmpEQ64,
4934 unop( Iop_V128to64, mkexpr( src_A ) ),
4935 mkU64( 0 ) ) );
4936 IRExpr * B_zero = mkAND1( binop( Iop_CmpEQ64,
4937 binop( Iop_And64,
4938 unop( Iop_V128HIto64,
4939 mkexpr( src_B ) ),
4940 mkU64( MASK ) ),
4941 mkU64( 0 ) ),
4942 binop( Iop_CmpEQ64,
4943 unop( Iop_V128to64, mkexpr( src_B ) ),
4944 mkU64( 0 ) ) );
4945 IRExpr * A_B_zero = mkAND1( A_zero, B_zero );
4947 /* Compare numbers */
4948 IRExpr * both_pos = mkAND1( binop( Iop_CmpEQ64, mkexpr( sign_A ),
4949 mkU64( 0 ) ),
4950 binop( Iop_CmpEQ64, mkexpr( sign_B ),
4951 mkU64( 0 ) ) );
4952 IRExpr * both_neg = mkAND1( binop( Iop_CmpEQ64, mkexpr( sign_A ),
4953 mkU64( 1 ) ),
4954 binop( Iop_CmpEQ64, mkexpr( sign_B ),
4955 mkU64( 1 ) ) );
4956 IRExpr * sign_eq = binop( Iop_CmpEQ64, mkexpr( sign_A ), mkexpr( sign_B ) );
4957 IRExpr * sign_gt = binop( Iop_CmpLT64U, mkexpr( sign_A ),
4958 mkexpr( sign_B ) ); /* A pos, B neg */
4960 IRExpr * exp_eq = binop( Iop_CmpEQ64, mkexpr( exp_A ), mkexpr( exp_B ) );
4961 IRExpr * exp_gt = binop( Iop_CmpLT64U, mkexpr( exp_B ), mkexpr( exp_A ) );
4962 IRExpr * exp_lt = binop( Iop_CmpLT64U, mkexpr( exp_A ), mkexpr( exp_B ) );
4964 IRExpr * frac_hi_eq = binop( Iop_CmpEQ64, mkexpr( frac_A_hi),
4965 mkexpr( frac_B_hi ) );
4966 IRExpr * frac_hi_gt = binop( Iop_CmpLT64U, mkexpr( frac_B_hi ),
4967 mkexpr( frac_A_hi ) );
4968 IRExpr * frac_hi_lt = binop( Iop_CmpLT64U, mkexpr( frac_A_hi ),
4969 mkexpr( frac_B_hi ) );
4971 IRExpr * frac_lo_gt = binop( Iop_CmpLT64U, mkexpr( frac_B_lo ),
4972 mkexpr( frac_A_lo ) );
4973 IRExpr * frac_lo_lt = binop( Iop_CmpLT64U, mkexpr( frac_A_lo ),
4974 mkexpr( frac_B_lo ) );
4976 /* src_A and src_B both positive */
4977 IRExpr *pos_cmp = mkOR1( exp_gt,
4978 mkAND1( exp_eq,
4979 mkOR1( frac_hi_gt,
4980 mkAND1( frac_hi_eq, frac_lo_gt ) )
4981 ) );
4983 /* src_A and src_B both negative */
4984 IRExpr *neg_cmp = mkOR1( exp_lt,
4985 mkAND1( exp_eq,
4986 mkOR1( frac_hi_lt,
4987 mkAND1( frac_hi_eq, frac_lo_lt ) )
4988 ) );
4990 /* Need to check the case where one value is a positive
4991 * zero and the other value is a negative zero
4993 return mkAND1( mkNOT1( A_B_zero ),
4994 mkOR1( sign_gt,
4995 mkAND1( sign_eq,
4996 mkOR1( mkAND1( both_pos, pos_cmp ),
4997 mkAND1( both_neg, neg_cmp ) ) ) ) );
5000 /*-----------------------------------------------------------
5001 * Helpers for VX instructions that work on National decimal values,
5002 * Zoned decimal values and BCD values.
5004 *------------------------------------------------------------*/
5005 static IRExpr * is_National_decimal (IRTemp src)
5007 /* The src is a 128-bit value containing a sign code in half word 7
5008 * and seven digits in halfwords 0 to 6 (IBM numbering). A valid
5009 * national decimal value has the following:
5010 * - the sign code must be 0x002B (positive) or 0x002D (negative)
5011 * - the digits must be in the range 0x0030 to 0x0039
Returns Ity_I1: true iff src is a valid national decimal value. */
5013 Int i;
5014 IRExpr * valid_pos_sign;
5015 IRExpr * valid_neg_sign;
5016 IRTemp valid_num[8];
5017 IRTemp digit[7];
5019 valid_pos_sign = binop( Iop_CmpEQ64,
5020 binop( Iop_And64,
5021 mkU64( 0xFFFF ),
5022 unop( Iop_V128to64, mkexpr( src ) ) ),
5023 mkU64( 0x002B ) );
5025 valid_neg_sign = binop( Iop_CmpEQ64,
5026 binop( Iop_And64,
5027 mkU64( 0xFFFF ),
5028 unop( Iop_V128to64, mkexpr( src ) ) ),
5029 mkU64( 0x002D ) );
5031 valid_num[0] = newTemp( Ity_I1 );
5032 digit[0] = newTemp( Ity_I64 );
5033 assign( valid_num[0], mkU1( 1 ) ); // Assume true to start
5035 for(i = 0; i < 7; i++) {
5036 valid_num[i+1] = newTemp( Ity_I1 );
5037 digit[i] = newTemp( Ity_I64 );
5038 assign( digit[i], binop( Iop_And64,
5039 unop( Iop_V128to64,
5040 binop( Iop_ShrV128,
5041 mkexpr( src ),
5042 mkU8( (7-i)*16 ) ) ),
5043 mkU64( 0xFFFF ) ) );
5045 assign( valid_num[i+1],
5046 mkAND1( mkexpr( valid_num[i] ),
5047 mkAND1( binop( Iop_CmpLE64U,
5048 mkexpr( digit[i] ),
5049 mkU64( 0x39 ) ),
5050 binop( Iop_CmpLE64U,
5051 mkU64( 0x30 ),
5052 mkexpr( digit[i] ) ) ) ) );
5055 return mkAND1( mkOR1( valid_pos_sign, valid_neg_sign),
5056 mkexpr( valid_num[7] ) );
5059 static IRExpr * is_Zoned_decimal (IRTemp src, UChar ps)
5061 /* The src is a 128-bit value containing a sign code the least significant
5062 * two bytes. The upper pairs of bytes contain digits. A valid Zoned
5063 * decimal value has the following:
5064 * - the sign code must be between 0x0X to 0xFX inclusive (X - don't care)
5065 * - bits [0:3] of each digit must be equal to 0x3
5066 * - bits [4:7] of each digit must be between 0x0 and 0x9
5068 * If ps = 0
5069 * Positive sign codes are: 0x0, 0x1, 0x2, 0x3, 0x8, 0x9, 0xA, 0xB
5070 * (note 0bX0XX XXXX is positive)
5072 * Negative sign codes are 0x4, 0x5, 0x6, 0x7, 0xC, 0xD, 0xE, 0xF
5073 * (note 0bX1XX XXXX is negative)
5075 * If ps = 1, then the sign code must be in the range 0xA to 0xF
5076 * Positive sign codes are: 0xA, 0xC, 0xE, 0xF
5078 * Negative sign codes are 0xB, 0xD
Returns Ity_I1: true iff src is a valid zoned decimal value. */
5080 Int i, mask_hi, mask_lo;
5081 IRExpr *valid_range;
5082 IRTemp valid_num[16];
5083 IRTemp digit[15];
5085 /* check the range of the sign value based on the value of ps */
5086 valid_range = mkOR1(
5087 mkAND1( binop( Iop_CmpEQ64,
5088 mkU64( 1 ),
5089 mkU64( ps ) ),
5090 mkAND1( binop( Iop_CmpLE64U,
5091 binop( Iop_And64,
5092 mkU64( 0xF0 ),
5093 unop( Iop_V128to64,
5094 mkexpr( src ) ) ),
5096 mkU64( 0xF0 ) ),
5097 binop( Iop_CmpLE64U,
5098 mkU64( 0xA0 ),
5099 binop( Iop_And64,
5100 mkU64( 0xF0 ),
5101 unop( Iop_V128to64,
5102 mkexpr( src ) ))))),
5103 binop( Iop_CmpEQ64,
5104 mkU64( 0 ),
5105 mkU64( ps ) ) );
5107 valid_num[0] = newTemp( Ity_I1 );
5108 assign( valid_num[0], mkU1( 1) ); // Assume true to start
5110 if (ps == 0) {
5111 mask_hi = 0x39;
5112 mask_lo = 0x30;
5113 } else {
5114 mask_hi = 0xF9;
5115 mask_lo = 0xF0;
5118 for(i = 0; i < 15; i++) {
5119 valid_num[i+1] = newTemp( Ity_I1 );
5120 digit[i] = newTemp( Ity_I64 );
5121 assign( digit[i], binop( Iop_And64,
5122 unop( Iop_V128to64,
5123 binop( Iop_ShrV128,
5124 mkexpr( src ),
5125 mkU8( (15-i)*8 ) ) ),
5126 mkU64( 0xFF ) ) );
5128 assign( valid_num[i+1],
5129 mkAND1( mkexpr( valid_num[i] ),
5130 mkAND1( binop( Iop_CmpLE64U,
5131 mkexpr( digit[i] ),
5132 mkU64( mask_hi ) ),
5133 binop( Iop_CmpLE64U,
5134 mkU64( mask_lo ),
5135 mkexpr( digit[i] ) ) ) ) );
5138 return mkAND1( valid_range, mkexpr( valid_num[15] ) );
static IRExpr * CmpGT128U ( IRExpr *src1, IRExpr *src2 )
{
   /* Unsigned compare of two 128-bit values; returns a 1-bit expression
      which is true iff src1 > src2. */
   IRExpr *pos_upper_gt, *pos_upper_eq, *pos_lower_gt;

   pos_upper_gt = binop( Iop_CmpLT64U,
                         unop( Iop_V128HIto64, src2 ),
                         unop( Iop_V128HIto64, src1 ) );
   pos_upper_eq = binop( Iop_CmpEQ64,
                         unop( Iop_V128HIto64, src1 ),
                         unop( Iop_V128HIto64, src2 ) );
   pos_lower_gt = binop( Iop_CmpLT64U,
                         unop( Iop_V128to64, src2),
                         unop( Iop_V128to64, src1) );
   /* src1 > src2 if the upper halves compare greater, or the upper halves
      are equal and the lower halves compare greater. */
   return mkOR1( pos_upper_gt,
                 mkAND1( pos_upper_eq,
                         pos_lower_gt ) );
}
static IRExpr * is_BCDstring128 ( const VexAbiInfo* vbi,
                                  UInt Signed, IRExpr *src )
{

   IRTemp valid = newTemp( Ity_I64 );

   /* The src is a 128-bit value containing MAX_DIGITS BCD digits and
    * a sign.  The upper bytes are BCD values between 0x0 and 0x9.  The sign
    * byte is the least significant byte.  This function returns a 64-bit
    * value of 1 if the sign and digits are valid, 0 otherwise.
    *
    * This function was originally written using IR code.  It has been
    * replaced with a clean helper due to the large amount of IR code
    * needed by this function.
    */
   assign( valid,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "is_BCDstring128_helper",
                          fnptr_to_fnentry( vbi, &is_BCDstring128_helper ),
                          mkIRExprVec_3( mkU64( Signed ),
                                         unop( Iop_V128HIto64, src ),
                                         unop( Iop_V128to64, src ) ) ) );
   return mkexpr( valid );
}
static IRExpr * BCDstring_zero (IRExpr *src)
{
   /* The src is a 128-bit value containing a BCD string.  The function
    * returns a 1 if the BCD string values are all zero, 0 otherwise.
    */
   IRTemp tsrc = newTemp( Ity_V128 );
   assign( tsrc, src);

   if ( mode64 ) {
      /* 64-bit mode: compare the two 64-bit halves against zero. */
      return mkAND1( binop( Iop_CmpEQ64,
                            mkU64( 0 ),
                            unop( Iop_V128HIto64,
                                  mkexpr( tsrc ) ) ),
                     binop( Iop_CmpEQ64,
                            mkU64( 0 ),
                            unop( Iop_V128to64,
                                  mkexpr( tsrc ) ) ) );
   } else {
      /* make this work in 32-bit mode: 64-bit compares are not available,
         so compare each of the four 32-bit words against zero instead. */
      return mkAND1(
                    mkAND1( binop( Iop_CmpEQ32,
                                   mkU32( 0 ),
                                   unop( Iop_64HIto32,
                                         unop( Iop_V128HIto64,
                                               mkexpr( tsrc ) ) ) ),
                            binop( Iop_CmpEQ32,
                                   mkU32( 0 ),
                                   unop( Iop_64to32,
                                         unop( Iop_V128HIto64,
                                               mkexpr( tsrc ) ) ) ) ),
                    mkAND1( binop( Iop_CmpEQ32,
                                   mkU32( 0 ),
                                   unop( Iop_64HIto32,
                                         unop( Iop_V128to64,
                                               mkexpr( tsrc ) ) ) ),
                            binop( Iop_CmpEQ32,
                                   mkU32( 0 ),
                                   unop( Iop_64to32,
                                         unop( Iop_V128to64,
                                               mkexpr( tsrc ) ) ) ) ) );
   }
}
static IRExpr * check_BCD_round (IRExpr *src, IRTemp shift)
{
   /* The src is a 128-bit value containing 31 BCD digits with the sign in
    * the least significant byte.  The bytes are BCD values between 0x0 and
    * 0x9.  This routine checks the BCD digit in position shift (counting
    * from the least significant digit).  If the digit is greater than five,
    * a 1 is returned indicating the string needs to be rounded up,
    * otherwise, 0 is returned.  The value of shift (I64) is the index of
    * the BCD digit times four bits.
    */
   return  binop( Iop_CmpLE64U,
                  mkU64( 6 ),
                  binop( Iop_And64,
                         unop( Iop_V128to64,
                               binop( Iop_ShrV128,
                                      src,
                                      unop( Iop_64to8, mkexpr( shift ) ) ) ),
                         mkU64(  0xF ) ) );
}
static IRTemp increment_BCDstring ( const VexAbiInfo* vbi,
                                    IRExpr *src, IRExpr *carry_in )
{
   /* The src is a 128-bit value containing 31 BCD digits with the sign in
    * the least significant byte.  The bytes are BCD values between 0x0 and
    * 0x9.  This function returns the BCD string incremented by 1.
    *
    * Call a clean helper to do the computation as it requires a lot of
    * IR code to do this.
    *
    * The helper function takes a 32-bit BCD string, in a 64-bit value, and
    * increments the string by the 32-bit carry-in value.
    *
    * The incremented value is returned in the lower 32-bits of the result.
    * The carry out is returned in bits [35:32] of the result.  The
    * helper function will be called for each of the four 32-bit strings
    * that make up the src string passing the returned carry out to the
    * next call.
    */
   IRTemp bcd_result  = newTemp( Ity_V128 );
   IRTemp bcd_result0 = newTemp( Ity_I64 );
   IRTemp bcd_result1 = newTemp( Ity_I64 );
   IRTemp bcd_result2 = newTemp( Ity_I64 );
   IRTemp bcd_result3 = newTemp( Ity_I64 );
   IRExpr *bcd_string0, *bcd_string1, *bcd_string2, *bcd_string3;

   /* Split the 128-bit src into four 32-bit pieces (each held in an I64). */
   bcd_string0 = binop( Iop_And64,
                        mkU64( 0xFFFFFFFF ), unop( Iop_V128to64, src ) );
   bcd_string1 = binop( Iop_Shr64, unop( Iop_V128to64, src ), mkU8( 32 ) );
   bcd_string2 = binop( Iop_And64,
                        mkU64( 0xFFFFFFFF ), unop( Iop_V128HIto64, src ) );
   bcd_string3 = binop( Iop_Shr64, unop( Iop_V128HIto64, src ), mkU8( 32 ) );

   /* Only the least significant piece is treated as signed; the carry out
      of each helper call (bits [35:32]) feeds the next call. */
   assign( bcd_result0,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "increment_BCDstring32_helper",
                          fnptr_to_fnentry( vbi,
                                            &increment_BCDstring32_helper ),
                          mkIRExprVec_3( mkU64( True /*Signed*/ ),
                                         bcd_string0,
                                         binop( Iop_32HLto64, mkU32( 0 ),
                                                carry_in ) ) ) );
   assign( bcd_result1,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "increment_BCDstring32_helper",
                          fnptr_to_fnentry( vbi,
                                            &increment_BCDstring32_helper ),
                          mkIRExprVec_3( mkU64( False /*Unsigned*/ ),
                                         bcd_string1,
                                         binop( Iop_Shr64,
                                                mkexpr( bcd_result0 ),
                                                mkU8( 32 ) ) ) ) );
   assign( bcd_result2,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "increment_BCDstring32_helper",
                          fnptr_to_fnentry( vbi,
                                            &increment_BCDstring32_helper ),
                          mkIRExprVec_3( mkU64( False /*Unsigned*/ ),
                                         bcd_string2,
                                         binop( Iop_Shr64,
                                                mkexpr( bcd_result1 ),
                                                mkU8( 32 ) ) ) ) );
   assign( bcd_result3,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "increment_BCDstring32_helper",
                          fnptr_to_fnentry( vbi,
                                            &increment_BCDstring32_helper ),
                          mkIRExprVec_3( mkU64( False /*Unsigned*/ ),
                                         bcd_string3,
                                         binop( Iop_Shr64,
                                                mkexpr( bcd_result2 ),
                                                mkU8( 32 ) ) ) ) );

   /* Put the 128-bit result together from the intermediate results.  Remember
    * to mask out the carry out from the upper 32 bits of the results.
    */
   assign( bcd_result,
           binop( Iop_64HLtoV128,
                  binop( Iop_Or64,
                         binop( Iop_And64,
                                mkU64( 0xFFFFFFFF ), mkexpr (bcd_result2 ) ),
                         binop( Iop_Shl64,
                                mkexpr (bcd_result3 ), mkU8( 32 ) ) ),
                  binop( Iop_Or64,
                         binop( Iop_And64,
                                mkU64( 0xFFFFFFFF ), mkexpr (bcd_result0 ) ),
                         binop( Iop_Shl64,
                                mkexpr (bcd_result1 ), mkU8( 32 ) ) ) ) );
   return bcd_result;
}
static IRExpr * convert_to_zoned ( const VexAbiInfo* vbi,
                                   IRExpr *src, IRExpr *upper_byte )
{
   /* The function takes a V128 packed decimal value and returns
    * the value in zoned format.  Note, the sign of the value is ignored.
    */
   IRTemp result_low = newTemp( Ity_I64 );
   IRTemp result_hi  = newTemp( Ity_I64 );
   IRTemp result     = newTemp( Ity_V128 );

   /* Since we can only return 64-bits from a clean helper, we will
    * have to get the lower and upper 64-bits separately.
    */

   assign( result_low,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "convert_to_zoned_helper",
                          fnptr_to_fnentry( vbi, &convert_to_zoned_helper ),
                          mkIRExprVec_4( unop( Iop_V128HIto64, src ),
                                         unop( Iop_V128to64, src ),
                                         upper_byte,
                                         mkU64( 0 ) ) ) );

   assign( result_hi,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "convert_to_zoned_helper",
                          fnptr_to_fnentry( vbi, &convert_to_zoned_helper ),
                          mkIRExprVec_4( unop( Iop_V128HIto64, src ),
                                         unop( Iop_V128to64, src ),
                                         upper_byte,
                                         mkU64( 1 ) ) ) );

   assign( result,
           binop( Iop_64HLtoV128, mkexpr( result_hi ), mkexpr( result_low ) ) );

   return mkexpr( result );
}
static IRExpr * convert_to_national ( const VexAbiInfo* vbi, IRExpr *src ) {
   /* The function takes a 128-bit value which has a 64-bit packed decimal
    * value in the lower 64-bits of the source.  The packed decimal is
    * converted to the national format via a clean helper.  The clean
    * helper is used due to the large amount of IR code needed to do the
    * conversion.  The helper returns the upper 64-bits of the 128-bit
    * result if return_upper != 0.  Otherwise, the lower 64-bits of the
    * result is returned.
    */
   IRTemp result_low = newTemp( Ity_I64 );
   IRTemp result_hi  = newTemp( Ity_I64 );
   IRTemp result     = newTemp( Ity_V128 );

   /* Since we can only return 64-bits from a clean helper, we will
    * have to get the lower and upper 64-bits separately.
    */

   assign( result_low,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "convert_to_national_helper",
                          fnptr_to_fnentry( vbi, &convert_to_national_helper ),
                          mkIRExprVec_2( unop( Iop_V128to64, src ),
                                         mkU64( 0 ) ) ) );

   assign( result_hi,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "convert_to_national_helper",
                          fnptr_to_fnentry( vbi, &convert_to_national_helper ),
                          mkIRExprVec_2( unop( Iop_V128to64, src ),
                                         mkU64( 1 ) ) ) );

   assign( result,
           binop( Iop_64HLtoV128, mkexpr( result_hi ), mkexpr( result_low ) ) );

   return mkexpr( result );
}
static IRExpr * convert_from_zoned ( const VexAbiInfo* vbi, IRExpr *src ) {
   /* The function takes a 128-bit zoned value and returns a signless 64-bit
    * packed decimal value in the lower 64-bits of the 128-bit result.
    */
   IRTemp result = newTemp( Ity_V128 );

   /* The helper's 64-bit result is shifted left by one nibble to drop the
      sign position. */
   assign( result,
           binop( Iop_ShlV128,
                  binop( Iop_64HLtoV128,
                         mkU64( 0 ),
                         mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                                        "convert_from_zoned_helper",
                                        fnptr_to_fnentry( vbi,
                                                          &convert_from_zoned_helper ),
                                        mkIRExprVec_2( unop( Iop_V128HIto64,
                                                             src ),
                                                       unop( Iop_V128to64,
                                                             src ) ) ) ),
                  mkU8( 4 ) ) );

   return mkexpr( result );
}
static IRExpr * convert_from_national ( const VexAbiInfo* vbi, IRExpr *src ) {
   /* The function takes a 128-bit national value and returns a 64-bit
    * packed decimal value.
    */
   IRTemp result = newTemp( Ity_I64);

   assign( result,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "convert_from_national_helper",
                          fnptr_to_fnentry( vbi,
                                            &convert_from_national_helper ),
                          mkIRExprVec_2( unop( Iop_V128HIto64,
                                               src ),
                                         unop( Iop_V128to64,
                                               src ) ) ) );

   return mkexpr( result );
}
static IRExpr * vector_convert_floattobf16 ( const VexAbiInfo* vbi,
                                             IRExpr *src ) {
   /* The function takes a 128-bit value containing four 32-bit floats and
      returns a 128-bit value containing four 16-bit bfloats in the lower
      halfwords. */

   IRTemp resultHi = newTemp( Ity_I64);
   IRTemp resultLo = newTemp( Ity_I64);

   /* Clean helpers can only return 64 bits, so convert each 64-bit half
      (two floats) separately. */
   assign( resultHi,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "vector_convert_floattobf16_helper",
                          fnptr_to_fnentry( vbi,
                                            &convert_from_floattobf16_helper ),
                          mkIRExprVec_1( unop( Iop_V128HIto64, src ) ) ) );

   assign( resultLo,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "vector_convert_floattobf16_helper",
                          fnptr_to_fnentry( vbi,
                                            &convert_from_floattobf16_helper ),
                          mkIRExprVec_1( unop( Iop_V128to64, src ) ) ) );

   return binop( Iop_64HLtoV128, mkexpr( resultHi ), mkexpr( resultLo ) );
}
static IRExpr * vector_convert_bf16tofloat ( const VexAbiInfo* vbi,
                                             IRExpr *src ) {
   /* The function takes a 128-bit value containing four 16-bit bfloats in
      the lower halfwords and returns a 128-bit value containing four
      32-bit floats. */
   IRTemp resultHi = newTemp( Ity_I64);
   IRTemp resultLo = newTemp( Ity_I64);

   /* Clean helpers can only return 64 bits, so convert each 64-bit half
      (two bfloats) separately. */
   assign( resultHi,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "vector_convert_bf16tofloat_helper",
                          fnptr_to_fnentry( vbi,
                                            &convert_from_bf16tofloat_helper ),
                          mkIRExprVec_1( unop( Iop_V128HIto64, src ) ) ) );

   assign( resultLo,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "vector_convert_bf16tofloat_helper",
                          fnptr_to_fnentry( vbi,
                                            &convert_from_bf16tofloat_helper ),
                          mkIRExprVec_1( unop( Iop_V128to64, src ) ) ) );

   return binop( Iop_64HLtoV128, mkexpr( resultHi ), mkexpr( resultLo ) );
}
static IRExpr * popcnt64 ( const VexAbiInfo* vbi,
                           IRExpr *src ){
   /* The function takes a 64-bit source and counts the number of bits in the
      source that are 1's.  The count is computed by a clean helper. */
   IRTemp result = newTemp( Ity_I64);

   assign( result,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "population_count64_helper",
                          fnptr_to_fnentry( vbi,
                                            &population_count64_helper ),
                          mkIRExprVec_1( src ) ) );

   return mkexpr( result );
}
static IRExpr * extract_bits_under_mask ( const VexAbiInfo* vbi,
                                          IRExpr *src, IRExpr *mask,
                                          IRExpr *flag ) {

   /* The function takes a 64-bit value and a 64-bit mask.  It will extract
    * the bits from the source that align with 1's in the mask or it will
    * extract the bits from the source that align with 0's in the mask,
    * depending on the flag argument.
    */
   IRTemp result = newTemp( Ity_I64);

   assign( result,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "extract_bits_under_mask_helper",
                          fnptr_to_fnentry( vbi,
                                            &extract_bits_under_mask_helper ),
                          mkIRExprVec_3( src, mask, flag ) ) );

   return mkexpr( result );
}
static IRExpr * count_bits_under_mask ( const VexAbiInfo* vbi,
                                        IRExpr *src , IRExpr *mask,
                                        IRExpr *flag ) {

   /* The function takes a 64-bit value and a 64-bit mask.  It will count
    * the bits from the source that align with 1's in the mask or it will
    * count the bits from the source that align with 0's in the mask,
    * depending on the flag argument.
    */
   IRTemp result = newTemp( Ity_I32);

   assign( result,
           mkIRExprCCall( Ity_I32, 0 /*regparms*/,
                          "count_bits_under_mask_helper",
                          fnptr_to_fnentry( vbi,
                                            &count_bits_under_mask_helper ),
                          mkIRExprVec_3( src, mask, flag ) ) );

   return mkexpr( result );
}
static IRExpr * deposit_bits_under_mask ( const VexAbiInfo* vbi,
                                          IRExpr *src , IRExpr *mask ) {

   /* The contents of the rightmost n bits of src are placed into bits_rtn
    * under the control of the mask.  The LSB (bit 63) of src is placed into
    * the bit of bits_rtn corresponding to the right most bit of mask that is
    * a 1.  The LSB+1 (bit 62) of src is placed into the bit of bits_rtn
    * corresponding to the second right most bit of mask that is a 1, etc.
    */

   IRTemp result = newTemp( Ity_I64);

   assign( result,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "deposit_bits_under_mask_helper",
                          fnptr_to_fnentry( vbi,
                                            &deposit_bits_under_mask_helper ),
                          mkIRExprVec_2( src, mask) ) );

   return mkexpr( result );
}
static IRExpr * vector_evaluate_inst ( const VexAbiInfo* vbi,
                                       IRExpr *srcA, IRExpr *srcB,
                                       IRExpr *srcC, IRExpr *IMM ){
   /* This function implements the ISA 3.1 instruction xxeval.  The
      instruction is too complex to do with Iops.  */

   /* The instruction description, note the IBM bit numbering is left to
      right:

      For each integer value i, 0 to 127, do the following.

      Let j be the value of the concatenation of the contents of bit i of
      srcA, bit i of srcB, bit i of srcC.  (j = srcA[i] | srcB[i] | srcC[i])

      The value of bit IMM[j] is placed into bit result[i].

      Basically the instruction lets you set each of the 128 bits in the
      result by selecting one of the eight bits in the IMM value.  */

   /* Calling clean helpers with 128-bit args is currently not supported.  We
      will simply call a 64-bit clean helper to do the upper 64-bits of the
      result and then call it to do the lower 64-bits of the result.  */

   IRTemp result_hi = newTemp( Ity_I64 );
   IRTemp result_lo = newTemp( Ity_I64 );
   IRExpr *srcA_hi;
   IRExpr *srcB_hi;
   IRExpr *srcC_hi;
   IRExpr *srcA_lo;
   IRExpr *srcB_lo;
   IRExpr *srcC_lo;

   srcA_hi = unop( Iop_V128HIto64, srcA );
   srcA_lo = unop( Iop_V128to64, srcA );
   srcB_hi = unop( Iop_V128HIto64, srcB );
   srcB_lo = unop( Iop_V128to64, srcB );
   srcC_hi = unop( Iop_V128HIto64, srcC );
   srcC_lo = unop( Iop_V128to64, srcC );

   assign( result_hi,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "vector_evaluate64_helper",
                          fnptr_to_fnentry( vbi,
                                            &vector_evaluate64_helper ),
                          mkIRExprVec_4( srcA_hi, srcB_hi, srcC_hi, IMM ) ) );

   assign( result_lo,
           mkIRExprCCall( Ity_I64, 0 /*regparms*/,
                          "vector_evaluate64_helper",
                          fnptr_to_fnentry( vbi,
                                            &vector_evaluate64_helper ),
                          mkIRExprVec_4( srcA_lo, srcB_lo, srcC_lo, IMM ) ) );

   return binop( Iop_64HLtoV128, mkexpr( result_hi ), mkexpr( result_lo ) );
}
/* Fill in the IRDirty fxState entries for an instruction that reads and/or
   writes accumulator AT: four consecutive 128-bit guest-state registers.
   AT_fx is Ifx_Write or Ifx_Modify depending on whether the instruction
   also reads the old ACC contents. */
static void setup_fxstate_struct( IRDirty* d, UInt AT, IREffect AT_fx,
                                  Bool ACC_mapped_on_VSR ) {
   UInt acc_base_address;

   /* declare guest state effects, writing to four ACC 128-bit regs. */
   d->nFxState = 4;
   vex_bzero(&d->fxState, sizeof(d->fxState));
   d->fxState[0].fx     = AT_fx;
   d->fxState[0].size   = sizeof(U128);
   d->fxState[1].fx     = AT_fx;
   d->fxState[1].size   = sizeof(U128);
   d->fxState[2].fx     = AT_fx;
   d->fxState[2].size   = sizeof(U128);
   d->fxState[3].fx     = AT_fx;
   d->fxState[3].size   = sizeof(U128);

   vassert(AT < 8);   // only eight accumulators exist

   acc_base_address = base_acc_addr( ACC_mapped_on_VSR );

   d->fxState[0].offset = acc_base_address + ACC_offset( AT, 0);
   d->fxState[1].offset = acc_base_address + ACC_offset( AT, 1);
   d->fxState[2].offset = acc_base_address + ACC_offset( AT, 2);
   d->fxState[3].offset = acc_base_address + ACC_offset( AT, 3);
   return;
}
5669 #define MATRIX_4BIT_INT_GER 1
5670 #define MATRIX_8BIT_INT_GER 2
5671 #define MATRIX_16BIT_INT_GER 3
5672 #define MATRIX_16BIT_FLOAT_GER 4
5673 #define MATRIX_32BIT_FLOAT_GER 5
/* Note, the 64-bit float instructions have their own caller. */
5676 static void vsx_matrix_ger ( const VexAbiInfo* vbi,
5677 UInt inst_class,
5678 IRExpr *srcA, IRExpr *srcB,
5679 UInt AT, UInt mask_inst,
5680 Bool ACC_mapped_on_VSR) {
5682 UInt acc_base_addr = base_acc_addr( ACC_mapped_on_VSR );
5684 /* This helper function does the VSX Matrix 4-bit Signed Integer GER
5685 (Rank-8 Update) instructions xvi4ger8, xvi4ger8pp, pmxvi4ger8,
5686 pmxvi4ger8pp. The instructions work on four V128 values, and three
5687 8-bit masks. */
5689 IRTemp srcA_hi = newTemp( Ity_I64);
5690 IRTemp srcA_lo = newTemp( Ity_I64);
5691 IRTemp srcB_hi = newTemp( Ity_I64);
5692 IRTemp srcB_lo = newTemp( Ity_I64);
5693 IRDirty* d;
5694 UInt instruction = mask_inst & 0xFF; /* Instruction is lower 8-bits. */
5695 IREffect AT_fx;
5697 assign( srcA_hi, unop( Iop_V128HIto64, srcA ) );
5698 assign( srcA_lo, unop( Iop_V128to64, srcA ) );
5699 assign( srcB_hi, unop( Iop_V128HIto64, srcB ) );
5700 assign( srcB_lo, unop( Iop_V128to64, srcB ) );
5702 /* Using a dirty helper so we can access the contents of the ACC for use in
5703 by the instruction and then write the result directly back to the ACC.
5704 The dirty helper does not return data. */
5705 IRExpr** args = mkIRExprVec_7(
5706 IRExpr_GSPTR(),
5707 mkU32( acc_base_addr ),
5708 mkexpr(srcA_hi), mkexpr(srcA_lo),
5709 mkexpr(srcB_hi), mkexpr(srcB_lo),
5710 mkU32( (mask_inst << 5) | AT ));
5712 /* Set AT_fx to Write if the instruction only writes the ACC. Set
5713 AT_fx to modify if the instruction uses the AT entry and writes
5714 to the ACC entry. */
5715 switch (instruction) {
5716 case XVI4GER8:
5717 case XVI8GER4:
5718 case XVI16GER2:
5719 case XVI16GER2S:
5720 case XVF16GER2:
5721 case XVBF16GER2:
5722 case XVF32GER:
5723 AT_fx = Ifx_Write;
5724 break;
5725 case XVI4GER8PP:
5726 case XVI8GER4PP:
5727 case XVI8GER4SPP:
5728 case XVI16GER2PP:
5729 case XVI16GER2SPP:
5730 case XVBF16GER2PP:
5731 case XVBF16GER2PN:
5732 case XVBF16GER2NP:
5733 case XVBF16GER2NN:
5734 case XVF16GER2PP:
5735 case XVF16GER2PN:
5736 case XVF16GER2NP:
5737 case XVF16GER2NN:
5738 case XVF32GERPP:
5739 case XVF32GERPN:
5740 case XVF32GERNP:
5741 case XVF32GERNN:
5742 AT_fx = Ifx_Modify;
5743 break;
5744 default:
5745 vassert(0); /* Unknown instruction */
5748 switch(inst_class) {
5749 case MATRIX_4BIT_INT_GER:
5751 d = unsafeIRDirty_0_N (
5752 0/*regparms*/,
5753 "vsx_matrix_4bit_ger_dirty_helper",
5754 fnptr_to_fnentry( vbi, &vsx_matrix_4bit_ger_dirty_helper ),
5755 args );
5756 break;
5758 case MATRIX_8BIT_INT_GER:
5760 d = unsafeIRDirty_0_N (
5761 0/*regparms*/,
5762 "vsx_matrix_8bit_ger_dirty_helper",
5763 fnptr_to_fnentry( vbi, &vsx_matrix_8bit_ger_dirty_helper ),
5764 args );
5765 break;
5767 case MATRIX_16BIT_INT_GER:
5769 d = unsafeIRDirty_0_N (
5770 0/*regparms*/,
5771 "vsx_matrix_16bit_ger_dirty_helper",
5772 fnptr_to_fnentry( vbi, &vsx_matrix_16bit_ger_dirty_helper ),
5773 args );
5774 break;
5776 case MATRIX_16BIT_FLOAT_GER:
5778 d = unsafeIRDirty_0_N (
5779 0/*regparms*/,
5780 "vsx_matrix_16bit_float_ger_dirty_helper",
5781 fnptr_to_fnentry( vbi, &vsx_matrix_16bit_float_ger_dirty_helper ),
5782 args );
5783 break;
5785 case MATRIX_32BIT_FLOAT_GER:
5787 d = unsafeIRDirty_0_N (
5788 0/*regparms*/,
5789 "vsx_matrix_32bit_float_ger_dirty_helper",
5790 fnptr_to_fnentry( vbi, &vsx_matrix_32bit_float_ger_dirty_helper ),
5791 args );
5792 break;
5794 default:
5795 vex_printf("ERROR: Unkown inst_class = %u in vsx_matrix_ger()\n",
5796 inst_class);
5797 return;
5800 setup_fxstate_struct( d, AT, AT_fx, ACC_mapped_on_VSR );
5802 /* execute the dirty call, side-effecting guest state */
5803 stmt( IRStmt_Dirty(d) );
static void vsx_matrix_64bit_float_ger ( const VexAbiInfo* vbi,
                                         IRExpr *srcA, IRExpr *srcA1,
                                         IRExpr *srcB,
                                         UInt AT, UInt mask_inst,
                                         Bool ACC_mapped_on_VSR ) {

   UInt acc_base_addr = base_acc_addr( ACC_mapped_on_VSR );

   /* This helper function does the VSX Matrix 64-bit floating-point GER
      (Rank-1 Update) instructions xvf64ger, xvf64gerpp, xvf64gerpn,
      xvf64gernp, xvf64gernn, pmxvf64ger, pmxvf64gerpp, pmxvf64gerpn,
      pmxvf64gernp, pmxvf64gernn. */
   IRTemp srcX_hi = newTemp( Ity_I64);
   IRTemp srcX_lo = newTemp( Ity_I64);
   IRTemp srcX1_hi = newTemp( Ity_I64);
   IRTemp srcX1_lo = newTemp( Ity_I64);
   IRTemp srcY_hi = newTemp( Ity_I64);
   IRTemp srcY_lo = newTemp( Ity_I64);
   UInt start_i;
   IRDirty* d;
   ULong combined_args;
   UInt instruction = mask_inst & 0xFF;   /* Instruction is lower 8-bits. */
   IREffect AT_fx;

   /* NOTE(review): the _lo temps are assigned from V128HIto64 and the _hi
      temps from V128to64 — presumably this swapped ordering is what the
      dirty helper expects; confirm against the helper's argument order. */
   assign( srcX_lo, unop( Iop_V128HIto64, srcA ) );
   assign( srcX_hi, unop( Iop_V128to64, srcA ) );
   assign( srcX1_lo, unop( Iop_V128HIto64, srcA1 ) );
   assign( srcX1_hi, unop( Iop_V128to64, srcA1 ) );
   assign( srcY_lo, unop( Iop_V128HIto64, srcB ) );
   assign( srcY_hi, unop( Iop_V128to64, srcB ) );

   /* Using a dirty helper so we can access the contents of the ACC for use
      by the instruction and then write the result directly back to the ACC.
      The dirty helper does not return data.

      There is a restriction of 8 args in a dirty helper.  Can't pass the
      four srcX values.  So, just do two calls calculating the first two ACC
      results then the second two ACC results.  */

   start_i = 0;
   combined_args = (mask_inst << 8) | (start_i << 4) | AT;

   IRExpr** args1 = mkIRExprVec_7(
      IRExpr_GSPTR(),
      mkU32( acc_base_addr ),
      mkexpr(srcX1_hi), mkexpr(srcX1_lo),
      mkexpr(srcY_hi), mkexpr(srcY_lo),
      mkU32( combined_args ));

   /* Set AT_fx to Write if the instruction only writes the ACC.  Set
      AT_fx to modify if the instruction uses the AT entry and writes
      to the ACC entry. */
   switch (instruction) {
   case XVF64GER:
      AT_fx = Ifx_Write;
      break;
   case XVF64GERPP:
   case XVF64GERPN:
   case XVF64GERNP:
   case XVF64GERNN:
      AT_fx = Ifx_Modify;
      break;
   default:
      vassert(0);   /* Unknown instruction */
   }

   d = unsafeIRDirty_0_N (
      0/*regparms*/,
      "vsx_matrix_64bit_float_ger_dirty_helper",
      fnptr_to_fnentry( vbi, &vsx_matrix_64bit_float_ger_dirty_helper ),
      args1 );

   setup_fxstate_struct( d, AT, AT_fx, ACC_mapped_on_VSR );

   /* execute the dirty call, side-effecting guest state */
   stmt( IRStmt_Dirty(d) );

   /* Second call computes the remaining two ACC entries. */
   start_i = 2;
   combined_args = (mask_inst << 8) | (start_i << 4) | AT;

   IRExpr** args2 = mkIRExprVec_7(
      IRExpr_GSPTR(),
      mkU32( acc_base_addr ),
      mkexpr(srcX_hi), mkexpr(srcX_lo),
      mkexpr(srcY_hi), mkexpr(srcY_lo),
      mkU32( combined_args ));

   d = unsafeIRDirty_0_N (
      0/*regparms*/,
      "vsx_matrix_64bit_float_ger_dirty_helper",
      fnptr_to_fnentry( vbi, &vsx_matrix_64bit_float_ger_dirty_helper ),
      args2 );

   setup_fxstate_struct( d, AT, AT_fx, ACC_mapped_on_VSR );

   /* execute the dirty call, side-effecting guest state */
   stmt( IRStmt_Dirty(d) );
}
5905 static void vector_gen_pvc_mask ( const VexAbiInfo* vbi,
5906 IRExpr *src, UInt IMM,
5907 UInt opc2, UInt VSX_addr ) {
5908 /* The function takes a 64-bit source and an immediate value. The function
5909 calls a helper to execute the xxgenpcvbm, xxgenpcvhm, xxgenpcvwm,
5910 xxgenpcvdm instruction. The instructions are not practical to do with
5911 Iops. The instruction is implemented with a dirty helper that
5912 calculates the 128-bit result and writes it directly into the guest
5913 state VSX register.
5915 IRTemp src_hi = newTemp( Ity_I64);
5916 IRTemp src_lo = newTemp( Ity_I64);
5918 IRDirty* d;
5920 vassert(VSX_addr < 64);
5921 UInt reg_offset = offsetofPPCGuestState( guest_VSR0 )
5922 + sizeof(U128) * VSX_addr;
5924 assign( src_hi, unop( Iop_V128HIto64, src ) );
5925 assign( src_lo, unop( Iop_V128to64, src ) );
5927 IRExpr** args = mkIRExprVec_5(
5928 IRExpr_GSPTR(),
5929 mkexpr( src_hi ),
5930 mkexpr( src_lo ),
5931 mkU32( reg_offset ),
5932 mkU64( IMM ) );
5934 switch( opc2 ) {
5935 case 0x394: // xxgenpcvbm
5936 d = unsafeIRDirty_0_N (
5937 0 /*regparms*/,
5938 "vector_gen_pvc_byte_mask_dirty_helper",
5939 fnptr_to_fnentry( vbi,
5940 &vector_gen_pvc_byte_mask_dirty_helper ),
5941 args);
5942 break;
5944 case 0x395: // xxgenpcvhm
5945 d = unsafeIRDirty_0_N (
5946 0 /*regparms*/,
5947 "vector_gen_pvc_hword_mask_dirty_helper",
5948 fnptr_to_fnentry( vbi,
5949 &vector_gen_pvc_hword_mask_dirty_helper ),
5950 args);
5951 break;
5953 case 0x3B4: // xxgenpcvwm
5954 d = unsafeIRDirty_0_N (
5955 0 /*regparms*/,
5956 "vector_gen_pvc_word_mask_dirty_helper",
5957 fnptr_to_fnentry( vbi,
5958 &vector_gen_pvc_word_mask_dirty_helper ),
5959 args);
5960 break;
5962 case 0x3B5: // xxgenpcvdm
5963 d = unsafeIRDirty_0_N (
5964 0 /*regparms*/,
5965 "vector_gen_pvc_dword_mask_dirty_helper",
5966 fnptr_to_fnentry( vbi,
5967 &vector_gen_pvc_dword_mask_dirty_helper ),
5968 args);
5969 break;
5970 default:
5971 vex_printf("ERROR: Unkown instruction = %u in vector_gen_pvc_mask()\n",
5972 opc2);
5973 return;
5976 d->nFxState = 1;
5977 vex_bzero(&d->fxState, sizeof(d->fxState));
5978 d->fxState[0].fx = Ifx_Modify;
5979 d->fxState[0].size = sizeof(U128);
5980 d->fxState[0].offset = reg_offset;
5982 /* execute the dirty call, side-effecting guest state */
5983 stmt( IRStmt_Dirty(d) );
static IRExpr * UNSIGNED_CMP_GT_V128 ( IRExpr *vA, IRExpr *vB ) {
   /* This function does an unsigned compare of two V128 values.  The
    * function is for use in 32-bit mode only as it is expensive.  The
    * issue is that compares (GT, LT, EQ) are not supported for operands
    * larger than 32 bits when running in 32-bit mode.  The function returns
    * a 1-bit expression, 1 for TRUE and 0 for FALSE.
    */
   IRTemp vA_word0 = newTemp( Ity_I32);
   IRTemp vA_word1 = newTemp( Ity_I32);
   IRTemp vA_word2 = newTemp( Ity_I32);
   IRTemp vA_word3 = newTemp( Ity_I32);
   IRTemp vB_word0 = newTemp( Ity_I32);
   IRTemp vB_word1 = newTemp( Ity_I32);
   IRTemp vB_word2 = newTemp( Ity_I32);
   IRTemp vB_word3 = newTemp( Ity_I32);

   IRTemp eq_word1 = newTemp( Ity_I1);
   IRTemp eq_word2 = newTemp( Ity_I1);
   IRTemp eq_word3 = newTemp( Ity_I1);

   IRExpr *gt_word0, *gt_word1, *gt_word2, *gt_word3;
   IRExpr *eq_word3_2, *eq_word3_2_1;
   IRTemp result = newTemp( Ity_I1 );

   /* Split each V128 into four 32-bit words; word3 is the most
      significant. */
   assign( vA_word0, unop( Iop_64to32, unop( Iop_V128to64, vA ) ) );
   assign( vA_word1, unop( Iop_64HIto32, unop( Iop_V128to64, vA ) ) );
   assign( vA_word2, unop( Iop_64to32, unop( Iop_V128HIto64, vA ) ) );
   assign( vA_word3, unop( Iop_64HIto32, unop( Iop_V128HIto64, vA ) ) );

   assign( vB_word0, unop( Iop_64to32, unop( Iop_V128to64, vB ) ) );
   assign( vB_word1, unop( Iop_64HIto32, unop( Iop_V128to64, vB ) ) );
   assign( vB_word2, unop( Iop_64to32, unop( Iop_V128HIto64, vB ) ) );
   assign( vB_word3, unop( Iop_64HIto32, unop( Iop_V128HIto64, vB ) ) );

   assign( eq_word3, binop( Iop_CmpEQ32, mkexpr( vA_word3 ),
                            mkexpr( vB_word3 ) ) );
   assign( eq_word2, binop( Iop_CmpEQ32, mkexpr( vA_word2 ),
                            mkexpr( vB_word2 ) ) );
   assign( eq_word1, binop( Iop_CmpEQ32, mkexpr( vA_word1 ),
                            mkexpr( vB_word1 ) ) );

   gt_word3 = binop( Iop_CmpLT32U, mkexpr( vB_word3 ), mkexpr( vA_word3 ) );
   gt_word2 = binop( Iop_CmpLT32U, mkexpr( vB_word2 ), mkexpr( vA_word2 ) );
   gt_word1 = binop( Iop_CmpLT32U, mkexpr( vB_word1 ), mkexpr( vA_word1 ) );
   gt_word0 = binop( Iop_CmpLT32U, mkexpr( vB_word0 ), mkexpr( vA_word0 ) );

   eq_word3_2   = mkAND1( mkexpr( eq_word3 ), mkexpr( eq_word2 ) );
   eq_word3_2_1 = mkAND1( mkexpr( eq_word1 ), eq_word3_2 );

   /* vA > vB iff the first (most significant) unequal word pair compares
      greater. */
   assign( result, mkOR1(
                      mkOR1( gt_word3,
                             mkAND1( mkexpr( eq_word3 ), gt_word2 ) ),
                      mkOR1( mkAND1( eq_word3_2, gt_word1 ),
                             mkAND1( eq_word3_2_1, gt_word0 ) ) ) );
   return mkexpr( result );
}
6044 /*------------------------------------------------------------*/
6045 /*--- FP Helpers ---*/
6046 /*------------------------------------------------------------*/
6048 static IRExpr* /* :: Ity_I32 */ get_IR_roundingmode ( void ); // prototype
6050 /* Produce the 32-bit pattern corresponding to the supplied
6051 float. */
6052 static UInt float_to_bits ( Float f )
6054 union { UInt i; Float f; } u;
6055 vassert(4 == sizeof(UInt));
6056 vassert(4 == sizeof(Float));
6057 vassert(4 == sizeof(u));
6058 u.f = f;
6059 return u.i;
static IRExpr* dnorm_adj_Vector ( IRExpr* src )
{
   /* This function takes a vector of 32-bit floats.  It does the required
      adjustment on denormalized values based on the setting of the
      VSCR[NJ] bit.

      The VSCR[NJ] bit controls how subnormal (denormalized) results for
      vector floating point operations are handled.  VSCR[NJ] is bit 17
      (bit 111 IBM numbering).

      VSCR[NJ] = 0   Denormalized values are handled as
                     specified by Java and the IEEE standard.

      VSCR[NJ] = 1   If an element in a source VR contains a denormalized
                     value, the value 0 is used instead.  If an instruction
                     causes an Underflow Exception, the corresponding element
                     in the target VR is set to 0.  In both cases the 0 has
                     the same sign as the denormalized or underflowing value.
                     Convert negative zero to positive zero.

      The ABI for LE requires VSCR[NJ] = 0.  For BE mode, VSCR[NJ] = 1 by
      default.  The PPC guest state is initialized to match the HW setup.
   */
   IRTemp sign_bit_maskV128    = newTemp( Ity_V128 );
   IRTemp ones_maskV128        = newTemp( Ity_V128 );
   IRTemp clear_dnorm_maskV128 = newTemp( Ity_V128 );
   IRTemp adj_valueV128        = newTemp( Ity_V128 );
   IRTemp dnormV128            = newTemp( Ity_V128 );
   IRTemp zeroV128             = newTemp( Ity_V128 );
   IRTemp VSCR_NJ              = newTemp( Ity_I64 );
   IRTemp VSCR_NJ_mask         = newTemp( Ity_V128 );
   IRTemp resultV128           = newTemp( Ity_V128 );

   /* get the VSCR[NJ] bit, sign-extended to a full 64-bit mask */
   assign( VSCR_NJ,
           unop( Iop_1Sto64,
                 unop( Iop_32to1,
                       binop( Iop_Shr32,
                              getGST( PPC_GST_VSCR ),
                              mkU8( 16 ) ) ) ) );

   assign ( VSCR_NJ_mask, binop( Iop_64HLtoV128,
                                 mkexpr( VSCR_NJ ) ,
                                 mkexpr( VSCR_NJ ) ) );

   /* Create the masks to do the rounding of dnorm values and absolute
      value of zero. */
   assign( dnormV128, is_Denorm_Vector( Ity_I32, src ) );
   assign( zeroV128, is_Zero_Vector( Ity_I32, src ) );

   /* If the value is dnorm, then we need to clear the significand and
      exponent but leave the sign bit.  Put 1'x in elements that are not
      denormalized values.  */
   assign( sign_bit_maskV128, unop( Iop_Dup32x4, mkU32( 0x80000000 ) ) );

   assign( clear_dnorm_maskV128,
           binop( Iop_OrV128,
                  binop( Iop_AndV128,
                         mkexpr( dnormV128 ),
                         mkexpr( sign_bit_maskV128 ) ),
                  unop( Iop_NotV128, mkexpr( dnormV128 ) ) ) );

   assign( ones_maskV128, mkV128( 0xFFFF ) );

   assign( adj_valueV128, binop( Iop_AndV128,
                                 mkexpr( clear_dnorm_maskV128 ),
                                 binop( Iop_AndV128,
                                        src,
                                        mkexpr( ones_maskV128 ) ) ) );

   /* If the VSCR[NJ] bit is 1, then clear the denormalized values,
      otherwise just return the input unchanged.  */
   assign( resultV128,
           binop( Iop_OrV128,
                  binop( Iop_AndV128,
                         mkexpr( VSCR_NJ_mask ),
                         mkexpr( adj_valueV128 ) ),
                  binop( Iop_AndV128,
                         unop( Iop_NotV128, mkexpr( VSCR_NJ_mask ) ),
                         src ) ) );

   return mkexpr(resultV128);
}
6146 /*------------------------------------------------------------*/
6147 /* Transactional memory helpers
6149 *------------------------------------------------------------*/
6151 static ULong generate_TMreason( UInt failure_code,
6152 UInt persistent,
6153 UInt nest_overflow,
6154 UInt tm_exact )
6156 ULong tm_err_code =
6157 ( (ULong) 0) << (63-6) /* Failure code */
6158 | ( (ULong) persistent) << (63-7) /* Failure persistent */
6159 | ( (ULong) 0) << (63-8) /* Disallowed */
6160 | ( (ULong) nest_overflow) << (63-9) /* Nesting Overflow */
6161 | ( (ULong) 0) << (63-10) /* Footprint Overflow */
6162 | ( (ULong) 0) << (63-11) /* Self-Induced Conflict */
6163 | ( (ULong) 0) << (63-12) /* Non-Transactional Conflict */
6164 | ( (ULong) 0) << (63-13) /* Transactional Conflict */
6165 | ( (ULong) 0) << (63-14) /* Translation Invalidation Conflict */
6166 | ( (ULong) 0) << (63-15) /* Implementation-specific */
6167 | ( (ULong) 0) << (63-16) /* Instruction Fetch Conflict */
6168 | ( (ULong) 0) << (63-30) /* Reserved */
6169 | ( (ULong) 0) << (63-31) /* Abort */
6170 | ( (ULong) 0) << (63-32) /* Suspend */
6171 | ( (ULong) 0) << (63-33) /* Reserved */
6172 | ( (ULong) 0) << (63-35) /* Privilege */
6173 | ( (ULong) 0) << (63-36) /* Failure Summary */
6174 | ( (ULong) tm_exact) << (63-37) /* TFIAR Exact */
6175 | ( (ULong) 0) << (63-38) /* ROT */
6176 | ( (ULong) 0) << (63-51) /* Reserved */
6177 | ( (ULong) 0) << (63-63); /* Transaction Level */
6179 return tm_err_code;
/* Record a transactional-memory failure in the guest state: the address
   of the failing instruction (TFIAR), the reason word (TEXASR, with its
   upper half TEXASRU cleared), and the failure-handler address (TFHAR). */
6182 static void storeTMfailure( Addr64 err_address, ULong tm_reason,
6183 Addr64 handler_address )
6185 putGST( PPC_GST_TFIAR, mkU64( err_address ) );
6186 putGST( PPC_GST_TEXASR, mkU64( tm_reason ) );
6187 putGST( PPC_GST_TEXASRU, mkU32( 0 ) );
6188 putGST( PPC_GST_TFHAR, mkU64( handler_address ) );
6191 /*------------------------------------------------------------*/
6192 /*--- Integer Instruction Translation --- */
6193 /*------------------------------------------------------------*/
6196 Byte reverse instructions
/* Disassemble a byte-reverse instruction (brh, brw, brd).  The result is
   built by masking out each byte lane and shifting it to its mirrored
   position.  Returns False if opc2 is not one of the three forms. */
6198 static Bool dis_byte_reverse ( UInt prefixInstr, UInt theInstr )
6200 UChar rS_addr = ifieldRegDS( theInstr );
6201 UChar rA_addr = ifieldRegA( theInstr );
6202 UInt opc2 = IFIELD( theInstr, 1, 10 );
6203 IRTemp rS = newTemp( Ity_I64 );
6204 IRTemp tmp_0 = newTemp( Ity_I64 );
6205 IRTemp tmp_1 = newTemp( Ity_I64 );
6206 IRTemp result = newTemp( Ity_I64 );
6208 assign( rS, getIReg( rS_addr ) );
6210 /* NOTE: rA is the destination and rS is the source. Reverse of the normal usage. */
6211 switch (opc2) {
6212 case 0xDB: // brh Byte-Reverse half word X-form
6213 DIP("brh r%u,r%u\n", rA_addr, rS_addr);
6214 assign( tmp_0,
6215 binop( Iop_And64, mkexpr( rS ), mkU64( 0xFF00FF00FF00FF00 ) ) );
6216 assign( tmp_1,
6217 binop( Iop_And64, mkexpr( rS ), mkU64( 0x00FF00FF00FF00FF ) ) );
6218 assign( result,
6219 binop( Iop_Or64,
6220 binop( Iop_Shr64, mkexpr( tmp_0 ), mkU8( 8 ) ),
6221 binop( Iop_Shl64, mkexpr( tmp_1 ), mkU8( 8 ) ) ) );
6222 break;
6224 case 0x9B: { // brw Byte-Reverse word X-form
6225 IRTemp tmp_2 = newTemp( Ity_I64 );
6226 IRTemp tmp_3 = newTemp( Ity_I64 );
6228 DIP("brw r%u,r%u\n", rA_addr, rS_addr);
6229 assign( tmp_0,
6230 binop( Iop_And64, mkexpr( rS ), mkU64( 0xFF000000FF000000 ) ) );
6231 assign( tmp_1,
6232 binop( Iop_And64, mkexpr( rS ), mkU64( 0x00FF000000FF0000 ) ) );
6233 assign( tmp_2,
6234 binop( Iop_And64, mkexpr( rS ), mkU64( 0x0000FF000000FF00 ) ) );
6235 assign( tmp_3,
6236 binop( Iop_And64, mkexpr( rS ), mkU64( 0x000000FF000000FF ) ) );
6237 assign( result,
6238 binop( Iop_Or64,
6239 binop( Iop_Or64,
6240 binop( Iop_Shl64, mkexpr( tmp_3 ), mkU8( 24 ) ),
6241 binop( Iop_Shl64, mkexpr( tmp_2 ), mkU8( 8 ) ) ),
6242 binop( Iop_Or64,
6243 binop( Iop_Shr64, mkexpr( tmp_1 ), mkU8( 8 ) ),
6244 binop( Iop_Shr64, mkexpr( tmp_0 ), mkU8( 24 ) ) )
6245 ) );
6246 break;
6249 case 0xBB: { // brd Byte-Reverse double word X-form
6250 IRTemp tmp_2 = newTemp( Ity_I64 );
6251 IRTemp tmp_3 = newTemp( Ity_I64 );
6252 IRTemp tmp_4 = newTemp( Ity_I64 );
6253 IRTemp tmp_5 = newTemp( Ity_I64 );
6254 IRTemp tmp_6 = newTemp( Ity_I64 );
6255 IRTemp tmp_7 = newTemp( Ity_I64 );
6257 DIP("brd r%u,r%u\n", rA_addr, rS_addr);
6258 assign( tmp_0,
6259 binop( Iop_And64, mkexpr( rS ), mkU64( 0xFF00000000000000 ) ) );
6260 assign( tmp_1,
6261 binop( Iop_And64, mkexpr( rS ), mkU64( 0x00FF000000000000 ) ) );
6262 assign( tmp_2,
6263 binop( Iop_And64, mkexpr( rS ), mkU64( 0x0000FF0000000000 ) ) );
6264 assign( tmp_3,
6265 binop( Iop_And64, mkexpr( rS ), mkU64( 0x000000FF00000000 ) ) );
6266 assign( tmp_4,
6267 binop( Iop_And64, mkexpr( rS ), mkU64( 0x00000000FF000000 ) ) );
6268 assign( tmp_5,
6269 binop( Iop_And64, mkexpr( rS ), mkU64( 0x0000000000FF0000 ) ) );
6270 assign( tmp_6,
6271 binop( Iop_And64, mkexpr( rS ), mkU64( 0x000000000000FF00 ) ) );
6272 assign( tmp_7,
6273 binop( Iop_And64, mkexpr( rS ), mkU64( 0x00000000000000FF ) ) );
6274 assign( result,
6275 binop( Iop_Or64,
6276 binop( Iop_Or64,
6277 binop( Iop_Or64,
6278 binop( Iop_Shl64, mkexpr( tmp_7 ), mkU8( 56 ) ),
6279 binop( Iop_Shl64, mkexpr( tmp_6 ), mkU8( 40 ) ) ),
6280 binop( Iop_Or64,
6281 binop( Iop_Shl64, mkexpr( tmp_5 ), mkU8( 24 ) ),
6282 binop( Iop_Shl64, mkexpr( tmp_4 ), mkU8( 8 ) ) ) ),
6283 binop( Iop_Or64,
6284 binop( Iop_Or64,
6285 binop( Iop_Shr64, mkexpr( tmp_3 ), mkU8( 8 ) ),
6286 binop( Iop_Shr64, mkexpr( tmp_2 ), mkU8( 24 ) ) ),
6287 binop( Iop_Or64,
6288 binop( Iop_Shr64, mkexpr( tmp_1 ), mkU8( 40 ) ),
6289 binop( Iop_Shr64, mkexpr( tmp_0 ), mkU8( 56 ) ) ) )
6290 ) );
6291 break;
6294 default:
6295 vex_printf("dis_byte_reverse(ppc): unrecognized instruction\n");
6296 return False;
6299 putIReg( rA_addr, mkexpr( result ) );
6300 return True;
6304 Integer Arithmetic Instructions
/* VA-form integer multiply-add instructions: maddhd, maddhdu, maddld.
   rD = high or low 64 bits of (rA * rB) + rC.  Returns False on an
   unrecognized opc2. */
6306 static Bool dis_int_mult_add ( UInt prefix, UInt theInstr )
6308 /* VA-Form */
6309 UChar rD_addr = ifieldRegDS( theInstr );
6310 UChar rA_addr = ifieldRegA( theInstr );
6311 UChar rB_addr = ifieldRegB( theInstr );
6312 UChar rC_addr = ifieldRegC( theInstr );
6313 UInt opc2 = IFIELD( theInstr, 0, 6 );
6314 IRType ty = Ity_I64;
6315 IRTemp rA = newTemp( ty );
6316 IRTemp rB = newTemp( ty );
6317 IRTemp rC = newTemp( ty );
6318 IRTemp rD = newTemp( ty );
6319 IRTemp tmpLo = newTemp( Ity_I64 );
6320 IRTemp tmpHi = newTemp( Ity_I64 );
6321 IRTemp tmp2Hi = newTemp( Ity_I64 );
6322 IRTemp result = newTemp( Ity_I128 );
6323 IRTemp resultLo = newTemp( Ity_I64 );
6324 IRExpr* carryout;
6326 assign( rA, getIReg( rA_addr ) );
6327 assign( rB, getIReg( rB_addr ) );
6328 assign( rC, getIReg( rC_addr ) );
6330 /* There is no prefixed version of these instructions. */
6331 PREFIX_CHECK
6333 switch (opc2) {
6334 case 0x30: // maddhd multiply-add High doubleword signed
6335 DIP("maddhd r%u,r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr, rC_addr);
6337 assign( result, binop( Iop_MullS64, mkexpr( rA ), mkexpr( rB ) ) );
6338 assign( tmpLo, unop( Iop_128to64, mkexpr( result ) ) );
6339 assign( tmpHi, unop( Iop_128HIto64, mkexpr( result ) ) );
6341 /* Multiply rA and rB then add rC. If the lower 64 bits of the result
6342 * is less than rC and the result rA * rB, a carry out of the lower 64
6343 * bits occurred and the upper 64 bits must be incremented by 1. Sign
6344 * extend rC and do the add to the upper 64 bits to handle the
6345 * negative case for rC.
6347 assign( resultLo, binop( Iop_Add64, mkexpr( tmpLo ), mkexpr( rC ) ) );
6348 assign( tmp2Hi, binop( Iop_Add64,
6349 mkexpr( tmpHi ),
6350 unop( Iop_1Sto64,
6351 unop( Iop_64to1,
6352 binop( Iop_Shr64,
6353 mkexpr( rC ),
6354 mkU8( 63 ) ) ) ) ) );
6356 /* need to do calculation for the upper 64 bit result */
6357 carryout = mkAND1( binop( Iop_CmpLT64U,
6358 mkexpr( resultLo ), mkexpr( rC ) ),
6359 binop( Iop_CmpLT64U,
6360 mkexpr( resultLo ), mkexpr( tmpLo ) ) );
6361 assign( rD, binop( Iop_Add64,
6362 mkexpr( tmp2Hi ),
6363 unop( Iop_1Uto64, carryout ) ) );
6364 break;
6366 case 0x31: // maddhdu multiply-add High doubleword unsigned
6367 DIP("maddhdu r%u,r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr, rC_addr);
6369 assign( result, binop( Iop_MullU64, mkexpr( rA ), mkexpr( rB ) ) );
6370 assign( tmpLo, unop( Iop_128to64, mkexpr( result ) ) );
6371 assign( tmpHi, unop( Iop_128HIto64, mkexpr( result ) ) );
6373 /* Add rC, if the lower 64 bits of the result is less than rC and
6374 * tmpLo, a carry out of the lower 64 bits occurred. Upper 64 bits
6375 * must be incremented by 1.
6377 assign( resultLo, binop( Iop_Add64, mkexpr( tmpLo ), mkexpr( rC ) ) );
6379 /* need to do calculation for the upper 64 bit result */
6380 carryout = mkAND1( binop( Iop_CmpLT64U,
6381 mkexpr(resultLo), mkexpr( rC ) ),
6382 binop( Iop_CmpLT64U,
6383 mkexpr(resultLo), mkexpr( tmpLo ) ) );
6384 assign( rD, binop( Iop_Add64,
6385 mkexpr( tmpHi ),
6386 unop( Iop_1Uto64, carryout ) ) );
6387 break;
6389 case 0x33: // maddld multiply-add Low doubleword
6390 DIP("maddld r%u,r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr, rC_addr);
6392 assign( result, binop( Iop_MullS64, mkexpr( rA ), mkexpr( rB ) ) );
6393 assign( tmpLo, unop( Iop_128to64, mkexpr( result ) ) );
6394 assign( tmpHi, unop( Iop_128HIto64, mkexpr( result ) ) );
6396 assign( rD, binop( Iop_Add64, mkexpr( tmpLo ), mkexpr( rC ) ) );
6397 break;
6399 default:
6400 vex_printf("dis_int_mult(ppc): unrecognized instruction\n");
6401 return False;
6404 putIReg( rD_addr, mkexpr(rD) );
6406 return True;
/* Disassemble addi and its prefixed variant.  For the prefixed form the
   34-bit immediate is built from the prefix and suffix words, and when
   R=1 the result is relative to the address of the instruction prefix.
   Returns False on an unrecognized opc1. */
6409 static Bool dis_int_arith_prefix ( UInt prefix, UInt theInstr )
6412 UChar opc1 = ifieldOPC(theInstr);
6413 UChar rT_addr = ifieldRegDS(theInstr);
6414 UChar rA_addr = ifieldRegA(theInstr);
6415 IRType ty = mode64 ? Ity_I64 : Ity_I32;
6416 IRTemp rA = newTemp(ty);
6417 IRTemp rT = newTemp(ty);
6418 IRTemp tmp = newTemp(ty);
6419 IRTemp value = newTemp(ty);
6420 ULong si0 = ifieldUIMM18(prefix);
6421 ULong si1 = ifieldUIMM16(theInstr); // AKA, SI
6422 UInt ptype = PrefixType(prefix);
6423 Long simm16 = extend_s_16to64(si1);
6424 Bool is_prefix = prefix_instruction( prefix );
6425 UInt R = 0; // must be zero for word instruction
6427 if ( !is_prefix ) {
6428 assign( value, mkSzExtendS16( ty, si1 ));
6430 } else {
6431 vassert( ty == Ity_I64 ); // prefix instructions must be 64-bit
6432 vassert( ptype == pType2 );
6434 R = ifieldR(prefix);
6435 assign( value, mkSzExtendS34( CONCAT( si0, si1, 16 )));
6438 assign( rA, getIReg(rA_addr) );
6440 switch (opc1) {
6441 /* D-Form */
6443 case 0x0E: // addi (Add Immediate, PPC32 p350)
6444 // li rD,val == addi rD,0,val
6445 // la disp(rA) == addi rD,rA,disp
6447 if ( rA_addr == 0 ) {
6448 pDIP(is_prefix, "li r%u,%d", rT_addr, (Int)simm16);
6449 DIPn(is_prefix);
6450 assign( tmp, mkexpr( value ) );
6452 } else {
6453 pDIP(is_prefix, "addi r%u,r%u,%d", rT_addr, rA_addr, (Int)simm16);
6454 DIPp(is_prefix, ",%u", R);
6455 assign( tmp, binop( mkSzOp(ty, Iop_Add8), mkexpr( rA ), mkexpr( value ) ) );
6458 if ( R == 0 )
6459 assign( rT, mkexpr( tmp ) );
6460 else
6461 /* Add immediate value from instruction to the current instruction addr.
6462 guest_CIA_curr_instr is pointing at the prefix, use address of the
6463 instruction prefix. */
6464 assign( rT, binop( Iop_Add64,
6465 mkU64( mkSzAddr( Ity_I64, guest_CIA_curr_instr ) ),
6466 mkexpr( tmp ) ) );
6468 break;
6470 default:
6471 vex_printf("dis_int_arith_prefix(ppc)(opc1)\n");
6472 return False;
6475 putIReg( rT_addr, mkexpr(rT) );
6477 return True;
/* Disassemble the D-form and XO-form integer arithmetic instructions:
   addi/addis/addic/addic./mulli/subfic plus the opc1==0x1F family of
   add/subtract/multiply/divide (carrying, extended, overflow-setting
   variants).  Returns False on an unrecognized opcode. */
6480 static Bool dis_int_arith ( UInt prefix, UInt theInstr )
6482 /* D-Form, XO-Form */
6483 UChar opc1 = ifieldOPC(theInstr);
6484 UChar rD_addr = ifieldRegDS(theInstr);
6485 UChar rA_addr = ifieldRegA(theInstr);
6486 UInt uimm16 = ifieldUIMM16(theInstr);
6487 UChar rB_addr = ifieldRegB(theInstr);
6488 UChar flag_OE = ifieldBIT10(theInstr);
6489 UInt opc2 = ifieldOPClo9(theInstr);
6490 UChar flag_rC = ifieldBIT0(theInstr);
6492 Long simm16 = extend_s_16to64(uimm16);
6493 IRType ty = mode64 ? Ity_I64 : Ity_I32;
6494 IRTemp rA = newTemp(ty);
6495 IRTemp rB = newTemp(ty);
6496 IRTemp rD = newTemp(ty);
6498 Bool do_rc = False;
6500 /* There is no prefixed version of these instructions. */
6501 PREFIX_CHECK
6503 assign( rA, getIReg(rA_addr) );
6504 assign( rB, getIReg(rB_addr) ); // XO-Form: rD, rA, rB
6506 switch (opc1) {
6507 /* D-Form */
6508 case 0x0C: // addic (Add Immediate Carrying, PPC32 p351)
6509 DIP("addic r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
6510 assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
6511 mkSzExtendS16(ty, uimm16) ) );
6512 set_XER_CA_CA32( ty, PPCG_FLAG_OP_ADD,
6513 mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16),
6514 mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
6515 break;
6517 case 0x0D: // addic. (Add Immediate Carrying and Record, PPC32 p352)
6518 DIP("addic. r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
6519 assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
6520 mkSzExtendS16(ty, uimm16) ) );
6521 set_XER_CA_CA32( ty, PPCG_FLAG_OP_ADD,
6522 mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16),
6523 mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
6524 do_rc = True; // Always record to CR
6525 flag_rC = 1;
6526 break;
6528 case 0x0E: // addi (Add Immediate, PPC32 p350)
6529 // li rD,val == addi rD,0,val
6530 // la disp(rA) == addi rD,rA,disp
6531 if ( rA_addr == 0 ) {
6532 DIP("li r%u,%d\n", rD_addr, (Int)simm16);
6533 assign( rD, mkSzExtendS16(ty, uimm16) );
6534 } else {
6535 DIP("addi r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
6536 assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
6537 mkSzExtendS16(ty, uimm16) ) );
6539 break;
6541 case 0x0F: // addis (Add Immediate Shifted, PPC32 p353)
6542 // lis rD,val == addis rD,0,val
6543 if ( rA_addr == 0 ) {
6544 DIP("lis r%u,%d\n", rD_addr, (Int)simm16);
6545 assign( rD, mkSzExtendS32(ty, uimm16 << 16) );
6546 } else {
6547 DIP("addis r%u,r%u,0x%x\n", rD_addr, rA_addr, (UInt)simm16);
6548 assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
6549 mkSzExtendS32(ty, uimm16 << 16) ) );
6551 break;
6553 case 0x07: // mulli (Multiply Low Immediate, PPC32 p490)
6554 DIP("mulli r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
6555 if (mode64)
6556 assign( rD, unop(Iop_128to64,
6557 binop(Iop_MullS64, mkexpr(rA),
6558 mkSzExtendS16(ty, uimm16))) );
6559 else
6560 assign( rD, unop(Iop_64to32,
6561 binop(Iop_MullS32, mkexpr(rA),
6562 mkSzExtendS16(ty, uimm16))) );
6563 break;
6565 case 0x08: // subfic (Subtract from Immediate Carrying, PPC32 p540)
6566 DIP("subfic r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
6567 // rD = simm16 - rA
6568 assign( rD, binop( mkSzOp(ty, Iop_Sub8),
6569 mkSzExtendS16(ty, uimm16),
6570 mkexpr(rA)) );
6571 set_XER_CA_CA32( ty, PPCG_FLAG_OP_SUBFI,
6572 mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16),
6573 mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
6574 break;
6576 /* XO-Form */
6577 case 0x1F:
6578 do_rc = True; // All below record to CR
6580 switch (opc2) {
6581 case 0x10A: // add (Add, PPC32 p347)
6582 DIP("add%s%s r%u,r%u,r%u\n",
6583 flag_OE ? "o" : "", flag_rC ? ".":"",
6584 rD_addr, rA_addr, rB_addr);
6585 assign( rD, binop( mkSzOp(ty, Iop_Add8),
6586 mkexpr(rA), mkexpr(rB) ) );
6587 if (flag_OE) {
6588 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_ADD,
6589 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6591 break;
6593 case 0x00A: // addc (Add Carrying, PPC32 p348)
6594 DIP("addc%s%s r%u,r%u,r%u\n",
6595 flag_OE ? "o" : "", flag_rC ? ".":"",
6596 rD_addr, rA_addr, rB_addr);
6597 assign( rD, binop( mkSzOp(ty, Iop_Add8),
6598 mkexpr(rA), mkexpr(rB)) );
6599 set_XER_CA_CA32( ty, PPCG_FLAG_OP_ADD,
6600 mkexpr(rD), mkexpr(rA), mkexpr(rB),
6601 mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
6602 if (flag_OE) {
6603 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_ADD,
6604 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6606 break;
6608 case 0x08A: { // adde (Add Extended, PPC32 p349)
6609 IRTemp old_xer_ca = newTemp(ty);
6610 DIP("adde%s%s r%u,r%u,r%u\n",
6611 flag_OE ? "o" : "", flag_rC ? ".":"",
6612 rD_addr, rA_addr, rB_addr);
6613 // rD = rA + rB + XER[CA]
6614 assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA_32(), False) );
6615 assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
6616 binop( mkSzOp(ty, Iop_Add8),
6617 mkexpr(rB), mkexpr(old_xer_ca))) );
6618 set_XER_CA_CA32( ty, PPCG_FLAG_OP_ADDE,
6619 mkexpr(rD), mkexpr(rA), mkexpr(rB),
6620 mkexpr(old_xer_ca) );
6621 if (flag_OE) {
6622 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_ADDE,
6623 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6625 break;
6628 case 0xAA: {// addex (Add Extended alternate carry bit Z23-form)
6629 IRTemp old_xer_ov = newTemp(ty);
6630 DIP("addex r%u,r%u,r%u,%d\n", rD_addr, rA_addr, rB_addr, (Int)flag_OE);
6631 assign( old_xer_ov, mkWidenFrom32(ty, getXER_OV_32(), False) );
6632 assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
6633 binop( mkSzOp(ty, Iop_Add8), mkexpr(rB),
6634 mkexpr(old_xer_ov) ) ) );
6636 /* CY bit is same as OE bit */
6637 if (flag_OE == 0) {
6638 /* Exception, do not set SO bit and set OV from carry. */
6639 set_XER_OV_OV32_ADDEX( ty, mkexpr(rD), mkexpr(rA), mkexpr(rB),
6640 mkexpr(old_xer_ov) );
6641 } else {
6642 /* CY=1, 2 and 3 (AKA flag_OE) are reserved */
6643 vex_printf("addex instruction, CY = %d is reserved.\n", flag_OE);
6644 vpanic("addex instruction\n");
6646 break;
6649 case 0x0EA: { // addme (Add to Minus One Extended, PPC32 p354)
6650 IRTemp old_xer_ca = newTemp(ty);
6651 IRExpr *min_one;
6652 if (rB_addr != 0) {
6653 vex_printf("dis_int_arith(ppc)(addme,rB_addr)\n");
6654 return False;
6656 DIP("addme%s%s r%u,r%u,r%u\n",
6657 flag_OE ? "o" : "", flag_rC ? ".":"",
6658 rD_addr, rA_addr, rB_addr);
6659 // rD = rA + (-1) + XER[CA]
6660 // => Just another form of adde
6661 assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA_32(), False) );
6662 min_one = mkSzImm(ty, (Long)-1);
6663 assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
6664 binop( mkSzOp(ty, Iop_Add8),
6665 min_one, mkexpr(old_xer_ca)) ));
6666 set_XER_CA_CA32( ty, PPCG_FLAG_OP_ADDE,
6667 mkexpr(rD), mkexpr(rA), min_one,
6668 mkexpr(old_xer_ca) );
6669 if (flag_OE) {
6670 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_ADDE,
6671 mkexpr(rD), mkexpr(rA), min_one );
6673 break;
6676 case 0x0CA: { // addze (Add to Zero Extended, PPC32 p355)
6677 IRTemp old_xer_ca = newTemp(ty);
6678 if (rB_addr != 0) {
6679 vex_printf("dis_int_arith(ppc)(addze,rB_addr)\n");
6680 return False;
6682 DIP("addze%s%s r%u,r%u,r%u\n",
6683 flag_OE ? "o" : "", flag_rC ? ".":"",
6684 rD_addr, rA_addr, rB_addr);
6685 // rD = rA + (0) + XER[CA]
6686 // => Just another form of adde
6687 assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA_32(), False) );
6688 assign( rD, binop( mkSzOp(ty, Iop_Add8),
6689 mkexpr(rA), mkexpr(old_xer_ca)) );
6690 set_XER_CA_CA32( ty, PPCG_FLAG_OP_ADDE,
6691 mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0),
6692 mkexpr(old_xer_ca) );
6693 if (flag_OE) {
6694 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_ADDE,
6695 mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0) );
6697 break;
6700 case 0x1EB: // divw (Divide Word, PPC32 p388)
6701 DIP("divw%s%s r%u,r%u,r%u\n",
6702 flag_OE ? "o" : "", flag_rC ? ".":"",
6703 rD_addr, rA_addr, rB_addr);
6704 if (mode64) {
6705 /* Note:
6706 XER settings are mode independent, and reflect the
6707 overflow of the low-order 32bit result
6708 CR0[LT|GT|EQ] are undefined if flag_rC && mode64
6710 /* rD[hi32] are undefined: setting them to sign of lo32
6711 - makes set_CR0 happy */
6712 IRExpr* dividend = mk64lo32Sto64( mkexpr(rA) );
6713 IRExpr* divisor = mk64lo32Sto64( mkexpr(rB) );
6714 assign( rD, mk64lo32Uto64( binop(Iop_DivS64, dividend,
6715 divisor) ) );
6716 if (flag_OE) {
6717 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_DIVW,
6718 mkexpr(rD), dividend, divisor );
6720 } else {
6721 assign( rD, binop(Iop_DivS32, mkexpr(rA), mkexpr(rB)) );
6722 if (flag_OE) {
6723 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_DIVW,
6724 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6727 /* Note:
6728 if (0x8000_0000 / -1) or (x / 0)
6729 => rD=undef, if(flag_rC) CR7=undef, if(flag_OE) XER_OV=1
6730 => But _no_ exception raised. */
6731 break;
6733 case 0x1CB: // divwu (Divide Word Unsigned, PPC32 p389)
6734 DIP("divwu%s%s r%u,r%u,r%u\n",
6735 flag_OE ? "o" : "", flag_rC ? ".":"",
6736 rD_addr, rA_addr, rB_addr);
6737 if (mode64) {
6738 /* Note:
6739 XER settings are mode independent, and reflect the
6740 overflow of the low-order 32bit result
6741 CR0[LT|GT|EQ] are undefined if flag_rC && mode64
6743 IRExpr* dividend = mk64lo32Uto64( mkexpr(rA) );
6744 IRExpr* divisor = mk64lo32Uto64( mkexpr(rB) );
6745 assign( rD, mk64lo32Uto64( binop(Iop_DivU64, dividend,
6746 divisor) ) );
6747 if (flag_OE) {
6748 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_DIVWU,
6749 mkexpr(rD), dividend, divisor );
6751 } else {
6752 assign( rD, binop(Iop_DivU32, mkexpr(rA), mkexpr(rB)) );
6753 if (flag_OE) {
6754 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_DIVWU,
6755 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6758 /* Note: ditto comment divw, for (x / 0) */
6759 break;
6761 case 0x04B: // mulhw (Multiply High Word, PPC32 p488)
6762 if (flag_OE != 0) {
6763 vex_printf("dis_int_arith(ppc)(mulhw,flag_OE)\n");
6764 return False;
6766 DIP("mulhw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
6767 rD_addr, rA_addr, rB_addr);
6768 if (mode64) {
6769 /* rD[hi32] are undefined: setting them to sign of lo32
6770 - makes set_CR0 happy */
6771 assign( rD, binop(Iop_Sar64,
6772 binop(Iop_Mul64,
6773 mk64lo32Sto64( mkexpr(rA) ),
6774 mk64lo32Sto64( mkexpr(rB) )),
6775 mkU8(32)) );
6776 } else {
6777 assign( rD, unop(Iop_64HIto32,
6778 binop(Iop_MullS32,
6779 mkexpr(rA), mkexpr(rB))) );
6781 break;
6783 case 0x00B: // mulhwu (Multiply High Word Unsigned, PPC32 p489)
6784 if (flag_OE != 0) {
6785 vex_printf("dis_int_arith(ppc)(mulhwu,flag_OE)\n");
6786 return False;
6788 DIP("mulhwu%s r%u,r%u,r%u\n", flag_rC ? ".":"",
6789 rD_addr, rA_addr, rB_addr);
6790 if (mode64) {
6791 /* rD[hi32] are undefined: setting them to sign of lo32
6792 - makes set_CR0 happy */
6793 assign( rD, binop(Iop_Sar64,
6794 binop(Iop_Mul64,
6795 mk64lo32Uto64( mkexpr(rA) ),
6796 mk64lo32Uto64( mkexpr(rB) ) ),
6797 mkU8(32)) );
6798 } else {
6799 assign( rD, unop(Iop_64HIto32,
6800 binop(Iop_MullU32,
6801 mkexpr(rA), mkexpr(rB))) );
6803 break;
6805 case 0x0EB: // mullw (Multiply Low Word, PPC32 p491)
6806 DIP("mullw%s%s r%u,r%u,r%u\n",
6807 flag_OE ? "o" : "", flag_rC ? ".":"",
6808 rD_addr, rA_addr, rB_addr);
6809 if (mode64) {
6810 /* rD[hi32] are undefined: setting them to sign of lo32
6811 - set_XER_OV() and set_CR0() depend on this */
6812 IRExpr *a = unop(Iop_64to32, mkexpr(rA) );
6813 IRExpr *b = unop(Iop_64to32, mkexpr(rB) );
6814 assign( rD, binop(Iop_MullS32, a, b) );
6815 if (flag_OE) {
6816 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_MULLW,
6817 mkexpr(rD),
6818 unop(Iop_32Uto64, a), unop(Iop_32Uto64, b) );
6820 } else {
6821 assign( rD, unop(Iop_64to32,
6822 binop(Iop_MullU32,
6823 mkexpr(rA), mkexpr(rB))) );
6824 if (flag_OE) {
6825 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_MULLW,
6826 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6829 break;
6831 case 0x068: // neg (Negate, PPC32 p493)
6832 if (rB_addr != 0) {
6833 vex_printf("dis_int_arith(ppc)(neg,rB_addr)\n");
6834 return False;
6836 DIP("neg%s%s r%u,r%u\n",
6837 flag_OE ? "o" : "", flag_rC ? ".":"",
6838 rD_addr, rA_addr);
6839 // rD = (~rA) + 1
6840 assign( rD, binop( mkSzOp(ty, Iop_Add8),
6841 unop( mkSzOp(ty, Iop_Not8), mkexpr(rA) ),
6842 mkSzImm(ty, 1)) );
6843 if (flag_OE) {
6844 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_NEG,
6845 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6847 break;
6849 case 0x028: // subf (Subtract From, PPC32 p537)
6850 DIP("subf%s%s r%u,r%u,r%u\n",
6851 flag_OE ? "o" : "", flag_rC ? ".":"",
6852 rD_addr, rA_addr, rB_addr);
6853 // rD = rB - rA
6854 assign( rD, binop( mkSzOp(ty, Iop_Sub8),
6855 mkexpr(rB), mkexpr(rA)) );
6856 if (flag_OE) {
6857 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_SUBF,
6858 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6860 break;
6862 case 0x008: // subfc (Subtract from Carrying, PPC32 p538)
6863 DIP("subfc%s%s r%u,r%u,r%u\n",
6864 flag_OE ? "o" : "", flag_rC ? ".":"",
6865 rD_addr, rA_addr, rB_addr);
6866 // rD = rB - rA
6867 assign( rD, binop( mkSzOp(ty, Iop_Sub8),
6868 mkexpr(rB), mkexpr(rA)) );
6869 set_XER_CA_CA32( ty, PPCG_FLAG_OP_SUBFC,
6870 mkexpr(rD), mkexpr(rA), mkexpr(rB),
6871 mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
6872 if (flag_OE) {
6873 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_SUBFC,
6874 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6876 break;
6878 case 0x088: {// subfe (Subtract from Extended, PPC32 p539)
6879 IRTemp old_xer_ca = newTemp(ty);
6880 DIP("subfe%s%s r%u,r%u,r%u\n",
6881 flag_OE ? "o" : "", flag_rC ? ".":"",
6882 rD_addr, rA_addr, rB_addr);
6883 // rD = (log not)rA + rB + XER[CA]
6884 // ==>
6885 // rD = rB - rA - (XER[CA] ^ 1)
6886 assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA_32(), False) );
6887 assign( rD, binop( mkSzOp(ty, Iop_Sub8),
6888 binop( mkSzOp(ty, Iop_Sub8),
6889 mkexpr(rB), mkexpr(rA)),
6890 binop(mkSzOp(ty, Iop_Xor8),
6891 mkexpr(old_xer_ca),
6892 mkSzImm(ty, 1))) );
6893 set_XER_CA_CA32( ty, PPCG_FLAG_OP_SUBFE,
6894 mkexpr(rD), mkexpr(rA), mkexpr(rB),
6895 mkexpr(old_xer_ca) );
6896 if (flag_OE) {
6897 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_SUBFE,
6898 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6900 break;
6903 case 0x0E8: { // subfme (Subtract from -1 Extended, PPC32 p541)
6904 IRTemp old_xer_ca = newTemp(ty);
6905 IRExpr *min_one;
6906 if (rB_addr != 0) {
6907 vex_printf("dis_int_arith(ppc)(subfme,rB_addr)\n");
6908 return False;
6910 DIP("subfme%s%s r%u,r%u\n",
6911 flag_OE ? "o" : "", flag_rC ? ".":"",
6912 rD_addr, rA_addr);
6913 // rD = (log not)rA + (-1) + XER[CA]
6914 // => Just another form of subfe
6915 assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA_32(), False) );
6916 min_one = mkSzImm(ty, (Long)-1);
6917 assign( rD, binop( mkSzOp(ty, Iop_Add8),
6918 unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)),
6919 binop( mkSzOp(ty, Iop_Add8),
6920 min_one, mkexpr(old_xer_ca))) );
6921 set_XER_CA_CA32( ty, PPCG_FLAG_OP_SUBFE,
6922 mkexpr(rD), mkexpr(rA), min_one,
6923 mkexpr(old_xer_ca) );
6924 if (flag_OE) {
6925 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_SUBFE,
6926 mkexpr(rD), mkexpr(rA), min_one );
6928 break;
6931 case 0x0C8: { // subfze (Subtract from Zero Extended, PPC32 p542)
6932 IRTemp old_xer_ca = newTemp(ty);
6933 if (rB_addr != 0) {
6934 vex_printf("dis_int_arith(ppc)(subfze,rB_addr)\n");
6935 return False;
6937 DIP("subfze%s%s r%u,r%u\n",
6938 flag_OE ? "o" : "", flag_rC ? ".":"",
6939 rD_addr, rA_addr);
6940 // rD = (log not)rA + (0) + XER[CA]
6941 // => Just another form of subfe
6942 assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA_32(), False) );
6943 assign( rD, binop( mkSzOp(ty, Iop_Add8),
6944 unop( mkSzOp(ty, Iop_Not8),
6945 mkexpr(rA)), mkexpr(old_xer_ca)) );
6946 set_XER_CA_CA32( ty, PPCG_FLAG_OP_SUBFE,
6947 mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0),
6948 mkexpr(old_xer_ca) );
6949 if (flag_OE) {
6950 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_SUBFE,
6951 mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0) );
6953 break;
6957 /* 64bit Arithmetic */
6958 case 0x49: // mulhd (Multiply High DWord, PPC64 p539)
6959 if (flag_OE != 0) {
6960 vex_printf("dis_int_arith(ppc)(mulhd,flagOE)\n");
6961 return False;
6963 DIP("mulhd%s r%u,r%u,r%u\n", flag_rC ? ".":"",
6964 rD_addr, rA_addr, rB_addr);
6965 assign( rD, unop(Iop_128HIto64,
6966 binop(Iop_MullS64,
6967 mkexpr(rA), mkexpr(rB))) );
6969 break;
6971 case 0x9: // mulhdu (Multiply High DWord Unsigned, PPC64 p540)
6972 if (flag_OE != 0) {
6973 vex_printf("dis_int_arith(ppc)(mulhdu,flagOE)\n");
6974 return False;
6976 DIP("mulhdu%s r%u,r%u,r%u\n", flag_rC ? ".":"",
6977 rD_addr, rA_addr, rB_addr);
6978 assign( rD, unop(Iop_128HIto64,
6979 binop(Iop_MullU64,
6980 mkexpr(rA), mkexpr(rB))) );
6981 break;
6983 case 0xE9: // mulld (Multiply Low DWord, PPC64 p543)
6984 DIP("mulld%s%s r%u,r%u,r%u\n",
6985 flag_OE ? "o" : "", flag_rC ? ".":"",
6986 rD_addr, rA_addr, rB_addr);
6987 assign( rD, binop(Iop_Mul64, mkexpr(rA), mkexpr(rB)) );
6988 if (flag_OE) {
6989 set_XER_OV_64( PPCG_FLAG_OP_MULLD,
6990 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
6991 /* OV is set to 1 if product isn't representable.
6992 * In this case also need to set OV32 and SO to 1,
6993 * i.e. copy OV to OV32 and SO.
6995 copy_OV_to_OV32();
6996 update_SO();
6998 break;
7000 case 0x1E9: // divd (Divide DWord, PPC64 p419)
7001 DIP("divd%s%s r%u,r%u,r%u\n",
7002 flag_OE ? "o" : "", flag_rC ? ".":"",
7003 rD_addr, rA_addr, rB_addr);
7004 assign( rD, binop(Iop_DivS64, mkexpr(rA), mkexpr(rB)) );
7005 if (flag_OE) {
7006 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_DIVW,
7007 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
7009 break;
7010 /* Note:
7011 if (0x8000_0000_0000_0000 / -1) or (x / 0)
7012 => rD=undef, if(flag_rC) CR7=undef, if(flag_OE) XER_OV=1
7013 => But _no_ exception raised. */
7015 case 0x1C9: // divdu (Divide DWord Unsigned, PPC64 p420)
7016 DIP("divdu%s%s r%u,r%u,r%u\n",
7017 flag_OE ? "o" : "", flag_rC ? ".":"",
7018 rD_addr, rA_addr, rB_addr);
7019 assign( rD, binop(Iop_DivU64, mkexpr(rA), mkexpr(rB)) );
7020 if (flag_OE) {
7021 set_XER_OV_OV32_SO( ty, PPCG_FLAG_OP_DIVWU,
7022 mkexpr(rD), mkexpr(rA), mkexpr(rB) );
7024 break;
7025 /* Note: ditto comment divd, for (x / 0) */
7027 case 0x18B: // divweu (Divide Word Extended Unsigned)
7030 * If (RA) >= (RB), or if an attempt is made to perform the division
7031 * <anything> / 0
7032 * then the contents of register RD are undefined as are (if Rc=1) the contents of
7033 * the LT, GT, and EQ bits of CR Field 0. In these cases, if OE=1 then OV is set
7034 * to 1.
7036 IRTemp res = newTemp(Ity_I32);
7037 IRExpr * dividend, * divisor;
7038 DIP("divweu%s%s r%u,r%u,r%u\n",
7039 flag_OE ? "o" : "", flag_rC ? ".":"",
7040 rD_addr, rA_addr, rB_addr);
7041 if (mode64) {
7042 dividend = unop( Iop_64to32, mkexpr( rA ) );
7043 divisor = unop( Iop_64to32, mkexpr( rB ) );
7044 assign( res, binop( Iop_DivU32E, dividend, divisor ) );
7045 assign( rD, binop( Iop_32HLto64, mkU32( 0 ), mkexpr( res ) ) );
7046 } else {
7047 dividend = mkexpr( rA );
7048 divisor = mkexpr( rB );
7049 assign( res, binop( Iop_DivU32E, dividend, divisor ) );
7050 assign( rD, mkexpr( res) );
7053 if (flag_OE) {
7054 set_XER_OV_OV32_32( PPCG_FLAG_OP_DIVWEU,
7055 mkexpr(res), dividend, divisor );
7056 update_SO( );
7058 break;
7061 case 0x1AB: // divwe (Divide Word Extended)
7064 * If the quotient cannot be represented in 32 bits, or if an
7065 * attempt is made to perform the division
7066 * <anything> / 0
7067 * then the contents of register RD are undefined as are (if
7068 * Rc=1) the contents of the LT, GT, and EQ bits of CR
7069 * Field 0. In these cases, if OE=1 then OV is set to 1.
7072 IRTemp res = newTemp(Ity_I32);
7073 IRExpr * dividend, * divisor;
7074 DIP("divwe%s%s r%u,r%u,r%u\n",
7075 flag_OE ? "o" : "", flag_rC ? ".":"",
7076 rD_addr, rA_addr, rB_addr);
7077 if (mode64) {
7078 dividend = unop( Iop_64to32, mkexpr( rA ) );
7079 divisor = unop( Iop_64to32, mkexpr( rB ) );
7080 assign( res, binop( Iop_DivS32E, dividend, divisor ) );
7081 assign( rD, binop( Iop_32HLto64, mkU32( 0 ), mkexpr( res ) ) );
7082 } else {
7083 dividend = mkexpr( rA );
7084 divisor = mkexpr( rB );
7085 assign( res, binop( Iop_DivS32E, dividend, divisor ) );
7086 assign( rD, mkexpr( res) );
7089 if (flag_OE) {
7090 set_XER_OV_OV32_32( PPCG_FLAG_OP_DIVWE,
7091 mkexpr(res), dividend, divisor );
7092 update_SO( );
7094 break;
7098 case 0x1A9: // divde (Divide Doubleword Extended)
7100 * If the quotient cannot be represented in 64 bits, or if an
7101 * attempt is made to perform the division
7102 * <anything> / 0
7103 * then the contents of register RD are undefined as are (if
7104 * Rc=1) the contents of the LT, GT, and EQ bits of CR
7105 * Field 0. In these cases, if OE=1 then OV is set to 1.
7107 DIP("divde%s%s r%u,r%u,r%u\n",
7108 flag_OE ? "o" : "", flag_rC ? ".":"",
7109 rD_addr, rA_addr, rB_addr);
7110 assign( rD, binop(Iop_DivS64E, mkexpr(rA), mkexpr(rB)) );
7111 if (flag_OE) {
7112 set_XER_OV_64( PPCG_FLAG_OP_DIVDE, mkexpr( rD ),
7113 mkexpr( rA ), mkexpr( rB ) );
7114 copy_OV_to_OV32();
7115 update_SO();
7117 break;
7119 case 0x189: // divdeuo (Divide Doubleword Extended Unsigned)
7120 // Same CR and OV rules as given for divweu above
7121 DIP("divdeu%s%s r%u,r%u,r%u\n",
7122 flag_OE ? "o" : "", flag_rC ? ".":"",
7123 rD_addr, rA_addr, rB_addr);
7124 assign( rD, binop(Iop_DivU64E, mkexpr(rA), mkexpr(rB)) );
7125 if (flag_OE) {
7126 set_XER_OV_64( PPCG_FLAG_OP_DIVDEU, mkexpr( rD ),
7127 mkexpr( rA ), mkexpr( rB ) );
7128 copy_OV_to_OV32();
7129 update_SO();
7131 break;
7133 default:
7134 vex_printf("dis_int_arith(ppc)(opc2)\n");
7135 return False;
7137 break;
7139 default:
7140 vex_printf("dis_int_arith(ppc)(opc1)\n");
7141 return False;
7144 putIReg( rD_addr, mkexpr(rD) );
7146 if (do_rc && flag_rC) {
7147 set_CR0( mkexpr(rD) );
7149 return True;
7152 static Bool dis_modulo_int ( UInt prefix, UInt theInstr )
7154 /* X-Form */
7155 UChar opc1 = ifieldOPC( theInstr );
7156 UInt opc2 = ifieldOPClo10( theInstr );
7157 UChar rA_addr = ifieldRegA( theInstr );
7158 UChar rB_addr = ifieldRegB( theInstr );
7159 UChar rD_addr = ifieldRegDS( theInstr );
7160 IRType ty = mode64 ? Ity_I64 : Ity_I32;
7161 IRTemp rD = newTemp( ty );
7163 /* There is no prefixed version of these instructions. */
7164 PREFIX_CHECK
7166 switch (opc1) {
7167 /* X-Form */
7168 case 0x1F:
7169 switch (opc2) {
7170 case 0x109: // modud Modulo Unsigned Double Word
7172 IRTemp rA = newTemp( Ity_I64 );
7173 IRTemp rB = newTemp( Ity_I64 );
7174 IRTemp quotient = newTemp( Ity_I64 );
7175 IRTemp quotientTimesDivisor = newTemp( Ity_I64 );
7176 IRTemp remainder = newTemp( Ity_I64 );
7177 IRTemp rB_0 = newTemp( Ity_I64 ); /* all 1's if rB = 0 */
7179 DIP("modud r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
7181 assign( rA, getIReg( rA_addr ) );
7182 assign( rB, getIReg( rB_addr ) );
7184 assign( quotient,
7185 binop( Iop_DivU64, mkexpr( rA ), mkexpr( rB ) ) );
7187 assign( quotientTimesDivisor,
7188 binop( Iop_Mul64,
7189 mkexpr( quotient ),
7190 mkexpr( rB ) ) );
7192 assign( remainder,
7193 binop( Iop_Sub64,
7194 mkexpr( rA ),
7195 mkexpr( quotientTimesDivisor ) ) );
7197 /* Need to match the HW for these special cases
7198 * rB = 0 result all zeros
7200 assign( rB_0, unop( Iop_1Sto64,
7201 binop( Iop_CmpEQ64,
7202 mkexpr( rB ),
7203 mkU64( 0x0 ) ) ) );
7205 assign (rD, binop( Iop_And64,
7206 unop( Iop_Not64, mkexpr( rB_0 ) ),
7207 mkexpr( remainder ) ) );
7208 break;
7211 case 0x10B: // moduw Modulo Unsigned Word
7213 IRTemp quotient = newTemp( Ity_I32 );
7214 IRTemp quotientTimesDivisor = newTemp( Ity_I32 );
7215 IRTemp remainder = newTemp( Ity_I32 );
7217 IRTemp rA = newTemp( Ity_I32 );
7218 IRTemp rB = newTemp( Ity_I32 );
7219 IRTemp rB_0 = newTemp( Ity_I32 ); /* all 1's if rB = 0 */
7221 DIP("moduw r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
7223 if ( ty == Ity_I64 ) {
7224 /* rA and rB are 32 bit values in bits 32:63 of the
7225 * 64-bit register.
7227 assign( rA, unop( Iop_64to32, getIReg( rA_addr ) ) );
7228 assign( rB, unop( Iop_64to32, getIReg( rB_addr ) ) );
7230 } else {
7231 assign( rA, getIReg( rA_addr ) );
7232 assign( rB, getIReg( rB_addr ) );
7235 assign( quotient,
7236 binop( Iop_DivU32, mkexpr( rA ), mkexpr( rB ) ) );
7238 assign( quotientTimesDivisor,
7239 unop( Iop_64to32,
7240 binop( Iop_MullU32,
7241 mkexpr( quotient ),
7242 mkexpr( rB ) ) ) );
7244 assign( remainder,
7245 binop( Iop_Sub32,
7246 mkexpr( rA ),
7247 mkexpr( quotientTimesDivisor ) ) );
7249 /* Need to match the HW for these special cases
7250 * rB = 0 result all zeros
7252 assign( rB_0, unop( Iop_1Sto32,
7253 binop( Iop_CmpEQ32,
7254 mkexpr( rB ),
7255 mkU32( 0x0 ) ) ) );
7257 assign (rD, binop( Iop_32HLto64,
7258 mkU32( 0 ),
7259 binop( Iop_And32,
7260 unop( Iop_Not32, mkexpr( rB_0 ) ),
7261 mkexpr( remainder ) ) ) );
7262 break;
7265 case 0x21A: // cnttzw, cnttzw. Count Trailing Zero Word
7267 /* Note cnttzw RA, RS - RA is dest, RS is source. But the
7268 * order of the operands in theInst is opc1 RS RA opc2 which has
7269 * the operand fields backwards to what the standard order.
7271 UChar rA_address = ifieldRegA(theInstr);
7272 UChar rS_address = ifieldRegDS(theInstr);
7273 IRTemp rA = newTemp(Ity_I64);
7274 IRTemp rS = newTemp(Ity_I64);
7275 UChar flag_rC = ifieldBIT0(theInstr);
7276 IRTemp result = newTemp(Ity_I32);
7278 DIP("cnttzw%s r%u,r%u\n", flag_rC ? "." : "",
7279 rA_address, rS_address);
7281 assign( rS, getIReg( rS_address ) );
7282 assign( result, unop( Iop_CtzNat32,
7283 unop( Iop_64to32, mkexpr( rS ) ) ) );
7284 assign( rA, binop( Iop_32HLto64, mkU32( 0 ), mkexpr( result ) ) );
7286 if ( flag_rC )
7287 set_CR0( mkexpr( rA ) );
7289 putIReg( rA_address, mkexpr( rA ) );
7291 return True; /* Return here since this inst is not consistent
7292 * with the other instructions
7295 break;
7297 case 0x23A: // cnttzd, cnttzd. Count Trailing Zero Double word
7299 /* Note cnttzd RA, RS - RA is dest, RS is source. But the
7300 * order of the operands in theInst is opc1 RS RA opc2 which has
7301 * the operand order listed backwards to what is standard.
7303 UChar rA_address = ifieldRegA(theInstr);
7304 UChar rS_address = ifieldRegDS(theInstr);
7305 IRTemp rA = newTemp(Ity_I64);
7306 IRTemp rS = newTemp(Ity_I64);
7307 UChar flag_rC = ifieldBIT0(theInstr);
7309 DIP("cnttzd%s r%u,r%u\n", flag_rC ? "." : "",
7310 rA_address, rS_address);
7312 assign( rS, getIReg( rS_address ) );
7313 assign( rA, unop( Iop_CtzNat64, mkexpr( rS ) ) );
7315 if ( flag_rC == 1 )
7316 set_CR0( mkexpr( rA ) );
7318 putIReg( rA_address, mkexpr( rA ) );
7320 return True; /* Return here since this inst is not consistent
7321 * with the other instructions
7324 break;
7326 case 0x309: // modsd Modulo Signed Double Word
7328 IRTemp rA = newTemp( Ity_I64 );
7329 IRTemp rB = newTemp( Ity_I64 );
7330 IRTemp rA2_63 = newTemp( Ity_I64 ); /* all 1's if rA != -2^63 */
7331 IRTemp rB_0 = newTemp( Ity_I1 ); /* 1 if rB = 0 */
7332 IRTemp rB_1 = newTemp( Ity_I1 ); /* 1 if rB = 1 */
7333 IRTemp rB_m1 = newTemp( Ity_I1 ); /* 1 if rB = -1 */
7334 IRTemp rA_m1 = newTemp( Ity_I1 ); /* 1 if rA = -1 */
7335 IRTemp resultis0 = newTemp( Ity_I64 );
7336 IRTemp quotient = newTemp( Ity_I64 );
7337 IRTemp quotientTimesDivisor = newTemp( Ity_I64 );
7338 IRTemp remainder = newTemp( Ity_I64 );
7340 DIP("modsd r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
7342 assign( rA, getIReg( rA_addr ) );
7343 assign( rB, getIReg( rB_addr ) );
7345 assign( rA2_63, unop ( Iop_1Sto64,
7346 binop( Iop_CmpNE64,
7347 mkexpr( rA ),
7348 mkU64( 0x8000000000000000 ) ) ) );
7349 assign( rB_0, binop( Iop_CmpEQ64,
7350 mkexpr( rB ),
7351 mkU64( 0x0 ) ) );
7353 assign( rB_1, binop( Iop_CmpEQ64,
7354 mkexpr( rB ),
7355 mkU64( 0x1 ) ) );
7357 assign( rB_m1, binop( Iop_CmpEQ64,
7358 mkexpr( rB ),
7359 mkU64( 0xFFFFFFFFFFFFFFFF ) ) );
7361 assign( rA_m1, binop( Iop_CmpEQ64,
7362 mkexpr( rA ),
7363 mkU64( 0xFFFFFFFFFFFFFFFF ) ) );
7365 /* Need to match the HW for these special cases
7366 rA = -2^31 and rB = -1 result all zeros
7367 rA = -1 and rB = -1 result all zeros
7369 if an attempt is made to perform any of the divisions:
7370 0x80000000 % -1
7371 <anything> % 0
7372 result is undefined. Force result to zero to match the
7373 HW behaviour. */
7375 assign( resultis0,
7376 binop( Iop_Or64,
7377 binop( Iop_Or64,
7378 /* -1 % 1 */
7379 binop( Iop_And64,
7380 unop( Iop_1Sto64, mkexpr( rA_m1 ) ),
7381 unop( Iop_1Sto64, mkexpr( rB_1 ) ) ),
7382 /* rA % 0 (division by zero) */
7383 unop( Iop_1Sto64, mkexpr( rB_0 ) ) ),
7384 binop( Iop_Or64,
7385 binop( Iop_And64,
7386 unop( Iop_Not64,
7387 mkexpr( rA2_63 ) ),
7388 unop ( Iop_1Sto64,
7389 mkexpr( rB_m1 ) ) ),
7390 /* -1 % -1 */
7391 binop( Iop_And64,
7392 unop( Iop_1Sto64, mkexpr( rA_m1 ) ),
7393 unop( Iop_1Sto64, mkexpr( rB_m1 ) )
7394 ) ) ) );
7396 /* The following remainder computation works as long as
7397 * rA != -2^63 and rB != -1.
7399 assign( quotient,
7400 binop( Iop_DivS64, mkexpr( rA ), mkexpr( rB ) ) );
7402 assign( quotientTimesDivisor,
7403 binop( Iop_Mul64,
7404 mkexpr( quotient ),
7405 mkexpr( rB ) ) );
7407 assign( remainder,
7408 binop( Iop_Sub64,
7409 mkexpr( rA ),
7410 mkexpr( quotientTimesDivisor ) ) );
7412 assign( rD, binop( Iop_And64,
7413 mkexpr( remainder ),
7414 unop( Iop_Not64,
7415 mkexpr( resultis0 ) ) ) );
7416 break;
7418 case 0x30B: // modsw Modulo Signed Word
7420 IRTemp rA = newTemp( Ity_I32 );
7421 IRTemp rB = newTemp( Ity_I32 );
7422 IRTemp rA2_32 = newTemp( Ity_I32 ); /* all 1's if rA = -2^32 */
7423 IRTemp rB_0 = newTemp( Ity_I1 ); /* 1 if rB = 0 */
7424 IRTemp rB_1 = newTemp( Ity_I1 ); /* 1 if rB = 1 */
7425 IRTemp rB_m1 = newTemp( Ity_I1 ); /* 1 if rB = -1, 0xFFFFFFFF */
7426 IRTemp rA_m1 = newTemp( Ity_I1 ); /* 1 if rA = -1, 0xFFFFFFFF */
7427 IRTemp resultis0 = newTemp( Ity_I32 );
7428 IRTemp quotient = newTemp( Ity_I32 );
7429 IRTemp quotientTimesDivisor = newTemp( Ity_I32 );
7430 IRTemp remainder = newTemp( Ity_I32 );
7432 DIP("modsw r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
7434 if ( ty == Ity_I64 ) {
7435 /* rA and rB are 32 bit values in bits 32:63 of the
7436 * 64-bit register.
7438 assign( rA, unop(Iop_64to32, getIReg(rA_addr) ) );
7439 assign( rB, unop(Iop_64to32, getIReg(rB_addr) ) );
7441 } else {
7442 assign( rA, getIReg(rA_addr) );
7443 assign( rB, getIReg(rB_addr) );
7446 assign( rA2_32, unop( Iop_1Sto32,
7447 binop( Iop_CmpEQ32,
7448 mkexpr( rA ),
7449 mkU32( 0x80000000 ) ) ) );
7450 /* If the divisor is zero, then the result is undefined.
7451 * However, we will make the result be zero to match what
7452 * the hardware does.
7454 assign( rB_0, binop( Iop_CmpEQ32,
7455 mkexpr( rB ),
7456 mkU32( 0x0 ) ) );
7458 assign( rB_1, binop( Iop_CmpEQ32,
7459 mkexpr( rB ),
7460 mkU32( 0x00000001 ) ) );
7462 assign( rB_m1, binop( Iop_CmpEQ32,
7463 mkexpr( rB ),
7464 mkU32( 0xFFFFFFFF ) ) );
7466 assign( rA_m1, binop( Iop_CmpEQ32,
7467 mkexpr( rA ),
7468 mkU32( 0xFFFFFFFF ) ) );
7470 /* Need to match the HW for these special cases
7471 rA = -2^31 and rB = -1 result all zeros
7472 rA = -1 and rB = -1 result all zeros
7473 rA = -1 and rB = 1 result all zeros
7475 if an attempt is made to perform any of the divisions:
7476 0x80000000 % -1
7477 <anything> % 0
7478 result is undefined. Force result to zero to match the
7479 HW beaviour. */
7481 assign( resultis0,
7482 binop( Iop_Or32,
7483 binop( Iop_Or32,
7484 /* -1 % 1 */
7485 binop( Iop_And32,
7486 unop( Iop_1Sto32, mkexpr( rA_m1 ) ),
7487 unop( Iop_1Sto32, mkexpr( rB_1 ) ) ),
7488 /* rA % 0 (division by zero) */
7489 unop( Iop_1Sto32, mkexpr( rB_0 ) ) ),
7491 binop( Iop_Or32,
7492 /* 0x8000000 % -1 */
7493 binop( Iop_And32,
7494 mkexpr( rA2_32 ),
7495 unop( Iop_1Sto32,
7496 mkexpr( rB_m1 ) ) ),
7497 /* -1 % -1 */
7498 binop( Iop_And32,
7499 unop( Iop_1Sto32, mkexpr( rA_m1 ) ),
7500 unop( Iop_1Sto32, mkexpr( rB_m1 ) )
7501 ) ) ) );
7503 /* The following remainder computation works as long as
7504 * rA != -2^31 and rB != -1.
7506 assign( quotient,
7507 binop( Iop_DivS32, mkexpr( rA ), mkexpr( rB ) ) );
7509 assign( quotientTimesDivisor,
7510 unop( Iop_64to32,
7511 binop( Iop_MullS32,
7512 mkexpr( quotient ),
7513 mkexpr( rB ) ) ) );
7515 assign( remainder,
7516 binop( Iop_Sub32,
7517 mkexpr( rA ),
7518 mkexpr( quotientTimesDivisor ) ) );
7520 assign( rD, binop( Iop_32HLto64,
7521 mkU32( 0 ),
7522 binop( Iop_And32,
7523 mkexpr( remainder ),
7524 unop( Iop_Not32,
7525 mkexpr( resultis0 ) ) ) ) );
7526 break;
7529 default:
7530 vex_printf("dis_modulo_int(ppc)(opc2)\n");
7531 return False;
7533 break;
7535 default:
7536 vex_printf("dis_modulo_int(ppc)(opc1)\n");
7537 return False;
7540 putIReg( rD_addr, mkexpr( rD ) );
7542 return True;
7547 Byte Compare Instructions
/* Disassemble the byte-compare instructions cmprb (Compare Ranged
   Byte) and cmpeqb (Compare Equal Byte) into IR.  Both set a CR
   field (BF) rather than a GPR. */
7549 static Bool dis_byte_cmp ( UInt prefix, UInt theInstr )
7551 /* X-Form */
7552 UChar opc1 = ifieldOPC(theInstr);
7553 UInt opc2 = ifieldOPClo10(theInstr);
7554 UChar rA_addr = ifieldRegA(theInstr);
7555 UChar rB_addr = ifieldRegB(theInstr);
7556 IRTemp rA = newTemp(Ity_I64);
7557 IRTemp rB = newTemp(Ity_I64);
7558 UChar L = toUChar( IFIELD( theInstr, 21, 1 ) );
7559 UChar BF = toUChar( IFIELD( theInstr, 23, 3 ) );
7561 /* There is no prefixed version of these instructions. */
7562 PREFIX_CHECK
7564 assign( rA, getIReg(rA_addr) );
7565 assign( rB, getIReg(rB_addr) );
7567 if (opc1 != 0x1F) {
7568 vex_printf("dis_byte_cmp(ppc)(opc1)\n");
7569 return False;
7572 switch (opc2) {
7573 case 0xc0: // cmprb (Compare Ranged Byte)
7575 IRExpr *value;
7576 IRExpr *hi_1, *lo_1, *hi_2, *lo_2;
7577 IRExpr *inrange_1, *inrange_2;
7579 DIP("cmprb %u,%u,r%u,r%u\n", BF, L, rA_addr, rB_addr);
/* Extract the two (lo, hi) range byte pairs from rB. */
7581 hi_1 = binop( Iop_Shr64,
7582 binop( Iop_And64,
7583 mkexpr( rB ),
7584 mkU64( 0xFF000000 ) ),
7585 mkU8( 24 ) );
7586 lo_1 = binop( Iop_Shr64,
7587 binop( Iop_And64,
7588 mkexpr( rB ),
7589 mkU64( 0xFF0000 ) ) ,
7590 mkU8( 16 ) );
7591 hi_2 = binop( Iop_Shr64,
7592 binop( Iop_And64,
7593 mkexpr( rB ),
7594 mkU64( 0xFF00 ) ),
7595 mkU8( 8 ) );
7596 lo_2 = binop( Iop_And64,
7597 mkexpr( rB ),
7598 mkU64( 0xFF ) );
7599 value = binop( Iop_And64,
7600 mkexpr( rA ),
7601 mkU64( 0xFF ) );
/* in-range tests: lo_n <= value <= hi_n (unsigned) */
7603 inrange_1 = mkAND1( binop( Iop_CmpLE64U, value, hi_1 ),
7604 mkNOT1( binop( Iop_CmpLT64U, value, lo_1 ) ) );
7605 inrange_2 = mkAND1( binop( Iop_CmpLE64U, value, hi_2 ),
7606 mkNOT1( binop( Iop_CmpLT64U, value, lo_2 ) ) );
/* Range 1 only participates when L = 1. */
7608 putGST_field( PPC_GST_CR,
7609 binop( Iop_Shl32,
7610 binop( Iop_Or32,
7611 unop( Iop_1Uto32, inrange_2 ),
7612 binop( Iop_And32,
7613 mkU32 ( L ),
7614 unop( Iop_1Uto32, inrange_1 ) ) ),
7615 mkU8( 2 ) ),
7616 BF );
7618 break;
7620 case 0xE0: // cmpeqb (Compare Equal Byte)
7622 Int i;
7623 IRTemp tmp[9];
7624 IRExpr *value;
7626 DIP("cmpeqb %u,r%u,r%u\n", BF, rA_addr, rB_addr);
7628 value = binop( Iop_And64,
7629 mkexpr( rA ),
7630 mkU64( 0xFF ) );
7632 tmp[0] = newTemp(Ity_I32);
7633 assign( tmp[0], mkU32( 0 ) );
/* OR together an equality test against each of the 8 bytes of rB. */
7635 for(i = 0; i < 8; i++) {
7636 tmp[i+1] = newTemp(Ity_I32);
7637 assign( tmp[i+1], binop( Iop_Or32,
7638 unop( Iop_1Uto32,
7639 binop( Iop_CmpEQ64,
7640 value,
7641 binop( Iop_And64,
7642 binop( Iop_Shr64,
7643 mkexpr( rB ),
7644 mkU8( i*8 ) ),
7645 mkU64( 0xFF ) ) ) ),
7646 mkexpr( tmp[i] ) ) );
7649 putGST_field( PPC_GST_CR,
7650 binop( Iop_Shl32,
7651 unop( Iop_1Uto32,
7652 mkNOT1( binop( Iop_CmpEQ32,
7653 mkexpr( tmp[8] ),
7654 mkU32( 0 ) ) ) ),
7655 mkU8( 2 ) ),
7656 BF );
7658 break;
7660 default:
7661 vex_printf("dis_byte_cmp(ppc)(opc2)\n");
7662 return False;
7664 return True;
7668 * Integer Miscellaneous instructions
/* Disassemble miscellaneous integer instructions; currently only
   'wait', which is treated as an immediately-satisfied no-op.
   Fixes: the opc1 diagnostic previously printed the name of the
   neighbouring function ("dis_modulo_int"), which was misleading when
   debugging decode failures; comment typos "(X-from)" and "contition"
   corrected. */
7670 static Bool dis_int_misc ( UInt prefix, UInt theInstr )
7672 Int wc = IFIELD(theInstr, 21, 2);
7673 UChar opc1 = ifieldOPC(theInstr);
7674 UInt opc2 = ifieldOPClo10(theInstr);
7676 /* There is no prefixed version of these instructions. */
7677 PREFIX_CHECK
7679 if ( opc1 != 0x1F ) {
7680 vex_printf("dis_int_misc(ppc)(opc1)\n");
7681 return False;
7684 switch (opc2) {
7685 case 0x01E: // wait, (X-form)
7686 DIP("wait %d\n", wc);
7688 /* The wait instruction causes instruction fetching and execution
7689 * to be suspended. Instruction fetching and execution are resumed
7690 * when the events specified by the WC field occur.
7692 * 0b00 Resume instruction fetching and execution when an
7693 * exception or an event-based branch exception occurs,
7694 * or a resume signal from the platform is received.
7696 * 0b01 Reserved.
7698 * For our purposes, we will just assume the condition is always
7699 * immediately satisfied.
7701 break;
7702 default:
7703 vex_printf("dis_int_misc(ppc)(opc2)\n");
7704 return False;
7707 return True;
7711 Integer Compare Instructions
/* Disassemble the integer compare instructions (cmpi, cmpli, cmp,
   cmpl) plus setb into IR.  The compares write CR field crfD; setb
   writes a GPR from a CR field. */
7713 static Bool dis_int_cmp ( UInt prefix, UInt theInstr )
7715 /* D-Form, X-Form */
7716 UChar opc1 = ifieldOPC(theInstr);
7717 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
7718 UChar b22 = toUChar( IFIELD( theInstr, 22, 1 ) );
7719 UChar flag_L = toUChar( IFIELD( theInstr, 21, 1 ) );
7720 UChar rA_addr = ifieldRegA(theInstr);
7721 UInt uimm16 = ifieldUIMM16(theInstr);
7722 UChar rB_addr = ifieldRegB(theInstr);
7723 UInt opc2 = ifieldOPClo10(theInstr);
7724 UChar b0 = ifieldBIT0(theInstr);
7726 IRType ty = mode64 ? Ity_I64 : Ity_I32;
7727 IRExpr *a = getIReg(rA_addr);
7728 IRExpr *b;
7730 /* There is no prefixed version of these instructions. */
7731 PREFIX_CHECK
7733 if (!mode64 && flag_L==1) { // L==1 invalid for 32 bit.
7734 vex_printf("dis_int_cmp(ppc)(flag_L)\n");
7735 return False;
7738 if (( b22 != 0 ) && ( opc2 != 0x080 ) ) { // setb case exception
7739 vex_printf("dis_int_cmp(ppc)(b22)\n");
7740 return False;
7743 switch (opc1) {
7744 case 0x0B: // cmpi (Compare Immediate, PPC32 p368)
7745 DIP("cmpi cr%u,%u,r%u,%d\n", crfD, flag_L, rA_addr,
7746 (Int)extend_s_16to32(uimm16));
7747 b = mkSzExtendS16( ty, uimm16 );
7748 if (flag_L == 1) {
7749 putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64S, a, b)));
7750 } else {
7751 a = mkNarrowTo32( ty, a );
7752 b = mkNarrowTo32( ty, b );
7753 putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32S, a, b)));
7755 putCR0( crfD, getXER_SO() );
7756 break;
7758 case 0x0A: // cmpli (Compare Logical Immediate, PPC32 p370)
7759 DIP("cmpli cr%u,%u,r%u,0x%x\n", crfD, flag_L, rA_addr, uimm16);
7760 b = mkSzImm( ty, uimm16 );
7761 if (flag_L == 1) {
7762 putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64U, a, b)));
7763 } else {
7764 a = mkNarrowTo32( ty, a );
7765 b = mkNarrowTo32( ty, b );
7766 putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32U, a, b)));
7768 putCR0( crfD, getXER_SO() );
7769 break;
7771 /* X Form */
7772 case 0x1F:
7773 if (b0 != 0) {
7774 vex_printf("dis_int_cmp(ppc)(0x1F,b0)\n");
7775 return False;
7777 b = getIReg(rB_addr);
7779 switch (opc2) {
7780 case 0x000: // cmp (Compare, PPC32 p367)
7781 DIP("cmp cr%u,%u,r%u,r%u\n", crfD, flag_L, rA_addr, rB_addr);
7782 /* Comparing a reg with itself produces a result which
7783 doesn't depend on the contents of the reg. Therefore
7784 remove the false dependency, which has been known to cause
7785 memcheck to produce false errors. */
7786 if (rA_addr == rB_addr)
7787 a = b = typeOfIRExpr(irsb->tyenv,a) == Ity_I64
7788 ? mkU64(0) : mkU32(0);
7789 if (flag_L == 1) {
7790 putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64S, a, b)));
7791 } else {
7792 a = mkNarrowTo32( ty, a );
7793 b = mkNarrowTo32( ty, b );
7794 putCR321(crfD, unop(Iop_32to8,binop(Iop_CmpORD32S, a, b)));
7796 putCR0( crfD, getXER_SO() );
7797 break;
7799 case 0x020: // cmpl (Compare Logical, PPC32 p369)
7800 DIP("cmpl cr%u,%u,r%u,r%u\n", crfD, flag_L, rA_addr, rB_addr);
7801 /* Comparing a reg with itself produces a result which
7802 doesn't depend on the contents of the reg. Therefore
7803 remove the false dependency, which has been known to cause
7804 memcheck to produce false errors. */
7805 if (rA_addr == rB_addr)
7806 a = b = typeOfIRExpr(irsb->tyenv,a) == Ity_I64
7807 ? mkU64(0) : mkU32(0);
7808 if (flag_L == 1) {
7809 putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64U, a, b)));
7810 } else {
7811 a = mkNarrowTo32( ty, a );
7812 b = mkNarrowTo32( ty, b );
7813 putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32U, a, b)));
7815 putCR0( crfD, getXER_SO() );
7816 break;
7818 case 0x080: // setb (Set Boolean)
7820 /* Set Boolean Condition in result register. The result register
7821 is set to all ones if the condition is true and all zeros
7822 otherwise. */
7823 UChar rT_addr = ifieldRegDS(theInstr);
7824 Int bfa = IFIELD(theInstr, 18, 3);
7825 IRTemp cr = newTemp(Ity_I32);
7826 IRTemp cr0 = newTemp(Ity_I32);
7827 IRTemp cr1 = newTemp(Ity_I32);
7828 IRTemp result = newTemp(Ity_I64);
7830 DIP("setb r%u,%d\n", rT_addr, bfa);
7832 /* Fetch the entire condition code value */
7833 assign( cr, getGST( PPC_GST_CR ) );
7835 /* Get bit zero (IBM numbering) of the CR field specified
7836 * by bfa.
7838 assign( cr0, binop( Iop_And32,
7839 binop( Iop_Shr32,
7840 mkexpr( cr ),
7841 mkU8( (7-bfa)*4 ) ),
7842 mkU32( 0x8 ) ) );
7843 assign( cr1, binop( Iop_And32,
7844 binop( Iop_Shr32,
7845 mkexpr( cr ),
7846 mkU8( (7-bfa)*4 ) ),
7847 mkU32( 0x4 ) ) );
7848 assign( result, binop( Iop_Or64,
7849 unop( Iop_1Sto64,
7850 binop( Iop_CmpEQ32,
7851 mkexpr( cr0 ),
7852 mkU32( 0x8 ) ) ),
7853 binop( Iop_32HLto64,
7854 mkU32( 0 ),
7855 unop( Iop_1Uto32,
7856 binop( Iop_CmpEQ32,
7857 mkexpr( cr1 ),
7858 mkU32( 0x4 ) ) ) ) ) );
7859 if ( ty == Ity_I64 )
7860 putIReg( rT_addr, mkexpr( result ) );
7861 else
7862 putIReg( rT_addr, unop( Iop_64to32, mkexpr(result ) ) );
7864 break;
7865 default:
7866 vex_printf("dis_int_cmp(ppc)(opc2)\n");
7867 return False;
7869 break;
7871 default:
7872 vex_printf("dis_int_cmp(ppc)(opc1)\n");
7873 return False;
7876 return True;
7881 Integer Logical Instructions
7883 static Bool dis_int_logic ( UInt prefix, UInt theInstr )
7885 /* D-Form, X-Form */
7886 UChar opc1 = ifieldOPC(theInstr);
7887 UChar rS_addr = ifieldRegDS(theInstr);
7888 UChar rA_addr = ifieldRegA(theInstr);
7889 UInt uimm16 = ifieldUIMM16(theInstr);
7890 UChar rB_addr = ifieldRegB(theInstr);
7891 UInt opc2 = ifieldOPClo10(theInstr);
7892 UChar flag_rC = ifieldBIT0(theInstr);
7894 IRType ty = mode64 ? Ity_I64 : Ity_I32;
7895 IRTemp rS = newTemp(ty);
7896 IRTemp rA = newTemp(ty);
7897 IRTemp rB = newTemp(ty);
7898 Bool do_rc = False;
7900 /* There is no prefixed version of these instructions. */
7901 PREFIX_CHECK
7903 assign( rS, getIReg(rS_addr) );
7904 assign( rB, getIReg(rB_addr) );
7906 switch (opc1) {
7907 case 0x1C: // andi. (AND Immediate, PPC32 p358)
7908 DIP("andi. r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
7909 assign( rA, binop( mkSzOp(ty, Iop_And8), mkexpr(rS),
7910 mkSzImm(ty, uimm16)) );
7911 do_rc = True; // Always record to CR
7912 flag_rC = 1;
7913 break;
7915 case 0x1D: // andis. (AND Immediate Shifted, PPC32 p359)
7916 DIP("andis r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
7917 assign( rA, binop( mkSzOp(ty, Iop_And8), mkexpr(rS),
7918 mkSzImm(ty, uimm16 << 16)) );
7919 do_rc = True; // Always record to CR
7920 flag_rC = 1;
7921 break;
7923 case 0x18: // ori (OR Immediate, PPC32 p497)
7924 DIP("ori r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
7925 assign( rA, binop( mkSzOp(ty, Iop_Or8), mkexpr(rS),
7926 mkSzImm(ty, uimm16)) );
7927 break;
7929 case 0x19: // oris (OR Immediate Shifted, PPC32 p498)
7930 DIP("oris r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
7931 assign( rA, binop( mkSzOp(ty, Iop_Or8), mkexpr(rS),
7932 mkSzImm(ty, uimm16 << 16)) );
7933 break;
7935 case 0x1A: // xori (XOR Immediate, PPC32 p550)
7936 DIP("xori r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
7937 assign( rA, binop( mkSzOp(ty, Iop_Xor8), mkexpr(rS),
7938 mkSzImm(ty, uimm16)) );
7939 break;
7941 case 0x1B: // xoris (XOR Immediate Shifted, PPC32 p551)
7942 DIP("xoris r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
7943 assign( rA, binop( mkSzOp(ty, Iop_Xor8), mkexpr(rS),
7944 mkSzImm(ty, uimm16 << 16)) );
7945 break;
7947 /* X Form */
7948 case 0x1F:
7950 opc2 = IFIELD( theInstr, 2, 9 );
7952 switch ( opc2 ) {
7953 case 0x1BD: // extswsli (Extend Sign Word shift left)
7955 /* sh[5] is in bit 1, sh[0:4] is in bits [14:10] of theInstr */
7956 UChar sh = IFIELD( theInstr, 11, 5 ) | (IFIELD(theInstr, 1, 1) << 5);
7957 IRTemp temp = newTemp( ty );
7959 DIP("extswsli%s r%u,r%u,%u\n", flag_rC ? ".":"",
7960 rA_addr, rS_addr, sh);
7962 assign( temp, unop( Iop_32Sto64,
7963 unop( Iop_64to32, mkexpr( rS ) ) ) );
7964 assign( rA, binop( Iop_Shl64, mkexpr( temp ), mkU8( sh ) ) );
7965 putIReg( rA_addr, mkexpr( rA ) );
7967 if ( flag_rC ) {
7968 set_CR0( mkexpr( rA ) );
7970 return True;
7972 default:
7973 break; // drop to next opc2 check
7976 do_rc = True; // All below record to CR, except for where we return at case end.
7978 opc2 = ifieldOPClo10( theInstr );
7980 switch (opc2) {
7981 case 0x01C: // and (AND, PPC32 p356)
7982 DIP("and%s r%u,r%u,r%u\n",
7983 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
7984 assign(rA, binop( mkSzOp(ty, Iop_And8),
7985 mkexpr(rS), mkexpr(rB)));
7986 break;
7988 case 0x03C: // andc (AND with Complement, PPC32 p357)
7989 DIP("andc%s r%u,r%u,r%u\n",
7990 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
7991 assign(rA, binop( mkSzOp(ty, Iop_And8), mkexpr(rS),
7992 unop( mkSzOp(ty, Iop_Not8),
7993 mkexpr(rB))));
7994 break;
7996 case 0x01A: { // cntlzw (Count Leading Zeros Word, PPC32 p371)
7997 if (rB_addr!=0) {
7998 vex_printf("dis_int_logic(ppc)(cntlzw,rB_addr)\n");
7999 return False;
8001 DIP("cntlzw%s r%u,r%u\n", flag_rC ? ".":"", rA_addr, rS_addr);
8003 // mode64: count in low word only
8004 IRExpr* lo32 = mode64 ? unop(Iop_64to32, mkexpr(rS)) : mkexpr(rS);
8005 IRExpr* res32 = unop(Iop_ClzNat32, lo32);
8006 assign(rA, mode64 ? unop(Iop_32Uto64, res32) : res32);
8007 break;
8010 case 0x11C: // eqv (Equivalent, PPC32 p396)
8011 DIP("eqv%s r%u,r%u,r%u\n",
8012 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
8013 assign( rA, unop( mkSzOp(ty, Iop_Not8),
8014 binop( mkSzOp(ty, Iop_Xor8),
8015 mkexpr(rS), mkexpr(rB))) );
8016 break;
8018 case 0x3BA: // extsb (Extend Sign Byte, PPC32 p397
8019 if (rB_addr!=0) {
8020 vex_printf("dis_int_logic(ppc)(extsb,rB_addr)\n");
8021 return False;
8023 DIP("extsb%s r%u,r%u\n",
8024 flag_rC ? ".":"", rA_addr, rS_addr);
8025 if (mode64)
8026 assign( rA, unop(Iop_8Sto64, unop(Iop_64to8, mkexpr(rS))) );
8027 else
8028 assign( rA, unop(Iop_8Sto32, unop(Iop_32to8, mkexpr(rS))) );
8029 break;
8031 case 0x39A: // extsh (Extend Sign Half Word, PPC32 p398)
8032 if (rB_addr!=0) {
8033 vex_printf("dis_int_logic(ppc)(extsh,rB_addr)\n");
8034 return False;
8036 DIP("extsh%s r%u,r%u\n",
8037 flag_rC ? ".":"", rA_addr, rS_addr);
8038 if (mode64)
8039 assign( rA, unop(Iop_16Sto64,
8040 unop(Iop_64to16, mkexpr(rS))) );
8041 else
8042 assign( rA, unop(Iop_16Sto32,
8043 unop(Iop_32to16, mkexpr(rS))) );
8044 break;
8046 case 0x1DC: // nand (NAND, PPC32 p492)
8047 DIP("nand%s r%u,r%u,r%u\n",
8048 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
8049 assign( rA, unop( mkSzOp(ty, Iop_Not8),
8050 binop( mkSzOp(ty, Iop_And8),
8051 mkexpr(rS), mkexpr(rB))) );
8052 break;
8054 case 0x07C: // nor (NOR, PPC32 p494)
8055 DIP("nor%s r%u,r%u,r%u\n",
8056 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
8057 assign( rA, unop( mkSzOp(ty, Iop_Not8),
8058 binop( mkSzOp(ty, Iop_Or8),
8059 mkexpr(rS), mkexpr(rB))) );
8060 break;
8062 case 0x1BC: // or (OR, PPC32 p495)
8063 if ((!flag_rC) && rS_addr == rB_addr) {
8064 DIP("mr r%u,r%u\n", rA_addr, rS_addr);
8065 assign( rA, mkexpr(rS) );
8066 } else {
8067 DIP("or%s r%u,r%u,r%u\n",
8068 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
8069 assign( rA, binop( mkSzOp(ty, Iop_Or8),
8070 mkexpr(rS), mkexpr(rB)) );
8072 break;
8074 case 0x19C: // orc (OR with Complement, PPC32 p496)
8075 DIP("orc%s r%u,r%u,r%u\n",
8076 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
8077 assign( rA, binop( mkSzOp(ty, Iop_Or8), mkexpr(rS),
8078 unop(mkSzOp(ty, Iop_Not8), mkexpr(rB))));
8079 break;
8081 case 0x13C: // xor (XOR, PPC32 p549)
8082 DIP("xor%s r%u,r%u,r%u\n",
8083 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
8084 assign( rA, binop( mkSzOp(ty, Iop_Xor8),
8085 mkexpr(rS), mkexpr(rB)) );
8086 break;
8089 /* 64bit Integer Logical Instructions */
8090 case 0x3DA: // extsw (Extend Sign Word, PPC64 p430)
8091 if (rB_addr!=0) {
8092 vex_printf("dis_int_logic(ppc)(extsw,rB_addr)\n");
8093 return False;
8095 DIP("extsw%s r%u,r%u\n", flag_rC ? ".":"", rA_addr, rS_addr);
8096 assign(rA, unop(Iop_32Sto64, unop(Iop_64to32, mkexpr(rS))));
8097 break;
8099 case 0x03A: // cntlzd (Count Leading Zeros DWord, PPC64 p401)
8100 if (rB_addr!=0) {
8101 vex_printf("dis_int_logic(ppc)(cntlzd,rB_addr)\n");
8102 return False;
8104 DIP("cntlzd%s r%u,r%u\n", flag_rC ? ".":"", rA_addr, rS_addr);
8105 assign(rA, unop(Iop_ClzNat64, mkexpr(rS)));
8106 break;
8108 case 0x1FC: // cmpb (Power6: compare bytes)
8109 DIP("cmpb r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
8111 if (mode64)
8112 assign( rA, unop( Iop_V128to64,
8113 binop( Iop_CmpEQ8x16,
8114 binop( Iop_64HLtoV128, mkU64(0), mkexpr(rS) ),
8115 binop( Iop_64HLtoV128, mkU64(0), mkexpr(rB) )
8116 )) );
8117 else
8118 assign( rA, unop( Iop_V128to32,
8119 binop( Iop_CmpEQ8x16,
8120 unop( Iop_32UtoV128, mkexpr(rS) ),
8121 unop( Iop_32UtoV128, mkexpr(rB) )
8122 )) );
8123 break;
8125 case 0x2DF: { // mftgpr (move floating-point to general purpose register)
8126 /* The mftgpr instruction was deprecated in Power 7, 2009 timeframe.
8127 Leaving support in Valgrind for now (9/10/2021). Can remove the
8128 mftgpr support in Valgrind if the opcode ever gets reused. */
8129 IRTemp frB = newTemp(Ity_F64);
8130 DIP("mftgpr r%u,fr%u\n", rS_addr, rB_addr);
8132 assign( frB, getFReg(rB_addr)); // always F64
8133 if (mode64)
8134 assign( rA, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
8135 else
8136 assign( rA, unop( Iop_64to32, unop( Iop_ReinterpF64asI64, mkexpr(frB))) );
8138 putIReg( rS_addr, mkexpr(rA));
8139 return True;
8142 case 0x25F: { // mffgpr (move floating-point from general purpose register)
8143 /* The mffgpr instruction was deprecated in Power 7, 2009 timeframe.
8144 Leaving support in Valgrind for now (9/10/2021). Can remove the
8145 mftgpr support in Valgrind if the opcode ever gets reused. */
8146 IRTemp frA = newTemp(Ity_F64);
8147 DIP("mffgpr fr%u,r%u\n", rS_addr, rB_addr);
8149 if (mode64)
8150 assign( frA, unop( Iop_ReinterpI64asF64, mkexpr(rB)) );
8151 else
8152 assign( frA, unop( Iop_ReinterpI64asF64, unop( Iop_32Uto64, mkexpr(rB))) );
8154 putFReg( rS_addr, mkexpr(frA));
8155 return True;
8157 case 0x1FA: // popcntd (population count doubleword)
8159 vassert(mode64);
8160 DIP("popcntd r%u,r%u\n", rA_addr, rS_addr);
8161 IRTemp result = gen_POPCOUNT(ty, rS, DWORD);
8162 putIReg( rA_addr, mkexpr(result) );
8163 return True;
8165 case 0x17A: // popcntw (Population Count Words)
8167 DIP("popcntw r%u,r%u\n", rA_addr, rS_addr);
8168 if (mode64) {
8169 IRTemp resultHi, resultLo;
8170 IRTemp argLo = newTemp(Ity_I32);
8171 IRTemp argHi = newTemp(Ity_I32);
8172 assign(argLo, unop(Iop_64to32, mkexpr(rS)));
8173 assign(argHi, unop(Iop_64HIto32, mkexpr(rS)));
8174 resultLo = gen_POPCOUNT(Ity_I32, argLo, WORD);
8175 resultHi = gen_POPCOUNT(Ity_I32, argHi, WORD);
8176 putIReg( rA_addr, binop(Iop_32HLto64, mkexpr(resultHi), mkexpr(resultLo)));
8177 } else {
8178 IRTemp result = gen_POPCOUNT(ty, rS, WORD);
8179 putIReg( rA_addr, mkexpr(result) );
8181 return True;
8183 case 0x7A: // popcntb (Population Count Byte)
8185 DIP("popcntb r%u,r%u\n", rA_addr, rS_addr);
8187 if (mode64) {
8188 IRTemp resultHi, resultLo;
8189 IRTemp argLo = newTemp(Ity_I32);
8190 IRTemp argHi = newTemp(Ity_I32);
8191 assign(argLo, unop(Iop_64to32, mkexpr(rS)));
8192 assign(argHi, unop(Iop_64HIto32, mkexpr(rS)));
8193 resultLo = gen_POPCOUNT(Ity_I32, argLo, BYTE);
8194 resultHi = gen_POPCOUNT(Ity_I32, argHi, BYTE);
8195 putIReg( rA_addr, binop(Iop_32HLto64, mkexpr(resultHi),
8196 mkexpr(resultLo)));
8197 } else {
8198 IRTemp result = gen_POPCOUNT(ty, rS, BYTE);
8199 putIReg( rA_addr, mkexpr(result) );
8201 return True;
8203 case 0x0FC: // bpermd (Bit Permute Doubleword)
8205 /* This is a lot of rigmarole to emulate bpermd like this, as it
8206 * could be done much faster by implementing a call to the native
8207 * instruction. However, where possible I want to avoid using new
8208 * native instructions so that we can use valgrind to emulate those
8209 * instructions on older PPC64 hardware.
8211 #define BPERMD_IDX_MASK 0x00000000000000FFULL
8212 #define BPERMD_BIT_MASK 0x8000000000000000ULL
8213 int i;
8214 IRExpr * rS_expr = mkexpr(rS);
8215 IRExpr * res = binop(Iop_And64, mkU64(0), mkU64(0));
8216 DIP("bpermd r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
8217 for (i = 0; i < 8; i++) {
8218 IRTemp idx_tmp = newTemp( Ity_I64 );
8219 IRTemp perm_bit = newTemp( Ity_I64 );
8220 IRTemp idx = newTemp( Ity_I8 );
8221 IRTemp idx_LT64 = newTemp( Ity_I1 );
8222 IRTemp idx_LT64_ity64 = newTemp( Ity_I64 );
8224 assign( idx_tmp,
8225 binop( Iop_And64, mkU64( BPERMD_IDX_MASK ), rS_expr ) );
8226 assign( idx_LT64,
8227 binop( Iop_CmpLT64U, mkexpr( idx_tmp ), mkU64( 64 ) ) );
8228 assign( idx,
8229 binop( Iop_And8,
8230 unop( Iop_1Sto8,
8231 mkexpr(idx_LT64) ),
8232 unop( Iop_64to8, mkexpr( idx_tmp ) ) ) );
            /* If idx_LT64 == 0, we must force the perm bit to '0'. Below, we use idx
             * to determine which bit of rB to use for the perm bit, and then we shift
             * that bit to the MSB position.  We AND that with a 64-bit-ized idx_LT64
             * to set the final perm bit.
             */
8238 assign( idx_LT64_ity64,
8239 unop( Iop_32Uto64, unop( Iop_1Uto32, mkexpr(idx_LT64 ) ) ) );
8240 assign( perm_bit,
8241 binop( Iop_And64,
8242 mkexpr( idx_LT64_ity64 ),
8243 binop( Iop_Shr64,
8244 binop( Iop_And64,
8245 mkU64( BPERMD_BIT_MASK ),
8246 binop( Iop_Shl64,
8247 mkexpr( rB ),
8248 mkexpr( idx ) ) ),
8249 mkU8( 63 ) ) ) );
8250 res = binop( Iop_Or64,
8251 res,
8252 binop( Iop_Shl64,
8253 mkexpr( perm_bit ),
8254 mkU8( i ) ) );
8255 rS_expr = binop( Iop_Shr64, rS_expr, mkU8( 8 ) );
8257 putIReg(rA_addr, res);
8258 return True;
8261 default:
8262 vex_printf("dis_int_logic(ppc)(opc2)\n");
8263 return False;
8265 break;
8267 default:
8268 vex_printf("dis_int_logic(ppc)(opc1)\n");
8269 return False;
8272 putIReg( rA_addr, mkexpr(rA) );
8274 if (do_rc && flag_rC) {
8275 set_CR0( mkexpr(rA) );
8277 return True;
/*
  Integer Parity Instructions
*/

/* Translate prtyd/prtyw: compute the parity of bit 0 of each byte
   (prtyd) or of each word (prtyw) of rS, writing the result to rA.
   The parity is accumulated by repeatedly shifting rS right by 8 and
   summing the low bit; the low bit of the sum is the answer.
   Returns True iff the instruction was decoded and translated. */
static Bool dis_int_parity ( UInt prefix, UInt theInstr )
{
   /* X-Form */
   UChar opc1    = ifieldOPC(theInstr);
   UChar rS_addr = ifieldRegDS(theInstr);
   UChar rA_addr = ifieldRegA(theInstr);
   UChar rB_addr = ifieldRegB(theInstr);
   UInt  opc2    = ifieldOPClo10(theInstr);
   UChar b0      = ifieldBIT0(theInstr);
   IRType ty     = mode64 ? Ity_I64 : Ity_I32;

   IRTemp rS     = newTemp(ty);
   IRTemp rA     = newTemp(ty);
   IRTemp iTot1  = newTemp(Ity_I32);
   IRTemp iTot2  = newTemp(Ity_I32);
   IRTemp iTot3  = newTemp(Ity_I32);
   IRTemp iTot4  = newTemp(Ity_I32);
   IRTemp iTot5  = newTemp(Ity_I32);
   IRTemp iTot6  = newTemp(Ity_I32);
   IRTemp iTot7  = newTemp(Ity_I32);
   IRTemp iTot8  = newTemp(Ity_I32);
   IRTemp rS1    = newTemp(ty);
   IRTemp rS2    = newTemp(ty);
   IRTemp rS3    = newTemp(ty);
   IRTemp rS4    = newTemp(ty);
   IRTemp rS5    = newTemp(ty);
   IRTemp rS6    = newTemp(ty);
   IRTemp rS7    = newTemp(ty);
   IRTemp iHi    = newTemp(Ity_I32);
   IRTemp iLo    = newTemp(Ity_I32);
   IROp to_bit   = (mode64 ? Iop_64to1 : Iop_32to1);
   IROp shr_op   = (mode64 ? Iop_Shr64 : Iop_Shr32);

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   if (opc1 != 0x1f || rB_addr || b0) {
      vex_printf("dis_int_parity(ppc)(0x1F,opc1:rB|b0)\n");
      return False;
   }

   assign( rS, getIReg(rS_addr) );

   switch (opc2) {
   case 0xba:  // prtyd (Parity Doubleword, ISA 2.05 p320)
      DIP("prtyd r%u,r%u\n", rA_addr, rS_addr);
      /* Sum bit 0 of each byte of rS, stepping through the bytes with
         successive 8-bit right shifts. */
      assign( iTot1, unop(Iop_1Uto32, unop(to_bit, mkexpr(rS))) );
      assign( rS1, binop(shr_op, mkexpr(rS), mkU8(8)) );
      assign( iTot2, binop(Iop_Add32,
                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS1))),
                           mkexpr(iTot1)) );
      assign( rS2, binop(shr_op, mkexpr(rS1), mkU8(8)) );
      assign( iTot3, binop(Iop_Add32,
                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS2))),
                           mkexpr(iTot2)) );
      assign( rS3, binop(shr_op, mkexpr(rS2), mkU8(8)) );
      assign( iTot4, binop(Iop_Add32,
                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS3))),
                           mkexpr(iTot3)) );
      if (mode64) {
         /* 64-bit mode: continue through the four high bytes. */
         assign( rS4, binop(shr_op, mkexpr(rS3), mkU8(8)) );
         assign( iTot5, binop(Iop_Add32,
                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS4))),
                              mkexpr(iTot4)) );
         assign( rS5, binop(shr_op, mkexpr(rS4), mkU8(8)) );
         assign( iTot6, binop(Iop_Add32,
                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS5))),
                              mkexpr(iTot5)) );
         assign( rS6, binop(shr_op, mkexpr(rS5), mkU8(8)) );
         assign( iTot7, binop(Iop_Add32,
                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS6))),
                              mkexpr(iTot6)) );
         assign( rS7, binop(shr_op, mkexpr(rS6), mkU8(8)) );
         assign( iTot8, binop(Iop_Add32,
                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS7))),
                              mkexpr(iTot7)) );
         /* Parity is the low bit of the byte-count sum. */
         assign( rA, unop(Iop_32Uto64,
                          binop(Iop_And32, mkexpr(iTot8), mkU32(1))) );
      } else
         assign( rA, mkexpr(iTot4) );

      break;
   case 0x9a:  // prtyw (Parity Word, ISA 2.05 p320)
      /* NOTE(review): unlike prtyd above, this case emits no DIP, so
         prtyw does not appear in disassembly traces — verify whether
         that is intentional. */
      assign( iTot1, unop(Iop_1Uto32, unop(to_bit, mkexpr(rS))) );
      assign( rS1, binop(shr_op, mkexpr(rS), mkU8(8)) );
      assign( iTot2, binop(Iop_Add32,
                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS1))),
                           mkexpr(iTot1)) );
      assign( rS2, binop(shr_op, mkexpr(rS1), mkU8(8)) );
      assign( iTot3, binop(Iop_Add32,
                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS2))),
                           mkexpr(iTot2)) );
      assign( rS3, binop(shr_op, mkexpr(rS2), mkU8(8)) );
      assign( iTot4, binop(Iop_Add32,
                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS3))),
                           mkexpr(iTot3)) );
      /* Parity of the low word. */
      assign( iLo, unop(Iop_1Uto32, unop(Iop_32to1, mkexpr(iTot4) )) );

      if (mode64) {
         /* 64-bit mode: compute the parity of the high word separately
            and recombine into a 64-bit result. */
         assign( rS4, binop(shr_op, mkexpr(rS3), mkU8(8)) );
         assign( iTot5, unop(Iop_1Uto32, unop(to_bit, mkexpr(rS4))) );
         assign( rS5, binop(shr_op, mkexpr(rS4), mkU8(8)) );
         assign( iTot6, binop(Iop_Add32,
                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS5))),
                              mkexpr(iTot5)) );
         assign( rS6, binop(shr_op, mkexpr(rS5), mkU8(8)) );
         assign( iTot7, binop(Iop_Add32,
                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS6))),
                              mkexpr(iTot6)) );
         assign( rS7, binop(shr_op, mkexpr(rS6), mkU8(8)));
         assign( iTot8, binop(Iop_Add32,
                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS7))),
                              mkexpr(iTot7)) );
         /* Note: comma operator joins the two assigns (as in original). */
         assign( iHi, binop(Iop_And32, mkU32(1), mkexpr(iTot8)) ),
         assign( rA, binop(Iop_32HLto64, mkexpr(iHi), mkexpr(iLo)) );
      } else
         assign( rA, binop(Iop_Or32, mkU32(0), mkexpr(iLo)) );
      break;
   default:
      vex_printf("dis_int_parity(ppc)(opc2)\n");
      return False;
   }

   putIReg( rA_addr, mkexpr(rA) );

   return True;
}
/*
  Integer Rotate Instructions
*/

/* Translate the M-form/MDS-form rotate instructions: rlwimi, rlwinm,
   rlwnm and the 64-bit rld{cl,cr,ic,icl,icr,imi} family.  The rotated
   and masked result is written to rA; CR0 is set when Rc=1.
   Returns True iff the instruction was decoded and translated. */
static Bool dis_int_rot ( UInt prefix, UInt theInstr )
{
   /* M-Form, MDS-Form */
   UChar opc1    = ifieldOPC(theInstr);
   UChar rS_addr = ifieldRegDS(theInstr);
   UChar rA_addr = ifieldRegA(theInstr);
   UChar rB_addr = ifieldRegB(theInstr);
   UChar sh_imm  = rB_addr;
   UChar MaskBeg = toUChar( IFIELD( theInstr, 6, 5 ) );
   UChar MaskEnd = toUChar( IFIELD( theInstr, 1, 5 ) );
   UChar msk_imm = toUChar( IFIELD( theInstr, 5, 6 ) );
   UChar opc2    = toUChar( IFIELD( theInstr, 2, 3 ) );
   UChar b1      = ifieldBIT1(theInstr);
   UChar flag_rC = ifieldBIT0(theInstr);

   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
   IRTemp rS     = newTemp(ty);
   IRTemp rA     = newTemp(ty);
   IRTemp rB     = newTemp(ty);
   IRTemp rot    = newTemp(ty);
   IRExpr *r;
   UInt   mask32;
   ULong  mask64;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   assign( rS, getIReg(rS_addr) );
   assign( rB, getIReg(rB_addr) );

   switch (opc1) {
   case 0x14: {
      // rlwimi (Rotate Left Word Imm then Mask Insert, PPC32 p500)
      DIP("rlwimi%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"",
          rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
      if (mode64) {
         // tmp32 = (ROTL(rS_Lo32, Imm)
         // rA = ((tmp32 || tmp32) & mask64) | (rA & ~mask64)
         mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
         r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
         r = unop(Iop_32Uto64, r);
         /* Duplicate the rotated 32-bit value into both halves. */
         assign( rot, binop(Iop_Or64, r,
                            binop(Iop_Shl64, r, mkU8(32))) );
         assign( rA,
            binop(Iop_Or64,
                  binop(Iop_And64, mkexpr(rot), mkU64(mask64)),
                  binop(Iop_And64, getIReg(rA_addr), mkU64(~mask64))) );
      }
      else {
         // rA = (ROTL(rS, Imm) & mask) | (rA & ~mask);
         mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
         r = ROTL(mkexpr(rS), mkU8(sh_imm));
         assign( rA,
            binop(Iop_Or32,
                  binop(Iop_And32, mkU32(mask32), r),
                  binop(Iop_And32, getIReg(rA_addr), mkU32(~mask32))) );
      }
      break;
   }

   case 0x15: {
      // rlwinm (Rotate Left Word Imm then AND with Mask, PPC32 p501)
      vassert(MaskBeg < 32);
      vassert(MaskEnd < 32);
      vassert(sh_imm  < 32);

      if (mode64) {
         IRTemp rTmp = newTemp(Ity_I64);
         mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
         DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"",
             rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
         // tmp32 = (ROTL(rS_Lo32, Imm)
         // rA = ((tmp32 || tmp32) & mask64)
         r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
         r = unop(Iop_32Uto64, r);
         assign( rTmp, r );
         r = NULL;
         assign( rot, binop(Iop_Or64, mkexpr(rTmp),
                            binop(Iop_Shl64, mkexpr(rTmp), mkU8(32))) );
         assign( rA, binop(Iop_And64, mkexpr(rot), mkU64(mask64)) );
      }
      else {
         if (MaskBeg == 0 && sh_imm+MaskEnd == 31) {
            /* Special-case the ,n,0,31-n form as that is just n-bit
               shift left, PPC32 p501 */
            DIP("slwi%s r%u,r%u,%d\n", flag_rC ? ".":"",
                rA_addr, rS_addr, sh_imm);
            assign( rA, binop(Iop_Shl32, mkexpr(rS), mkU8(sh_imm)) );
         }
         else if (MaskEnd == 31 && sh_imm+MaskBeg == 32) {
            /* Special-case the ,32-n,n,31 form as that is just n-bit
               unsigned shift right, PPC32 p501 */
            DIP("srwi%s r%u,r%u,%d\n", flag_rC ? ".":"",
                rA_addr, rS_addr, MaskBeg);
            assign( rA, binop(Iop_Shr32, mkexpr(rS), mkU8(MaskBeg)) );
         }
         else {
            /* General case. */
            mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
            DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"",
                rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
            // rA = ROTL(rS, Imm) & mask
            assign( rA, binop(Iop_And32,
                              ROTL(mkexpr(rS), mkU8(sh_imm)),
                              mkU32(mask32)) );
         }
      }
      break;
   }

   case 0x17: {
      // rlwnm (Rotate Left Word then AND with Mask, PPC32 p503)
      DIP("rlwnm%s r%u,r%u,r%u,%d,%d\n", flag_rC ? ".":"",
          rA_addr, rS_addr, rB_addr, MaskBeg, MaskEnd);
      if (mode64) {
         mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
         /* weird insn alert!
            tmp32 = (ROTL(rS_Lo32, rB[0-4])
            rA = ((tmp32 || tmp32) & mask64)
         */
         // note, ROTL does the masking, so we don't do it here
         r = ROTL( unop(Iop_64to32, mkexpr(rS)),
                   unop(Iop_64to8, mkexpr(rB)) );
         r = unop(Iop_32Uto64, r);
         assign(rot, binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32))));
         assign( rA, binop(Iop_And64, mkexpr(rot), mkU64(mask64)) );
      } else {
         mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
         // rA = ROTL(rS, rB[0-4]) & mask
         // note, ROTL does the masking, so we don't do it here
         assign( rA, binop(Iop_And32,
                           ROTL(mkexpr(rS),
                                unop(Iop_32to8, mkexpr(rB))),
                           mkU32(mask32)) );
      }
      break;
   }

   /* 64bit Integer Rotates */
   case 0x1E: {
      /* Reassemble the split MDS-form fields: the 6-bit mask immediate
         is encoded rotated by one bit, and b1 supplies the high bit of
         the 6-bit shift amount. */
      msk_imm = ((msk_imm & 1) << 5) | (msk_imm >> 1);
      sh_imm |= b1 << 5;

      vassert( msk_imm < 64 );
      vassert( sh_imm < 64 );

      switch (opc2) {
      case 0x4: {
         /* r = ROTL64( rS, rB_lo6) */
         r = ROTL( mkexpr(rS), unop(Iop_64to8, mkexpr(rB)) );

         if (b1 == 0) { // rldcl (Rotl DWord, Clear Left, PPC64 p555)
            DIP("rldcl%s r%u,r%u,r%u,%u\n", flag_rC ? ".":"",
                rA_addr, rS_addr, rB_addr, msk_imm);
            // note, ROTL does the masking, so we don't do it here
            mask64 = MASK64(0, 63-msk_imm);
            assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
            break;
         } else { // rldcr (Rotl DWord, Clear Right, PPC64 p556)
            DIP("rldcr%s r%u,r%u,r%u,%u\n", flag_rC ? ".":"",
                rA_addr, rS_addr, rB_addr, msk_imm);
            mask64 = MASK64(63-msk_imm, 63);
            assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
            break;
         }
         break;
      }
      case 0x2: // rldic (Rotl DWord Imm, Clear, PPC64 p557)
         DIP("rldic%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
             rA_addr, rS_addr, sh_imm, msk_imm);
         r = ROTL(mkexpr(rS), mkU8(sh_imm));
         mask64 = MASK64(sh_imm, 63-msk_imm);
         assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
         break;
         // later: deal with special case: (msk_imm==0) => SHL(sh_imm)
         /*
           Hmm... looks like this'll do the job more simply:
           r = SHL(rS, sh_imm)
           m = ~(1 << (63-msk_imm))
           assign(rA, r & m);
         */

      case 0x0: // rldicl (Rotl DWord Imm, Clear Left, PPC64 p558)
         if (mode64
             && sh_imm + msk_imm == 64 && msk_imm >= 1 && msk_imm <= 63) {
            /* special-case the ,64-n,n form as that is just
               unsigned shift-right by n */
            DIP("srdi%s r%u,r%u,%u\n",
                flag_rC ? ".":"", rA_addr, rS_addr, msk_imm);
            assign( rA, binop(Iop_Shr64, mkexpr(rS), mkU8(msk_imm)) );
         } else {
            DIP("rldicl%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
                rA_addr, rS_addr, sh_imm, msk_imm);
            r = ROTL(mkexpr(rS), mkU8(sh_imm));
            mask64 = MASK64(0, 63-msk_imm);
            assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
         }
         break;

      case 0x1: // rldicr (Rotl DWord Imm, Clear Right, PPC64 p559)
         if (mode64
             && sh_imm + msk_imm == 63 && sh_imm >= 1 && sh_imm <= 63) {
            /* special-case the ,n,63-n form as that is just
               shift-left by n */
            DIP("sldi%s r%u,r%u,%u\n",
                flag_rC ? ".":"", rA_addr, rS_addr, sh_imm);
            assign( rA, binop(Iop_Shl64, mkexpr(rS), mkU8(sh_imm)) );
         } else {
            DIP("rldicr%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
                rA_addr, rS_addr, sh_imm, msk_imm);
            r = ROTL(mkexpr(rS), mkU8(sh_imm));
            mask64 = MASK64(63-msk_imm, 63);
            assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
         }
         break;

      case 0x3: { // rldimi (Rotl DWord Imm, Mask Insert, PPC64 p560)
         IRTemp rA_orig = newTemp(ty);
         DIP("rldimi%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
             rA_addr, rS_addr, sh_imm, msk_imm);
         r = ROTL(mkexpr(rS), mkU8(sh_imm));
         mask64 = MASK64(sh_imm, 63-msk_imm);
         assign( rA_orig, getIReg(rA_addr) );
         assign( rA, binop(Iop_Or64,
                           binop(Iop_And64, mkU64(mask64), r),
                           binop(Iop_And64, mkU64(~mask64),
                                 mkexpr(rA_orig))) );
         break;
      }
      default:
         vex_printf("dis_int_rot(ppc)(opc2)\n");
         return False;
      }
      break;
   }

   default:
      vex_printf("dis_int_rot(ppc)(opc1)\n");
      return False;
   }

   putIReg( rA_addr, mkexpr(rA) );

   if (flag_rC) {
      set_CR0( mkexpr(rA) );
   }
   return True;
}
/*
  Integer Load Instructions
*/

/* Translate the DS-form 64-bit loads (ld, ldu, lwa) and their
   prefixed variants (pld, plwa).  Returns True iff the instruction
   was decoded and translated. */
static Bool dis_int_load_ds_form_prefix ( UInt prefix,
                                          UInt theInstr )
{
   /* DS-Form Prefixed versions */
   UChar opc1     = ifieldOPC(theInstr);
   UChar rT_addr  = ifieldRegDS(theInstr);
   UChar rA_addr  = ifieldRegA(theInstr);
   IRType ty      = mode64 ? Ity_I64 : Ity_I32;
   UChar b0       = ifieldBIT0(theInstr);
   UChar b1       = ifieldBIT1(theInstr);
   IRTemp EA      = newTemp(ty);
   UInt  ptype    = PrefixType(prefix);
   Bool  is_prefix = prefix_instruction( prefix );
   ULong immediate_val = 0;
   UInt  R = 0;

   /* Some of these instructions have different encodings for their word
      versions and their prefix versions. */

   if (opc1 == 0x29) {  //plwa
      /* NOTE(review): the pDIP below prints immediate_val before
         calculate_prefix_EA fills it in, so the disassembly may show 0
         — verify the intended ordering. */
      pDIP( is_prefix, "lwa r%u,%llu(r%u)", rT_addr, immediate_val, rA_addr);
      DIPp( is_prefix, ",%u", R );
      assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
                                       ptype, DSFORM_IMMASK,
                                       &immediate_val, &R ) );

      /* Load word, sign-extend to 64 bits. */
      putIReg( rT_addr,
               unop(Iop_32Sto64, load( Ity_I32, mkexpr( EA ) ) ) );
      return True;

   } else if (opc1 == 0x39) {  // pld
      pDIP( is_prefix, "ld r%u,%llu(r%u)", rT_addr, immediate_val, rA_addr);
      DIPn( is_prefix);
      assign( EA, calculate_prefix_EA( prefix, theInstr,
                                       rA_addr, ptype, DFORM_IMMASK,
                                       &immediate_val, &R ) );

      putIReg( rT_addr, load( Ity_I64, mkexpr( EA ) ) );
      return True;

   } else if (opc1 == 0x3A) {
      /* Word version DS Form - 64bit Loads. In each case EA will have been
         formed with the lowest 2 bits masked off the immediate offset. */
      UInt uimm16 = ifieldUIMM16(theInstr);
      Int  simm16 = extend_s_16to32(uimm16);

      simm16 = simm16 & DSFORM_IMMASK;
      assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );

      /* Bits b1:b0 select the variant. */
      switch ((b1<<1) | b0) {
      case 0x0: // ld (Load DWord, PPC64 p472)
         DIP("ld r%u,%llu(r%u)", rT_addr, immediate_val, rA_addr);
         putIReg( rT_addr, load( Ity_I64, mkexpr( EA ) ) );
         break;

      case 0x1: // ldu (Load DWord, Update, PPC64 p474)
         /* There is no prefixed version of this instruction. */
         if (rA_addr == 0 || rA_addr == rT_addr) {
            vex_printf("dis_int_load_ds_form_prefix(ppc)(ldu,rA_addr|rT_addr)\n");
            return False;
         }
         DIP("ldu r%u,%llu(r%u)\n", rT_addr, immediate_val, rA_addr);

         putIReg( rT_addr, load( Ity_I64, mkexpr( EA ) ) );
         /* Update form: write the EA back to rA. */
         putIReg( rA_addr, mkexpr( EA ) );
         break;

      case 0x2: // lwa (Load Word Alg, PPC64 p499)
         pDIP( is_prefix, "lwa r%u,%llu(r%u)", rT_addr, immediate_val, rA_addr);
         DIPp( is_prefix, ",%u", R );

         putIReg( rT_addr,
                  unop(Iop_32Sto64, load( Ity_I32, mkexpr( EA ) ) ) );
         break;

      default:
         vex_printf("dis_int_load_ds_form_prefix(ppc)(0x3A, opc2)\n");
         return False;
      }
      return True;
   }
   return False;
}
/* Translate the D-form zero/sign-extending loads (lbz, lhz, lha, lwz),
   the DQ-form quadword load lq, and their prefixed variants (plbz,
   plhz, plha, plwz, plq).  The loaded value, widened as appropriate,
   is written to rT (and rT+1 for lq/plq).
   Returns True iff the instruction was decoded and translated. */
static Bool dis_int_load_prefix ( UInt prefix, UInt theInstr )
{
   /* D-Form, X-Form, Prefixed versions */
   UChar opc1     = ifieldOPC(theInstr);
   UChar rT_addr  = ifieldRegDS(theInstr);
   UChar rA_addr  = ifieldRegA(theInstr);

   IRType ty      = mode64 ? Ity_I64 : Ity_I32;
   IRTemp EA      = newTemp(ty);
   UInt  ptype    = PrefixType(prefix);
   Bool  is_prefix = prefix_instruction( prefix );
   UInt  size     = 0;   /* holds an IRType (Ity_*) value */
   ULong immediate_val = 0;
   UInt  R = 0;
   IRExpr* val;

   /* First pass: pick the load size and form the effective address. */
   if (opc1 == 0x22) {
      // byte loads
      size = Ity_I8;
      assign( EA, calculate_prefix_EA( prefix, theInstr,
                                       rA_addr, ptype, DFORM_IMMASK,
                                       &immediate_val, &R ) );

   } else if ( opc1 == 0x28 ) {
      // half word loads lhz, plhz
      size = Ity_I16;
      assign( EA, calculate_prefix_EA( prefix, theInstr,
                                       rA_addr, ptype, DFORM_IMMASK,
                                       &immediate_val, &R ) );

   } else if ( opc1 == 0x2A ) {
      // half word loads lha, plha
      size = Ity_I16;
      assign( EA, calculate_prefix_EA( prefix, theInstr,
                                       rA_addr, ptype, DFORM_IMMASK,
                                       &immediate_val, &R ) );

   } else if (opc1 == 0x20 ) {
      // word load lwz, plwz
      size = Ity_I32;
      assign( EA, calculate_prefix_EA( prefix, theInstr,
                                       rA_addr, ptype, DFORM_IMMASK,
                                       &immediate_val, &R ) );

   } else if (opc1 == 0x38 ) {   // lq, plq
      // word load
      size = Ity_I64;

      /* lq is DQ-form; plq uses the D-form immediate mask. */
      if (!is_prefix)
         assign( EA, calculate_prefix_EA( prefix, theInstr,
                                          rA_addr, ptype, DQFORM_IMMASK,
                                          &immediate_val, &R ) );
      else
         assign( EA, calculate_prefix_EA( prefix, theInstr,
                                          rA_addr, ptype, DFORM_IMMASK,
                                          &immediate_val, &R ) );
   }

   val = load( size, mkexpr( EA ) );

   /* Store the load value in the destination and print the instruction
      details. */
   switch (opc1) {
   case 0x20: // lwz (Load W & Zero, PPC32 p460)
      pDIP( is_prefix, "lwz r%u,%llu(r%u)", rT_addr, immediate_val, rA_addr);
      DIPp( is_prefix, ",%u", R );

      putIReg( rT_addr, mkWidenFrom32(ty, val, False) );
      break;

   case 0x22: // lbz (Load B & Zero, PPC32 p433)
      pDIP( is_prefix, "lbz r%u,%llu(r%u)", rT_addr, immediate_val, rA_addr );
      DIPp( is_prefix, ",%u", R );

      putIReg( rT_addr, mkWidenFrom8( ty, val, False ) );
      break;

   case 0x28: // lhz (Load HW & Zero, PPC32 p450)
      pDIP( is_prefix, "lhz r%u,%llu(r%u)", rT_addr, immediate_val, rA_addr );
      DIPp( is_prefix, ",%u", R );

      putIReg( rT_addr, mkWidenFrom16( ty, val, False ) );
      break;

   case 0x2A: // lha (Load HW Alg, PPC32 p445)
      pDIP( is_prefix, "lha r%u,%llu(r%u)", rT_addr, immediate_val, rA_addr);
      DIPp( is_prefix, ",%u", R );
      /* Algebraic load: sign-extend the halfword. */
      putIReg( rT_addr, mkWidenFrom16(ty, val, True) );
      break;

   case 0x38: {   // lq, plq
      IRTemp high = newTemp(ty);
      IRTemp low  = newTemp(ty);
      /* DQ Form - 128bit Loads. Lowest bits [1:0] are the PT field. */
      pDIP( is_prefix, "lq r%u,%llu(r%u)", rT_addr, immediate_val, rA_addr);
      DIPp( is_prefix, ",%u", R );
      /* NOTE: there are some changes to XER[41:42] that have not been
       * implemented.
       */
      //trap if EA misaligned on 16 byte address
      if (mode64) {
         if (host_endness == VexEndnessBE) {
            assign(high, load(ty, mkexpr( EA ) ) );
            assign(low, load(ty, binop( Iop_Add64,
                                        mkexpr( EA ),
                                        mkU64( 8 ) ) ) );
         } else {
            assign(low, load(ty, mkexpr( EA ) ) );
            assign(high, load(ty, binop( Iop_Add64,
                                         mkexpr( EA ),
                                         mkU64( 8 ) ) ) );
         }
      } else {
         assign(high, load(ty, binop( Iop_Add32,
                                      mkexpr( EA ),
                                      mkU32( 4 ) ) ) );
         assign(low, load(ty, binop( Iop_Add32,
                                     mkexpr( EA ),
                                     mkU32( 12 ) ) ) );
      }

      /* Note, the load order for lq is the same for BE and LE. However,
         plq does an endian aware load. */
      if (is_prefix &&( host_endness == VexEndnessLE )) {
         putIReg( rT_addr, mkexpr( low) );
         putIReg( rT_addr+1, mkexpr( high) );
      } else {
         putIReg( rT_addr, mkexpr( high) );
         putIReg( rT_addr+1, mkexpr( low) );
      }
      break;
   }

   default:
      vex_printf("dis_int_load_prefix(ppc)(opc1)\n");
      return False;
   }
   return True;
}
8893 static Bool dis_int_load ( UInt prefix, UInt theInstr )
8895 /* D-Form, X-Form, DS-Form */
8896 UChar opc1 = ifieldOPC(theInstr);
8897 UChar rD_addr = ifieldRegDS(theInstr);
8898 UChar rA_addr = ifieldRegA(theInstr);
8899 UInt uimm16 = ifieldUIMM16(theInstr);
8900 UChar rB_addr = ifieldRegB(theInstr);
8901 UInt opc2 = ifieldOPClo10(theInstr);
8902 UChar b0 = ifieldBIT0(theInstr);
8904 Int simm16 = extend_s_16to32(uimm16);
8905 IRType ty = mode64 ? Ity_I64 : Ity_I32;
8906 IRTemp EA = newTemp(ty);
8907 IRExpr* val;
8909 /* There is no prefixed version of these instructions. */
8910 PREFIX_CHECK
8912 switch (opc1) {
8913 case 0x1F: // register offset
8914 assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
8915 break;
8916 case 0x38: // immediate offset: 64bit: lq: maskoff
8917 // lowest 4 bits of immediate before forming EA
8918 simm16 = simm16 & 0xFFFFFFF0;
8919 assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
8920 break;
8921 default: // immediate offset
8922 assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
8923 break;
8926 switch (opc1) {
8927 case 0x23: // lbzu (Load B & Zero, Update, PPC32 p434)
8928 if (rA_addr == 0 || rA_addr == rD_addr) {
8929 vex_printf("dis_int_load(ppc)(lbzu,rA_addr|rD_addr)\n");
8930 return False;
8932 DIP("lbzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
8933 val = load(Ity_I8, mkexpr(EA));
8934 putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
8935 putIReg( rA_addr, mkexpr(EA) );
8936 break;
8938 case 0x2B: // lhau (Load HW Alg, Update, PPC32 p446)
8939 if (rA_addr == 0 || rA_addr == rD_addr) {
8940 vex_printf("dis_int_load(ppc)(lhau,rA_addr|rD_addr)\n");
8941 return False;
8943 DIP("lhau r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
8944 val = load(Ity_I16, mkexpr(EA));
8945 putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
8946 putIReg( rA_addr, mkexpr(EA) );
8947 break;
8949 case 0x29: // lhzu (Load HW & and Zero, Update, PPC32 p451)
8950 if (rA_addr == 0 || rA_addr == rD_addr) {
8951 vex_printf("dis_int_load(ppc)(lhzu,rA_addr|rD_addr)\n");
8952 return False;
8954 DIP("lhzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
8955 val = load(Ity_I16, mkexpr(EA));
8956 putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
8957 putIReg( rA_addr, mkexpr(EA) );
8958 break;
8960 case 0x21: // lwzu (Load W & Zero, Update, PPC32 p461))
8961 if (rA_addr == 0 || rA_addr == rD_addr) {
8962 vex_printf("dis_int_load(ppc)(lwzu,rA_addr|rD_addr)\n");
8963 return False;
8965 DIP("lwzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
8966 val = load(Ity_I32, mkexpr(EA));
8967 putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
8968 putIReg( rA_addr, mkexpr(EA) );
8969 break;
8971 /* X Form */
8972 case 0x1F:
8973 if (b0 != 0) {
8974 vex_printf("dis_int_load(ppc)(Ox1F,b0)\n");
8975 return False;
8978 switch (opc2) {
8979 case 0x077: // lbzux (Load B & Zero, Update Indexed, PPC32 p435)
8980 DIP("lbzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
8981 if (rA_addr == 0 || rA_addr == rD_addr) {
8982 vex_printf("dis_int_load(ppc)(lwzux,rA_addr|rD_addr)\n");
8983 return False;
8985 val = load(Ity_I8, mkexpr(EA));
8986 putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
8987 putIReg( rA_addr, mkexpr(EA) );
8988 break;
8990 case 0x057: // lbzx (Load B & Zero, Indexed, PPC32 p436)
8991 DIP("lbzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
8992 val = load(Ity_I8, mkexpr(EA));
8993 putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
8994 break;
8996 case 0x177: // lhaux (Load HW Alg, Update Indexed, PPC32 p447)
8997 if (rA_addr == 0 || rA_addr == rD_addr) {
8998 vex_printf("dis_int_load(ppc)(lhaux,rA_addr|rD_addr)\n");
8999 return False;
9001 DIP("lhaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9002 val = load(Ity_I16, mkexpr(EA));
9003 putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
9004 putIReg( rA_addr, mkexpr(EA) );
9005 break;
9007 case 0x157: // lhax (Load HW Alg, Indexed, PPC32 p448)
9008 DIP("lhax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9009 val = load(Ity_I16, mkexpr(EA));
9010 putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
9011 break;
9013 case 0x137: // lhzux (Load HW & Zero, Update Indexed, PPC32 p452)
9014 if (rA_addr == 0 || rA_addr == rD_addr) {
9015 vex_printf("dis_int_load(ppc)(lhzux,rA_addr|rD_addr)\n");
9016 return False;
9018 DIP("lhzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9019 val = load(Ity_I16, mkexpr(EA));
9020 putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
9021 putIReg( rA_addr, mkexpr(EA) );
9022 break;
9024 case 0x117: // lhzx (Load HW & Zero, Indexed, PPC32 p453)
9025 DIP("lhzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9026 val = load(Ity_I16, mkexpr(EA));
9027 putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
9028 break;
9030 case 0x037: // lwzux (Load W & Zero, Update Indexed, PPC32 p462)
9031 if (rA_addr == 0 || rA_addr == rD_addr) {
9032 vex_printf("dis_int_load(ppc)(lwzux,rA_addr|rD_addr)\n");
9033 return False;
9035 DIP("lwzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9036 val = load(Ity_I32, mkexpr(EA));
9037 putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
9038 putIReg( rA_addr, mkexpr(EA) );
9039 break;
9041 case 0x017: // lwzx (Load W & Zero, Indexed, PPC32 p463)
9042 DIP("lwzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9043 val = load(Ity_I32, mkexpr(EA));
9044 putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
9045 break;
9048 /* 64bit Loads */
9049 case 0x035: // ldux (Load DWord, Update Indexed, PPC64 p475)
9050 if (rA_addr == 0 || rA_addr == rD_addr) {
9051 vex_printf("dis_int_load(ppc)(ldux,rA_addr|rD_addr)\n");
9052 return False;
9054 DIP("ldux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9055 putIReg( rD_addr, load(Ity_I64, mkexpr(EA)) );
9056 putIReg( rA_addr, mkexpr(EA) );
9057 break;
9059 case 0x015: // ldx (Load DWord, Indexed, PPC64 p476)
9060 DIP("ldx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9061 putIReg( rD_addr, load(Ity_I64, mkexpr(EA)) );
9062 break;
9064 case 0x175: // lwaux (Load W Alg, Update Indexed, PPC64 p501)
9065 if (rA_addr == 0 || rA_addr == rD_addr) {
9066 vex_printf("dis_int_load(ppc)(lwaux,rA_addr|rD_addr)\n");
9067 return False;
9069 DIP("lwaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9070 putIReg( rD_addr,
9071 unop(Iop_32Sto64, load(Ity_I32, mkexpr(EA))) );
9072 putIReg( rA_addr, mkexpr(EA) );
9073 break;
9075 case 0x155: // lwax (Load W Alg, Indexed, PPC64 p502)
9076 DIP("lwax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
9077 putIReg( rD_addr,
9078 unop(Iop_32Sto64, load(Ity_I32, mkexpr(EA))) );
9079 break;
9081 default:
9082 vex_printf("dis_int_load(ppc)(opc2)\n");
9083 return False;
9085 break;
9087 default:
9088 vex_printf("dis_int_load(ppc)(opc1)\n");
9089 return False;
9091 return True;
/*
  VSX Vector Splat Immediate Word 8RR:D-form
*/

/* Translate xxsplti32dx, xxspltidp and xxspltiw: splat a 32-bit
   immediate (assembled from the prefix and suffix instruction words)
   into VSR[XT].  Returns True iff the instruction was decoded and
   translated. */
static Bool dis_vector_splat_imm_prefix ( UInt prefix, UInt theInstr )
{
   UChar opc1 = ifieldOPC(theInstr);
   UChar opc2 = IFIELD(theInstr, (31-(46-32)), 4);   // bits[43:46]

   /* The 32-bit immediate is split across the prefix (high half) and
      the suffix word (low half). */
   UInt imm0 = ifield_imm_8RR_D(prefix);
   UInt imm1 = ifield_imm_8RR_D(theInstr);
   UInt IMM32 = (imm0 << 16) | imm1;

   UInt XT_addr = ifieldRegXT_8RR_D(theInstr);

   if (opc1 != 0x20) return False;

   /* These are prefix instructions, no equivalent word instruction. */
   switch(opc2) {
   case 0x0:
   case 0x1:
   {
      /* VSX Vector Splat Immediate32 Doubleword Indexed 8RR:D-form */
      UInt IX = IFIELD(theInstr, (31-(46-32)), 1);   // bit[46]
      IRTemp tmp = newTemp(Ity_V128);
      IRTemp mask = newTemp(Ity_V128);
      IRTemp new_elements = newTemp(Ity_V128);

      DIP("xxsplti32dx %u,%u,%u\n", XT_addr, IX, IMM32);

      assign( tmp, getVSReg( XT_addr ) );

      /* IX selects which word of each doubleword receives IMM32; the
         mask preserves the other word of each doubleword. */
      if (IX == 0) {
         assign( mask, binop( Iop_64HLtoV128,
                              binop( Iop_32HLto64,
                                     mkU32( 0 ), mkU32( 0xFFFFFFFF ) ),
                              binop( Iop_32HLto64,
                                     mkU32( 0 ), mkU32( 0xFFFFFFFF ) ) ) );
         assign( new_elements, binop( Iop_64HLtoV128,
                                      binop( Iop_32HLto64,
                                             mkU32( IMM32 ), mkU32( 0 ) ),
                                      binop( Iop_32HLto64,
                                             mkU32( IMM32 ), mkU32( 0 ) ) ) );
      } else {
         assign( mask, binop( Iop_64HLtoV128,
                              binop( Iop_32HLto64,
                                     mkU32( 0xFFFFFFFF ), mkU32( 0 ) ),
                              binop( Iop_32HLto64,
                                     mkU32( 0xFFFFFFFF ), mkU32( 0 ) ) ) );
         assign( new_elements, binop( Iop_64HLtoV128,
                                      binop( Iop_32HLto64,
                                             mkU32( 0 ), mkU32( IMM32 ) ),
                                      binop( Iop_32HLto64,
                                             mkU32( 0 ), mkU32( IMM32 ) ) ) );
      }

      putVSReg( XT_addr,
                binop( Iop_OrV128,
                       binop( Iop_AndV128, mkexpr( tmp ), mkexpr( mask) ),
                       mkexpr( new_elements ) ) );
      break;
   }
   case 0x2:
   {
      IRTemp result = newTemp(Ity_I64);

      /* VSX Vector Splat Immediate Double-precision 8RR:D-form */
      DIP("xxspltidp %u,%u\n", XT_addr, IMM32);

      /* Treat IMM32 as a single-precision value, widen to double, and
         splat the 64-bit image into both halves of the register. */
      assign( result,
              unop( Iop_ReinterpF64asI64,
                    unop( Iop_F32toF64,
                          unop( Iop_ReinterpI32asF32,
                                mkU32( IMM32 ) ) ) ) );
      putVSReg( XT_addr, binop( Iop_64HLtoV128,
                                mkexpr( result ), mkexpr( result ) ) );

      break;
   }
   case 0x3:
      /* VSX Vector Splat Immediate Word 8RR:D-form */
      DIP("xxspltiw %u,%u\n", XT_addr, IMM32);

      putVSReg( XT_addr,
                binop( Iop_64HLtoV128,
                       binop( Iop_32HLto64,
                              mkU32( IMM32 ), mkU32( IMM32 ) ),
                       binop( Iop_32HLto64,
                              mkU32( IMM32 ), mkU32( IMM32 ) ) ) );
      break;
   default:
      vex_printf("dis_vector_splat_imm_prefix (opc2)\n");
      return False;
   }

   return True;
}
9192 VSX Vector Permute Extended 8RR:D-form
/* Disassemble the prefixed VSX permute-class instructions: xxpermx
   (opc2 == 0) and xxeval (opc2 == 1), both 8RR:XX4-form.  These are
   prefix-only instructions with no word-instruction equivalent.
   Returns True if the instruction was decoded, False otherwise.  */
9194 static Bool dis_vector_permute_prefix ( UInt prefix, UInt theInstr,
9195 const VexAbiInfo* vbi )
9197 #define MAX_ELE 16
9198 UChar opc1 = ifieldOPC(theInstr);
9199 UChar opc2 = IFIELD(theInstr, (63-59), 2); // bits[58:59]
9200 UChar rXT_addr = ifieldRegXT_8RR_XX4( theInstr );
9201 UChar rXA_addr = ifieldRegXA_8RR_XX4( theInstr );
9202 UChar rXB_addr = ifieldRegXB_8RR_XX4( theInstr );
9203 UChar rXC_addr = ifieldRegXC_8RR_XX4( theInstr );
9205 Int i;
9206 IRTemp rXA = newTemp(Ity_V128);
9207 IRTemp rXB = newTemp(Ity_V128);
9208 IRTemp rXC = newTemp(Ity_V128);
9209 IRTemp cmp_mask = newTemp(Ity_I64);
9210 IRTemp eidx_mask = newTemp(Ity_I64);
9211 IRTemp result[MAX_ELE+1];
9212 IRTemp result_mask[MAX_ELE];
9213 IRTemp byte[MAX_ELE];
9214 IRTemp eidx[MAX_ELE];
9216 /* These are prefix instructions, no equivalent word instruction. */
9217 if ((opc1 != 0x22) && (opc2 != 0)) return False;
9219 assign( rXA, getVSReg( rXA_addr ) );
9220 assign( rXB, getVSReg( rXB_addr ) );
9221 assign( rXC, getVSReg( rXC_addr ) );
9223 switch(opc2) {
9224 case 0:
9226 UInt UIM = IFIELD(prefix, 0, 3); // bit [29:31] of the prefix
9228 DIP("xxpermx v%u,v%u,v%u,v%u,%u\n",
9229 rXT_addr, rXA_addr, rXB_addr, rXC_addr, UIM);
9231 result[MAX_ELE] = newTemp(Ity_V128);
9232 assign( eidx_mask, mkU64( 0x1F ) );
9233 assign( cmp_mask, mkU64( 0x7 ) );
9234 assign( result[MAX_ELE], binop( Iop_64HLtoV128, mkU64( 0 ),
9235 mkU64( 0 ) ) );
/* Build the destination one byte at a time: result[i] accumulates the
   selected byte for lane i on top of result[i+1]; result[MAX_ELE] is
   the all-zero starting value. */
9237 for (i = MAX_ELE-1; i >= 0; i--) {
9238 eidx[i] = newTemp( Ity_I64 );
9239 byte[i] = newTemp( Ity_I64 );
9240 result[i] = newTemp( Ity_V128 );
9241 result_mask[i] = newTemp( Ity_I64 );
9243 /* The eidx is left based, make index right based for
9244 extractBytefromV256(). */
9245 if ( i >= 8) {
9246 assign( eidx[i],
9247 binop( Iop_Sub64,
9248 mkU64( 31 ),
9249 binop( Iop_And64,
9250 mkexpr( eidx_mask ),
9251 binop( Iop_Shr64,
9252 unop( Iop_V128HIto64, mkexpr( rXC ) ),
9253 mkU8( (i - 8)*8 ) ) ) ) );
9254 assign( result_mask[i],
9255 unop( Iop_1Sto64,
9256 binop( Iop_CmpEQ64,
9257 mkU64( UIM ),
9258 binop( Iop_And64,
9259 mkexpr ( cmp_mask ),
9260 // bits 0:2 of ith byte
9261 binop( Iop_Shr64,
9262 unop( Iop_V128HIto64,
9263 mkexpr( rXC ) ),
9264 mkU8( (i - 8)*8 + 5 ) ) )
9265 ) ) );
9266 } else {
9267 assign( eidx[i],
9268 binop( Iop_Sub64,
9269 mkU64( 31 ),
9270 binop( Iop_And64,
9271 mkexpr( eidx_mask ),
9272 binop( Iop_Shr64,
9273 unop( Iop_V128to64, mkexpr( rXC ) ),
9274 mkU8( i*8 ) ) ) ) );
9275 assign( result_mask[i],
9276 unop( Iop_1Sto64,
9277 binop( Iop_CmpEQ64,
9278 mkU64( UIM ),
9279 binop( Iop_And64,
9280 mkexpr ( cmp_mask ),
9281 // bits 0:2 of ith byte
9282 binop( Iop_Shr64,
9283 unop( Iop_V128to64,
9284 mkexpr( rXC ) ),
9285 mkU8( i*8 + 5 ) ) ) ) ) );
/* Byte is kept only when the selector's top-3 bits matched UIM,
   otherwise result_mask[i] is zero and the lane stays zero. */
9288 assign( byte[i],
9289 binop( Iop_And64,
9290 mkexpr( result_mask[i] ),
9291 extractBytefromV256( rXA, rXB, eidx[i] ) ) );
9293 assign( result[i], insert_field_into_vector( result[i+1],
9294 mkU64( i ),
9295 mkexpr( byte[i] ),
9296 mkU64( 0xFF ) ) );
9298 putVSReg( rXT_addr, mkexpr( result[0] ) );
9300 break;
9302 case 1:
9304 UInt IMM = IFIELD(prefix, 0, 8); // bit [24:31] of the prefix
9305 DIP("xxeval v%u,v%u,v%u,v%u,%u\n",
9306 rXT_addr, rXA_addr, rXB_addr, rXC_addr, IMM);
9307 putVSReg( rXT_addr,
9308 vector_evaluate_inst ( vbi, mkexpr( rXA ), mkexpr( rXB ),
9309 mkexpr( rXC ), mkU64( IMM ) ) );
9311 break;
9313 default:
9314 vex_printf("dis_vector_permute_prefix(ppc)(opc2)\n");
9315 return False;
9318 return True;
9319 #undef MAX_ELE
9323 VSX Vector Blend Variable 8RR:XX4-form
/* Disassemble the prefixed VSX blend instructions xxblendvb/h/w/d
   (8RR:XX4-form, selected by opc2).  For each element, the high bit of
   the corresponding rXC element chooses between rXA (bit clear) and
   rXB (bit set).  Returns True if decoded, False otherwise.  */
9325 static Bool dis_vector_blend_prefix ( UInt prefix, UInt theInstr )
9327 UChar opc1 = ifieldOPC(theInstr);
9328 UChar opc2 = IFIELD(theInstr, (63-59), 2); // bits[58:59]
9329 UChar rXT_addr = ifieldRegXT_8RR_XX4( theInstr );
9330 UChar rXA_addr = ifieldRegXA_8RR_XX4( theInstr );
9331 UChar rXB_addr = ifieldRegXB_8RR_XX4( theInstr );
9332 UChar rXC_addr = ifieldRegXC_8RR_XX4( theInstr );
9334 IRTemp rXA = newTemp(Ity_V128);
9335 IRTemp rXB = newTemp(Ity_V128);
9336 IRTemp rXC = newTemp(Ity_V128);
9337 IRTemp bit_mask = newTemp(Ity_V128);
9338 IRTemp mask_gen = newTemp(Ity_V128);
9339 IRTemp mask = newTemp(Ity_V128);
9341 /* These are prefix instructions, no equivalent word instruction. */
9342 if (opc1 != 0x21) return False;
9344 /* Generate the mask to select the elements from rXA or rXB. Use a vector
9345 multiply to generate the mask to select the elements. Take the selector
9346 bit for the element (rXC & bit_mask) and multiply it by all 1's
9347 (mask_gen). If the selector bit was 0, then we get zero bits for that
9348 element entry, otherwise we get 1's.
9350 Unfortunately, we don't have an integer vector multiply so we have to do it as
9351 an even and odd multiply for byte, halfword and word elements. Note, the
9352 MK_Iop_MullOddXUxY shifts the operands right and uses the MullEven
9353 operator, so we have to move the result back to its correct lane
9354 position. */
9356 assign( rXA, getVSReg( rXA_addr ) );
9357 assign( rXB, getVSReg( rXB_addr ) );
9358 assign( rXC, getVSReg( rXC_addr ) );
9360 assign( mask_gen,
9361 binop( Iop_64HLtoV128,
9362 mkU64( 0xFFFFFFFFFFFFFFFFULL),
9363 mkU64( 0xFFFFFFFFFFFFFFFFULL) ) );
9365 switch(opc2) {
9366 case 0:
9367 /* VSX Vector Blend Variable Byte 8RR:XX4-Form */
9368 DIP("xxblendvb v%u,v%u,v%u,v%u\n",
9369 rXT_addr, rXA_addr, rXB_addr, rXC_addr);
/* bit_mask = sign bit of each byte of rXC, moved down to bit 0. */
9371 assign( bit_mask,
9372 binop( Iop_ShrV128,
9373 binop( Iop_AndV128,
9374 mkexpr( rXC ),
9375 binop( Iop_64HLtoV128,
9376 mkU64( 0x8080808080808080ULL ),
9377 mkU64( 0x8080808080808080ULL ) ) ),
9378 mkU8 ( 7 ) ) );
9379 assign( mask,
9380 binop( Iop_OrV128,
9381 binop( Iop_MullEven8Ux16,
9382 mkexpr( mask_gen ),
9383 mkexpr( bit_mask ) ),
9384 binop( Iop_ShlV128,
9385 MK_Iop_MullOdd8Ux16(
9386 mkexpr( mask_gen ),
9387 mkexpr( bit_mask ) ),
9388 mkU8( 8 ) ) ) );
9389 break;
9391 case 1:
9392 /* VSX Vector Blend Variable Halfword 8RR:XX4-Form */
9393 DIP("xxblendvh v%u,v%u,v%u,v%u\n",
9394 rXT_addr, rXA_addr, rXB_addr, rXC_addr);
9396 assign( bit_mask,
9397 binop( Iop_ShrV128,
9398 binop( Iop_AndV128,
9399 mkexpr( rXC ),
9400 binop( Iop_64HLtoV128,
9401 mkU64( 0x8000800080008000ULL ),
9402 mkU64( 0x8000800080008000ULL ) ) ),
9403 mkU8 ( 15 ) ) );
9404 assign( mask,
9405 binop( Iop_OrV128,
9406 binop( Iop_MullEven16Ux8,
9407 mkexpr( mask_gen ),
9408 mkexpr( bit_mask ) ),
9409 binop( Iop_ShlV128,
9410 MK_Iop_MullOdd16Ux8(
9411 mkexpr( mask_gen ),
9412 mkexpr( bit_mask ) ),
9413 mkU8( 16 ) ) ) );
9414 break;
9416 case 2:
9417 /* VSX Vector Blend Variable Word 8RR:XX4-Form */
9418 DIP("xxblendvw v%u,v%u,v%u,v%u\n",
9419 rXT_addr, rXA_addr, rXB_addr, rXC_addr);
9421 assign( bit_mask,
9422 binop( Iop_ShrV128,
9423 binop( Iop_AndV128,
9424 mkexpr( rXC ),
9425 binop( Iop_64HLtoV128,
9426 mkU64( 0x8000000080000000ULL ),
9427 mkU64( 0x8000000080000000ULL ) ) ),
9428 mkU8 ( 31 ) ) );
9429 assign( mask,
9430 binop( Iop_OrV128,
9431 binop( Iop_MullEven32Ux4,
9432 mkexpr( mask_gen ),
9433 mkexpr( bit_mask ) ),
9434 binop( Iop_ShlV128,
9435 MK_Iop_MullOdd32Ux4(
9436 mkexpr( mask_gen ),
9437 mkexpr( bit_mask ) ),
9438 mkU8( 32 ) ) ) );
9439 break;
9441 case 3:
9442 /* VSX Vector Blend Variable Double 8RR:XX4-Form */
9443 DIP("xxblendvd v%u,v%u,v%u,v%u\n",
9444 rXT_addr, rXA_addr, rXB_addr, rXC_addr);
9446 /* Have to use a different trick here */
/* No 64x64 multiply: instead sign-extend bit 63 of each doubleword of
   rXC to fill the whole lane. */
9447 assign( mask,
9448 binop( Iop_64HLtoV128,
9449 unop( Iop_1Sto64,
9450 unop( Iop_64to1,
9451 binop( Iop_Shr64,
9452 unop( Iop_V128HIto64,
9453 mkexpr( rXC ) ),
9454 mkU8( 63) ) ) ),
9455 unop( Iop_1Sto64,
9456 unop( Iop_64to1,
9457 binop( Iop_Shr64,
9458 unop( Iop_V128to64,
9459 mkexpr( rXC ) ),
9460 mkU8( 63) ) ) ) ) );
9461 break;
9463 default:
9464 vex_printf("dis_vector_blend_prefix (opc2)\n");
9465 return False;
/* XT = (~mask & XA) | (mask & XB). */
9467 putVSReg( rXT_addr, binop( Iop_OrV128,
9468 binop( Iop_AndV128,
9469 unop( Iop_NotV128, mkexpr( mask ) ),
9470 mkexpr( rXA ) ),
9471 binop( Iop_AndV128,
9472 mkexpr( mask ),
9473 mkexpr( rXB ) ) ) );
9474 return True;
9479 Integer Store Instructions
/* Disassemble the DS-form 64-bit stores and their prefixed variants:
   std/pstd, stdu, stq/pstq.  The prefixed opcodes (0x3C pstq, 0x3D pstd)
   are mapped onto the word-instruction cases by forcing b0/b1.
   Returns True if decoded, False otherwise.  */
9481 static Bool dis_int_store_ds_prefix ( UInt prefix,
9482 UInt theInstr, const VexAbiInfo* vbi)
9484 UChar opc1 = ifieldOPC(theInstr);
9485 UInt rS_addr = ifieldRegDS(theInstr);
9486 UInt rA_addr = ifieldRegA(theInstr);
9487 UChar b0 = ifieldBIT0(theInstr);
9488 UChar b1 = ifieldBIT1(theInstr);
9489 IRType ty = mode64 ? Ity_I64 : Ity_I32;
9490 IRTemp rS = newTemp(ty);
9491 IRTemp EA = newTemp(ty);
9492 UInt ptype = PrefixType(prefix);
9493 Bool is_prefix = prefix_instruction( prefix );
9494 UInt R = 0; // must be zero for word instruction
9495 ULong immediate_val = 0;
9496 Int simm16 = extend_s_16to32(ifieldUIMM16(theInstr));
9498 if (opc1 == 0x3C) {
9499 // force opc2 to 2 to map pstq to stq inst
9500 b0 = 0;
9501 b1 = 1;
9502 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
9503 ptype, DSFORM_IMMASK, &immediate_val,
9504 &R ) );
9505 } else if (opc1 == 0x3D) {
9506 // force opc2 to 0 to map pstd to std inst
9507 b0 = 0;
9508 b1 = 0;
9509 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
9510 ptype, DFORM_IMMASK, &immediate_val,
9511 &R ) );
/* NOTE(review): opc1 0x3 path forms a plain D-form EA from the raw
   simm16 — confirm against the caller which encoding reaches here. */
9513 } else if ( opc1 == 0x3 ) {
9514 assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
9516 } else if ( opc1 == 0x3E ) { // std, stdu, stq
9517 // mask off lowest 2 bits of immediate before forming EA
9518 immediate_val = simm16 & 0xFFFFFFFC;
9519 assign( EA, ea_rAor0_simm( rA_addr, immediate_val ) );
9521 } else {
9522 return False;
9525 assign( rS, getIReg(rS_addr) );
9527 /* DS Form - 64bit Stores. In each case EA will have been formed
9528 with the lowest 2 bits masked off the immediate offset. */
9529 switch ((b1<<1) | b0) {
9530 case 0x0: // std (Store DWord, PPC64 p580)
9531 if (!mode64)
9532 return False;
9534 pDIP( is_prefix,"std r%u,%llu(r%u)", rS_addr, immediate_val, rA_addr );
9535 DIPp( is_prefix, ",%u", R );
9536 store( mkexpr(EA), mkexpr(rS) );
9537 break;
9539 case 0x1: // stdu (Store DWord, Update, PPC64 p583)
9540 /* Note this instruction is handled here but it isn't actually a
9541 prefix instruction. Just makes the parsing easier to handle it
9542 here. */
9543 if (!mode64)
9544 return False;
9546 DIP("stdu r%u,%llu(r%u)\n", rS_addr, immediate_val, rA_addr);
9547 putIReg( rA_addr, mkexpr(EA) );
9548 store( mkexpr(EA), mkexpr(rS) );
9549 break;
9551 case 0x2: // stq, pstq (Store QuadWord, Update, PPC64 p583)
9553 IRTemp EA_hi = newTemp(ty);
9554 IRTemp EA_lo = newTemp(ty);
9556 pDIP( is_prefix, "stq r%u,%llu(r%u)", rS_addr, immediate_val, rA_addr);
9557 DIPp( is_prefix, ",%u", R );
9559 if (mode64) {
9560 if (host_endness == VexEndnessBE) {
9562 /* upper 64-bits */
9563 assign( EA_hi, mkexpr(EA));
9565 /* lower 64-bits */
9566 assign( EA_lo, binop(Iop_Add64, mkexpr(EA), mkU64(8)));
9568 } else {
9569 /* upper 64-bits */
9570 assign( EA_hi, binop(Iop_Add64, mkexpr(EA), mkU64(8)));
9572 /* lower 64-bits */
9573 assign( EA_lo, mkexpr(EA));
9575 } else {
9576 /* upper half of upper 64-bits */
9577 assign( EA_hi, binop(Iop_Add32, mkexpr(EA), mkU32(4)));
9579 /* lower half of upper 64-bits */
9580 assign( EA_lo, binop(Iop_Add32, mkexpr(EA), mkU32(12)));
9583 /* Note, the store order for stq instruction is the same for BE
9584 and LE. The store order for the pstq instruction is endian aware
9585 store. */
9586 if (is_prefix &&( host_endness == VexEndnessLE )) {
9587 // LE and pstq
9588 store( mkexpr(EA_hi), getIReg( rS_addr+1 ) );
9589 store( mkexpr(EA_lo), mkexpr(rS) );
9590 } else {
9591 store( mkexpr(EA_hi), mkexpr(rS) );
9592 store( mkexpr(EA_lo), getIReg( rS_addr+1 ) );
9594 break;
9596 default:
9597 vex_printf("dis_int_store_ds_prefix(ppc)(opc1)\n");
9598 return False;
9600 return True;
9603 static Bool dis_int_store_prefix ( UInt prefix,
9604 UInt theInstr, const VexAbiInfo* vbi)
9606 UChar opc1 = ifieldOPC(theInstr);
9607 UInt rS_addr = ifieldRegDS(theInstr);
9608 UInt rA_addr = ifieldRegA(theInstr);
9609 IRType ty = mode64 ? Ity_I64 : Ity_I32;
9610 IRTemp rS = newTemp(ty);
9611 IRTemp EA = newTemp(ty);
9612 UInt ptype = PrefixType(prefix);
9613 Bool is_prefix = prefix_instruction( prefix );
9614 ULong immediate_val = 0;
9615 UInt R = 0; // must be zero for word instruction
9617 assign( rS, getIReg(rS_addr) );
9618 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
9619 ptype, DFORM_IMMASK,
9620 &immediate_val, &R ) );
9622 switch (opc1) {
9623 case 0x24: // stw (Store W, PPC32 p530)
9624 pDIP( is_prefix, "stw r%u,%llu(r%u)\n", rS_addr, immediate_val, rA_addr );
9625 DIPp( is_prefix, ",%u", R );
9626 store( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
9627 break;
9629 case 0x26: // stb (Store B, PPC32 p509)
9630 pDIP( is_prefix, "stb r%u,%llu(r%u)", rS_addr, immediate_val, rA_addr );
9631 DIPp( is_prefix, ",%u", R );
9632 store( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
9633 break;
9635 case 0x2C: // sth (Store HW, PPC32 p522)
9636 pDIP( is_prefix, "sth r%u,%llu(r%u)", rS_addr, immediate_val, rA_addr );
9637 DIPp( is_prefix, ",%u", R );
9638 store( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
9639 break;
9641 default:
9642 vex_printf("dis_int_store_prefix(ppc)(opc1)\n");
9643 return False;
9645 return True;
/* Disassemble the non-prefixed integer stores: D-form stbu/sthu/stw/stwu,
   and the X-form indexed/update stores under opc1 0x1F (stbx, stbux,
   sthx, sthux, stwx, stwux, stdx, stdux).  Returns True if decoded,
   False otherwise.  */
9648 static Bool dis_int_store ( UInt prefix, UInt theInstr, const VexAbiInfo* vbi )
9650 /* D-Form, X-Form, DS-Form */
9651 UChar opc1 = ifieldOPC(theInstr);
9652 UInt rS_addr = ifieldRegDS(theInstr);
9653 UInt rA_addr = ifieldRegA(theInstr);
9654 UInt uimm16 = ifieldUIMM16(theInstr);
9655 UInt rB_addr = ifieldRegB(theInstr);
9656 UInt opc2 = ifieldOPClo10(theInstr);
9657 UChar b0 = ifieldBIT0(theInstr);
9659 Int simm16 = extend_s_16to32(uimm16);
9660 IRType ty = mode64 ? Ity_I64 : Ity_I32;
9661 IRTemp rS = newTemp(ty);
9662 IRTemp rB = newTemp(ty);
9663 IRTemp EA = newTemp(ty);
9665 /* There is no prefixed version of these instructions. */
9666 PREFIX_CHECK
9668 assign( rB, getIReg(rB_addr) );
9669 assign( rS, getIReg(rS_addr) );
/* First switch only selects how EA is formed: indexed (rA|0 + rB) for
   X-form, otherwise rA|0 + simm16. */
9671 switch (opc1) {
9672 case 0x1F: // register offset
9673 assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
9674 break;
9676 /* fallthrough */
9677 default: // immediate offset
9678 assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
9679 break;
9682 switch (opc1) {
9683 case 0x27: // stbu (Store B, Update, PPC32 p510)
9684 if (rA_addr == 0 ) {
9685 vex_printf("dis_int_store(ppc)(stbu,rA_addr)\n");
9686 return False;
9688 DIP("stbu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
9689 putIReg( rA_addr, mkexpr(EA) );
9690 store( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
9691 break;
9693 case 0x2D: // sthu (Store HW, Update, PPC32 p524)
9694 if (rA_addr == 0) {
9695 vex_printf("dis_int_store(ppc)(sthu,rA_addr)\n");
9696 return False;
9698 DIP("sthu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
9699 putIReg( rA_addr, mkexpr(EA) );
9700 store( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
9701 break;
9703 case 0x24: // stw (Store W, PPC32 p530)
9705 DIP("stw r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
9706 store( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
9708 break;
9710 case 0x25: // stwu (Store W, Update, PPC32 p534)
9711 if (rA_addr == 0) {
9712 vex_printf("dis_int_store(ppc)(stwu,rA_addr)\n");
9713 return False;
9715 DIP("stwu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
9716 putIReg( rA_addr, mkexpr(EA) );
9717 store( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
9718 break;
9720 /* X Form : all these use EA_indexed */
9721 case 0x1F:
9722 if (b0 != 0) {
9723 vex_printf("dis_int_store(ppc)(0x1F,b0)\n");
9724 return False;
9727 switch (opc2) {
9728 case 0x0F7: // stbux (Store B, Update Indexed, PPC32 p511)
9729 if (rA_addr == 0) {
9730 vex_printf("dis_int_store(ppc)(stbux,rA_addr)\n");
9731 return False;
9733 DIP("stbux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
9734 putIReg( rA_addr, mkexpr(EA) );
9735 store( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
9736 break;
9738 case 0x0D7: // stbx (Store B Indexed, PPC32 p512)
9739 DIP("stbx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
9740 store( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
9741 break;
9743 case 0x1B7: // sthux (Store HW, Update Indexed, PPC32 p525)
9744 if (rA_addr == 0) {
9745 vex_printf("dis_int_store(ppc)(sthux,rA_addr)\n");
9746 return False;
9748 DIP("sthux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
9749 putIReg( rA_addr, mkexpr(EA) );
9750 store( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
9751 break;
9753 case 0x197: // sthx (Store HW Indexed, PPC32 p526)
9754 DIP("sthx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
9755 store( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
9756 break;
9758 case 0x0B7: // stwux (Store W, Update Indexed, PPC32 p535)
9759 if (rA_addr == 0) {
9760 vex_printf("dis_int_store(ppc)(stwux,rA_addr)\n");
9761 return False;
9763 DIP("stwux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
9764 putIReg( rA_addr, mkexpr(EA) );
9765 store( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
9766 break;
9768 case 0x097: // stwx (Store W Indexed, PPC32 p536)
9769 DIP("stwx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
9770 store( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
9771 break;
9774 /* 64bit Stores */
9775 case 0x0B5: // stdux (Store DWord, Update Indexed, PPC64 p584)
9776 if (rA_addr == 0) {
9777 vex_printf("dis_int_store(ppc)(stdux,rA_addr)\n");
9778 return False;
9780 DIP("stdux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
9781 putIReg( rA_addr, mkexpr(EA) );
9782 store( mkexpr(EA), mkexpr(rS) );
9783 break;
9785 case 0x095: // stdx (Store DWord Indexed, PPC64 p585)
9786 DIP("stdx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
9787 store( mkexpr(EA), mkexpr(rS) );
9788 break;
9790 default:
9791 vex_printf("dis_int_store(ppc)(opc2)\n");
9792 return False;
9794 break;
9796 default:
9797 vex_printf("dis_int_store(ppc)(opc1)\n");
9798 return False;
9800 return True;
9806 Integer Load/Store Multiple Instructions
9808 static Bool dis_int_ldst_mult ( UInt prefix, UInt theInstr )
9810 /* D-Form */
9811 UChar opc1 = ifieldOPC(theInstr);
9812 UChar rD_addr = ifieldRegDS(theInstr);
9813 UChar rS_addr = rD_addr;
9814 UChar rA_addr = ifieldRegA(theInstr);
9815 UInt uimm16 = ifieldUIMM16(theInstr);
9817 Int simm16 = extend_s_16to32(uimm16);
9818 IRType ty = mode64 ? Ity_I64 : Ity_I32;
9819 IROp mkAdd = mode64 ? Iop_Add64 : Iop_Add32;
9820 IRTemp EA = newTemp(ty);
9821 UInt r = 0;
9822 UInt ea_off = 0;
9823 IRExpr* irx_addr;
9825 /* There is no prefixed version of these instructions. */
9826 PREFIX_CHECK
9828 assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
9830 switch (opc1) {
9831 case 0x2E: // lmw (Load Multiple Word, PPC32 p454)
9832 if (rA_addr >= rD_addr) {
9833 vex_printf("dis_int_ldst_mult(ppc)(lmw,rA_addr)\n");
9834 return False;
9836 DIP("lmw r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
9837 for (r = rD_addr; r <= 31; r++) {
9838 irx_addr = binop(mkAdd, mkexpr(EA), mode64 ? mkU64(ea_off) : mkU32(ea_off));
9839 putIReg( r, mkWidenFrom32(ty, load(Ity_I32, irx_addr ),
9840 False) );
9841 ea_off += 4;
9843 break;
9845 case 0x2F: // stmw (Store Multiple Word, PPC32 p527)
9846 DIP("stmw r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
9847 for (r = rS_addr; r <= 31; r++) {
9848 irx_addr = binop(mkAdd, mkexpr(EA), mode64 ? mkU64(ea_off) : mkU32(ea_off));
9849 store( irx_addr, mkNarrowTo32(ty, getIReg(r)) );
9850 ea_off += 4;
9852 break;
9854 default:
9855 vex_printf("dis_int_ldst_mult(ppc)(opc1)\n");
9856 return False;
9858 return True;
9864 Integer Load/Store String Instructions
/* Emit the IR for a load-string-word (lswi/lswx) transfer of tNBytes
   bytes starting at EA into consecutive registers beginning at rD,
   wrapping from r31 to r0.  Each loaded register is zeroed first; an
   IRStmt_Exit ends the sequence once tNBytes bytes have been moved.  */
9866 static
9867 void generate_lsw_sequence ( IRTemp tNBytes, // # bytes, :: Ity_I32
9868 IRTemp EA, // EA
9869 Int rD, // first dst register
9870 Int maxBytes ) // 32 or 128
9872 Int i, shift = 24;
9873 IRExpr* e_nbytes = mkexpr(tNBytes);
9874 IRExpr* e_EA = mkexpr(EA);
9875 IRType ty = mode64 ? Ity_I64 : Ity_I32;
9877 vassert(rD >= 0 && rD < 32);
/* Pre-decrement so the first loop iteration's "(i % 4) == 0" branch
   advances rD back to the requested first register. */
9878 rD--; if (rD < 0) rD = 31;
9880 for (i = 0; i < maxBytes; i++) {
9881 /* if (nBytes < (i+1)) goto NIA; */
9882 stmt( IRStmt_Exit( binop(Iop_CmpLT32U, e_nbytes, mkU32(i+1)),
9883 Ijk_Boring,
9884 mkSzConst( ty, nextInsnAddr()), OFFB_CIA ));
9885 /* when crossing into a new dest register, set it to zero. */
9886 if ((i % 4) == 0) {
9887 rD++; if (rD == 32) rD = 0;
9888 putIReg(rD, mkSzImm(ty, 0));
9889 shift = 24;
9891 /* rD |= (8Uto32(*(EA+i))) << shift */
9892 vassert(shift == 0 || shift == 8 || shift == 16 || shift == 24);
9893 putIReg(
9894 rD,
9895 mkWidenFrom32(
9896 ty,
9897 binop(
9898 Iop_Or32,
9899 mkNarrowTo32(ty, getIReg(rD)),
9900 binop(
9901 Iop_Shl32,
9902 unop(
9903 Iop_8Uto32,
9904 load( Ity_I8,
9905 binop( mkSzOp(ty,Iop_Add8),
9906 e_EA, mkSzImm(ty,i)))
9908 mkU8(toUChar(shift))
9911 /*Signed*/False
9914 shift -= 8;
/* Emit the IR for a store-string-word (stswi/stswx) transfer of tNBytes
   bytes from consecutive registers beginning at rS to memory at EA,
   wrapping from r31 to r0.  Mirrors generate_lsw_sequence() above; an
   IRStmt_Exit ends the sequence once tNBytes bytes have been moved.  */
9918 static
9919 void generate_stsw_sequence ( IRTemp tNBytes, // # bytes, :: Ity_I32
9920 IRTemp EA, // EA
9921 Int rS, // first src register
9922 Int maxBytes ) // 32 or 128
9924 Int i, shift = 24;
9925 IRExpr* e_nbytes = mkexpr(tNBytes);
9926 IRExpr* e_EA = mkexpr(EA);
9927 IRType ty = mode64 ? Ity_I64 : Ity_I32;
9929 vassert(rS >= 0 && rS < 32);
/* Pre-decrement so the first iteration's register-crossing check
   advances rS back to the requested first register. */
9930 rS--; if (rS < 0) rS = 31;
9932 for (i = 0; i < maxBytes; i++) {
9933 /* if (nBytes < (i+1)) goto NIA; */
9934 stmt( IRStmt_Exit( binop(Iop_CmpLT32U, e_nbytes, mkU32(i+1)),
9935 Ijk_Boring,
9936 mkSzConst( ty, nextInsnAddr() ), OFFB_CIA ));
9937 /* check for crossing into a new src register. */
9938 if ((i % 4) == 0) {
9939 rS++; if (rS == 32) rS = 0;
9940 shift = 24;
9942 /* *(EA+i) = 32to8(rS >> shift) */
9943 vassert(shift == 0 || shift == 8 || shift == 16 || shift == 24);
9944 store(
9945 binop( mkSzOp(ty,Iop_Add8), e_EA, mkSzImm(ty,i)),
9946 unop( Iop_32to8,
9947 binop( Iop_Shr32,
9948 mkNarrowTo32( ty, getIReg(rS) ),
9949 mkU8( toUChar(shift) )))
9951 shift -= 8;
/* Disassemble the integer load/store string instructions lswi, lswx,
   stswi, stswx (X-form).  Sets *stopHere when the emitted sequence
   contains IRStmt_Exits and translation of this superblock must stop.
   Returns True if the instruction was decoded, False otherwise.  */
9955 static Bool dis_int_ldst_str ( UInt prefix, UInt theInstr, /*OUT*/Bool* stopHere )
9957 /* X-Form */
9958 UChar opc1 = ifieldOPC(theInstr);
9959 UChar rD_addr = ifieldRegDS(theInstr);
9960 UChar rS_addr = rD_addr;
9961 UChar rA_addr = ifieldRegA(theInstr);
9962 UChar rB_addr = ifieldRegB(theInstr);
9963 UChar NumBytes = rB_addr;
9964 UInt opc2 = ifieldOPClo10(theInstr);
9965 UChar b0 = ifieldBIT0(theInstr);
9967 IRType ty = mode64 ? Ity_I64 : Ity_I32;
9968 IRTemp t_EA = newTemp(ty);
9969 IRTemp t_nbytes = IRTemp_INVALID;
9971 /* There is no prefixed version of these instructions. */
9972 PREFIX_CHECK
9974 *stopHere = False;
9976 if (opc1 != 0x1F || b0 != 0) {
9977 vex_printf("dis_int_ldst_str(ppc)(opc1)\n");
9978 return False;
9981 switch (opc2) {
9982 case 0x255: // lswi (Load String Word Immediate, PPC32 p455)
9983 /* NB: does not reject the case where RA is in the range of
9984 registers to be loaded. It should. */
9985 DIP("lswi r%u,r%u,%d\n", rD_addr, rA_addr, NumBytes);
9986 assign( t_EA, ea_rAor0(rA_addr) );
9987 if (NumBytes == 8 && !mode64) {
9988 /* Special case hack */
9989 /* rD = Mem[EA]; (rD+1)%32 = Mem[EA+4] */
9990 putIReg( rD_addr,
9991 load(Ity_I32, mkexpr(t_EA)) );
9992 putIReg( (rD_addr+1) % 32,
9993 load(Ity_I32,
9994 binop(Iop_Add32, mkexpr(t_EA), mkU32(4))) );
9995 } else {
9996 t_nbytes = newTemp(Ity_I32);
9997 assign( t_nbytes, mkU32(NumBytes==0 ? 32 : NumBytes) );
9998 generate_lsw_sequence( t_nbytes, t_EA, rD_addr, 32 );
9999 *stopHere = True;
10001 return True;
10003 case 0x215: // lswx (Load String Word Indexed, PPC32 p456)
10004 /* NB: does not reject the case where RA is in the range of
10005 registers to be loaded. It should. Although considering
10006 that that can only be detected at run time, it's not easy to
10007 do so. */
10008 if (rD_addr == rA_addr || rD_addr == rB_addr)
10009 return False;
10010 if (rD_addr == 0 && rA_addr == 0)
10011 return False;
10012 DIP("lswx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
10013 t_nbytes = newTemp(Ity_I32);
10014 assign( t_EA, ea_rAor0_idxd(rA_addr,rB_addr) );
/* Byte count comes from XER[BC] at run time. */
10015 assign( t_nbytes, unop( Iop_8Uto32, getXER_BC() ) );
10016 generate_lsw_sequence( t_nbytes, t_EA, rD_addr, 128 );
10017 *stopHere = True;
10018 return True;
10020 case 0x2D5: // stswi (Store String Word Immediate, PPC32 p528)
10021 DIP("stswi r%u,r%u,%d\n", rS_addr, rA_addr, NumBytes);
10022 assign( t_EA, ea_rAor0(rA_addr) );
10023 if (NumBytes == 8 && !mode64) {
10024 /* Special case hack */
10025 /* Mem[EA] = rD; Mem[EA+4] = (rD+1)%32 */
10026 store( mkexpr(t_EA),
10027 getIReg(rD_addr) );
10028 store( binop(Iop_Add32, mkexpr(t_EA), mkU32(4)),
10029 getIReg((rD_addr+1) % 32) );
10030 } else {
10031 t_nbytes = newTemp(Ity_I32);
10032 assign( t_nbytes, mkU32(NumBytes==0 ? 32 : NumBytes) );
10033 generate_stsw_sequence( t_nbytes, t_EA, rD_addr, 32 );
10034 *stopHere = True;
10036 return True;
10038 case 0x295: // stswx (Store String Word Indexed, PPC32 p529)
10039 DIP("stswx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
10040 t_nbytes = newTemp(Ity_I32);
10041 assign( t_EA, ea_rAor0_idxd(rA_addr,rB_addr) );
10042 assign( t_nbytes, unop( Iop_8Uto32, getXER_BC() ) );
10043 generate_stsw_sequence( t_nbytes, t_EA, rS_addr, 128 );
10044 *stopHere = True;
10045 return True;
10047 default:
10048 vex_printf("dis_int_ldst_str(ppc)(opc2)\n");
10049 return False;
10051 return True;
10055 /* ------------------------------------------------------------------
10056 Integer Branch Instructions
10057 ------------------------------------------------------------------ */
10060 Branch helper function
10061 ok = BO[2] | ((CTR[0] != 0) ^ BO[1])
10062 Returns an I32 which is 0x00000000 if the ctr condition failed
10063 and 0xFFFFFFFF otherwise.
10065 static IRExpr* /* :: Ity_I32 */ branch_ctr_ok( UInt BO )
10067 IRType ty = mode64 ? Ity_I64 : Ity_I32;
10068 IRTemp ok = newTemp(Ity_I32);
10070 if ((BO >> 2) & 1) { // independent of ctr
10071 assign( ok, mkU32(0xFFFFFFFF) );
10072 } else {
10073 if ((BO >> 1) & 1) { // ctr == 0 ?
10074 assign( ok, unop( Iop_1Sto32,
10075 binop( mkSzOp(ty, Iop_CmpEQ8),
10076 getGST( PPC_GST_CTR ),
10077 mkSzImm(ty,0))) );
10078 } else { // ctr != 0 ?
10079 assign( ok, unop( Iop_1Sto32,
10080 binop( mkSzOp(ty, Iop_CmpNE8),
10081 getGST( PPC_GST_CTR ),
10082 mkSzImm(ty,0))) );
10085 return mkexpr(ok);
10090 Branch helper function cond_ok = BO[4] | (CR[BI] == BO[3])
10091 Returns an I32 which is either 0 if the condition failed or
10092 some arbitrary nonzero value otherwise. */
10094 static IRExpr* /* :: Ity_I32 */ branch_cond_ok( UInt BO, UInt BI )
10096 Int where;
10097 IRTemp res = newTemp(Ity_I32);
10098 IRTemp cr_bi = newTemp(Ity_I32);
10100 if ((BO >> 4) & 1) {
10101 assign( res, mkU32(1) );
10102 } else {
10103 // ok = (CR[BI] == BO[3]) Note, the following relies on
10104 // getCRbit_anywhere returning a value which
10105 // is either zero or has exactly 1 bit set.
10106 assign( cr_bi, getCRbit_anywhere( BI, &where ) );
10108 if ((BO >> 3) & 1) {
10109 /* We can use cr_bi as-is. */
10110 assign( res, mkexpr(cr_bi) );
10111 } else {
10112 /* We have to invert the sense of the information held in
10113 cr_bi. For that we need to know which bit
10114 getCRbit_anywhere regards as significant. */
10115 assign( res, binop(Iop_Xor32, mkexpr(cr_bi),
10116 mkU32(1<<where)) );
10119 return mkexpr(res);
10124 Integer Branch Instructions
/* Disassemble the branch instructions: b/bl/ba/bla (opc1 0x12),
   bc and variants (opc1 0x10), and bcctr/bclr under opc1 0x13.
   Updates dres to stop translation at the branch.  Returns True if
   the instruction was decoded, False otherwise.  */
10126 static Bool dis_branch ( UInt prefix, UInt theInstr,
10127 const VexAbiInfo* vbi,
10128 /*OUT*/DisResult* dres )
10130 UChar opc1 = ifieldOPC(theInstr);
10131 UChar BO = ifieldRegDS(theInstr);
10132 UChar BI = ifieldRegA(theInstr);
10133 UInt BD_u16 = ifieldUIMM16(theInstr) & 0xFFFFFFFC; /* mask off */
10134 UChar b11to15 = ifieldRegB(theInstr);
10135 UInt opc2 = ifieldOPClo10(theInstr);
10136 UInt LI_u26 = ifieldUIMM26(theInstr) & 0xFFFFFFFC; /* mask off */
10137 UChar flag_AA = ifieldBIT1(theInstr);
10138 UChar flag_LK = ifieldBIT0(theInstr);
10140 IRType ty = mode64 ? Ity_I64 : Ity_I32;
10141 Addr64 tgt = 0;
10142 UInt BD = extend_s_16to32(BD_u16);
10143 IRTemp do_branch = newTemp(Ity_I32);
10144 IRTemp ctr_ok = newTemp(Ity_I32);
10145 IRTemp cond_ok = newTemp(Ity_I32);
10146 IRExpr* e_nia = mkSzImm(ty, nextInsnAddr());
10147 IRConst* c_nia = mkSzConst(ty, nextInsnAddr());
10148 IRTemp lr_old = newTemp(ty);
10150 /* There is no prefixed version of these instructions. */
10151 PREFIX_CHECK
10153 /* Hack to pass through code that just wants to read the PC */
10154 if (theInstr == 0x429F0005) {
10155 DIP("bcl 0x%x, 0x%x (a.k.a mr lr,cia+4)\n", BO, BI);
10156 putGST( PPC_GST_LR, e_nia );
10157 return True;
10160 /* The default what-next. Individual cases can override it. */
10161 dres->whatNext = Dis_StopHere;
10162 vassert(dres->jk_StopHere == Ijk_INVALID);
10164 switch (opc1) {
10165 case 0x12: // b (Branch, PPC32 p360)
10166 if (flag_AA) {
10167 tgt = mkSzAddr( ty, extend_s_26to64(LI_u26) );
10168 } else {
10169 tgt = mkSzAddr( ty, guest_CIA_curr_instr +
10170 (Long)extend_s_26to64(LI_u26) );
10172 if (mode64) {
10173 DIP("b%s%s 0x%llx\n",
10174 flag_LK ? "l" : "", flag_AA ? "a" : "", tgt);
10175 } else {
10176 DIP("b%s%s 0x%x\n",
10177 flag_LK ? "l" : "", flag_AA ? "a" : "", (Addr32)tgt);
10180 if (flag_LK) {
10181 putGST( PPC_GST_LR, e_nia );
10182 if (vbi->guest_ppc_zap_RZ_at_bl
10183 && vbi->guest_ppc_zap_RZ_at_bl( (ULong)tgt) ) {
10184 IRTemp t_tgt = newTemp(ty);
10185 assign(t_tgt, mode64 ? mkU64(tgt) : mkU32(tgt) );
10186 make_redzone_AbiHint( vbi, t_tgt,
10187 "branch-and-link (unconditional call)" );
10191 dres->jk_StopHere = flag_LK ? Ijk_Call : Ijk_Boring; ;
10192 putGST( PPC_GST_CIA, mkSzImm(ty, tgt) );
10193 break;
10195 case 0x10: // bc (Branch Conditional, PPC32 p361)
10196 DIP("bc%s%s 0x%x, 0x%x, 0x%x\n",
10197 flag_LK ? "l" : "", flag_AA ? "a" : "", BO, BI, BD);
/* BO[2] clear: decrement CTR before evaluating the branch test. */
10199 if (!(BO & 0x4)) {
10200 putGST( PPC_GST_CTR,
10201 binop(mkSzOp(ty, Iop_Sub8),
10202 getGST( PPC_GST_CTR ), mkSzImm(ty, 1)) );
10205 /* This is a bit subtle. ctr_ok is either all 0s or all 1s.
10206 cond_ok is either zero or nonzero, since that's the cheapest
10207 way to compute it. Anding them together gives a value which
10208 is either zero or non zero and so that's what we must test
10209 for in the IRStmt_Exit. */
10210 assign( ctr_ok, branch_ctr_ok( BO ) );
10211 assign( cond_ok, branch_cond_ok( BO, BI ) );
10212 assign( do_branch,
10213 binop(Iop_And32, mkexpr(cond_ok), mkexpr(ctr_ok)) );
10215 if (flag_AA) {
10216 tgt = mkSzAddr(ty, extend_s_16to64(BD_u16));
10217 } else {
10218 tgt = mkSzAddr(ty, guest_CIA_curr_instr +
10219 (Long)extend_s_16to64(BD_u16));
10221 if (flag_LK)
10222 putGST( PPC_GST_LR, e_nia );
10224 stmt( IRStmt_Exit(
10225 binop(Iop_CmpNE32, mkexpr(do_branch), mkU32(0)),
10226 flag_LK ? Ijk_Call : Ijk_Boring,
10227 mkSzConst(ty, tgt), OFFB_CIA ) );
10229 dres->jk_StopHere = Ijk_Boring;
10230 putGST( PPC_GST_CIA, e_nia );
10231 break;
10233 case 0x13:
10234 /* For bclr and bcctr, it appears that the lowest two bits of
10235 b11to15 are a branch hint, and so we only need to ensure it's
10236 of the form 000XX. */
10237 if ((b11to15 & ~3) != 0) {
10238 vex_printf("dis_int_branch(ppc)(0x13,b11to15)(%d)\n", b11to15);
10239 return False;
10242 switch (opc2) {
10243 case 0x210: // bcctr (Branch Cond. to Count Register, PPC32 p363)
10244 if ((BO & 0x4) == 0) { // "decr and test CTR" option invalid
10245 vex_printf("dis_int_branch(ppc)(bcctr,BO)\n");
10246 return False;
10248 DIP("bcctr%s 0x%x, 0x%x\n", flag_LK ? "l" : "", BO, BI);
10250 assign( cond_ok, branch_cond_ok( BO, BI ) );
10252 /* FIXME: this is confusing. lr_old holds the old value
10253 of ctr, not lr :-) */
10254 assign( lr_old, addr_align( getGST( PPC_GST_CTR ), 4 ));
10256 if (flag_LK)
10257 putGST( PPC_GST_LR, e_nia );
10259 stmt( IRStmt_Exit(
10260 binop(Iop_CmpEQ32, mkexpr(cond_ok), mkU32(0)),
10261 Ijk_Boring,
10262 c_nia, OFFB_CIA ));
10264 if (flag_LK && vbi->guest_ppc_zap_RZ_at_bl) {
10265 make_redzone_AbiHint( vbi, lr_old,
10266 "b-ctr-l (indirect call)" );
10269 dres->jk_StopHere = flag_LK ? Ijk_Call : Ijk_Boring;;
10270 putGST( PPC_GST_CIA, mkexpr(lr_old) );
10271 break;
10273 case 0x010: { // bclr (Branch Cond. to Link Register, PPC32 p365)
10274 Bool vanilla_return = False;
10275 if ((BO & 0x14 /* 1z1zz */) == 0x14 && flag_LK == 0) {
10276 DIP("blr\n");
10277 vanilla_return = True;
10278 } else {
10279 DIP("bclr%s 0x%x, 0x%x\n", flag_LK ? "l" : "", BO, BI);
10282 if (!(BO & 0x4)) {
10283 putGST( PPC_GST_CTR,
10284 binop(mkSzOp(ty, Iop_Sub8),
10285 getGST( PPC_GST_CTR ), mkSzImm(ty, 1)) );
10288 /* See comments above for 'bc' about this */
10289 assign( ctr_ok, branch_ctr_ok( BO ) );
10290 assign( cond_ok, branch_cond_ok( BO, BI ) );
10291 assign( do_branch,
10292 binop(Iop_And32, mkexpr(cond_ok), mkexpr(ctr_ok)) );
10294 assign( lr_old, addr_align( getGST( PPC_GST_LR ), 4 ));
10296 if (flag_LK)
10297 putGST( PPC_GST_LR, e_nia );
10299 stmt( IRStmt_Exit(
10300 binop(Iop_CmpEQ32, mkexpr(do_branch), mkU32(0)),
10301 Ijk_Boring,
10302 c_nia, OFFB_CIA ));
10304 if (vanilla_return && vbi->guest_ppc_zap_RZ_at_blr) {
10305 make_redzone_AbiHint( vbi, lr_old,
10306 "branch-to-lr (unconditional return)" );
10309 /* blrl is pretty strange; it's like a return that sets the
10310 return address of its caller to the insn following this
10311 one. Mark it as a return. */
10312 dres->jk_StopHere = Ijk_Ret; /* was flag_LK ? Ijk_Call : Ijk_Ret; */
10313 putGST( PPC_GST_CIA, mkexpr(lr_old) );
10314 break;
10316 default:
10317 vex_printf("dis_int_branch(ppc)(opc2)\n");
10318 return False;
10320 break;
10322 default:
10323 vex_printf("dis_int_branch(ppc)(opc1)\n");
10324 return False;
10327 return True;
10331 * PC relative instruction
/* Disassemble the PC-relative instruction addpcis (DX-form):
   rT = NIA + (sign-extended D << 16).  The 16-bit D immediate is
   reassembled from the three split fields d0/d1/d2.  Returns True
   if decoded, False otherwise.  */
10333 static Bool dis_pc_relative ( UInt prefix, UInt theInstr )
10335 /* DX-Form */
10336 UChar opc1 = ifieldOPC(theInstr);
10337 unsigned long long D;
10338 UInt d0 = IFIELD(theInstr, 6, 10);
10339 UInt d1 = IFIELD(theInstr, 16, 5);
10340 UInt d2 = IFIELD(theInstr, 0, 1);
10341 UChar rT_addr = ifieldRegDS(theInstr);
10342 UInt opc2 = ifieldOPClo5(theInstr);
10343 IRType ty = mode64 ? Ity_I64 : Ity_I32;
10345 /* There is no prefixed version of these instructions. */
10346 PREFIX_CHECK
10348 if ( opc1 != 0x13) {
10349 vex_printf("dis_pc_relative(ppc)(opc1)\n");
10350 return False;
10353 switch (opc2) {
10354 case 0x002: // addpcis (Add PC immediate Shifted DX-form)
10356 IRExpr* nia = mkSzImm(ty, nextInsnAddr());
10357 IRExpr* result;
10359 D = (d0 << 6) | (d1 << 1) | d2;
10360 DIP("addpcis %u,%llu\n", rT_addr, D);
10362 if ( (D & 0x8000) == 0x8000 )
10363 D = 0xFFFFFFFFFFFF0000UL | D; // sign extend
10365 if ( ty == Ity_I32 ) {
10366 result = binop( Iop_Add32, nia, mkU32( D << 16 ) );
10367 } else {
10368 vassert( ty == Ity_I64 );
10369 result = binop( Iop_Add64, nia, mkU64( D << 16 ) );
10372 putIReg( rT_addr, result);
10374 break;
10376 default:
10377 vex_printf("dis_pc_relative(ppc)(opc2)\n");
10378 return False;
10381 return True;
10385 Condition Register Logical Instructions
/* Decode the XL-form condition-register logical instructions (mcrf,
   crand, crandc, creqv, crnand, crnor, cror, crorc, crxor).  mcrf
   moves a whole 4-bit CR field; the others combine two CR bits into a
   destination CR bit.  Returns False if the instruction cannot be
   decoded. */
10387 static Bool dis_cond_logic ( UInt prefix, UInt theInstr )
10389 /* XL-Form */
10390 UChar opc1 = ifieldOPC(theInstr);
10391 UChar crbD_addr = ifieldRegDS(theInstr);
10392 UChar crfD_addr = toUChar( IFIELD(theInstr, 23, 3) );
10393 UChar crbA_addr = ifieldRegA(theInstr);
10394 UChar crfS_addr = toUChar( IFIELD(theInstr, 18, 3) );
10395 UChar crbB_addr = ifieldRegB(theInstr);
10396 UInt opc2 = ifieldOPClo10(theInstr);
10397 UChar b0 = ifieldBIT0(theInstr);
10399 IRTemp crbD = newTemp(Ity_I32);
10400 IRTemp crbA = newTemp(Ity_I32);
10401 IRTemp crbB = newTemp(Ity_I32);
10403 /* There is no prefixed version of these instructions. */
10404 PREFIX_CHECK
10406 if (opc1 != 19 || b0 != 0) {
10407 vex_printf("dis_cond_logic(ppc)(opc1)\n");
10408 return False;
10411 if (opc2 == 0) { // mcrf (Move Cond Reg Field, PPC32 p464)
/* For mcrf the low two bits of the "bit" fields must be zero, since
   they actually name 4-bit CR fields, not individual bits. */
10412 if (((crbD_addr & 0x3) != 0) ||
10413 ((crbA_addr & 0x3) != 0) || (crbB_addr != 0)) {
10414 vex_printf("dis_cond_logic(ppc)(crbD|crbA|crbB != 0)\n");
10415 return False;
10417 DIP("mcrf cr%u,cr%u\n", crfD_addr, crfS_addr);
10418 putCR0( crfD_addr, getCR0( crfS_addr) );
10419 putCR321( crfD_addr, getCR321(crfS_addr) );
10420 } else {
10421 assign( crbA, getCRbit(crbA_addr) );
/* If both source bits are the same, reuse the temp rather than
   reading the CR bit twice. */
10422 if (crbA_addr == crbB_addr)
10423 crbB = crbA;
10424 else
10425 assign( crbB, getCRbit(crbB_addr) );
10427 switch (opc2) {
10428 case 0x101: // crand (Cond Reg AND, PPC32 p372)
10429 DIP("crand crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
10430 assign( crbD, binop(Iop_And32, mkexpr(crbA), mkexpr(crbB)) );
10431 break;
10432 case 0x081: // crandc (Cond Reg AND w. Complement, PPC32 p373)
10433 DIP("crandc crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
10434 assign( crbD, binop(Iop_And32,
10435 mkexpr(crbA),
10436 unop(Iop_Not32, mkexpr(crbB))) );
10437 break;
10438 case 0x121: // creqv (Cond Reg Equivalent, PPC32 p374)
10439 DIP("creqv crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
10440 assign( crbD, unop(Iop_Not32,
10441 binop(Iop_Xor32, mkexpr(crbA), mkexpr(crbB))) );
10442 break;
10443 case 0x0E1: // crnand (Cond Reg NAND, PPC32 p375)
10444 DIP("crnand crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
10445 assign( crbD, unop(Iop_Not32,
10446 binop(Iop_And32, mkexpr(crbA), mkexpr(crbB))) );
10447 break;
10448 case 0x021: // crnor (Cond Reg NOR, PPC32 p376)
10449 DIP("crnor crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
10450 assign( crbD, unop(Iop_Not32,
10451 binop(Iop_Or32, mkexpr(crbA), mkexpr(crbB))) );
10452 break;
10453 case 0x1C1: // cror (Cond Reg OR, PPC32 p377)
10454 DIP("cror crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
10455 assign( crbD, binop(Iop_Or32, mkexpr(crbA), mkexpr(crbB)) );
10456 break;
10457 case 0x1A1: // crorc (Cond Reg OR w. Complement, PPC32 p378)
10458 DIP("crorc crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
10459 assign( crbD, binop(Iop_Or32,
10460 mkexpr(crbA),
10461 unop(Iop_Not32, mkexpr(crbB))) );
10462 break;
10463 case 0x0C1: // crxor (Cond Reg XOR, PPC32 p379)
10464 DIP("crxor crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
10465 assign( crbD, binop(Iop_Xor32, mkexpr(crbA), mkexpr(crbB)) );
10466 break;
10467 default:
10468 vex_printf("dis_cond_logic(ppc)(opc2)\n");
10469 return False;
10472 putCRbit( crbD_addr, mkexpr(crbD) );
10474 return True;
/* Decode the ISA 3.1 set-boolean instructions setbc, setbcr, setnbc
   and setnbcr: set register RT to 0, 1 or -1 depending on CR bit BI.
   Returns False if the instruction cannot be decoded. */
10477 static Bool dis_set_bool_condition ( UInt prefixInstr, UInt theInstr )
10479 UInt opc2 = ifieldOPClo10(theInstr);
10480 UChar BI = toUChar( IFIELD( theInstr, 16, 5 ) );
10481 UInt rT_addr = ifieldRegDS( theInstr );
10482 IRType ty = mode64 ? Ity_I64 : Ity_I32;
10483 IROp Iop_1XtoX;
10485 /* There is no prefixed version of these instructions. */
10486 vassert( !prefix_instruction( prefixInstr ) );
10488 switch (opc2) {
10489 case 0x180: // setbc
10490 /* If bit BI of the CR contains a 1, register RT is set to 1.
10491 Otherwise, register RT is set to 0. */
10492 DIP(" setbc %u,%u\n", rT_addr, BI);
10493 Iop_1XtoX = mode64 ? Iop_1Uto64 : Iop_1Uto32;
10494 putIReg( rT_addr, unop( Iop_1XtoX,
10495 binop( Iop_CmpEQ32,
10496 getCRbit( BI ),
10497 mkU32( 1 ) ) ) );
10498 break;
10500 case 0x1A0: // setbcr
10501 /* If bit BI of the CR contains a 1, register RT is set to 0.
10502 Otherwise, register RT is set to 1. */
10503 DIP(" setbcr %u,%u\n", rT_addr, BI);
10504 Iop_1XtoX = mode64 ? Iop_1Uto64 : Iop_1Uto32;
10505 putIReg( rT_addr, unop( Iop_1XtoX,
10506 binop( Iop_CmpNE32,
10507 getCRbit( BI ),
10508 mkU32( 1 ) ) ) );
10509 break;
10511 case 0x1C0: // setnbc
10512 /* If bit BI of the CR contains a 1, register RT is set to -1.
10513 Otherwise, register RT is set to 0. */
10514 DIP(" setnbc %u,%u\n", rT_addr, BI);
10515 Iop_1XtoX = mode64 ? Iop_1Sto64 : Iop_1Sto32;
/* The AND with -1 is a no-op; the 1Sto* sign-extension alone yields
   0 or -1. */
10516 putIReg( rT_addr, binop( mkSzOp(ty, Iop_And8),
10517 mkSzImm( ty, -1 ),
10518 unop( Iop_1XtoX,
10519 binop( Iop_CmpEQ32,
10520 getCRbit( BI ),
10521 mkU32( 1 ) ) ) ) );
10522 break;
10524 case 0x1E0: // setnbcr
10525 /* If bit BI of the CR contains a 0, register RT is set to -1.
10526 Otherwise, register RT is set to 0. */
10527 DIP(" setnbcr %u,%u\n", rT_addr, BI);
10528 Iop_1XtoX = mode64 ? Iop_1Sto64 : Iop_1Sto32;
10529 putIReg( rT_addr, binop( mkSzOp(ty, Iop_And8),
10530 mkSzImm( ty, -1 ),
10531 unop( Iop_1XtoX,
10532 binop( Iop_CmpNE32,
10533 getCRbit( BI ),
10534 mkU32( 1 ) ) ) ) );
10535 break;
10537 default:
10538 vex_printf("dis_set_bool_condition(ppc)(opc2)\n");
10539 return False;
10542 return True;
10546 Trap instructions
10549 /* Do the code generation for a trap. Returned Bool is true iff
10550 this is an unconditional trap. If the two arg IRExpr*s are
10551 Ity_I32s then the comparison is 32-bit. If they are Ity_I64s
10552 then they are 64-bit, and we must be disassembling 64-bit
10553 instructions. */
10554 static Bool do_trap ( UChar TO,
10555 IRExpr* argL0, IRExpr* argR0, Addr64 cia )
10557 IRTemp argL, argR;
10558 IRExpr *argLe, *argRe, *cond, *tmp;
/* The comparison width is taken from the type of the left arg; both
   args must have the same type (asserted below). */
10560 Bool is32bit = typeOfIRExpr(irsb->tyenv, argL0 ) == Ity_I32;
10562 IROp opAND = is32bit ? Iop_And32 : Iop_And64;
10563 IROp opOR = is32bit ? Iop_Or32 : Iop_Or64;
10564 IROp opCMPORDS = is32bit ? Iop_CmpORD32S : Iop_CmpORD64S;
10565 IROp opCMPORDU = is32bit ? Iop_CmpORD32U : Iop_CmpORD64U;
10566 IROp opCMPNE = is32bit ? Iop_CmpNE32 : Iop_CmpNE64;
10567 IROp opCMPEQ = is32bit ? Iop_CmpEQ32 : Iop_CmpEQ64;
10568 IRExpr* const0 = is32bit ? mkU32(0) : mkU64(0);
10569 IRExpr* const2 = is32bit ? mkU32(2) : mkU64(2);
10570 IRExpr* const4 = is32bit ? mkU32(4) : mkU64(4);
10571 IRExpr* const8 = is32bit ? mkU32(8) : mkU64(8);
10573 const UChar b11100 = 0x1C;
10574 const UChar b00111 = 0x07;
10576 if (is32bit) {
10577 vassert( typeOfIRExpr(irsb->tyenv, argL0) == Ity_I32 );
10578 vassert( typeOfIRExpr(irsb->tyenv, argR0) == Ity_I32 );
10579 } else {
10580 vassert( typeOfIRExpr(irsb->tyenv, argL0) == Ity_I64 );
10581 vassert( typeOfIRExpr(irsb->tyenv, argR0) == Ity_I64 );
10582 vassert( mode64 );
/* TO = 0b11111, or all three of the signed or all two of the
   unsigned conditions set, means "trap always". */
10585 if ((TO & b11100) == b11100 || (TO & b00111) == b00111) {
10586 /* Unconditional trap. Just do the exit without
10587 testing the arguments. */
10588 stmt( IRStmt_Exit(
10589 binop(opCMPEQ, const0, const0),
10590 Ijk_SigTRAP,
10591 mode64 ? IRConst_U64(cia) : IRConst_U32((UInt)cia),
10592 OFFB_CIA
10594 return True; /* unconditional trap */
10597 if (is32bit) {
10598 argL = newTemp(Ity_I32);
10599 argR = newTemp(Ity_I32);
10600 } else {
10601 argL = newTemp(Ity_I64);
10602 argR = newTemp(Ity_I64);
10605 assign( argL, argL0 );
10606 assign( argR, argR0 );
10608 argLe = mkexpr(argL);
10609 argRe = mkexpr(argR);
/* Build the trap condition by OR-ing in one CmpORD-derived test per
   set bit of TO.  CmpORD returns 8 for <, 4 for >, 2 for ==. */
10611 cond = const0;
10612 if (TO & 16) { // L <s R
10613 tmp = binop(opAND, binop(opCMPORDS, argLe, argRe), const8);
10614 cond = binop(opOR, tmp, cond);
10616 if (TO & 8) { // L >s R
10617 tmp = binop(opAND, binop(opCMPORDS, argLe, argRe), const4);
10618 cond = binop(opOR, tmp, cond);
10620 if (TO & 4) { // L == R
10621 tmp = binop(opAND, binop(opCMPORDS, argLe, argRe), const2);
10622 cond = binop(opOR, tmp, cond);
10624 if (TO & 2) { // L <u R
10625 tmp = binop(opAND, binop(opCMPORDU, argLe, argRe), const8);
10626 cond = binop(opOR, tmp, cond);
10628 if (TO & 1) { // L >u R
10629 tmp = binop(opAND, binop(opCMPORDU, argLe, argRe), const4);
10630 cond = binop(opOR, tmp, cond);
10632 stmt( IRStmt_Exit(
10633 binop(opCMPNE, cond, const0),
10634 Ijk_SigTRAP,
10635 mode64 ? IRConst_U64(cia) : IRConst_U32((UInt)cia),
10636 OFFB_CIA
10638 return False; /* not an unconditional trap */
/* Decode the D-form trap-immediate instructions twi and tdi.  If the
   trap is unconditional, stop decoding at this instruction.  Returns
   False if the instruction cannot be decoded. */
10641 static Bool dis_trapi ( UInt prefix, UInt theInstr,
10642 /*OUT*/DisResult* dres )
10644 /* D-Form */
10645 UChar opc1 = ifieldOPC(theInstr);
10646 UChar TO = ifieldRegDS(theInstr);
10647 UChar rA_addr = ifieldRegA(theInstr);
10648 UInt uimm16 = ifieldUIMM16(theInstr);
10649 ULong simm16 = extend_s_16to64(uimm16);
10650 Addr64 cia = guest_CIA_curr_instr;
10651 IRType ty = mode64 ? Ity_I64 : Ity_I32;
10652 Bool uncond = False;
10654 /* There is no prefixed version of these instructions. */
10655 PREFIX_CHECK
10657 switch (opc1) {
10658 case 0x03: // twi (Trap Word Immediate, PPC32 p548)
/* In 64-bit mode, twi still compares 32-bit values, so narrow rA. */
10659 uncond = do_trap( TO,
10660 mode64 ? unop(Iop_64to32, getIReg(rA_addr))
10661 : getIReg(rA_addr),
10662 mkU32( (UInt)simm16 ),
10663 cia );
10664 if (TO == 4) {
10665 DIP("tweqi r%u,%d\n", rA_addr, (Int)simm16);
10666 } else {
10667 DIP("tw%di r%u,%d\n", TO, rA_addr, (Int)simm16);
10669 break;
10670 case 0x02: // tdi
10671 if (!mode64)
10672 return False;
10673 uncond = do_trap( TO, getIReg(rA_addr), mkU64( (ULong)simm16 ), cia );
10674 if (TO == 4) {
10675 DIP("tdeqi r%u,%d\n", rA_addr, (Int)simm16);
10676 } else {
10677 DIP("td%di r%u,%d\n", TO, rA_addr, (Int)simm16);
10679 break;
10680 default:
10681 return False;
10684 if (uncond) {
10685 /* If the trap shows signs of being unconditional, don't
10686 continue decoding past it. */
10687 putGST( PPC_GST_CIA, mkSzImm( ty, nextInsnAddr() ));
10688 dres->jk_StopHere = Ijk_Boring;
10689 dres->whatNext = Dis_StopHere;
10692 return True;
/* Decode the X-form register trap instructions tw and td.  If the
   trap is unconditional, stop decoding at this instruction.  Returns
   False if the instruction cannot be decoded. */
10695 static Bool dis_trap ( UInt prefix, UInt theInstr,
10696 /*OUT*/DisResult* dres )
10698 /* X-Form */
10699 UInt opc2 = ifieldOPClo10(theInstr);
10700 UChar TO = ifieldRegDS(theInstr);
10701 UChar rA_addr = ifieldRegA(theInstr);
10702 UChar rB_addr = ifieldRegB(theInstr);
10703 Addr64 cia = guest_CIA_curr_instr;
10704 IRType ty = mode64 ? Ity_I64 : Ity_I32;
10705 Bool uncond = False;
10707 /* There is no prefixed version of these instructions. */
10708 PREFIX_CHECK
10710 if (ifieldBIT0(theInstr) != 0)
10711 return False;
10713 switch (opc2) {
10714 case 0x004: // tw (Trap Word, PPC64 p540)
/* In 64-bit mode, tw still compares 32-bit values, so narrow both
   register operands. */
10715 uncond = do_trap( TO,
10716 mode64 ? unop(Iop_64to32, getIReg(rA_addr))
10717 : getIReg(rA_addr),
10718 mode64 ? unop(Iop_64to32, getIReg(rB_addr))
10719 : getIReg(rB_addr),
10720 cia );
10721 if (TO == 4) {
10722 DIP("tweq r%u,r%u\n", rA_addr, rB_addr);
10723 } else {
10724 DIP("tw%d r%u,r%u\n", TO, rA_addr, rB_addr);
10726 break;
10727 case 0x044: // td (Trap Doubleword, PPC64 p534)
10728 if (!mode64)
10729 return False;
10730 uncond = do_trap( TO, getIReg(rA_addr), getIReg(rB_addr), cia );
10731 if (TO == 4) {
10732 DIP("tdeq r%u,r%u\n", rA_addr, rB_addr);
10733 } else {
10734 DIP("td%d r%u,r%u\n", TO, rA_addr, rB_addr);
10736 break;
10737 default:
10738 return False;
10741 if (uncond) {
10742 /* If the trap shows signs of being unconditional, don't
10743 continue decoding past it. */
10744 putGST( PPC_GST_CIA, mkSzImm( ty, nextInsnAddr() ));
10745 dres->jk_StopHere = Ijk_Boring;
10746 dres->whatNext = Dis_StopHere;
10749 return True;
10754 System Linkage Instructions
/* Decode the system-call instructions sc and scv.  scv is only
   accepted when allow_scv is set (HWCAPS2-gated).  Marks the block as
   ending in a syscall.  Returns False if the instruction cannot be
   decoded. */
10757 static Bool dis_syslink ( UInt prefix, UInt theInstr,
10758 const VexAbiInfo* abiinfo, DisResult* dres,
10759 Bool allow_scv, Bool sigill_diag )
10761 IRType ty = mode64 ? Ity_I64 : Ity_I32;
10763 /* There is no prefixed version of these instructions. */
10764 PREFIX_CHECK
10766 if ((theInstr != 0x44000002) // sc
10767 && (theInstr != 0x44000001)) { // scv
10768 vex_printf("dis_syslink(ppc)(theInstr)\n");
10769 return False;
10772 /* The PPC syscall uses guest_GPR9 to pass a flag to indicate which
10773 system call instruction is to be used. Arg7 = SC_FLAG for the sc
10774 instruction; Arg7 = SCV_FLAG for the scv instruction. */
10775 if (theInstr == 0x44000002) {
10776 // sc (System Call, PPC32 p504)
10777 DIP("sc\n");
10778 put_syscall_flag( mkU32(SC_FLAG) );
10779 } else if (theInstr == 0x44000001) {
10780 if (allow_scv) { // scv
10781 DIP("scv\n");
10782 put_syscall_flag( mkU32(SCV_FLAG) );
10783 } else {
10784 if (sigill_diag)
10785 vex_printf("The scv instruction is not supported in this environment per the HWCAPS2 capability flags.\n");
10786 return False;
10788 } else {
10789 /* Unknown instruction */
10790 return False;
10793 /* Copy CIA into the IP_AT_SYSCALL pseudo-register, so that on Darwin
10794 Valgrind can back the guest up to this instruction if it needs
10795 to restart the syscall. */
10796 putGST( PPC_GST_IP_AT_SYSCALL, getGST( PPC_GST_CIA ) );
10798 /* It's important that all ArchRegs carry their up-to-date value
10799 at this point. So we declare an end-of-block here, which
10800 forces any TempRegs caching ArchRegs to be flushed. */
10801 putGST( PPC_GST_CIA, mkSzImm( ty, nextInsnAddr() ));
10803 dres->whatNext = Dis_StopHere;
10804 dres->jk_StopHere = Ijk_Sys_syscall;
10805 return True;
10810 Memory Synchronization Instructions
10812 Note on Reservations:
10813 We rely on the assumption that V will in fact only allow one thread at
10814 once to run. In effect, a thread can make a reservation, but we don't
10815 check any stores it does. Instead, the reservation is cancelled when
10816 the scheduler switches to another thread (run_thread_for_a_while()).
/* Decode the memory-synchronization instructions: isync, eieio/mbar,
   sync/lwsync/ptesync, and the load-reserve / store-conditional
   family (lbarx/lharx/lwarx/ldarx/lqarx, stbcx./sthcx./stwcx./
   stdcx./stqcx.).  Returns False if the instruction cannot be
   decoded. */
10818 static Bool dis_memsync ( UInt prefix, UInt theInstr,
10819 UInt allow_isa_3_0, UInt allow_isa_3_1)
10821 /* X-Form, XL-Form */
10822 UChar opc1 = ifieldOPC(theInstr);
10823 UInt b11to25 = IFIELD(theInstr, 11, 15);
10824 /* The L-field is 2 bits in ISA 3.0 and earlier and 3 bits in ISA 3.1 */
10825 UChar flag_L = IFIELD(theInstr, 21, (allow_isa_3_1 ? 3 : 2));
10826 UInt b11to20 = IFIELD(theInstr, 11, 10);
10827 UInt M0 = IFIELD(theInstr, 11, 5);
10828 UChar rD_addr = ifieldRegDS(theInstr);
10829 UChar rS_addr = rD_addr;
10830 UChar rA_addr = ifieldRegA(theInstr);
10831 UChar rB_addr = ifieldRegB(theInstr);
10832 UInt opc2 = ifieldOPClo10(theInstr);
10833 UChar b0 = ifieldBIT0(theInstr);
10835 IRType ty = mode64 ? Ity_I64 : Ity_I32;
10836 IRTemp EA = newTemp(ty);
10838 /* There is no prefixed version of these instructions. */
10839 PREFIX_CHECK
10841 assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
10843 switch (opc1) {
10844 /* XL-Form */
10845 case 0x13: // isync (Instruction Synchronize, PPC32 p432)
10846 if (opc2 != 0x096) {
10847 vex_printf("dis_memsync(ppc)(0x13,opc2)\n");
10848 return False;
10850 if (b11to25 != 0 || b0 != 0) {
10851 vex_printf("dis_memsync(ppc)(0x13,b11to25|b0)\n");
10852 return False;
10854 DIP("isync\n");
10855 stmt( IRStmt_MBE(Imbe_Fence) );
10856 break;
10858 /* X-Form */
10859 case 0x1F:
10860 switch (opc2) {
10861 case 0x356: // eieio or mbar (Enforce In-Order Exec of I/O, PPC32 p394)
10862 if (M0 == 0) {
10863 if (b11to20 != 0 || b0 != 0) {
10864 vex_printf("dis_memsync(ppc)(eieio,b11to20|b0)\n");
10865 return False;
10867 DIP("eieio\n");
10868 } else {
10869 if (b11to20 != 0 || b0 != 0) {
10870 vex_printf("dis_memsync(ppc)(mbar,b11to20|b0)\n");
10871 return False;
10873 DIP("mbar %u\n", M0);
10875 /* Insert a memory fence, just to be on the safe side. */
10876 stmt( IRStmt_MBE(Imbe_Fence) );
10877 break;
10879 case 0x014: { // lwarx (Load Word and Reserve Indexed, PPC32 p458)
10880 IRTemp res;
10881 /* According to the PowerPC ISA version 2.05, b0 (called EH
10882 in the documentation) is merely a hint bit to the
10883 hardware, I think as to whether or not contention is
10884 likely. So we can just ignore it. */
10885 DIP("lwarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
10887 // trap if misaligned
10888 gen_SIGBUS_if_misaligned( EA, 4 );
10890 // and actually do the load
10891 res = newTemp(Ity_I32);
10892 stmt( stmt_load(res, mkexpr(EA), NULL/*this is a load*/) );
10894 putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(res), False) );
10895 break;
10898 case 0x034: { // lbarx (Load Byte and Reserve Indexed)
10899 IRTemp res;
10900 /* According to the PowerPC ISA version 2.05, b0 (called EH
10901 in the documentation) is merely a hint bit to the
10902 hardware, I think as to whether or not contention is
10903 likely. So we can just ignore it. */
10904 DIP("lbarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
10906 // and actually do the load
10907 res = newTemp(Ity_I8);
10908 stmt( stmt_load(res, mkexpr(EA), NULL/*this is a load*/) );
10910 putIReg( rD_addr, mkWidenFrom8(ty, mkexpr(res), False) );
10911 break;
10914 case 0x074: { // lharx (Load Halfword and Reserve Indexed)
10915 IRTemp res;
10916 /* According to the PowerPC ISA version 2.05, b0 (called EH
10917 in the documentation) is merely a hint bit to the
10918 hardware, I think as to whether or not contention is
10919 likely. So we can just ignore it. */
10920 DIP("lharx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
10922 // trap if misaligned
10923 gen_SIGBUS_if_misaligned( EA, 2 );
10925 // and actually do the load
10926 res = newTemp(Ity_I16);
10927 stmt( stmt_load(res, mkexpr(EA), NULL/*this is a load*/) );
10929 putIReg( rD_addr, mkWidenFrom16(ty, mkexpr(res), False) );
10930 break;
10933 case 0x096: {
10934 // stwcx. (Store Word Conditional Indexed, PPC32 p532)
10935 // Note this has to handle stwcx. in both 32- and 64-bit modes,
10936 // so isn't quite as straightforward as it might otherwise be.
10937 IRTemp rS = newTemp(Ity_I32);
10938 IRTemp resSC;
10939 if (b0 != 1) {
10940 vex_printf("dis_memsync(ppc)(stwcx.,b0)\n");
10941 return False;
10943 DIP("stwcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
10945 // trap if misaligned
10946 gen_SIGBUS_if_misaligned( EA, 4 );
10948 // Get the data to be stored, and narrow to 32 bits if necessary
10949 assign( rS, mkNarrowTo32(ty, getIReg(rS_addr)) );
10951 // Do the store, and get success/failure bit into resSC
10952 resSC = newTemp(Ity_I1);
10953 stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS)) );
10955 // Set CR0[LT GT EQ SO] = 0b000 || XER[SO] on failure
10956 // Set CR0[LT GT EQ SO] = 0b001 || XER[SO] on success
10957 putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
10958 putCR0(0, getXER_SO());
10960 /* Note:
10961 If resaddr != lwarx_resaddr, CR0[EQ] is undefined, and
10962 whether rS is stored is dependent on that value. */
10963 /* So I guess we can just ignore this case? */
10964 break;
10967 case 0x2B6: {
10968 // stbcx. (Store Byte Conditional Indexed)
10969 // Note this has to handle stbcx. in both 32- and 64-bit modes,
10970 // so isn't quite as straightforward as it might otherwise be.
10971 IRTemp rS = newTemp(Ity_I8)\u003b
10972 IRTemp resSC;
10973 if (b0 != 1) {
10974 vex_printf("dis_memsync(ppc)(stbcx.,b0)\n");
10975 return False;
10977 DIP("stbcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
10979 // Get the data to be stored, and narrow to 8 bits if necessary
10980 assign( rS, mkNarrowTo8(ty, getIReg(rS_addr)) );
10982 // Do the store, and get success/failure bit into resSC
10983 resSC = newTemp(Ity_I1);
10984 stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS)) );
10986 // Set CR0[LT GT EQ SO] = 0b000 || XER[SO] on failure
10987 // Set CR0[LT GT EQ SO] = 0b001 || XER[SO] on success
10988 putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
10989 putCR0(0, getXER_SO());
10991 /* Note:
10992 If resaddr != lbarx_resaddr, CR0[EQ] is undefined, and
10993 whether rS is stored is dependent on that value. */
10994 /* So I guess we can just ignore this case? */
10995 break;
10998 case 0x2D6: {
10999 // sthcx. (Store Halfword Conditional Indexed, PPC32 p532)
11000 // Note this has to handle sthcx. in both 32- and 64-bit modes,
11001 // so isn't quite as straightforward as it might otherwise be.
11002 IRTemp rS = newTemp(Ity_I16);
11003 IRTemp resSC;
11004 if (b0 != 1) {
/* NOTE(review): diagnostic string says "stwcx." but this is the
   sthcx. case — confirm and fix the message separately. */
11005 vex_printf("dis_memsync(ppc)(stwcx.,b0)\n");
11006 return False;
11008 DIP("sthcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
11010 // trap if misaligned
11011 gen_SIGBUS_if_misaligned( EA, 2 );
11013 // Get the data to be stored, and narrow to 16 bits if necessary
11014 assign( rS, mkNarrowTo16(ty, getIReg(rS_addr)) );
11016 // Do the store, and get success/failure bit into resSC
11017 resSC = newTemp(Ity_I1);
11018 stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS)) );
11020 // Set CR0[LT GT EQ SO] = 0b000 || XER[SO] on failure
11021 // Set CR0[LT GT EQ SO] = 0b001 || XER[SO] on success
11022 putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
11023 putCR0(0, getXER_SO());
11025 /* Note:
11026 If resaddr != lharx_resaddr, CR0[EQ] is undefined, and
11027 whether rS is stored is dependent on that value. */
11028 /* So I guess we can just ignore this case? */
11029 break;
11032 case 0x256: // sync (Synchronize, PPC32 p543),
11033 // also lwsync (L==1), ptesync (L==2)
11034 /* http://sources.redhat.com/ml/binutils/2000-12/msg00311.html
11036 The PowerPC architecture used in IBM chips has expanded
11037 the sync instruction into two variants: lightweight sync
11038 and heavyweight sync. The original sync instruction is
11039 the new heavyweight sync and lightweight sync is a strict
11040 subset of the heavyweight sync functionality. This allows
11041 the programmer to specify a less expensive operation on
11042 high-end systems when the full sync functionality is not
11043 necessary.
11045 The basic "sync" mnemonic now utilizes an operand. "sync"
11046 without an operand now becomes an extended mnemonic for
11047 heavyweight sync. Processors without the lwsync
11048 instruction will not decode the L field and will perform a
11049 heavyweight sync. Everything is backward compatible.
11051 sync = sync 0
11052 lwsync = sync 1
11053 ptesync = sync 2 ISA 3.0 and newer
11054 persistent heavyweight sync (phsync) = sync 4 ISA 3.1 and newer
11055 persistent lightweight sync (plsync) = sync 5 ISA 3.1 and newer
11057 if (b11to20 != 0 || b0 != 0) {
11058 vex_printf("dis_memsync(ppc)(sync/lwsync,b11to20|b0)\n");
11059 return False;
11062 if (!((flag_L == 0/*sync*/ || flag_L == 1/*lwsync*/)
11063 || (flag_L == 2/*ptesync*/ && allow_isa_3_0 == True)
11064 || ((flag_L == 4/*phsync*/ || flag_L == 5/*plsync*/)
11065 && allow_isa_3_1 == True)))
11067 vex_printf("dis_memsync(ppc)(sync/lwsync,flag_L)\n");
11068 return False;
/* NOTE(review): the DIP below only distinguishes sync/lwsync; the
   ptesync/phsync/plsync variants print as plain "sync". */
11071 DIP("%ssync\n", flag_L == 1 ? "lw" : "");
11072 /* Insert a memory fence. It's sometimes important that these
11073 are carried through to the generated code. */
11074 stmt( IRStmt_MBE(Imbe_Fence) );
11075 break;
11077 /* 64bit Memsync */
11078 case 0x054: { // ldarx (Load DWord and Reserve Indexed, PPC64 p473)
11079 IRTemp res;
11080 /* According to the PowerPC ISA version 2.05, b0 (called EH
11081 in the documentation) is merely a hint bit to the
11082 hardware, I think as to whether or not contention is
11083 likely. So we can just ignore it. */
11084 if (!mode64)
11085 return False;
11086 DIP("ldarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
11088 // trap if misaligned
11089 gen_SIGBUS_if_misaligned( EA, 8 );
11091 // and actually do the load
11092 res = newTemp(Ity_I64);
11093 stmt( stmt_load( res, mkexpr(EA), NULL/*this is a load*/) );
11095 putIReg( rD_addr, mkexpr(res) );
11096 break;
11099 case 0x0D6: { // stdcx. (Store DWord Conditional Indexed, PPC64 p581)
11100 // A marginally simplified version of the stwcx. case
11101 IRTemp rS = newTemp(Ity_I64);
11102 IRTemp resSC;
11103 if (b0 != 1) {
11104 vex_printf("dis_memsync(ppc)(stdcx.,b0)\n");
11105 return False;
11107 if (!mode64)
11108 return False;
11109 DIP("stdcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
11111 // trap if misaligned
11112 gen_SIGBUS_if_misaligned( EA, 8 );
11114 // Get the data to be stored
11115 assign( rS, getIReg(rS_addr) );
11117 // Do the store, and get success/failure bit into resSC
11118 resSC = newTemp(Ity_I1);
11119 stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS)) );
11121 // Set CR0[LT GT EQ SO] = 0b000 || XER[SO] on failure
11122 // Set CR0[LT GT EQ SO] = 0b001 || XER[SO] on success
11123 putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
11124 putCR0(0, getXER_SO());
11126 /* Note:
11127 If resaddr != ldarx_resaddr, CR0[EQ] is undefined, and
11128 whether rS is stored is dependent on that value. */
11129 /* So I guess we can just ignore this case? */
11130 break;
11133 /* 128bit Memsync */
11134 case 0x114: { // lqarx (Load QuadWord and Reserve Indexed)
11135 IRTemp res_hi = newTemp(ty);
11136 IRTemp res_lo = newTemp(ty);
11138 /* According to the PowerPC ISA version 2.07, b0 (called EH
11139 in the documentation) is merely a hint bit to the
11140 hardware, I think as to whether or not contention is
11141 likely. So we can just ignore it. */
11142 DIP("lqarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
11144 // trap if misaligned
11145 gen_SIGBUS_if_misaligned( EA, 16 );
11147 // and actually do the load
/* The quadword occupies the even/odd register pair rD:rD+1; the
   halves are loaded in host-endian order. */
11148 if (mode64) {
11149 if (host_endness == VexEndnessBE) {
11150 stmt( stmt_load( res_hi,
11151 mkexpr(EA), NULL/*this is a load*/) );
11152 stmt( stmt_load( res_lo,
11153 binop(Iop_Add64, mkexpr(EA), mkU64(8) ),
11154 NULL/*this is a load*/) );
11155 } else {
11156 stmt( stmt_load( res_lo,
11157 mkexpr(EA), NULL/*this is a load*/) );
11158 stmt( stmt_load( res_hi,
11159 binop(Iop_Add64, mkexpr(EA), mkU64(8) ),
11160 NULL/*this is a load*/) );
11162 } else {
11163 stmt( stmt_load( res_hi,
11164 binop( Iop_Add32, mkexpr(EA), mkU32(4) ),
11165 NULL/*this is a load*/) );
11166 stmt( stmt_load( res_lo,
11167 binop( Iop_Add32, mkexpr(EA), mkU32(12) ),
11168 NULL/*this is a load*/) );
11170 putIReg( rD_addr, mkexpr(res_hi) );
11171 putIReg( rD_addr+1, mkexpr(res_lo) );
11172 break;
11175 case 0x0B6: { // stqcx. (Store QuadWord Conditional Indexed, PPC64)
11176 // A marginally simplified version of the stwcx. case
11177 IRTemp rS_hi = newTemp(ty);
11178 IRTemp rS_lo = newTemp(ty);
11179 IRTemp resSC;
11180 if (b0 != 1) {
11181 vex_printf("dis_memsync(ppc)(stqcx.,b0)\n");
11182 return False;
11185 DIP("stqcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
11187 // trap if misaligned
11188 gen_SIGBUS_if_misaligned( EA, 16 );
11189 // Get the data to be stored
11190 assign( rS_hi, getIReg(rS_addr) );
11191 assign( rS_lo, getIReg(rS_addr+1) );
11193 // Do the store, and get success/failure bit into resSC
11194 resSC = newTemp(Ity_I1);
/* Only the first half is store-conditional; the second half is a
   plain store issued alongside it. */
11196 if (mode64) {
11197 if (host_endness == VexEndnessBE) {
11198 stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS_hi) ) );
11199 store( binop( Iop_Add64, mkexpr(EA), mkU64(8) ),
11200 mkexpr(rS_lo) );
11201 } else {
11202 stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS_lo) ) );
11203 store( binop( Iop_Add64, mkexpr(EA), mkU64(8) ),
11204 mkexpr(rS_hi) );
11206 } else {
11207 stmt( stmt_load( resSC, binop( Iop_Add32,
11208 mkexpr(EA),
11209 mkU32(4) ),
11210 mkexpr(rS_hi) ) );
11211 store( binop(Iop_Add32, mkexpr(EA), mkU32(12) ), mkexpr(rS_lo) );
11214 // Set CR0[LT GT EQ SO] = 0b000 || XER[SO] on failure
11215 // Set CR0[LT GT EQ SO] = 0b001 || XER[SO] on success
11216 putCR321(0, binop( Iop_Shl8,
11217 unop(Iop_1Uto8, mkexpr(resSC) ),
11218 mkU8(1)));
11219 putCR0(0, getXER_SO());
11220 break;
11223 default:
11224 vex_printf("dis_memsync(ppc)(opc2)\n");
11225 return False;
11227 break;
11229 default:
11230 vex_printf("dis_memsync(ppc)(opc1)\n");
11231 return False;
11233 return True;
11239 Integer Shift Instructions
11241 static Bool dis_int_shift ( UInt prefix, UInt theInstr, UInt allow_isa_3_0 )
11243 /* X-Form, XS-Form */
11244 UChar opc1 = ifieldOPC(theInstr);
11245 UChar rS_addr = ifieldRegDS(theInstr);
11246 UChar rA_addr = ifieldRegA(theInstr);
11247 UChar rB_addr = ifieldRegB(theInstr);
11248 UChar sh_imm = rB_addr;
11249 UInt opc2 = ifieldOPClo10(theInstr);
11250 UChar b1 = ifieldBIT1(theInstr);
11251 UChar flag_rC = ifieldBIT0(theInstr);
11253 IRType ty = mode64 ? Ity_I64 : Ity_I32;
11254 IRTemp rA = newTemp(ty);
11255 IRTemp rS = newTemp(ty);
11256 IRTemp rB = newTemp(ty);
11257 IRTemp outofrange = newTemp(Ity_I1);
11258 IRTemp rS_lo32 = newTemp(Ity_I32);
11259 IRTemp rB_lo32 = newTemp(Ity_I32);
11260 IRExpr* e_tmp;
11262 /* There is no prefixed version of these instructions. */
11263 PREFIX_CHECK
11265 assign( rS, getIReg(rS_addr) );
11266 assign( rB, getIReg(rB_addr) );
11267 assign( rS_lo32, mkNarrowTo32(ty, mkexpr(rS)) );
11268 assign( rB_lo32, mkNarrowTo32(ty, mkexpr(rB)) );
11270 if (opc1 == 0x1F) {
11271 switch (opc2) {
11272 case 0x018: { // slw (Shift Left Word, PPC32 p505)
11273 DIP("slw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
11274 rA_addr, rS_addr, rB_addr);
11275 /* rA = rS << rB */
11276 /* ppc32 semantics are:
11277 slw(x,y) = (x << (y & 31)) -- primary result
11278 & ~((y << 26) >>s 31) -- make result 0
11279 for y in 32 .. 63
11281 e_tmp =
11282 binop( Iop_And32,
11283 binop( Iop_Shl32,
11284 mkexpr(rS_lo32),
11285 unop( Iop_32to8,
11286 binop(Iop_And32,
11287 mkexpr(rB_lo32), mkU32(31)))),
11288 unop( Iop_Not32,
11289 binop( Iop_Sar32,
11290 binop(Iop_Shl32, mkexpr(rB_lo32), mkU8(26)),
11291 mkU8(31))) );
11292 assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */False) );
11293 break;
11296 case 0x318: { // sraw (Shift Right Alg Word, PPC32 p506)
11297 IRTemp sh_amt = newTemp(Ity_I32);
11298 DIP("sraw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
11299 rA_addr, rS_addr, rB_addr);
11300 /* JRS: my reading of the (poorly worded) PPC32 doc p506 is:
11301 amt = rB & 63
11302 rA = Sar32( rS, amt > 31 ? 31 : amt )
11303 XER.CA = amt > 31 ? sign-of-rS : (computation as per srawi)
11305 assign( sh_amt, binop(Iop_And32, mkU32(0x3F),
11306 mkexpr(rB_lo32)) );
11307 assign( outofrange,
11308 binop(Iop_CmpLT32U, mkU32(31), mkexpr(sh_amt)) );
11309 e_tmp = binop( Iop_Sar32,
11310 mkexpr(rS_lo32),
11311 unop( Iop_32to8,
11312 IRExpr_ITE( mkexpr(outofrange),
11313 mkU32(31),
11314 mkexpr(sh_amt)) ) );
11315 assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */True) );
11317 /* Set CA bit */
11318 set_XER_CA_CA32( ty, PPCG_FLAG_OP_SRAW,
11319 mkexpr(rA),
11320 mkWidenFrom32(ty, mkexpr(rS_lo32), True),
11321 mkWidenFrom32(ty, mkexpr(sh_amt), True ),
11322 mkWidenFrom32(ty, getXER_CA_32(), True) );
11324 if (allow_isa_3_0)
11325 /* copy CA to CA32 */
11326 putXER_CA32( unop(Iop_32to8, getXER_CA_32()));
11327 break;
11330 case 0x338: // srawi (Shift Right Alg Word Immediate, PPC32 p507)
11331 DIP("srawi%s r%u,r%u,%d\n", flag_rC ? ".":"",
11332 rA_addr, rS_addr, sh_imm);
11333 vassert(sh_imm < 32);
11334 if (mode64) {
11335 assign( rA, binop(Iop_Sar64,
11336 binop(Iop_Shl64, getIReg(rS_addr),
11337 mkU8(32)),
11338 mkU8(32 + sh_imm)) );
11339 } else {
11340 assign( rA, binop(Iop_Sar32, mkexpr(rS_lo32),
11341 mkU8(sh_imm)) );
11344 /* Set CA bit */
11345 set_XER_CA_CA32( ty, PPCG_FLAG_OP_SRAWI,
11346 mkexpr(rA),
11347 mkWidenFrom32(ty, mkexpr(rS_lo32), /* Signed */True),
11348 mkSzImm(ty, sh_imm),
11349 mkWidenFrom32(ty, getXER_CA_32(), /* Signed */False) );
11351 if (allow_isa_3_0)
11352 /* copy CA to CA32 */
11353 putXER_CA32( unop(Iop_32to8, getXER_CA_32()));
11354 break;
11356 case 0x218: // srw (Shift Right Word, PPC32 p508)
11357 DIP("srw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
11358 rA_addr, rS_addr, rB_addr);
11359 /* rA = rS >>u rB */
11360 /* ppc32 semantics are:
11361 srw(x,y) = (x >>u (y & 31)) -- primary result
11362 & ~((y << 26) >>s 31) -- make result 0
11363 for y in 32 .. 63
11365 e_tmp =
11366 binop(
11367 Iop_And32,
11368 binop( Iop_Shr32,
11369 mkexpr(rS_lo32),
11370 unop( Iop_32to8,
11371 binop(Iop_And32, mkexpr(rB_lo32),
11372 mkU32(31)))),
11373 unop( Iop_Not32,
11374 binop( Iop_Sar32,
11375 binop(Iop_Shl32, mkexpr(rB_lo32),
11376 mkU8(26)),
11377 mkU8(31))));
11378 assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */False) );
11379 break;
11382 /* 64bit Shifts */
11383 case 0x01B: // sld (Shift Left DWord, PPC64 p568)
11384 DIP("sld%s r%u,r%u,r%u\n",
11385 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
11386 /* rA = rS << rB */
11387 /* ppc64 semantics are:
11388 slw(x,y) = (x << (y & 63)) -- primary result
11389 & ~((y << 57) >>s 63) -- make result 0
11390 for y in 64 ..
11392 assign( rA,
11393 binop(
11394 Iop_And64,
11395 binop( Iop_Shl64,
11396 mkexpr(rS),
11397 unop( Iop_64to8,
11398 binop(Iop_And64, mkexpr(rB), mkU64(63)))),
11399 unop( Iop_Not64,
11400 binop( Iop_Sar64,
11401 binop(Iop_Shl64, mkexpr(rB), mkU8(57)),
11402 mkU8(63)))) );
11403 break;
11405 case 0x31A: { // srad (Shift Right Alg DWord, PPC64 p570)
11406 IRTemp sh_amt = newTemp(Ity_I64);
11407 DIP("srad%s r%u,r%u,r%u\n",
11408 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
11409 /* amt = rB & 127
11410 rA = Sar64( rS, amt > 63 ? 63 : amt )
11411 XER.CA = amt > 63 ? sign-of-rS : (computation as per srawi)
11413 assign( sh_amt, binop(Iop_And64, mkU64(0x7F), mkexpr(rB)) );
11414 assign( outofrange,
11415 binop(Iop_CmpLT64U, mkU64(63), mkexpr(sh_amt)) );
11416 assign( rA,
11417 binop( Iop_Sar64,
11418 mkexpr(rS),
11419 unop( Iop_64to8,
11420 IRExpr_ITE( mkexpr(outofrange),
11421 mkU64(63),
11422 mkexpr(sh_amt)) ))
11424 /* Set CA bit */
11425 set_XER_CA_CA32( ty, PPCG_FLAG_OP_SRAD,
11426 mkexpr(rA), mkexpr(rS), mkexpr(sh_amt),
11427 mkWidenFrom32(ty, getXER_CA_32(), /* Signed */False) );
11429 if (allow_isa_3_0)
11430 /* copy CA to CA32 */
11431 putXER_CA32( unop(Iop_32to8, getXER_CA_32()));
11432 break;
11435 case 0x33A: case 0x33B: // sradi (Shr Alg DWord Imm, PPC64 p571)
11436 sh_imm |= b1<<5;
11437 vassert(sh_imm < 64);
11438 DIP("sradi%s r%u,r%u,%u\n",
11439 flag_rC ? ".":"", rA_addr, rS_addr, sh_imm);
11440 assign( rA, binop(Iop_Sar64, getIReg(rS_addr), mkU8(sh_imm)) );
11442 /* Set CA bit */
11443 set_XER_CA_CA32( ty, PPCG_FLAG_OP_SRADI,
11444 mkexpr(rA),
11445 getIReg(rS_addr),
11446 mkU64(sh_imm),
11447 mkWidenFrom32(ty, getXER_CA_32(), /* Signed */False) );
11449 if (allow_isa_3_0)
11450 /* copy CA to CA32 */
11451 putXER_CA32( unop(Iop_32to8, getXER_CA_32()));
11452 break;
11454 case 0x21B: // srd (Shift Right DWord, PPC64 p574)
11455 DIP("srd%s r%u,r%u,r%u\n",
11456 flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
11457 /* rA = rS >>u rB */
11458 /* ppc semantics are:
11459 srw(x,y) = (x >>u (y & 63)) -- primary result
11460 & ~((y << 57) >>s 63) -- make result 0
11461 for y in 64 .. 127
11463 assign( rA,
11464 binop(
11465 Iop_And64,
11466 binop( Iop_Shr64,
11467 mkexpr(rS),
11468 unop( Iop_64to8,
11469 binop(Iop_And64, mkexpr(rB), mkU64(63)))),
11470 unop( Iop_Not64,
11471 binop( Iop_Sar64,
11472 binop(Iop_Shl64, mkexpr(rB), mkU8(57)),
11473 mkU8(63)))) );
11474 break;
11476 default:
11477 vex_printf("dis_int_shift(ppc)(opc2)\n");
11478 return False;
11480 } else {
11481 vex_printf("dis_int_shift(ppc)(opc1)\n");
11482 return False;
11485 putIReg( rA_addr, mkexpr(rA) );
11487 if (flag_rC) {
11488 set_CR0( mkexpr(rA) );
11490 return True;
11496 Integer Load/Store Reverse Instructions
11498 /* Generates code to swap the byte order in an Ity_I32. */
11499 static IRExpr* /* :: Ity_I32 */ gen_byterev32 ( IRTemp t )
11501 vassert(typeOfIRTemp(irsb->tyenv, t) == Ity_I32);
/* A single IR primitive performs the full 4-byte reversal. */
11502 return unop(Iop_Reverse8sIn32_x1, mkexpr(t));
11505 /* Generates code to swap the byte order in the lower half of an Ity_I32,
11506 and zeroes the upper half. */
11507 static IRExpr* /* :: Ity_I32 */ gen_byterev16 ( IRTemp t )
11509 vassert(typeOfIRTemp(irsb->tyenv, t) == Ity_I32);
11510 return
11511 binop(Iop_Or32,
/* byte 0 of t moves up into byte 1 of the result ... */
11512 binop(Iop_And32, binop(Iop_Shl32, mkexpr(t), mkU8(8)),
11513 mkU32(0x0000FF00)),
/* ... and byte 1 of t moves down into byte 0. */
11514 binop(Iop_And32, binop(Iop_Shr32, mkexpr(t), mkU8(8)),
11515 mkU32(0x000000FF))
/* Disassemble the integer load/store byte-reverse instructions
   (lhbrx, lwbrx, ldbrx, sthbrx, stwbrx, stdbrx).  Returns False if
   the instruction cannot be decoded. */
11519 static Bool dis_int_ldst_rev ( UInt prefix, UInt theInstr )
11521 /* X-Form */
11522 UChar opc1 = ifieldOPC(theInstr);
11523 UChar rD_addr = ifieldRegDS(theInstr);
11524 UChar rS_addr = rD_addr;
11525 UChar rA_addr = ifieldRegA(theInstr);
11526 UChar rB_addr = ifieldRegB(theInstr);
11527 UInt opc2 = ifieldOPClo10(theInstr);
11528 UChar b0 = ifieldBIT0(theInstr);
11530 IRType ty = mode64 ? Ity_I64 : Ity_I32;
11531 IRTemp EA = newTemp(ty);
11532 IRTemp w1 = newTemp(Ity_I32);
11533 IRTemp w2 = newTemp(Ity_I32);
11535 /* There is no prefixed version of these instructions. */
11536 PREFIX_CHECK
11538 if (opc1 != 0x1F || b0 != 0) {
11539 vex_printf("dis_int_ldst_rev(ppc)(opc1|b0)\n");
11540 return False;
/* Effective address is (rA|0) + rB, as for all X-form indexed accesses. */
11543 assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
11545 switch (opc2) {
11547 case 0x316: // lhbrx (Load Halfword Byte-Reverse Indexed, PPC32 p449)
11548 DIP("lhbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
11549 assign( w1, unop(Iop_16Uto32, load(Ity_I16, mkexpr(EA))) );
11550 assign( w2, gen_byterev16(w1) );
11551 putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(w2),
11552 /* Signed */False) );
11553 break;
11555 case 0x216: // lwbrx (Load Word Byte-Reverse Indexed, PPC32 p459)
11556 DIP("lwbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
11557 assign( w1, load(Ity_I32, mkexpr(EA)) );
11558 assign( w2, gen_byterev32(w1) );
11559 putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(w2),
11560 /* Signed */False) );
11561 break;
11563 case 0x214: // ldbrx (Load Doubleword Byte-Reverse Indexed)
11565 /* Caller makes sure we are only called in mode64. */
11567 /* If we supported swapping LE/BE loads in the backend then we could
11568 just load the value with the bytes reversed by doing a BE load
11569 on an LE machine and a LE load on a BE machine.
11571 IRTemp dw1 = newTemp(Ity_I64);
11572 if (host_endness == VexEndnessBE)
11573 assign( dw1, IRExpr_Load(Iend_LE, Ity_I64, mkexpr(EA)));
11574 else
11575 assign( dw1, IRExpr_Load(Iend_BE, Ity_I64, mkexpr(EA)));
11576 putIReg( rD_addr, mkexpr(dw1) );
11578 But since we currently don't we load the value as is and then
11579 switch it around with Iop_Reverse8sIn64_x1. */
11581 IRTemp dw1 = newTemp(Ity_I64);
11582 IRTemp dw2 = newTemp(Ity_I64);
11583 DIP("ldbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
11584 assign( dw1, load(Ity_I64, mkexpr(EA)) );
11585 assign( dw2, unop(Iop_Reverse8sIn64_x1, mkexpr(dw1)) );
11586 putIReg( rD_addr, mkexpr(dw2) );
11587 break;
11590 case 0x396: // sthbrx (Store Half Word Byte-Reverse Indexed, PPC32 p523)
11591 DIP("sthbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
11592 assign( w1, mkNarrowTo32(ty, getIReg(rS_addr)) );
11593 store( mkexpr(EA), unop(Iop_32to16, gen_byterev16(w1)) );
11594 break;
11596 case 0x296: // stwbrx (Store Word Byte-Reverse Indexed, PPC32 p531)
11597 DIP("stwbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
11598 assign( w1, mkNarrowTo32(ty, getIReg(rS_addr)) );
11599 store( mkexpr(EA), gen_byterev32(w1) );
11600 break;
11602 case 0x294: // stdbrx (Store Doubleword Byte-Reverse Indexed)
11604 IRTemp lo = newTemp(Ity_I32);
11605 IRTemp hi = newTemp(Ity_I32);
11606 IRTemp rS = newTemp(Ity_I64);
11607 assign( rS, getIReg( rS_addr ) );
11608 DIP("stdbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
11609 assign(lo, unop(Iop_64HIto32, mkexpr(rS)));
11610 assign(hi, unop(Iop_64to32, mkexpr(rS)));
/* Byte-reverse each 32-bit half and swap the halves: a full
   64-bit byte reversal built from 32-bit pieces. */
11611 store( mkexpr( EA ),
11612 binop( Iop_32HLto64, gen_byterev32( hi ),
11613 gen_byterev32( lo ) ) );
11614 break;
11617 default:
11618 vex_printf("dis_int_ldst_rev(ppc)(opc2)\n");
11619 return False;
11621 return True;
11627 Processor Control Instructions
/* Disassemble processor-control instructions: mcrxr, mcrxrx,
   mfcr/mfocrf, mfspr, mftb, mtcrf/mtocrf, mtspr, and the VSX
   GPR<->VSR move instructions (mfvsrd, mfvsrwz, mtvsrd, mtvsrwa,
   mtvsrwz).  Returns False if the instruction cannot be decoded. */
11629 static Bool dis_proc_ctl ( const VexAbiInfo* vbi, UInt prefix, UInt theInstr )
11631 UChar opc1 = ifieldOPC(theInstr);
11633 /* X-Form */
11634 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
11635 UChar b21to22 = toUChar( IFIELD( theInstr, 21, 2 ) );
11636 UChar rD_addr = ifieldRegDS(theInstr);
11637 UInt b11to20 = IFIELD( theInstr, 11, 10 );
11639 /* XFX-Form */
11640 UChar rS_addr = rD_addr;
11641 UInt SPR = b11to20;
11642 UInt TBR = b11to20;
11643 UChar b20 = toUChar( IFIELD( theInstr, 20, 1 ) );
11644 UInt CRM = IFIELD( theInstr, 12, 8 );
11645 UChar b11 = toUChar( IFIELD( theInstr, 11, 1 ) );
11647 UInt opc2 = ifieldOPClo10(theInstr);
11648 UChar b0 = ifieldBIT0(theInstr);
11650 IRType ty = mode64 ? Ity_I64 : Ity_I32;
11651 IRTemp rS = newTemp(ty);
11653 /* There is no prefixed version of these instructions. */
11654 PREFIX_CHECK
11656 assign( rS, getIReg(rS_addr) );
11658 /* Reorder SPR field as per PPC32 p470 */
11659 SPR = ((SPR & 0x1F) << 5) | ((SPR >> 5) & 0x1F);
11660 /* Reorder TBR field as per PPC32 p475 */
11661 TBR = ((TBR & 31) << 5) | ((TBR >> 5) & 31);
11663 /* b0 = 0, inst is treated as floating point inst for reservation purposes
11664 * b0 = 1, inst is treated as vector inst for reservation purposes
11666 if (opc1 != 0x1F) {
11667 vex_printf("dis_proc_ctl(ppc)(opc1|b%d)\n", b0);
11668 return False;
11671 switch (opc2) {
11672 /* X-Form */
11673 case 0x200: { // mcrxr (Move to Cond Register from XER, PPC32 p466)
11674 if (b21to22 != 0 || b11to20 != 0) {
11675 vex_printf("dis_proc_ctl(ppc)(mcrxr,b21to22|b11to20)\n");
11676 return False;
11678 DIP("mcrxr crf%d\n", crfD);
11679 /* Move XER[0-3] (the top 4 bits of XER) to CR[crfD] */
11680 putGST_field( PPC_GST_CR,
11681 getGST_field( PPC_GST_XER, 7 ),
11682 crfD );
11684 // Clear XER[0-3]
11685 putXER_SO( mkU8(0) );
11686 putXER_OV( mkU8(0) );
11687 putXER_CA( mkU8(0) );
11688 break;
11691 case 0x240: { // mcrxrx (Move to Cond Register from XER)
11692 IRTemp OV = newTemp(Ity_I32);
11693 IRTemp CA = newTemp(Ity_I32);
11694 IRTemp OV32 = newTemp(Ity_I32);
11695 IRTemp CA32 = newTemp(Ity_I32);
11696 IRTemp tmp = newTemp(Ity_I32);
11698 if (b21to22 != 0 || b11to20 != 0) {
11699 vex_printf("dis_proc_ctl(ppc)(mcrxrx,b21to22|b11to20)\n");
11700 return False;
11702 DIP("mcrxrx crf%d\n", crfD);
11703 /* Move OV, OV32, CA, CA32 to condition register field BF */
11704 assign( OV, binop( Iop_Shl32, getXER_OV_32(), mkU8( 3 ) ));
11705 assign( CA, binop( Iop_Shl32, getXER_CA_32(), mkU8( 1 ) ));
11706 assign( OV32, binop( Iop_Shl32, getXER_OV32_32(), mkU8( 2 ) ));
11707 assign( CA32, getXER_CA32_32() );
11709 /* Put [OV | OV32 | CA | CA32] into the condition code register */
11710 assign( tmp,
11711 binop( Iop_Or32,
11712 binop( Iop_Or32, mkexpr ( OV ), mkexpr ( OV32 ) ),
11713 binop( Iop_Or32, mkexpr ( CA ), mkexpr ( CA32 ) )
11714 ) );
11716 putGST_field( PPC_GST_CR, mkexpr( tmp ), crfD );
11717 break;
11720 case 0x013:
11721 // b11to20==0: mfcr (Move from Cond Register, PPC32 p467)
11722 // b20==1 & b11==0: mfocrf (Move from One CR Field)
11723 // However it seems that the 'mfcr' behaviour is an acceptable
11724 // implementation of mfocrf (from the 2.02 arch spec)
11725 if (b11to20 == 0) {
11726 DIP("mfcr r%u\n", rD_addr);
11727 putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_CR ),
11728 /* Signed */False) );
11729 break;
11731 if (b20 == 1 && b11 == 0) {
11732 DIP("mfocrf r%u,%u\n", rD_addr, CRM);
11733 putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_CR ),
11734 /* Signed */False) );
11735 break;
11737 /* not decodable */
11738 return False;
11740 /* XFX-Form */
11741 case 0x153: // mfspr (Move from Special-Purpose Register, PPC32 p470)
11743 switch (SPR) { // Choose a register...
11744 case 0x1:
11745 DIP("mfxer r%u\n", rD_addr);
11746 putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_XER ),
11747 /* Signed */False) );
11748 break;
11749 case 0x3: // 131
11750 DIP("mfspr r%u (DSCR)\n", rD_addr);
11751 putIReg( rD_addr, getGST( PPC_GST_DSCR) );
11752 break;
11753 case 0x8:
11754 DIP("mflr r%u\n", rD_addr);
11755 putIReg( rD_addr, getGST( PPC_GST_LR ) );
11756 break;
11757 case 0x9:
11758 DIP("mfctr r%u\n", rD_addr);
11759 putIReg( rD_addr, getGST( PPC_GST_CTR ) );
11760 break;
11761 case 0x80: // 128
11762 DIP("mfspr r%u (TFHAR)\n", rD_addr);
11763 putIReg( rD_addr, getGST( PPC_GST_TFHAR) );
11764 break;
11765 case 0x81: // 129
11766 DIP("mfspr r%u (TFIAR)\n", rD_addr);
11767 putIReg( rD_addr, getGST( PPC_GST_TFIAR) );
11768 break;
11769 case 0x82: // 130
11770 DIP("mfspr r%u (TEXASR)\n", rD_addr);
11771 putIReg( rD_addr, getGST( PPC_GST_TEXASR) );
11772 break;
11773 case 0x83: // 131
11774 DIP("mfspr r%u (TEXASRU)\n", rD_addr);
11775 putIReg( rD_addr, getGST( PPC_GST_TEXASRU) );
11776 break;
11777 case 0x9F: // 159
11778 DIP("mfspr r%u (PSPB)\n", rD_addr);
11779 putIReg( rD_addr, getGST( PPC_GST_PSPB) );
11780 break;
11781 case 0x380: // 896
11782 DIP("mfspr r%u (PPR)\n", rD_addr);
11783 putIReg( rD_addr, getGST( PPC_GST_PPR) );
11784 break;
11785 case 0x382: // 898
11786 DIP("mfspr r%u (PPR)32\n", rD_addr);
11787 putIReg( rD_addr, getGST( PPC_GST_PPR32) );
11788 break;
11789 case 0x100:
11790 DIP("mfvrsave r%u\n", rD_addr);
11791 putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_VRSAVE ),
11792 /* Signed */False) );
11793 break;
11795 case 0x103:
11796 DIP("mfspr r%u, SPRG3(readonly)\n", rD_addr);
11797 putIReg( rD_addr, getGST( PPC_GST_SPRG3_RO ) );
11798 break;
11800 case 268 /* 0x10C TB - 64 bit time base register */:
11802 IRTemp val = newTemp(Ity_I64);
11803 IRExpr** args = mkIRExprVec_0();
11804 IRDirty* d = unsafeIRDirty_1_N(
11805 val,
11806 0/*regparms*/,
11807 "ppcg_dirtyhelper_MFTB",
11808 fnptr_to_fnentry(vbi,
11809 &ppcg_dirtyhelper_MFTB),
11810 args );
11811 /* execute the dirty call, dumping the result in val. */
11812 stmt( IRStmt_Dirty(d) );
11813 putIReg( rD_addr, (mode64) ? mkexpr(val) :
11814 unop(Iop_64to32, mkexpr(val)) );
11816 break;
11818 case 269 /* 0x10D TBU - upper 32-bits of time base register */:
11820 DIP("mfspr r%u,%u", rD_addr, SPR);
11821 IRTemp val = newTemp(Ity_I64);
11822 IRExpr** args = mkIRExprVec_0();
11823 IRDirty* d = unsafeIRDirty_1_N(
11824 val,
11825 0/*regparms*/,
11826 "ppcg_dirtyhelper_MFTB",
11827 fnptr_to_fnentry(vbi,
11828 &ppcg_dirtyhelper_MFTB),
11829 args );
11830 /* execute the dirty call, dumping the result in val. */
11831 stmt( IRStmt_Dirty(d) );
11832 putIReg( rD_addr,
11833 mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(val)),
11834 /* Signed */False) );
11835 break;
11837 case 284 /* 0x1 TBL - lower 32-bits of time base register */:
11839 DIP("mfspr r%u,%u", rD_addr, SPR);
11840 IRTemp val = newTemp(Ity_I64);
11841 IRExpr** args = mkIRExprVec_0();
11842 IRDirty* d = unsafeIRDirty_1_N(
11843 val,
11844 0/*regparms*/,
11845 "ppcg_dirtyhelper_MFTB",
11846 fnptr_to_fnentry(vbi,
11847 &ppcg_dirtyhelper_MFTB),
11848 args );
11849 /* execute the dirty call, dumping the result in val. */
11850 stmt( IRStmt_Dirty(d) );
11851 putIReg( rD_addr,
11852 mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(val)),
11853 /* Signed */False) );
11854 break;
11857 /* Again, runs natively on PPC7400 (7447, really). Not
11858 bothering with a feature test. */
11859 case 287: /* 0x11F */ {
11860 IRTemp val = newTemp(Ity_I32);
11861 IRExpr** args = mkIRExprVec_0();
11862 IRDirty* d = unsafeIRDirty_1_N(
11863 val,
11864 0/*regparms*/,
11865 "ppc32g_dirtyhelper_MFSPR_287",
11866 fnptr_to_fnentry
11867 (vbi, &ppc32g_dirtyhelper_MFSPR_287),
11868 args
11870 /* execute the dirty call, dumping the result in val. */
11871 stmt( IRStmt_Dirty(d) );
11872 putIReg( rD_addr,
11873 mkWidenFrom32(ty, mkexpr(val), False/*unsigned*/) );
11874 DIP("mfspr r%u,%u", rD_addr, SPR);
11875 break;
11878 default:
11879 vex_printf("dis_proc_ctl(ppc)(mfspr,SPR)(0x%x)\n", SPR);
11880 return False;
11882 break;
11884 case 0x173: { // mftb (Move from Time Base, PPC32 p475)
11885 IRTemp val = newTemp(Ity_I64);
11886 IRExpr** args = mkIRExprVec_0();
11887 IRDirty* d = unsafeIRDirty_1_N(
11888 val,
11889 0/*regparms*/,
11890 "ppcg_dirtyhelper_MFTB",
11891 fnptr_to_fnentry(vbi, &ppcg_dirtyhelper_MFTB),
11892 args );
11893 /* execute the dirty call, dumping the result in val. */
11894 stmt( IRStmt_Dirty(d) );
11896 switch (TBR) {
11897 case 269:
11898 DIP("mftbu r%u", rD_addr);
11899 putIReg( rD_addr,
11900 mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(val)),
11901 /* Signed */False) );
11902 break;
11903 case 268:
11904 DIP("mftb r%u", rD_addr);
11905 putIReg( rD_addr, (mode64) ? mkexpr(val) :
11906 unop(Iop_64to32, mkexpr(val)) );
11907 break;
11908 case 284:
11909 DIP("mftbl r%u", rD_addr);
11910 putIReg( rD_addr,
11911 mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(val)),
11912 /* Signed */False) );
11913 break;
11914 default:
11915 return False; /* illegal instruction */
11917 break;
11920 case 0x090: {
11921 // b20==0: mtcrf (Move to Cond Register Fields, PPC32 p477)
11922 // b20==1: mtocrf (Move to One Cond Reg Field)
11923 Int cr;
11924 UChar shft;
11925 if (b11 != 0)
11926 return False;
11927 if (b20 == 1) {
11928 /* ppc64 v2.02 spec says mtocrf gives undefined outcome if >
11929 1 field is written. It seems more robust to decline to
11930 decode the insn if so. */
11931 switch (CRM) {
11932 case 0x01: case 0x02: case 0x04: case 0x08:
11933 case 0x10: case 0x20: case 0x40: case 0x80:
11934 break;
11935 default:
11936 return False;
11939 DIP("%s 0x%x,r%u\n", b20==1 ? "mtocrf" : "mtcrf",
11940 CRM, rS_addr);
11941 /* Write to each field specified by CRM */
11942 for (cr = 0; cr < 8; cr++) {
11943 if ((CRM & (1 << (7-cr))) == 0)
11944 continue;
11945 shft = 4*(7-cr);
11946 putGST_field( PPC_GST_CR,
11947 binop(Iop_Shr32,
11948 mkNarrowTo32(ty, mkexpr(rS)),
11949 mkU8(shft)), cr );
11951 break;
11954 case 0x1D3: // mtspr (Move to Special-Purpose Register, PPC32 p483)
11956 switch (SPR) { // Choose a register...
11957 case 0x1:
11958 DIP("mtxer r%u\n", rS_addr);
11959 putGST( PPC_GST_XER, mkNarrowTo32(ty, mkexpr(rS)) );
11960 break;
11961 case 0x3:
11962 DIP("mtspr r%u (DSCR)\n", rS_addr);
11963 putGST( PPC_GST_DSCR, mkexpr(rS) );
11964 break;
11965 case 0x8:
11966 DIP("mtlr r%u\n", rS_addr);
11967 putGST( PPC_GST_LR, mkexpr(rS) );
11968 break;
11969 case 0x9:
11970 DIP("mtctr r%u\n", rS_addr);
11971 putGST( PPC_GST_CTR, mkexpr(rS) );
11972 break;
11973 case 0x100:
11974 DIP("mtvrsave r%u\n", rS_addr);
11975 putGST( PPC_GST_VRSAVE, mkNarrowTo32(ty, mkexpr(rS)) );
11976 break;
11977 case 0x80: // 128
11978 DIP("mtspr r%u (TFHAR)\n", rS_addr);
11979 putGST( PPC_GST_TFHAR, mkexpr(rS) );
11980 break;
11981 case 0x81: // 129
11982 DIP("mtspr r%u (TFIAR)\n", rS_addr);
11983 putGST( PPC_GST_TFIAR, mkexpr(rS) );
11984 break;
11985 case 0x82: // 130
11986 DIP("mtspr r%u (TEXASR)\n", rS_addr);
11987 putGST( PPC_GST_TEXASR, mkexpr(rS) );
11988 break;
11989 case 0x9F: // 159
11990 DIP("mtspr r%u (PSPB)\n", rS_addr);
11991 putGST( PPC_GST_PSPB, mkexpr(rS) );
11992 break;
11993 case 0x380: // 896
11994 DIP("mtspr r%u (PPR)\n", rS_addr);
11995 putGST( PPC_GST_PPR, mkexpr(rS) );
11996 break;
11997 case 0x382: // 898
11998 DIP("mtspr r%u (PPR32)\n", rS_addr);
11999 putGST( PPC_GST_PPR32, mkexpr(rS) );
12000 break;
12001 default:
12002 vex_printf("dis_proc_ctl(ppc)(mtspr,SPR)(%u)\n", SPR);
12003 return False;
12005 break;
12007 case 0x33: // mfvsrd
12009 UChar XS = ifieldRegXS( theInstr );
12010 UChar rA_addr = ifieldRegA(theInstr);
12011 IRExpr * high64;
12012 IRTemp vS = newTemp( Ity_V128 );
12013 DIP("mfvsrd r%u,vsr%d\n", rA_addr, XS);
12015 /* XS = SX || S
12016 * For SX=0, mfvsrd is treated as a Floating-Point
12017 * instruction in terms of resource availability.
12018 * For SX=1, mfvsrd is treated as a Vector instruction in
12019 * terms of resource availability.
12020 * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
12022 assign( vS, getVSReg( XS ) );
12023 high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
12024 putIReg( rA_addr, (mode64) ? high64 :
12025 unop( Iop_64to32, high64 ) );
12026 break;
12029 case 0x73: // mfvsrwz
12031 UChar XS = ifieldRegXS( theInstr );
12032 UChar rA_addr = ifieldRegA(theInstr);
12033 IRExpr * high64;
12034 IRTemp vS = newTemp( Ity_V128 );
12035 DIP("mfvsrwz r%u,vsr%d\n", rA_addr, XS);
12036 /* XS = SX || S
12037 * For SX=0, mfvsrwz is treated as a Floating-Point
12038 * instruction in terms of resource availability.
12039 * For SX=1, mfvsrwz is treated as a Vector instruction in
12040 * terms of resource availability.
12041 * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
12044 assign( vS, getVSReg( XS ) );
12045 high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
12046 /* move value to the destination setting the upper 32-bits to zero */
12047 putIReg( rA_addr, (mode64) ?
12048 binop( Iop_And64, high64, mkU64( 0xFFFFFFFF ) ) :
12049 unop( Iop_64to32,
12050 binop( Iop_And64, high64, mkU64( 0xFFFFFFFF ) ) ) );
12051 break;
12054 case 0xB3: // mtvsrd
12056 UChar XT = ifieldRegXT( theInstr );
12057 UChar rA_addr = ifieldRegA(theInstr);
12058 IRTemp rA = newTemp(ty);
12059 DIP("mtvsrd vsr%d,r%u\n", XT, rA_addr);
12060 /* XS = SX || S
12061 * For SX=0, mfvsrd is treated as a Floating-Point
12062 * instruction in terms of resource availability.
12063 * For SX=1, mfvsrd is treated as a Vector instruction in
12064 * terms of resource availability.
12065 * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
12067 assign( rA, getIReg(rA_addr) );
12069 if (mode64)
12070 putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( rA ), mkU64( 0 ) ) );
12071 else
12072 putVSReg( XT, binop( Iop_64HLtoV128,
12073 binop( Iop_32HLto64,
12074 mkU32( 0 ),
12075 mkexpr( rA ) ),
12076 mkU64( 0 ) ) );
12077 break;
12080 case 0xD3: // mtvsrwa
12082 UChar XT = ifieldRegXT( theInstr );
12083 UChar rA_addr = ifieldRegA(theInstr);
12084 IRTemp rA = newTemp( Ity_I32 );
12085 DIP("mtvsrwa vsr%d,r%u\n", XT, rA_addr);
12086 /* XS = SX || S
12087 * For SX=0, mtvsrwa is treated as a Floating-Point
12088 * instruction in terms of resource availability.
12089 * For SX=1, mtvsrwa is treated as a Vector instruction in
12090 * terms of resource availability.
12091 * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
12093 if (mode64)
12094 assign( rA, unop( Iop_64to32, getIReg( rA_addr ) ) );
12095 else
12096 assign( rA, getIReg(rA_addr) );
12098 putVSReg( XT, binop( Iop_64HLtoV128,
12099 unop( Iop_32Sto64, mkexpr( rA ) ),
12100 mkU64( 0 ) ) );
12101 break;
12104 case 0xF3: // mtvsrwz
12106 UChar XT = ifieldRegXT( theInstr );
12107 UChar rA_addr = ifieldRegA(theInstr);
12108 IRTemp rA = newTemp( Ity_I32 );
/* NOTE(review): the DIP operands below look swapped relative to the
   other mtvsr* cases (prints rA_addr as the vsr and XT as the GPR)
   -- confirm against the mtvsrwa case above. */
12109 DIP("mtvsrwz vsr%d,r%u\n", rA_addr, XT);
12110 /* XS = SX || S
12111 * For SX=0, mtvsrwz is treated as a Floating-Point
12112 * instruction in terms of resource availability.
12113 * For SX=1, mtvsrwz is treated as a Vector instruction in
12114 * terms of resource availability.
12115 * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
12117 if (mode64)
12118 assign( rA, unop( Iop_64to32, getIReg( rA_addr ) ) );
12119 else
12120 assign( rA, getIReg(rA_addr) );
12122 putVSReg( XT, binop( Iop_64HLtoV128,
12123 binop( Iop_32HLto64, mkU32( 0 ), mkexpr ( rA ) ),
12124 mkU64( 0 ) ) );
12125 break;
12128 default:
12129 vex_printf("dis_proc_ctl(ppc)(opc2)\n");
12130 return False;
12132 return True;
12137 Cache Management Instructions
/* Disassemble cache-management instructions (dcbf, dcbst, dcbt,
   dcbtst, dcbz/dcbzl, icbi).  Most are no-ops for VEX; dcbz stores
   zeros over the cache block and icbi triggers translation
   invalidation.  Returns False if the instruction cannot be
   decoded. */
12139 static Bool dis_cache_manage ( UInt prefix, UInt theInstr,
12140 DisResult* dres,
12141 UInt allow_isa_3_1,
12142 const VexArchInfo* guest_archinfo )
12144 /* X-Form */
12145 UChar opc1 = ifieldOPC(theInstr);
12146 UChar b21to25 = ifieldRegDS(theInstr);
12147 /* The L-field is 2 bits in ISA 3.0 and earlier and 3 bits in ISA 3.1 */
12148 /* Relaxed the test to match actual hardware, accept all L values from 0 to 7.
12149 The hardware ignores the L value if not supported. 10/23/2024
12150 UChar flag_L = IFIELD(theInstr, 21, (allow_isa_3_1 ? 3 : 2)); */
12152 UChar rA_addr = ifieldRegA(theInstr);
12153 UChar rB_addr = ifieldRegB(theInstr);
12154 UInt opc2 = ifieldOPClo10(theInstr);
12155 UChar b0 = ifieldBIT0(theInstr);
12156 UInt lineszB = guest_archinfo->ppc_icache_line_szB;
12157 Bool is_dcbzl = False;
12159 IRType ty = mode64 ? Ity_I64 : Ity_I32;
12161 /* There is no prefixed version of these instructions. */
12162 PREFIX_CHECK
12164 // Check for valid hint values for dcbt and dcbtst as currently described in
12165 // ISA 2.07. If valid, then we simply set b21to25 to zero since we have no
12166 // means of modeling the hint anyway.
12167 if (opc1 == 0x1F && ((opc2 == 0x116) || (opc2 == 0xF6))) {
12168 if (b21to25 == 0x10 || b21to25 < 0x10)
12169 b21to25 = 0;
12171 if (opc1 == 0x1F && opc2 == 0x116 && b21to25 == 0x11)
12172 b21to25 = 0;
12174 if (opc1 == 0x1F && opc2 == 0x3F6) { // dcbz
12175 if (b21to25 == 1) {
12176 is_dcbzl = True;
12177 b21to25 = 0;
12178 if (!(guest_archinfo->ppc_dcbzl_szB)) {
12179 vex_printf("dis_cache_manage(ppc)(dcbzl not supported by host)\n");
12180 return False;
12185 if (opc1 != 0x1F || b0 != 0) {
12186 if (0) vex_printf("dis_cache_manage %d %d\n",
12187 opc1, b0);
12188 vex_printf("dis_cache_manage(ppc)(opc1|b0)\n");
12189 return False;
12192 /* stay sane .. */
12193 vassert(lineszB == 16 || lineszB == 32 || lineszB == 64 || lineszB == 128);
12195 switch (opc2) {
12196 //zz case 0x2F6: // dcba (Data Cache Block Allocate, PPC32 p380)
12197 //zz vassert(0); /* AWAITING TEST CASE */
12198 //zz DIP("dcba r%u,r%u\n", rA_addr, rB_addr);
12199 //zz if (0) vex_printf("vex ppc->IR: kludged dcba\n");
12200 //zz break;
12202 case 0x056: // dcbf (Data Cache Block Flush, PPC32 p382)
12203 DIP("dcbf r%u,r%u\n", rA_addr, rB_addr);
12205 /* Check the L field and ISA version.
12206 dcbf ra, rb, 0 dcbf
12207 dcbf ra, rb, 1 dcbf local
12208 dcbf ra, rb, 3 dcbf local primary
12209 dcbf ra, rb, 4 dcbf block flush to persistent storage isa 3.1
12210 dcbf ra, rb, 6 dcbf block store to persistent storage isa 3.1
12211 Relaxed requirement to allow all L values from 0 to 7 to match the
12212 operation of the real hardware. The real hardware accepts the
12213 unsupported L values. 10/23/2024
12215 if (!((flag_L == 0 || flag_L == 1 || flag_L == 3)
12216 || ((flag_L == 4 || flag_L == 6) && allow_isa_3_1 == True)))
12218 vex_printf("dis_cache_manage(ppc)(dcbf,flag_L)\n");
12219 return False;
12222 /* nop as far as vex is concerned */
12223 break;
12225 case 0x036: // dcbst (Data Cache Block Store, PPC32 p384)
12226 DIP("dcbst r%u,r%u\n", rA_addr, rB_addr);
12227 /* nop as far as vex is concerned */
12228 break;
12230 case 0x116: // dcbt (Data Cache Block Touch, PPC32 p385)
12231 DIP("dcbt r%u,r%u\n", rA_addr, rB_addr);
12232 /* nop as far as vex is concerned */
12233 break;
12235 case 0x0F6: // dcbtst (Data Cache Block Touch for Store, PPC32 p386)
12236 DIP("dcbtst r%u,r%u\n", rA_addr, rB_addr);
12237 /* nop as far as vex is concerned */
12238 break;
12240 case 0x3F6: { // dcbz (Data Cache Block Clear to Zero, PPC32 p387)
12241 // dcbzl (Data Cache Block Clear to Zero Long, bug#135264)
12242 /* Clear all bytes in cache block at (rA|0) + rB. */
12243 IRTemp EA = newTemp(ty);
12244 IRTemp addr = newTemp(ty);
12245 IRExpr* irx_addr;
12246 UInt i;
12247 UInt clearszB;
12248 if (is_dcbzl) {
12249 clearszB = guest_archinfo->ppc_dcbzl_szB;
12250 DIP("dcbzl r%u,r%u\n", rA_addr, rB_addr);
12252 else {
12253 clearszB = guest_archinfo->ppc_dcbz_szB;
12254 DIP("dcbz r%u,r%u\n", rA_addr, rB_addr);
12257 assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
12259 if (mode64) {
12260 /* Round EA down to the start of the containing block. */
12261 assign( addr, binop( Iop_And64,
12262 mkexpr(EA),
12263 mkU64( ~((ULong)clearszB-1) )) );
12265 for (i = 0; i < clearszB / 8; i++) {
12266 irx_addr = binop( Iop_Add64, mkexpr(addr), mkU64(i*8) );
12267 store( irx_addr, mkU64(0) );
12269 } else {
12270 /* Round EA down to the start of the containing block. */
12271 assign( addr, binop( Iop_And32,
12272 mkexpr(EA),
12273 mkU32( ~(clearszB-1) )) );
12275 for (i = 0; i < clearszB / 4; i++) {
12276 irx_addr = binop( Iop_Add32, mkexpr(addr), mkU32(i*4) );
12277 store( irx_addr, mkU32(0) );
12280 break;
12283 case 0x3D6: {
12284 // icbi (Instruction Cache Block Invalidate, PPC32 p431)
12285 /* Invalidate all translations containing code from the cache
12286 block at (rA|0) + rB. */
12287 IRTemp EA = newTemp(ty);
12288 IRTemp addr = newTemp(ty);
12289 DIP("icbi r%u,r%u\n", rA_addr, rB_addr);
12290 assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
12292 /* Round EA down to the start of the containing block. */
12293 assign( addr, binop( mkSzOp(ty, Iop_And8),
12294 mkexpr(EA),
12295 mkSzImm(ty, ~(((ULong)lineszB)-1) )) );
12296 putGST( PPC_GST_CMSTART, mkexpr(addr) );
12297 putGST( PPC_GST_CMLEN, mkSzImm(ty, lineszB) );
12299 /* be paranoid ... */
12300 stmt( IRStmt_MBE(Imbe_Fence) );
12302 putGST( PPC_GST_CIA, mkSzImm(ty, nextInsnAddr()));
12303 dres->jk_StopHere = Ijk_InvalICache;
12304 dres->whatNext = Dis_StopHere;
12305 break;
12308 default:
12309 vex_printf("dis_cache_manage(ppc)(opc2)\n");
12310 return False;
12312 return True;
12316 /*------------------------------------------------------------*/
12317 /*--- Floating Point Helpers ---*/
12318 /*------------------------------------------------------------*/
12320 /* --------- Synthesise a 2-bit FPU rounding mode. --------- */
12321 /* Produces a value in 0 .. 3, which is encoded as per the type
12322 IRRoundingMode. PPCRoundingMode encoding is different to
12323 IRRoundingMode, so need to map it.
12326 static IRExpr* /* :: Ity_I32 */ set_round_to_Oddmode ( void )
12328 /* PPC/ valgrind have two-bits to designate the rounding mode.
12329 ISA 3.0 adds instructions that can use a round to odd mode
12330 but did not change the number of bits for the rm. Basically,
12331 they added two instructions that only differ by the rounding
12332 mode the operation uses. In essence, they encoded the rm
12333 in the name. In order to avoid having to create Iops, that
12334 encode the rm in the name, we will "expand" the definition of
12335 the rounding mode bits. We will just pass the rm and then
12336 map the to odd mode to the appropriate PPCFpOp name that
12337 will tell us which instruction to map to.
12339 rounding mode | PPC | IR
12340 ------------------------
12341 to nearest | 000 | 00
12342 to zero | 001 | 11
12343 to +infinity | 010 | 10
12344 to -infinity | 011 | 01
12345 to odd | 1xx | xx
/* Return the constant 'round to odd' rounding-mode encoding. */
12347 return mkU32(8);
/* Read FPSCR.RN and convert the 2-bit PPC rounding mode to the IR
   encoding (see table below). */
12350 static IRExpr* /* :: Ity_I32 */ get_IR_roundingmode ( void )
12353 rounding mode | PPC | IR
12354 ------------------------
12355 to nearest | 00 | 00
12356 to zero | 01 | 11
12357 to +infinity | 10 | 10
12358 to -infinity | 11 | 01
12360 IRTemp rm_PPC32 = newTemp(Ity_I32);
12361 assign( rm_PPC32, getGST_masked( PPC_GST_FPSCR, MASK_FPSCR_RN ) );
12363 // rm_IR = XOR( rm_PPC32, (rm_PPC32 << 1) & 2)
12364 return binop( Iop_Xor32,
12365 mkexpr(rm_PPC32),
12366 binop( Iop_And32,
12367 binop(Iop_Shl32, mkexpr(rm_PPC32), mkU8(1)),
12368 mkU32(2) ));
12371 /* The DFP IR rounding modes were chosen such that the existing PPC to IR
12372 * mapping would still work with the extended three bit DFP rounding
12373 * mode designator.
12375 * rounding mode | PPC | IR
12376 * -----------------------------------------------
12377 * to nearest, ties to even | 000 | 000
12378 * to zero | 001 | 011
12379 * to +infinity | 010 | 010
12380 * to -infinity | 011 | 001
12381 * to nearest, ties away from 0 | 100 | 100
12382 * to nearest, ties toward 0 | 101 | 111
12383 * to away from 0 | 110 | 110
12384 * to prepare for shorter precision | 111 | 101
/* Read FPSCR.DRN and convert the 3-bit DFP rounding mode to the IR
   encoding, using the same XOR trick as get_IR_roundingmode. */
12386 static IRExpr* /* :: Ity_I32 */ get_IR_roundingmode_DFP( void )
12388 IRTemp rm_PPC32 = newTemp( Ity_I32 );
12389 assign( rm_PPC32, getGST_masked_upper( PPC_GST_FPSCR, MASK_FPSCR_DRN ) );
12391 // rm_IR = XOR( rm_PPC32, (rm_PPC32 << 1) & 2)
12392 return binop( Iop_Xor32,
12393 mkexpr( rm_PPC32 ),
12394 binop( Iop_And32,
12395 binop( Iop_Shl32, mkexpr( rm_PPC32 ), mkU8( 1 ) ),
12396 mkU32( 2 ) ) );
12399 #define NANmaskSingle 0x7F800000
12400 #define NANmaskDouble 0x7FF00000
12402 static IRExpr * Check_NaN( IRExpr * value, IRExpr * Hi32Mask )
12404 IRTemp exp_zero = newTemp(Ity_I8);
12405 IRTemp frac_mask = newTemp(Ity_I32);
12406 IRTemp frac_not_zero = newTemp(Ity_I8);
12408 /* Check if the result is QNAN or SNAN and not +infinity or -infinity.
12409 * The input value is always 64-bits, for single precision values, the
12410 * lower 32 bits must be zero.
12412 * Single Pricision
12413 * [62:54] exponent field is equal to 0xFF for NAN and Infinity.
12414 * [53:32] fraction field is zero for Infinity and non-zero for NAN
12415 * [31:0] unused for single precision representation
12417 * Double Pricision
12418 * [62:51] exponent field is equal to 0xFF for NAN and Infinity.
12419 * [50:0] fraction field is zero for Infinity and non-zero for NAN
12421 * Returned result is a U32 value of 0xFFFFFFFF for NaN and 0 otherwise.
12423 assign( frac_mask, unop( Iop_Not32,
12424 binop( Iop_Or32,
12425 mkU32( 0x80000000ULL ), Hi32Mask) ) );
12427 assign( exp_zero,
12428 unop( Iop_1Sto8,
12429 binop( Iop_CmpEQ32,
12430 binop( Iop_And32,
12431 unop( Iop_64HIto32,
12432 unop( Iop_ReinterpF64asI64,
12433 value ) ),
12434 Hi32Mask ),
12435 Hi32Mask ) ) );
12436 assign( frac_not_zero,
12437 binop( Iop_Or8,
12438 unop( Iop_1Sto8,
12439 binop( Iop_CmpNE32,
12440 binop( Iop_And32,
12441 unop( Iop_64HIto32,
12442 unop( Iop_ReinterpF64asI64,
12443 value ) ),
12444 mkexpr( frac_mask ) ),
12445 mkU32( 0x0 ) ) ),
12446 unop( Iop_1Sto8,
12447 binop( Iop_CmpNE32,
12448 binop( Iop_And32,
12449 unop( Iop_64to32,
12450 unop( Iop_ReinterpF64asI64,
12451 value ) ),
12452 mkU32( 0xFFFFFFFF ) ),
12453 mkU32( 0x0 ) ) ) ) );
12454 return unop( Iop_8Sto32,
12455 binop( Iop_And8,
12456 mkexpr( exp_zero ),
12457 mkexpr( frac_not_zero ) ) );
12460 static IRExpr * Complement_non_NaN( IRExpr * value, IRExpr * nan_mask )
12462 /* This function will only complement the 64-bit floating point value if it
12463 * is not Nan. NaN is not a signed value. Need to do computations using
12464 * 32-bit operands to ensure it will run in 32-bit mode.
12466 return binop( Iop_32HLto64,
12467 binop( Iop_Or32,
12468 binop( Iop_And32,
12469 nan_mask,
12470 unop( Iop_64HIto32,
12471 unop( Iop_ReinterpF64asI64,
12472 value ) ) ),
12473 binop( Iop_And32,
12474 unop( Iop_Not32,
12475 nan_mask ),
12476 unop( Iop_64HIto32,
12477 unop( Iop_ReinterpF64asI64,
12478 unop( Iop_NegF64,
12479 value ) ) ) ) ),
12480 unop( Iop_64to32,
12481 unop( Iop_ReinterpF64asI64, value ) ) );
12484 /*------------------------------------------------------------*/
12485 /*--- Floating Point Instruction Translation ---*/
12486 /*------------------------------------------------------------*/
12489 Floating Point Load Instructions
12491 static Bool dis_fp_load_prefix ( UInt prefix, UInt theInstr )
12493 /* X-Form, D-Form */
12494 UChar opc1 = ifieldOPC(theInstr);
12495 UChar frT_addr = ifieldRegDS(theInstr);
12496 UChar rA_addr = ifieldRegA(theInstr);
12498 IRType ty = mode64 ? Ity_I64 : Ity_I32;
12499 IRTemp EA = newTemp(ty);
12500 IRTemp rA = newTemp(ty);
12501 UInt ptype = PrefixType(prefix);
12502 Bool is_prefix = prefix_instruction( prefix );
12503 UInt R = 0; // must be zero for word instruction
12504 ULong immediate_val = 0;
12506 assign( rA, getIReg(rA_addr) );
12507 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
12508 ptype, DFORM_IMMASK,
12509 &immediate_val, &R ) );
12511 switch (opc1) {
12512 case 0x30: // lfs (Load Float Single, PPC32 p441)
12513 pDIP( is_prefix, "lfs fr%u,%llu(r%u)\n", frT_addr, immediate_val, rA_addr );
12514 DIPp( is_prefix, ",%u", R );
12515 putFReg( frT_addr,
12516 unop(Iop_F32toF64, load(Ity_F32, mkexpr(EA))) );
12517 break;
12519 case 0x32: // lfd (Load Float Double, PPC32 p437)
12520 pDIP( prefix, "lfd fr%u,%llu(r%u)", frT_addr, immediate_val, rA_addr );
12521 DIPp( is_prefix, ",%u", R );
12522 putFReg( frT_addr, load(Ity_F64, mkexpr(EA)) );
12523 break;
12525 default:
12526 vex_printf("dis_fp_load_prefix(ppc)(opc1)\n");
12527 return False;
12529 return True;
12532 static Bool dis_fp_load ( UInt prefix, UInt theInstr )
12534 /* X-Form, D-Form */
12535 UChar opc1 = ifieldOPC(theInstr);
12536 UChar frD_addr = ifieldRegDS(theInstr);
12537 UChar rA_addr = ifieldRegA(theInstr);
12538 UChar rB_addr = ifieldRegB(theInstr);
12539 UInt opc2 = ifieldOPClo10(theInstr);
12540 UChar b0 = ifieldBIT0(theInstr);
12541 UInt uimm16 = ifieldUIMM16(theInstr);
12543 Int simm16 = extend_s_16to32(uimm16);
12544 IRType ty = mode64 ? Ity_I64 : Ity_I32;
12545 IRTemp EA = newTemp(ty);
12546 IRTemp rA = newTemp(ty);
12547 IRTemp rB = newTemp(ty);
12548 IRTemp iHi = newTemp(Ity_I32);
12549 IRTemp iLo = newTemp(Ity_I32);
12551 /* There is no prefixed version of these instructions. */
12552 PREFIX_CHECK
12554 assign( rA, getIReg(rA_addr) );
12555 assign( rB, getIReg(rB_addr) );
12557 /* These are completely straightforward from a rounding and status
12558 bits perspective: no rounding involved and no funny status or CR
12559 bits affected. */
12561 switch (opc1) {
12562 case 0x30: // lfs (Load Float Single, PPC32 p441)
12563 DIP("lfs fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
12564 assign( EA, ea_rAor0_simm(rA_addr, simm16) );
12565 putFReg( frD_addr,
12566 unop(Iop_F32toF64, load(Ity_F32, mkexpr(EA))) );
12567 break;
12569 case 0x31: // lfsu (Load Float Single, Update, PPC32 p442)
12570 if (rA_addr == 0)
12571 return False;
12572 DIP("lfsu fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
12573 assign( EA, ea_rA_simm(rA_addr, simm16) );
12574 putFReg( frD_addr,
12575 unop(Iop_F32toF64, load(Ity_F32, mkexpr(EA))) );
12576 putIReg( rA_addr, mkexpr(EA) );
12577 break;
12579 case 0x32: // lfd (Load Float Double, PPC32 p437)
12580 DIP("lfd fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
12581 assign( EA, ea_rAor0_simm(rA_addr, simm16) );
12582 putFReg( frD_addr, load(Ity_F64, mkexpr(EA)) );
12583 break;
12585 case 0x33: // lfdu (Load Float Double, Update, PPC32 p438)
12586 if (rA_addr == 0)
12587 return False;
12588 DIP("lfdu fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
12589 assign( EA, ea_rA_simm(rA_addr, simm16) );
12590 putFReg( frD_addr, load(Ity_F64, mkexpr(EA)) );
12591 putIReg( rA_addr, mkexpr(EA) );
12592 break;
12594 case 0x1F:
12595 if (b0 != 0) {
12596 vex_printf("dis_fp_load(ppc)(instr,b0)\n");
12597 return False;
12600 switch(opc2) {
12601 case 0x217: // lfsx (Load Float Single Indexed, PPC32 p444)
12602 DIP("lfsx fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
12603 assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
12604 putFReg( frD_addr, unop( Iop_F32toF64,
12605 load(Ity_F32, mkexpr(EA))) );
12606 break;
12608 case 0x237: // lfsux (Load Float Single, Update Indxd, PPC32 p443)
12609 if (rA_addr == 0)
12610 return False;
12611 DIP("lfsux fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
12612 assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
12613 putFReg( frD_addr,
12614 unop(Iop_F32toF64, load(Ity_F32, mkexpr(EA))) );
12615 putIReg( rA_addr, mkexpr(EA) );
12616 break;
12618 case 0x257: // lfdx (Load Float Double Indexed, PPC32 p440)
12619 DIP("lfdx fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
12620 assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
12621 putFReg( frD_addr, load(Ity_F64, mkexpr(EA)) );
12622 break;
12624 case 0x277: // lfdux (Load Float Double, Update Indxd, PPC32 p439)
12625 if (rA_addr == 0)
12626 return False;
12627 DIP("lfdux fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
12628 assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
12629 putFReg( frD_addr, load(Ity_F64, mkexpr(EA)) );
12630 putIReg( rA_addr, mkexpr(EA) );
12631 break;
12633 case 0x357: // lfiwax (Load Float As Integer, Indxd, ISA 2.05 p120)
12634 DIP("lfiwax fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
12635 assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
12636 assign( iLo, load(Ity_I32, mkexpr(EA)) );
12637 assign( iHi, binop(Iop_Sub32,
12638 mkU32(0),
12639 binop(Iop_Shr32, mkexpr(iLo), mkU8(31))) );
12640 putFReg( frD_addr, unop(Iop_ReinterpI64asF64,
12641 binop(Iop_32HLto64, mkexpr(iHi), mkexpr(iLo))) );
12642 break;
12644 case 0x377: // lfiwzx (Load floating-point as integer word, zero indexed
12646 IRTemp dw = newTemp( Ity_I64 );
12647 DIP("lfiwzx fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
12648 assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
12649 assign( iLo, load(Ity_I32, mkexpr(EA)) );
12650 assign( dw, binop( Iop_32HLto64, mkU32( 0 ), mkexpr( iLo ) ) );
12651 putFReg( frD_addr, unop( Iop_ReinterpI64asF64, mkexpr( dw ) ) );
12652 break;
12655 default:
12656 vex_printf("dis_fp_load(ppc)(opc2)\n");
12657 return False;
12659 break;
12661 default:
12662 vex_printf("dis_fp_load(ppc)(opc1)\n");
12663 return False;
12665 return True;
12671 Floating Point Store Instructions
12673 static Bool dis_fp_store_prefix ( UInt prefix, UInt theInstr )
12675 /* X-Form, D-Form */
12676 UChar opc1 = ifieldOPC(theInstr);
12677 UChar frS_addr = ifieldRegDS(theInstr);
12678 UChar rA_addr = ifieldRegA(theInstr);
12680 IRType ty = mode64 ? Ity_I64 : Ity_I32;
12681 IRTemp frS = newTemp(Ity_F64);
12682 IRTemp EA = newTemp(ty);
12683 IRTemp rA = newTemp(ty);
12684 UInt ptype = PrefixType(prefix);
12685 Bool is_prefix = prefix_instruction( prefix );
12686 UInt R = 0; // must be zero for word instruction
12687 ULong immediate_val = 0;
12689 assign( frS, getFReg( frS_addr ) );
12690 assign( rA, getIReg( rA_addr ) );
12691 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
12692 ptype, DFORM_IMMASK,
12693 &immediate_val, &R ) );
12695 /* These are straightforward from a status bits perspective: no
12696 funny status or CR bits affected. For single precision stores,
12697 the values are truncated and denormalised (not rounded) to turn
12698 them into single precision values. */
12700 switch (opc1) {
12702 case 0x34: // stfs (Store Float Single, PPC32 p518)
12703 pDIP( is_prefix, "stfs fr%u,%llu(r%u)\n", frS_addr, immediate_val, rA_addr );
12704 DIPp( is_prefix, ",%u", R );
12705 /* Use Iop_TruncF64asF32 to truncate and possible denormalise
12706 the value to be stored in the correct way, without any
12707 rounding. */
12708 store( mkexpr(EA), unop(Iop_TruncF64asF32, mkexpr(frS)) );
12709 break;
12711 case 0x36: // stfd (Store Float Double, PPC32 p513)
12712 pDIP( is_prefix, "stfd fr%u,%llu(r%u)", frS_addr, immediate_val, rA_addr );
12713 DIPp( is_prefix, ",%u", R );
12714 store( mkexpr(EA), mkexpr(frS) );
12715 break;
12717 default:
12718 vex_printf("dis_fp_store_prefix(ppc)(opc1)\n");
12719 return False;
12721 return True;
12724 static Bool dis_fp_store ( UInt prefix, UInt theInstr )
12726 /* X-Form, D-Form */
12727 UChar opc1 = ifieldOPC(theInstr);
12728 UChar frS_addr = ifieldRegDS(theInstr);
12729 UChar rA_addr = ifieldRegA(theInstr);
12730 UChar rB_addr = ifieldRegB(theInstr);
12731 UInt opc2 = ifieldOPClo10(theInstr);
12732 UChar b0 = ifieldBIT0(theInstr);
12733 Int uimm16 = ifieldUIMM16(theInstr);
12735 Int simm16 = extend_s_16to32(uimm16);
12736 IRTemp frS = newTemp(Ity_F64);
12737 IRType ty = mode64 ? Ity_I64 : Ity_I32;
12738 IRTemp EA = newTemp(ty);
12739 IRTemp rA = newTemp(ty);
12740 IRTemp rB = newTemp(ty);
12742 /* There is no prefixed version of these instructions. */
12743 PREFIX_CHECK
12745 assign( frS, getFReg(frS_addr) );
12746 assign( rA, getIReg(rA_addr) );
12747 assign( rB, getIReg(rB_addr) );
12749 /* These are straightforward from a status bits perspective: no
12750 funny status or CR bits affected. For single precision stores,
12751 the values are truncated and denormalised (not rounded) to turn
12752 them into single precision values. */
12754 switch (opc1) {
12755 case 0x35: // stfsu (Store Float Single, Update, PPC32 p519)
12756 if (rA_addr == 0)
12757 return False;
12758 DIP("stfsu fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr);
12759 assign( EA, ea_rA_simm(rA_addr, simm16) );
12760 /* See comment for stfs */
12761 store( mkexpr(EA), unop(Iop_TruncF64asF32, mkexpr(frS)) );
12762 putIReg( rA_addr, mkexpr(EA) );
12763 break;
12765 case 0x37: // stfdu (Store Float Double, Update, PPC32 p514)
12766 if (rA_addr == 0)
12767 return False;
12768 DIP("stfdu fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr);
12769 assign( EA, ea_rA_simm(rA_addr, simm16) );
12770 store( mkexpr(EA), mkexpr(frS) );
12771 putIReg( rA_addr, mkexpr(EA) );
12772 break;
12774 case 0x1F:
12775 if (b0 != 0) {
12776 vex_printf("dis_fp_store(ppc)(instr,b0)\n");
12777 return False;
12779 switch(opc2) {
12780 case 0x297: // stfsx (Store Float Single Indexed, PPC32 p521)
12781 DIP("stfsx fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
12782 assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
12783 /* See note for stfs */
12784 store( mkexpr(EA),
12785 unop(Iop_TruncF64asF32, mkexpr(frS)) );
12786 break;
12788 case 0x2B7: // stfsux (Store Float Sgl, Update Indxd, PPC32 p520)
12789 if (rA_addr == 0)
12790 return False;
12791 DIP("stfsux fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
12792 assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
12793 /* See note for stfs */
12794 store( mkexpr(EA), unop(Iop_TruncF64asF32, mkexpr(frS)) );
12795 putIReg( rA_addr, mkexpr(EA) );
12796 break;
12798 case 0x2D7: // stfdx (Store Float Double Indexed, PPC32 p516)
12799 DIP("stfdx fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
12800 assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
12801 store( mkexpr(EA), mkexpr(frS) );
12802 break;
12804 case 0x2F7: // stfdux (Store Float Dbl, Update Indxd, PPC32 p515)
12805 if (rA_addr == 0)
12806 return False;
12807 DIP("stfdux fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
12808 assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
12809 store( mkexpr(EA), mkexpr(frS) );
12810 putIReg( rA_addr, mkexpr(EA) );
12811 break;
12813 case 0x3D7: // stfiwx (Store Float as Int, Indexed, PPC32 p517)
12814 // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
12815 DIP("stfiwx fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
12816 assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
12817 store( mkexpr(EA),
12818 unop(Iop_64to32, unop(Iop_ReinterpF64asI64, mkexpr(frS))) );
12819 break;
12821 default:
12822 vex_printf("dis_fp_store(ppc)(opc2)\n");
12823 return False;
12825 break;
12827 default:
12828 vex_printf("dis_fp_store(ppc)(opc1)\n");
12829 return False;
12831 return True;
12837 Floating Point Arith Instructions
12839 static Bool dis_fp_arith ( UInt prefix, UInt theInstr )
12841 /* A-Form */
12842 UChar opc1 = ifieldOPC(theInstr);
12843 UChar frD_addr = ifieldRegDS(theInstr);
12844 UChar frA_addr = ifieldRegA(theInstr);
12845 UChar frB_addr = ifieldRegB(theInstr);
12846 UChar frC_addr = ifieldRegC(theInstr);
12847 UChar opc2 = ifieldOPClo5(theInstr);
12848 UChar flag_rC = ifieldBIT0(theInstr);
12850 IRTemp frD = newTemp(Ity_F64);
12851 IRTemp frA = newTemp(Ity_F64);
12852 IRTemp frB = newTemp(Ity_F64);
12853 IRTemp frC = newTemp(Ity_F64);
12854 IRExpr* rm = get_IR_roundingmode();
12856 /* By default, we will examine the results of the operation and set
12857 fpscr[FPRF] accordingly. */
12858 Bool set_FPRF = True;
12860 /* By default, if flag_RC is set, we will clear cr1 after the
12861 operation. In reality we should set cr1 to indicate the
12862 exception status of the operation, but since we're not
12863 simulating exceptions, the exception status will appear to be
12864 zero. Hence cr1 should be cleared if this is a . form insn. */
12865 Bool clear_CR1 = True;
12867 /* There is no prefixed version of these instructions. */
12868 PREFIX_CHECK
12870 assign( frA, getFReg(frA_addr));
12871 assign( frB, getFReg(frB_addr));
12872 assign( frC, getFReg(frC_addr));
12874 switch (opc1) {
12875 case 0x3B:
12876 switch (opc2) {
12877 case 0x12: // fdivs (Floating Divide Single, PPC32 p407)
12878 if (frC_addr != 0)
12879 return False;
12880 DIP("fdivs%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
12881 frD_addr, frA_addr, frB_addr);
12882 assign( frD, triop( Iop_DivF64r32,
12883 rm, mkexpr(frA), mkexpr(frB) ));
12884 break;
12886 case 0x14: // fsubs (Floating Subtract Single, PPC32 p430)
12887 if (frC_addr != 0)
12888 return False;
12889 DIP("fsubs%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
12890 frD_addr, frA_addr, frB_addr);
12891 assign( frD, triop( Iop_SubF64r32,
12892 rm, mkexpr(frA), mkexpr(frB) ));
12893 break;
12895 case 0x15: // fadds (Floating Add Single, PPC32 p401)
12896 if (frC_addr != 0)
12897 return False;
12898 DIP("fadds%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
12899 frD_addr, frA_addr, frB_addr);
12900 assign( frD, triop( Iop_AddF64r32,
12901 rm, mkexpr(frA), mkexpr(frB) ));
12902 break;
12904 case 0x16: // fsqrts (Floating SqRt (Single-Precision), PPC32 p428)
12905 // NOTE: POWERPC OPTIONAL, "General-Purpose Group" (PPC32_FX)
12906 if (frA_addr != 0 || frC_addr != 0)
12907 return False;
12908 DIP("fsqrts%s fr%u,fr%u\n", flag_rC ? ".":"",
12909 frD_addr, frB_addr);
12910 // however illogically, on ppc970 this insn behaves identically
12911 // to fsqrt (double-precision). So use SqrtF64, not SqrtF64r32.
12912 assign( frD, binop( Iop_SqrtF64, rm, mkexpr(frB) ));
12913 break;
12915 case 0x18: // fres (Floating Reciprocal Estimate Single, PPC32 p421)
12916 // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
12917 if (frA_addr != 0 || frC_addr != 0)
12918 return False;
12919 DIP("fres%s fr%u,fr%u\n", flag_rC ? ".":"",
12920 frD_addr, frB_addr);
12921 { IRExpr* ieee_one
12922 = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
12923 assign( frD, triop( Iop_DivF64r32,
12925 ieee_one, mkexpr(frB) ));
12927 break;
12929 case 0x19: // fmuls (Floating Multiply Single, PPC32 p414)
12930 if (frB_addr != 0)
12931 return False;
12932 DIP("fmuls%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
12933 frD_addr, frA_addr, frC_addr);
12934 assign( frD, triop( Iop_MulF64r32,
12935 rm, mkexpr(frA), mkexpr(frC) ));
12936 break;
12938 case 0x1A: // frsqrtes (Floating Recip SqRt Est Single)
12939 // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
12940 // Undocumented instruction?
12941 if (frA_addr != 0 || frC_addr != 0)
12942 return False;
12943 DIP("frsqrtes%s fr%u,fr%u\n", flag_rC ? ".":"",
12944 frD_addr, frB_addr);
12945 assign( frD, unop(Iop_RSqrtEst5GoodF64, mkexpr(frB)) );
12946 break;
12948 default:
12949 vex_printf("dis_fp_arith(ppc)(3B: opc2)\n");
12950 return False;
12952 break;
12954 case 0x3F:
12955 switch (opc2) {
12956 case 0x12: // fdiv (Floating Div (Double-Precision), PPC32 p406)
12957 if (frC_addr != 0)
12958 return False;
12959 DIP("fdiv%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
12960 frD_addr, frA_addr, frB_addr);
12961 assign( frD, triop(Iop_DivF64, rm, mkexpr(frA), mkexpr(frB)) );
12962 break;
12964 case 0x14: // fsub (Floating Sub (Double-Precision), PPC32 p429)
12965 if (frC_addr != 0)
12966 return False;
12967 DIP("fsub%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
12968 frD_addr, frA_addr, frB_addr);
12969 assign( frD, triop(Iop_SubF64, rm, mkexpr(frA), mkexpr(frB)) );
12970 break;
12972 case 0x15: // fadd (Floating Add (Double-Precision), PPC32 p400)
12973 if (frC_addr != 0)
12974 return False;
12975 DIP("fadd%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
12976 frD_addr, frA_addr, frB_addr);
12977 assign( frD, triop(Iop_AddF64, rm, mkexpr(frA), mkexpr(frB)) );
12978 break;
12980 case 0x16: // fsqrt (Floating SqRt (Double-Precision), PPC32 p427)
12981 // NOTE: POWERPC OPTIONAL, "General-Purpose Group" (PPC32_FX)
12982 if (frA_addr != 0 || frC_addr != 0)
12983 return False;
12984 DIP("fsqrt%s fr%u,fr%u\n", flag_rC ? ".":"",
12985 frD_addr, frB_addr);
12986 assign( frD, binop(Iop_SqrtF64, rm, mkexpr(frB)) );
12987 break;
12989 case 0x17: { // fsel (Floating Select, PPC32 p426)
12990 // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
12991 IRTemp cc = newTemp(Ity_I32);
12992 IRTemp cc_b0 = newTemp(Ity_I32);
12994 DIP("fsel%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
12995 frD_addr, frA_addr, frC_addr, frB_addr);
12997 // cc: UN == 0x41, LT == 0x01, GT == 0x00, EQ == 0x40
12998 // => GT|EQ == (cc & 0x1 == 0)
12999 assign( cc, binop(Iop_CmpF64, mkexpr(frA),
13000 IRExpr_Const(IRConst_F64(0))) );
13001 assign( cc_b0, binop(Iop_And32, mkexpr(cc), mkU32(1)) );
13003 // frD = (frA >= 0.0) ? frC : frB
13004 // = (cc_b0 == 0) ? frC : frB
13005 assign( frD,
13006 IRExpr_ITE(
13007 binop(Iop_CmpEQ32, mkexpr(cc_b0), mkU32(0)),
13008 mkexpr(frC),
13009 mkexpr(frB) ));
13011 /* One of the rare ones which don't mess with FPRF */
13012 set_FPRF = False;
13013 break;
13016 case 0x18: // fre (Floating Reciprocal Estimate)
13017 // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
13018 // Note: unclear whether this insn really exists or not
13019 // ppc970 doesn't have it, but POWER5 does
13020 if (frA_addr != 0 || frC_addr != 0)
13021 return False;
13022 DIP("fre%s fr%u,fr%u\n", flag_rC ? ".":"",
13023 frD_addr, frB_addr);
13024 { IRExpr* ieee_one
13025 = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
13026 assign( frD, triop( Iop_DivF64,
13028 ieee_one, mkexpr(frB) ));
13030 break;
13032 case 0x19: // fmul (Floating Mult (Double Precision), PPC32 p413)
13033 if (frB_addr != 0)
13034 vex_printf("dis_fp_arith(ppc)(instr,fmul)\n");
13035 DIP("fmul%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
13036 frD_addr, frA_addr, frC_addr);
13037 assign( frD, triop(Iop_MulF64, rm, mkexpr(frA), mkexpr(frC)) );
13038 break;
13040 case 0x1A: // frsqrte (Floating Recip SqRt Est., PPC32 p424)
13041 // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
13042 if (frA_addr != 0 || frC_addr != 0)
13043 return False;
13044 DIP("frsqrte%s fr%u,fr%u\n", flag_rC ? ".":"",
13045 frD_addr, frB_addr);
13046 assign( frD, unop(Iop_RSqrtEst5GoodF64, mkexpr(frB)) );
13047 break;
13049 default:
13050 vex_printf("dis_fp_arith(ppc)(3F: opc2)\n");
13051 return False;
13053 break;
13055 default:
13056 vex_printf("dis_fp_arith(ppc)(opc1)\n");
13057 return False;
13060 putFReg( frD_addr, mkexpr(frD) );
13062 if (set_FPRF) {
13063 // XXX XXX XXX FIXME
13064 // set FPRF from frD
13067 if (flag_rC && clear_CR1) {
13068 putCR321( 1, mkU8(0) );
13069 putCR0( 1, mkU8(0) );
13072 return True;
13078 Floating Point Mult-Add Instructions
13080 static Bool dis_fp_multadd ( UInt prefix, UInt theInstr )
13082 /* A-Form */
13083 UChar opc1 = ifieldOPC(theInstr);
13084 UChar frD_addr = ifieldRegDS(theInstr);
13085 UChar frA_addr = ifieldRegA(theInstr);
13086 UChar frB_addr = ifieldRegB(theInstr);
13087 UChar frC_addr = ifieldRegC(theInstr);
13088 UChar opc2 = ifieldOPClo5(theInstr);
13089 UChar flag_rC = ifieldBIT0(theInstr);
13091 IRTemp frD = newTemp(Ity_F64);
13092 IRTemp frA = newTemp(Ity_F64);
13093 IRTemp frB = newTemp(Ity_F64);
13094 IRTemp frC = newTemp(Ity_F64);
13095 IRTemp rmt = newTemp(Ity_I32);
13096 IRTemp tmp = newTemp(Ity_F64);
13097 IRTemp sign_tmp = newTemp(Ity_I64);
13098 IRTemp nan_mask = newTemp(Ity_I32);
13099 IRExpr* rm;
13101 /* By default, we will examine the results of the operation and set
13102 fpscr[FPRF] accordingly. */
13103 Bool set_FPRF = True;
13105 /* By default, if flag_RC is set, we will clear cr1 after the
13106 operation. In reality we should set cr1 to indicate the
13107 exception status of the operation, but since we're not
13108 simulating exceptions, the exception status will appear to be
13109 zero. Hence cr1 should be cleared if this is a . form insn. */
13110 Bool clear_CR1 = True;
13112 /* There is no prefixed version of these instructions. */
13113 PREFIX_CHECK
13115 /* Bind the rounding mode expression to a temp; there's no
13116 point in creating gratuitous CSEs, as we know we'll need
13117 to use it twice. */
13118 assign( rmt, get_IR_roundingmode() );
13119 rm = mkexpr(rmt);
13121 assign( frA, getFReg(frA_addr));
13122 assign( frB, getFReg(frB_addr));
13123 assign( frC, getFReg(frC_addr));
13125 /* The rounding in this is all a bit dodgy. The idea is to only do
13126 one rounding. That clearly isn't achieveable without dedicated
13127 four-input IR primops, although in the single precision case we
13128 can sort-of simulate it by doing the inner multiply in double
13129 precision.
13131 In the negated cases, the negation happens after rounding. */
13133 switch (opc1) {
13134 case 0x3B:
13135 switch (opc2) {
13136 case 0x1C: // fmsubs (Floating Mult-Subtr Single, PPC32 p412)
13137 DIP("fmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
13138 frD_addr, frA_addr, frC_addr, frB_addr);
13139 assign( frD, qop( Iop_MSubF64r32, rm,
13140 mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
13141 break;
13143 case 0x1D: // fmadds (Floating Mult-Add Single, PPC32 p409)
13144 DIP("fmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
13145 frD_addr, frA_addr, frC_addr, frB_addr);
13146 assign( frD, qop( Iop_MAddF64r32, rm,
13147 mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
13148 break;
13150 case 0x1E: // fnmsubs (Float Neg Mult-Subtr Single, PPC32 p420)
13151 case 0x1F: // fnmadds (Floating Negative Multiply-Add Single, PPC32 p418)
13153 if (opc2 == 0x1E) {
13154 DIP("fnmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
13155 frD_addr, frA_addr, frC_addr, frB_addr);
13156 assign( tmp, qop( Iop_MSubF64r32, rm,
13157 mkexpr(frA), mkexpr(frC), mkexpr(frB) ) );
13158 } else {
13159 DIP("fnmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
13160 frD_addr, frA_addr, frC_addr, frB_addr);
13161 assign( tmp, qop( Iop_MAddF64r32, rm,
13162 mkexpr(frA), mkexpr(frC), mkexpr(frB) ) );
13165 assign( nan_mask, Check_NaN( mkexpr( tmp ),
13166 mkU32( NANmaskSingle ) ) );
13167 assign( sign_tmp, Complement_non_NaN( mkexpr( tmp ),
13168 mkexpr( nan_mask ) ) );
13169 assign( frD, unop( Iop_ReinterpI64asF64, mkexpr( sign_tmp ) ) );
13170 break;
13172 default:
13173 vex_printf("dis_fp_multadd(ppc)(3B: opc2)\n");
13174 return False;
13176 break;
13178 case 0x3F:
13179 switch (opc2) {
13180 case 0x1C: // fmsub (Float Mult-Sub (Dbl Precision), PPC32 p411)
13181 DIP("fmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
13182 frD_addr, frA_addr, frC_addr, frB_addr);
13183 assign( frD, qop( Iop_MSubF64, rm,
13184 mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
13185 break;
13187 case 0x1D: // fmadd (Float Mult-Add (Dbl Precision), PPC32 p408)
13188 DIP("fmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
13189 frD_addr, frA_addr, frC_addr, frB_addr);
13190 assign( frD, qop( Iop_MAddF64, rm,
13191 mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
13192 break;
13194 case 0x1E: // fnmsub (Float Neg Mult-Subtr (Dbl Precision), PPC32 p419)
13195 case 0x1F: // fnmadd (Float Neg Mult-Add (Dbl Precision), PPC32 p417)
13197 if (opc2 == 0x1E) {
13198 DIP("fnmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
13199 frD_addr, frA_addr, frC_addr, frB_addr);
13200 assign( tmp, qop( Iop_MSubF64, rm,
13201 mkexpr(frA), mkexpr(frC), mkexpr(frB) ) );
13202 } else {
13203 DIP("fnmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
13204 frD_addr, frA_addr, frC_addr, frB_addr);
13205 assign( tmp, qop( Iop_MAddF64, rm,
13206 mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
13209 assign( nan_mask, Check_NaN( mkexpr( tmp ),
13210 mkU32( NANmaskDouble ) ) );
13211 assign( sign_tmp, Complement_non_NaN( mkexpr( tmp ),
13212 mkexpr( nan_mask ) ) );
13213 assign( frD, unop( Iop_ReinterpI64asF64, mkexpr( sign_tmp ) ) );
13214 break;
13216 default:
13217 vex_printf("dis_fp_multadd(ppc)(3F: opc2)\n");
13218 return False;
13220 break;
13222 default:
13223 vex_printf("dis_fp_multadd(ppc)(opc1)\n");
13224 return False;
13227 putFReg( frD_addr, mkexpr(frD) );
13229 if (set_FPRF) {
13230 // XXX XXX XXX FIXME
13231 // set FPRF from frD
13234 if (flag_rC && clear_CR1) {
13235 putCR321( 1, mkU8(0) );
13236 putCR0( 1, mkU8(0) );
13239 return True;
13243 * fe_flag is set to 1 if any of the following conditions occurs:
13244 * - The floating-point operand in register FRB is a Zero, a
13245 * NaN, an Infinity, or a negative value.
13246 * - e_b is less than or equal to: -970 for double precision; -103 for single precision
13247 * Otherwise fe_flag is set to 0.
13249 * fg_flag is set to 1 if either of the following conditions occurs.
13250 * - The floating-point operand in register FRB is a Zero, an
13251 * Infinity, or a denormalized value.
13252 * Otherwise fg_flag is set to 0.
13256 static void do_fp_tsqrt(IRTemp frB_Int, Bool sp, IRTemp * fe_flag_tmp, IRTemp * fg_flag_tmp)
13258 // The following temps are for holding intermediate results
13259 IRTemp e_b = newTemp(Ity_I32);
13260 IRExpr * fe_flag, * fg_flag;
13261 IRTemp frB_exp_shR = newTemp(Ity_I32);
13262 UInt bias = sp? 127 : 1023;
13263 IRExpr * frbNaN, * frbDenorm, * frBNeg;
13264 IRExpr * eb_LTE;
13265 IRTemp frbZero_tmp = newTemp(Ity_I1);
13266 IRTemp frbInf_tmp = newTemp(Ity_I1);
13267 *fe_flag_tmp = newTemp(Ity_I32);
13268 *fg_flag_tmp = newTemp(Ity_I32);
13270 if ( sp )
13271 assign( frB_exp_shR, fp_exp_part( Ity_I32, frB_Int ) );
13272 else
13273 assign( frB_exp_shR, fp_exp_part( Ity_I64, frB_Int ) );
13275 assign(e_b, binop( Iop_Sub32, mkexpr(frB_exp_shR), mkU32( bias ) ));
13277 ////////////////// fe_flag tests BEGIN //////////////////////
13278 /* We first do all tests that may result in setting fe_flag to '1'.
13279 * (NOTE: These tests are similar to those used for ftdiv. See do_fp_tdiv()
13280 * for details.)
13282 if ( sp ) {
13283 frbNaN = is_NaN( Ity_I32, frB_Int );
13284 assign( frbInf_tmp, is_Inf( Ity_I32, frB_Int ) );
13285 assign( frbZero_tmp, is_Zero( Ity_I32, frB_Int ) );
13287 } else {
13288 frbNaN = is_NaN( Ity_I64, frB_Int );
13289 assign( frbInf_tmp, is_Inf( Ity_I64, frB_Int ) );
13290 assign( frbZero_tmp, is_Zero( Ity_I64, frB_Int ) );
13294 // Test_value = -970 for double precision
13295 UInt test_value = sp ? 0xffffff99 : 0xfffffc36;
13296 eb_LTE = binop( Iop_CmpLE32S, mkexpr( e_b ), mkU32( test_value ) );
13298 frBNeg = binop( Iop_CmpEQ32,
13299 binop( Iop_Shr32,
13300 sp ? mkexpr( frB_Int ) : unop( Iop_64HIto32, mkexpr( frB_Int ) ),
13301 mkU8( 31 ) ),
13302 mkU32( 1 ) );
13303 ////////////////// fe_flag tests END //////////////////////
13305 ////////////////// fg_flag tests BEGIN //////////////////////
13307 * The following tests were already performed above in the fe_flag
13308 * tests. So these conditions will result in both fe_ and fg_ flags
13309 * being set.
13310 * - Test if FRB is Zero
13311 * - Test if FRB is an Infinity
13315 * Test if FRB holds a denormalized value. A denormalized value is one where
13316 * the exp is 0 and the fraction is non-zero.
13318 if (sp) {
13319 IRTemp frac_part = newTemp(Ity_I32);
13320 assign( frac_part, binop( Iop_And32, mkexpr(frB_Int), mkU32(0x007fffff)) );
13321 frbDenorm
13322 = mkAND1( binop( Iop_CmpEQ32, mkexpr( frB_exp_shR ), mkU32( 0 ) ),
13323 binop( Iop_CmpNE32, mkexpr( frac_part ), mkU32( 0 ) ) );
13324 } else {
13325 IRExpr * hi32, * low32, * fraction_is_nonzero;
13326 IRTemp frac_part = newTemp(Ity_I64);
13328 assign( frac_part, FP_FRAC_PART(frB_Int) );
13329 hi32 = unop( Iop_64HIto32, mkexpr( frac_part ) );
13330 low32 = unop( Iop_64to32, mkexpr( frac_part ) );
13331 fraction_is_nonzero = binop( Iop_CmpNE32, binop( Iop_Or32, low32, hi32 ),
13332 mkU32( 0 ) );
13333 frbDenorm
13334 = mkAND1( binop( Iop_CmpEQ32, mkexpr( frB_exp_shR ), mkU32( 0 ) ),
13335 fraction_is_nonzero );
13337 ////////////////// fg_flag tests END //////////////////////
13339 /////////////////////////
13340 fe_flag = mkOR1( mkexpr( frbZero_tmp ),
13341 mkOR1( frbNaN,
13342 mkOR1( mkexpr( frbInf_tmp ),
13343 mkOR1( frBNeg, eb_LTE ) ) ) );
13345 fe_flag = unop(Iop_1Uto32, fe_flag);
13347 fg_flag = mkOR1( mkexpr( frbZero_tmp ),
13348 mkOR1( mkexpr( frbInf_tmp ), frbDenorm ) );
13349 fg_flag = unop(Iop_1Uto32, fg_flag);
13350 assign (*fg_flag_tmp, fg_flag);
13351 assign (*fe_flag_tmp, fe_flag);
13354 * fe_flag is set to 1 if any of the following conditions occurs:
13355 * - The double-precision floating-point operand in register FRA is a NaN or an
13356 * Infinity.
13357 * - The double-precision floating-point operand in register FRB is a Zero, a
13358 * NaN, or an Infinity.
13359 * - e_b is less than or equal to -1022.
13360 * - e_b is greater than or equal to 1021.
13361 * - The double-precision floating-point operand in register FRA is not a zero
13362 * and the difference, e_a - e_b, is greater than or equal to 1023.
13363 * - The double-precision floating-point operand in register FRA is not a zero
13364 * and the difference, e_a - e_b, is less than or equal to -1021.
13365 * - The double-precision floating-point operand in register FRA is not a zero
13366 * and e_a is less than or equal to -970
13367 * Otherwise fe_flag is set to 0.
13369 * fg_flag is set to 1 if either of the following conditions occurs.
13370 * - The double-precision floating-point operand in register FRA is an Infinity.
13371 * - The double-precision floating-point operand in register FRB is a Zero, an
13372 * Infinity, or a denormalized value.
13373 * Otherwise fg_flag is set to 0.
/* Compute the fe_flag and fg_flag bits for the tdiv family of instructions.
 * The block comment immediately above lists the precise conditions under
 * which each flag is set.
 *
 *   frA_int, frB_int : raw bit patterns of the two FP operands
 *                      (Ity_I32 if sp is True, else Ity_I64)
 *   sp               : True = single precision, False = double precision
 *   fe_flag_tmp, fg_flag_tmp : out-params; allocated here as Ity_I32 temps
 *                      holding 0 or 1
 */
static void _do_fp_tdiv(IRTemp frA_int, IRTemp frB_int, Bool sp, IRTemp * fe_flag_tmp, IRTemp * fg_flag_tmp)
{
   // The following temps are for holding intermediate results
   IRTemp e_a = newTemp(Ity_I32);
   IRTemp e_b = newTemp(Ity_I32);
   IRTemp frA_exp_shR = newTemp(Ity_I32);
   IRTemp frB_exp_shR = newTemp(Ity_I32);

   UInt bias = sp? 127 : 1023;   // exponent bias: 127 (SP) / 1023 (DP)
   *fe_flag_tmp = newTemp(Ity_I32);
   *fg_flag_tmp = newTemp(Ity_I32);

   /* The following variables hold boolean results from tests
    * that are OR'ed together for setting the fe_ and fg_ flags.
    * For some cases, the booleans are used more than once, so
    * I make those IRTemp's instead of IRExpr's.
    */
   IRExpr * fraNaN, * frbNaN, * frbDenorm;
   IRExpr * eb_LTE, * eb_GTE, * ea_eb_GTE, * ea_eb_LTE, * ea_LTE;
   IRTemp  fraInf_tmp = newTemp(Ity_I1);
   IRTemp frbZero_tmp = newTemp(Ity_I1);
   IRTemp  frbInf_tmp = newTemp(Ity_I1);
   IRTemp fraNotZero_tmp = newTemp(Ity_I1);

   /* The following are the flags that are set by OR'ing the results of
    * all the tests done for tdiv.  These flags are the input to the specified CR.
    */
   IRExpr * fe_flag, * fg_flag;

   // Create temps that will be used throughout the following tests.
   if ( sp ) {
      assign( frA_exp_shR, fp_exp_part( Ity_I32, frA_int ) );
      assign( frB_exp_shR, fp_exp_part( Ity_I32, frB_int ) );
   } else{
      assign( frA_exp_shR, fp_exp_part( Ity_I64, frA_int ) );
      assign( frB_exp_shR, fp_exp_part( Ity_I64, frB_int ) );
   }

   /* Let e_[a|b] be the unbiased exponent: i.e. exp - 1023. */
   assign(e_a, binop( Iop_Sub32, mkexpr(frA_exp_shR), mkU32( bias ) ));
   assign(e_b, binop( Iop_Sub32, mkexpr(frB_exp_shR), mkU32( bias ) ));

   ////////////////// fe_flag tests BEGIN //////////////////////
   /* We first do all tests that may result in setting fe_flag to '1'. */

   /*
    * Test if the double-precision floating-point operand in register FRA is
    * a NaN:
    */
   fraNaN = sp ? is_NaN( Ity_I32, frA_int ) : is_NaN( Ity_I64, frA_int );

   /*
    * Test if the double-precision floating-point operands in register FRA
    * and FRB is an Infinity.  Test if FRB is zero.
    */
   if ( sp ) {
      assign(fraInf_tmp, is_Inf( Ity_I32, frA_int ) );
      assign( frbInf_tmp, is_Inf( Ity_I32, frB_int ) );
      assign( frbZero_tmp, is_Zero( Ity_I32, frB_int ) );

   } else {
      assign(fraInf_tmp, is_Inf( Ity_I64, frA_int ) );
      assign( frbInf_tmp, is_Inf( Ity_I64, frB_int ) );
      assign( frbZero_tmp, is_Zero( Ity_I64, frB_int ) );
   }

   /*
    * Test if the double-precision floating-point operand in register FRB is
    * a NaN:
    */
   frbNaN = sp ? is_NaN( Ity_I32, frB_int ) : is_NaN( Ity_I64, frB_int );

   /*
    * Test if e_b <= -1022 for double precision;
    * or e_b <= -126 for single precision
    */
   {
      /* test_value is the two's-complement encoding of -126 (SP) / -1022 (DP) */
      UInt test_value = sp ? 0xffffff82 : 0xfffffc02;
      eb_LTE = binop(Iop_CmpLE32S, mkexpr(e_b), mkU32(test_value));
   }

   /*
    * Test if e_b >= 1021 (i.e., 1021 < e_b) for double precision;
    * or e_b >= -125 (125 < e_b) for single precision
    */
   {
      Int test_value = sp ? 125 : 1021;
      eb_GTE = binop(Iop_CmpLT32S, mkU32(test_value), mkexpr(e_b));
   }

   /*
    * Test if FRA != Zero and (e_a - e_b) >= bias
    */
   if ( sp )
      assign( fraNotZero_tmp, unop( Iop_Not1, is_Zero( Ity_I32, frA_int ) ) );
   else
      assign( fraNotZero_tmp, unop( Iop_Not1, is_Zero( Ity_I64, frA_int ) ) );

   ea_eb_GTE = mkAND1( mkexpr( fraNotZero_tmp ),
                       binop( Iop_CmpLT32S, mkU32( bias ),
                              binop( Iop_Sub32, mkexpr( e_a ),
                                     mkexpr( e_b ) ) ) );

   /*
    * Test if FRA != Zero and (e_a - e_b) <= [-1021 (double precision) or -125 (single precision)]
    */
   {
      /* test_value is the two's-complement encoding of -125 (SP) / -1021 (DP) */
      UInt test_value = sp ? 0xffffff83 : 0xfffffc03;

      ea_eb_LTE = mkAND1( mkexpr( fraNotZero_tmp ),
                          binop( Iop_CmpLE32S,
                                 binop( Iop_Sub32,
                                        mkexpr( e_a ),
                                        mkexpr( e_b ) ),
                                 mkU32( test_value ) ) );
   }

   /*
    * Test if FRA != Zero and e_a <= [-970 (double precision) or -103 (single precision)]
    */
   {
      UInt test_value = 0xfffffc36; //Int test_value = -970;

      ea_LTE = mkAND1( mkexpr( fraNotZero_tmp ), binop( Iop_CmpLE32S,
                                                        mkexpr( e_a ),
                                                        mkU32( test_value ) ) );
   }

   ////////////////// fe_flag tests END //////////////////////

   ////////////////// fg_flag tests BEGIN //////////////////////
   /*
    * The following tests were already performed above in the fe_flag
    * tests.  So these conditions will result in both fe_ and fg_ flags
    * being set.
    *   - Test if FRA is an Infinity
    *   - Test if FRB is Zero
    *   - Test if FRB is an Infinity
    */

   /*
    * Test if FRB holds a denormalized value.  A denormalized value is one where
    * the exp is 0 and the fraction is non-zero.
    */
   {
      IRExpr * fraction_is_nonzero;

      if (sp) {
         fraction_is_nonzero = binop( Iop_CmpNE32, FP_FRAC_PART32(frB_int),
                                      mkU32( 0 ) );
      } else {
         IRExpr * hi32, * low32;
         IRTemp frac_part = newTemp(Ity_I64);
         assign( frac_part, FP_FRAC_PART(frB_int) );

         /* Compare the 64-bit fraction as two 32-bit halves OR'ed together. */
         hi32 = unop( Iop_64HIto32, mkexpr( frac_part ) );
         low32 = unop( Iop_64to32, mkexpr( frac_part ) );
         fraction_is_nonzero = binop( Iop_CmpNE32, binop( Iop_Or32, low32, hi32 ),
                                      mkU32( 0 ) );
      }
      frbDenorm = mkAND1( binop( Iop_CmpEQ32, mkexpr( frB_exp_shR ),
                                 mkU32( 0x0 ) ), fraction_is_nonzero );
   }
   ////////////////// fg_flag tests END //////////////////////

   /* OR together every fe_ test result; widen the Ity_I1 result to I32. */
   fe_flag
   = mkOR1(
           fraNaN,
           mkOR1(
                 mkexpr( fraInf_tmp ),
                 mkOR1(
                       mkexpr( frbZero_tmp ),
                       mkOR1(
                             frbNaN,
                             mkOR1(
                                   mkexpr( frbInf_tmp ),
                                   mkOR1( eb_LTE,
                                          mkOR1( eb_GTE,
                                                 mkOR1( ea_eb_GTE,
                                                        mkOR1( ea_eb_LTE,
                                                               ea_LTE ) ) ) ) ) ) ) ) );

   fe_flag = unop(Iop_1Uto32, fe_flag);

   /* OR together every fg_ test result; widen the Ity_I1 result to I32. */
   fg_flag = mkOR1( mkexpr( fraInf_tmp ), mkOR1( mkexpr( frbZero_tmp ),
                                                 mkOR1( mkexpr( frbInf_tmp ),
                                                        frbDenorm ) ) );
   fg_flag = unop(Iop_1Uto32, fg_flag);
   assign(*fe_flag_tmp, fe_flag);
   assign(*fg_flag_tmp, fg_flag);
}
13567 /* See description for _do_fp_tdiv() above. */
13568 static IRExpr * do_fp_tdiv(IRTemp frA_int, IRTemp frB_int)
13570 IRTemp fe_flag, fg_flag;
13571 /////////////////////////
13572 /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
13573 * where fl_flag == 1 on ppc64.
13575 IRExpr * fl_flag = unop(Iop_Not32, mkU32(0xFFFFFE));
13576 fe_flag = fg_flag = IRTemp_INVALID;
13577 _do_fp_tdiv(frA_int, frB_int, False/*not single precision*/, &fe_flag, &fg_flag);
13578 return binop( Iop_Or32,
13579 binop( Iop_Or32,
13580 binop( Iop_Shl32, fl_flag, mkU8( 3 ) ),
13581 binop( Iop_Shl32, mkexpr(fg_flag), mkU8( 2 ) ) ),
13582 binop( Iop_Shl32, mkexpr(fe_flag), mkU8( 1 ) ) );
/* Disassemble the FP test instructions ftdiv and ftsqrt (X-form, primary
 * opcode 0x3F).  Both compute a 4-bit condition value (fl||fg||fe||0) and
 * write it to CR field crfD.  Returns False if the instruction does not
 * decode.
 */
static Bool dis_fp_tests ( UInt prefix, UInt theInstr )
{
   UChar opc1     = ifieldOPC(theInstr);
   UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
   UChar frB_addr = ifieldRegB(theInstr);
   UChar b0       = ifieldBIT0(theInstr);
   UInt  opc2     = ifieldOPClo10(theInstr);
   IRTemp frB_I64 = newTemp(Ity_I64);

   /* There is no prefixed version of these instructions.  */
   PREFIX_CHECK

   if (opc1 != 0x3F || b0 != 0 ){
      vex_printf("dis_fp_tests(ppc)(ftdiv)\n");
      return False;
   }
   /* Reinterpret FRB's bits as an integer so the flag tests can inspect
      exponent and fraction fields directly. */
   assign( frB_I64, unop( Iop_ReinterpF64asI64, getFReg( frB_addr ) ) );

   switch (opc2) {
      case 0x080: // ftdiv
      {
         UChar frA_addr = ifieldRegA(theInstr);
         IRTemp frA_I64 = newTemp(Ity_I64);
         UChar b21to22  = toUChar( IFIELD( theInstr, 21, 2 ) );
         if (b21to22 != 0 ) {
            vex_printf("dis_fp_tests(ppc)(ftdiv)\n");
            return False;
         }

         assign( frA_I64, unop( Iop_ReinterpF64asI64, getFReg( frA_addr ) ) );
         putGST_field( PPC_GST_CR, do_fp_tdiv(frA_I64, frB_I64), crfD );

         DIP("ftdiv crf%d,fr%u,fr%u\n", crfD, frA_addr, frB_addr);
         break;
      }
      case 0x0A0: // ftsqrt
      {
         IRTemp flags = newTemp(Ity_I32);
         IRTemp fe_flag, fg_flag;
         fe_flag = fg_flag = IRTemp_INVALID;
         UChar b18to22  = toUChar( IFIELD( theInstr, 18, 5 ) );
         if ( b18to22 != 0) {
            vex_printf("dis_fp_tests(ppc)(ftsqrt)\n");
            return False;
         }
         DIP("ftsqrt crf%d,fr%u\n", crfD, frB_addr);
         do_fp_tsqrt(frB_I64, False /* not single precision*/, &fe_flag, &fg_flag);
         /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
          * where fl_flag == 1 on ppc64.
          */
         assign( flags,
                 binop( Iop_Or32,
                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
                               binop( Iop_Shl32, mkexpr(fg_flag), mkU8( 2 ) ) ),
                        binop( Iop_Shl32, mkexpr(fe_flag), mkU8( 1 ) ) ) );
         putGST_field( PPC_GST_CR, mkexpr(flags), crfD );
         break;
      }
      default:
         vex_printf("dis_fp_tests(ppc)(opc2)\n");
         return False;
   }
   return True;
}
13653 Floating Point Compare Instructions
/* Disassemble the FP compare instructions fcmpu and fcmpo (X-form, primary
 * opcode 0x3F).  Compares FRA with FRB, writes the 4-bit result to CR field
 * crfD and to FPCC.  Returns False if the instruction does not decode.
 */
static Bool dis_fp_cmp ( UInt prefix, UInt theInstr )
{
   /* X-Form */
   UChar opc1     = ifieldOPC(theInstr);
   UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
   UChar b21to22  = toUChar( IFIELD( theInstr, 21, 2 ) );
   UChar frA_addr = ifieldRegA(theInstr);
   UChar frB_addr = ifieldRegB(theInstr);
   UInt  opc2     = ifieldOPClo10(theInstr);
   UChar b0       = ifieldBIT0(theInstr);

   IRTemp ccIR    = newTemp(Ity_I32);
   IRTemp ccPPC32 = newTemp(Ity_I32);

   IRTemp frA     = newTemp(Ity_F64);
   IRTemp frB     = newTemp(Ity_F64);

   /* There is no prefixed version of these instructions.  */
   PREFIX_CHECK

   if (opc1 != 0x3F || b21to22 != 0 || b0 != 0) {
      vex_printf("dis_fp_cmp(ppc)(instr)\n");
      return False;
   }

   assign( frA, getFReg(frA_addr));
   assign( frB, getFReg(frB_addr));

   assign( ccIR, binop(Iop_CmpF64, mkexpr(frA), mkexpr(frB)) );

   /* Map compare result from IR to PPC32 */
   /*
     FP cmp result | PPC | IR
     --------------------------
     UN            | 0x1 | 0x45
     EQ            | 0x2 | 0x40
     GT            | 0x4 | 0x00
     LT            | 0x8 | 0x01
   */

   /* The bit-twiddling below converts the IR encoding into a shift amount
      (0..3) and sets exactly one of the four PPC result bits. */
   // ccPPC32 = Shl(1, (~(ccIR>>5) & 2)
   //                  | ((ccIR ^ (ccIR>>6)) & 1)
   assign(
      ccPPC32,
      binop(
         Iop_Shl32,
         mkU32(1),
         unop(
            Iop_32to8,
            binop(
               Iop_Or32,
               binop(
                  Iop_And32,
                  unop(
                     Iop_Not32,
                     binop(Iop_Shr32, mkexpr(ccIR), mkU8(5))
                  ),
                  mkU32(2)
               ),
               binop(
                  Iop_And32,
                  binop(
                     Iop_Xor32,
                     mkexpr(ccIR),
                     binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))
                  ),
                  mkU32(1)
               )
            )
         )
      )
   );

   putGST_field( PPC_GST_CR, mkexpr(ccPPC32), crfD );
   putFPCC( mkexpr( ccPPC32 ) );

   // XXX XXX XXX FIXME
   // Also write the result into FPRF (it's not entirely clear how)

   /* Note: Differences between fcmpu and fcmpo are only in exception
      flag settings, which aren't supported anyway. */
   switch (opc2) {
      case 0x000: // fcmpu (Floating Compare Unordered, PPC32 p403)
         DIP("fcmpu crf%d,fr%u,fr%u\n", crfD, frA_addr, frB_addr);
         break;
      case 0x020: // fcmpo (Floating Compare Ordered, PPC32 p402)
         DIP("fcmpo crf%d,fr%u,fr%u\n", crfD, frA_addr, frB_addr);
         break;
      default:
         vex_printf("dis_fp_cmp(ppc)(opc2)\n");
         return False;
   }
   return True;
}
13753 Floating Point Rounding/Conversion Instructions
/* Disassemble the FP rounding/conversion instructions (frsp, fcti*, fcfid*,
 * fri*).  Handles both primary opcode 0x3F and the fcfid[u]s pair under
 * primary opcode 0x3B.  Returns False if the instruction does not decode.
 */
static Bool dis_fp_round ( UInt prefix, UInt theInstr )
{
   /* X-Form */
   UChar opc1     = ifieldOPC(theInstr);
   UChar b16to20  = ifieldRegA(theInstr);
   UChar frD_addr = ifieldRegDS(theInstr);
   UChar frB_addr = ifieldRegB(theInstr);
   UInt  opc2     = ifieldOPClo10(theInstr);
   UChar flag_rC  = ifieldBIT0(theInstr);

   IRTemp  frD     = newTemp(Ity_F64);
   IRTemp  frB     = newTemp(Ity_F64);
   IRTemp  r_tmp32 = newTemp(Ity_I32);
   IRTemp  r_tmp64 = newTemp(Ity_I64);
   IRExpr* rm      = get_IR_roundingmode();

   /* By default, we will examine the results of the operation and set
      fpscr[FPRF] accordingly. */
   Bool set_FPRF = True;

   /* By default, if flag_RC is set, we will clear cr1 after the
      operation.  In reality we should set cr1 to indicate the
      exception status of the operation, but since we're not
      simulating exceptions, the exception status will appear to be
      zero.  Hence cr1 should be cleared if this is a . form insn. */
   Bool clear_CR1 = True;

   /* There is no prefixed version of these instructions.  */
   PREFIX_CHECK

   if ((!(opc1 == 0x3F || opc1 == 0x3B)) || b16to20 != 0) {
      vex_printf("dis_fp_round(ppc)(instr)\n");
      return False;
   }

   assign( frB, getFReg(frB_addr));
   if (opc1 == 0x3B) {
      /* The fcfid[u]s instructions (from ISA 2.06) are a bit odd because
       * they're very similar to the other instructions handled here, but have
       * a different primary opcode.
       */
      switch (opc2) {
         case 0x34E: // fcfids (Float convert from signed DWord to single precision)
            DIP("fcfids%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
            assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
            assign( frD, binop( Iop_RoundF64toF32, rm, binop( Iop_I64StoF64, rm,
                                                              mkexpr( r_tmp64 ) ) ) );
            goto putFR;

         case 0x3Ce: // fcfidus (Float convert from unsigned DWord to single precision)
            DIP("fcfidus%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
            assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
            assign( frD, unop( Iop_F32toF64, binop( Iop_I64UtoF32, rm, mkexpr( r_tmp64 ) ) ) );
            goto putFR;
      }
      return True;
   }

   switch (opc2) {
      case 0x00C: // frsp (Float Round to Single, PPC32 p423)
         DIP("frsp%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
         assign( frD, binop( Iop_RoundF64toF32, rm, mkexpr(frB) ));
         break;

      case 0x00E: // fctiw (Float Conv to Int, PPC32 p404)
         DIP("fctiw%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
         assign( r_tmp32,
                 binop(Iop_F64toI32S, rm, mkexpr(frB)) );
         /* The 32-bit integer result lives in the low half of frD's bits. */
         assign( frD, unop( Iop_ReinterpI64asF64,
                            unop( Iop_32Uto64, mkexpr(r_tmp32))));
         /* FPRF is undefined after fctiw.  Leave unchanged. */
         set_FPRF = False;
         break;

      case 0x00F: // fctiwz (Float Conv to Int, Round to Zero, PPC32 p405)
         DIP("fctiwz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
         assign( r_tmp32,
                 binop(Iop_F64toI32S, mkU32(Irrm_ZERO), mkexpr(frB) ));
         assign( frD, unop( Iop_ReinterpI64asF64,
                            unop( Iop_32Uto64, mkexpr(r_tmp32))));
         /* FPRF is undefined after fctiwz.  Leave unchanged. */
         set_FPRF = False;
         break;

      case 0x08F: case 0x08E: // fctiwu[z]
         DIP("fctiwu%s%s fr%u,fr%u\n", opc2 == 0x08F ? "z" : "",
             flag_rC ? ".":"", frD_addr, frB_addr);
         assign( r_tmp32,
                 binop( Iop_F64toI32U,
                        opc2 == 0x08F ? mkU32( Irrm_ZERO ) : rm,
                        mkexpr( frB ) ) );
         assign( frD, unop( Iop_ReinterpI64asF64,
                            unop( Iop_32Uto64, mkexpr(r_tmp32))));
         /* FPRF is undefined after fctiwu[z].  Leave unchanged. */
         set_FPRF = False;
         break;

      case 0x32E: // fctid (Float Conv to Int DWord, PPC64 p437)
         DIP("fctid%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
         assign( r_tmp64,
                 binop(Iop_F64toI64S, rm, mkexpr(frB)) );
         assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) );
         /* FPRF is undefined after fctid.  Leave unchanged. */
         set_FPRF = False;
         break;

      case 0x32F: // fctidz (Float Conv to Int DWord, Round to Zero, PPC64 p437)
         DIP("fctidz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
         assign( r_tmp64,
                 binop(Iop_F64toI64S, mkU32(Irrm_ZERO), mkexpr(frB)) );
         assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) );
         /* FPRF is undefined after fctidz.  Leave unchanged. */
         set_FPRF = False;
         break;

      case 0x3AE: case 0x3AF: // fctidu[z] (Float Conv to Int DWord Unsigned [Round to Zero])
      {
         DIP("fctidu%s%s fr%u,fr%u\n", opc2 == 0x3AE ? "" : "z",
             flag_rC ? ".":"", frD_addr, frB_addr);
         assign( r_tmp64,
                 binop(Iop_F64toI64U, opc2 == 0x3AE ? rm : mkU32(Irrm_ZERO), mkexpr(frB)) );
         assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) );
         /* FPRF is undefined after fctidu[z].  Leave unchanged. */
         set_FPRF = False;
         break;
      }
      case 0x34E: // fcfid (Float Conv from Int DWord, PPC64 p434)
         DIP("fcfid%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
         assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
         assign( frD,
                 binop(Iop_I64StoF64, rm, mkexpr(r_tmp64)) );
         break;

      case 0x3CE: // fcfidu (Float convert from unsigned DWord)
         DIP("fcfidu%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
         assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
         assign( frD, binop( Iop_I64UtoF64, rm, mkexpr( r_tmp64 ) ) );
         break;

      case 0x188: case 0x1A8: case 0x1C8: case 0x1E8: // frin, friz, frip, frim
         /* All four round to an integral value; only the rounding mode
            constant differs. */
         switch(opc2) {
            case 0x188: // frin (Floating Round to Integer Nearest)
               DIP("frin%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
               assign( r_tmp64,
                       binop(Iop_F64toI64S, mkU32(Irrm_NEAREST), mkexpr(frB)) );
               break;
            case 0x1A8: // friz (Floating Round to Integer Toward Zero)
               DIP("friz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
               assign( r_tmp64,
                       binop(Iop_F64toI64S, mkU32(Irrm_ZERO), mkexpr(frB)) );
               break;
            case 0x1C8: // frip (Floating Round to Integer Plus)
               DIP("frip%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
               assign( r_tmp64,
                       binop(Iop_F64toI64S, mkU32(Irrm_PosINF), mkexpr(frB)) );
               break;
            case 0x1E8: // frim (Floating Round to Integer Minus)
               DIP("frim%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
               assign( r_tmp64,
                       binop(Iop_F64toI64S, mkU32(Irrm_NegINF), mkexpr(frB)) );
               break;
         }

         /* don't use the rounded integer if frB is outside -9e18..9e18 */
         /* F64 has only log10(2**52) significant digits anyway */
         /* need to preserve sign of zero */
         /*   frD = (fabs(frB) > 9e18) ? frB :
                    (sign(frB)) ? -fabs((double)r_tmp64) : (double)r_tmp64  */
         assign(frD, IRExpr_ITE(
                        binop(Iop_CmpNE8,
                              unop(Iop_32to8,
                                   binop(Iop_CmpF64,
                                         IRExpr_Const(IRConst_F64(9e18)),
                                         unop(Iop_AbsF64, mkexpr(frB)))),
                              mkU8(0)),
                        mkexpr(frB),
                        IRExpr_ITE(
                           binop(Iop_CmpNE32,
                                 binop(Iop_Shr32,
                                       unop(Iop_64HIto32,
                                            unop(Iop_ReinterpF64asI64,
                                                 mkexpr(frB))),
                                       mkU8(31)),
                                 mkU32(0)),
                           unop(Iop_NegF64,
                                unop( Iop_AbsF64,
                                      binop(Iop_I64StoF64, mkU32(0),
                                            mkexpr(r_tmp64)) )),
                           binop(Iop_I64StoF64, mkU32(0), mkexpr(r_tmp64) )
                        )
         ));
         break;

      default:
         vex_printf("dis_fp_round(ppc)(opc2)\n");
         return False;
   }
putFR:
   putFReg( frD_addr, mkexpr(frD) );

   if (set_FPRF) {
      // XXX XXX XXX FIXME
      // set FPRF from frD
   }

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8(0) );
      putCR0( 1, mkU8(0) );
   }

   return True;
}
13971 Floating Point Pair Instructions
13973 static Bool dis_fp_pair_prefix ( UInt prefix, UInt theInstr )
13975 /* X-Form/DS-Form */
13976 UChar opc1 = ifieldOPC(theInstr);
13977 UChar rA_addr = ifieldRegA(theInstr);
13978 IRType ty = mode64 ? Ity_I64 : Ity_I32;
13979 IRTemp EA = newTemp(ty);
13980 IRTemp EA_16 = newTemp(ty);
13981 UInt ptype = PrefixType(prefix);
13982 Bool is_prefix = prefix_instruction( prefix );
13983 UInt R = 0;
13984 ULong immediate_val = 0;
13985 UInt opc2;
13987 switch (opc1) {
13988 case 0x6:
13990 UChar XTp = ifieldRegXTp(theInstr);
13991 opc2 = ifieldOPClo4(theInstr);
13993 assign( EA, calculate_prefix_EA( prefix, theInstr,
13994 rA_addr, ptype, DQFORM_IMMASK,
13995 &immediate_val, &R ) );
13997 switch (opc2) {
13999 case 0:
14001 /* Endian aware load */
14002 DIP( "lxvp %u,%llu(%u)\n", XTp, immediate_val, rA_addr );
14004 // address of next 128bits
14005 assign( EA_16, binop( Iop_Add64, mkU64( 16 ), mkexpr( EA ) ) );
14006 if (host_endness == VexEndnessBE) {
14007 putVSReg( XTp, load( Ity_V128, mkexpr( EA ) ) );
14008 putVSReg( XTp+1, load( Ity_V128, mkexpr( EA_16 ) ) );
14009 } else {
14010 putVSReg( XTp+1, load( Ity_V128, mkexpr( EA ) ) );
14011 putVSReg( XTp, load( Ity_V128, mkexpr( EA_16 ) ) );
14013 break;
14016 case 1:
14018 IRTemp EA_8 = newTemp(ty);
14019 IRTemp EA_24 = newTemp(ty);
14020 /* Endian aware store */
14021 DIP("stxvp %u,%llu(%u)\n", XTp, immediate_val, rA_addr );
14023 // address of next 128bits
14024 assign( EA_8, binop( Iop_Add64, mkU64( 8 ), mkexpr( EA ) ) );
14025 assign( EA_16, binop( Iop_Add64, mkU64( 16 ), mkexpr( EA ) ) );
14026 assign( EA_24, binop( Iop_Add64, mkU64( 24 ), mkexpr( EA ) ) );
14028 if (host_endness == VexEndnessBE) {
14029 store( mkexpr( EA ), unop( Iop_V128to64, getVSReg( XTp ) ) );
14030 store( mkexpr( EA_8 ), unop( Iop_V128HIto64, getVSReg( XTp ) ) );
14031 store( mkexpr( EA_16 ), unop( Iop_V128to64, getVSReg( XTp+1 ) ) );
14032 store( mkexpr( EA_24 ), unop( Iop_V128HIto64, getVSReg( XTp+1 ) ) );
14033 } else {
14034 store( mkexpr( EA ), unop( Iop_V128to64, getVSReg( XTp+1 ) ) );
14035 store( mkexpr( EA_8 ), unop( Iop_V128HIto64, getVSReg( XTp+1 ) ) );
14036 store( mkexpr( EA_16 ), unop( Iop_V128to64, getVSReg( XTp ) ) );
14037 store( mkexpr( EA_24 ), unop( Iop_V128HIto64, getVSReg( XTp ) ) );
14039 break;
14042 default:
14043 vex_printf("dis_fp_pair_prefix\n");
14044 return False;
14046 return True;
14048 break;
14050 case 0x2A: // plxsd
14052 UChar vRT = ifieldRegDS(theInstr);
14053 /* The prefixed word version uses the D-form. */
14054 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
14055 ptype, DFORM_IMMASK,
14056 &immediate_val, &R ) );
14058 pDIP( is_prefix, "lxsd v%u,%llu(r%u)\n", vRT, immediate_val, rA_addr );
14059 DIPp( is_prefix, ",%u", R );
14061 putVSReg( vRT+32, binop( Iop_64HLtoV128,
14062 load( Ity_I64, mkexpr( EA ) ),
14063 mkU64( 0 ) ) );
14064 return True;
14067 case 0x2B: // plxssp
14069 UChar vRT = ifieldRegDS(theInstr);
14070 /* The prefixed word version uses the D-form. */
14071 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
14072 ptype, DFORM_IMMASK,
14073 &immediate_val, &R ) );
14076 pDIP( is_prefix, "lxssp v%u,%llu(r%u)\n", vRT, immediate_val, rA_addr );
14077 DIPp( is_prefix, ",%u", R );
14078 putVSReg( vRT+32,
14079 binop( Iop_64HLtoV128,
14080 unop( Iop_ReinterpF64asI64,
14081 unop( Iop_F32toF64,
14082 unop( Iop_ReinterpI32asF32,
14083 load( Ity_I32, mkexpr( EA ) )
14084 ) ) ),
14085 mkU64( 0 ) ) );
14086 return True;
14089 case 0x32: // plxv0
14090 case 0x33: // plxv1 These are both plxv, but bit 5 is used for TX
14092 IRExpr* irx_addr;
14093 IRTemp word[2];
14094 UInt ea_off = 8;
14095 UChar vRS = ifieldRegDS(theInstr);
14096 UInt T = IFIELD( theInstr, 21, 5);
14097 UInt TX = IFIELD( theInstr, 26, 1);
14099 assign( EA,
14100 calculate_prefix_EA( prefix, theInstr, rA_addr, ptype,
14101 DFORM_IMMASK, &immediate_val, &R ) );
14103 // plxv (Load VSX Vector)
14104 pDIP( is_prefix, "lxv v%u,%llu(r%u)\n", vRS, immediate_val, rA_addr );
14105 DIPp( is_prefix, ",%u", R );
14107 word[0] = newTemp(Ity_I64);
14108 assign( word[0], load( Ity_I64, mkexpr( EA ) ) );
14110 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
14111 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
14113 word[1] = newTemp(Ity_I64);
14114 assign( word[1], load( Ity_I64, irx_addr ) );
14116 if (host_endness == VexEndnessBE)
14117 putVSReg( TX*32+T, binop( Iop_64HLtoV128,
14118 mkexpr( word[0] ),
14119 mkexpr( word[1] ) ) );
14120 else
14121 putVSReg( TX*32+T, binop( Iop_64HLtoV128,
14122 mkexpr( word[1] ),
14123 mkexpr( word[0] ) ) );
14124 return True;
14126 break;
14128 case 0x39: // lxsd, plxsd, lxssp, plxssp
14130 UChar vRT = ifieldRegDS(theInstr);
14131 opc2 = ifieldOPC0o2(theInstr);
14133 if (opc1 == 0x2A) { // plxsd
14134 opc2 = 0x2; // map plxsd to lxsd inst
14135 /* The prefixed word version uses the D-form. */
14136 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
14137 ptype, DFORM_IMMASK,
14138 &immediate_val, &R ) );
14140 } else if (opc1 == 0x2B) { // plxssp
14141 opc2 = 0x3; // map plxssp to lxssp inst
14142 /* The prefixed word version uses the D-form. */
14143 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
14144 ptype, DFORM_IMMASK,
14145 &immediate_val, &R ) );
14146 } else {
14147 /* The word version uses the DS-form. */
14148 assign( EA, calculate_prefix_EA( prefix, theInstr, rA_addr,
14149 ptype, DSFORM_IMMASK,
14150 &immediate_val, &R ) );
14153 switch(opc2) {
14154 case 0x2: // lxsd, plxsd (Load VSX Scalar Doubleword)
14156 pDIP( is_prefix, "lxsd v%u,%llu(r%u)\n", vRT, immediate_val,
14157 rA_addr );
14158 DIPp( is_prefix, ",%u", R );
14159 putVSReg( vRT+32, binop( Iop_64HLtoV128,
14160 load( Ity_I64, mkexpr( EA ) ),
14161 mkU64( 0 ) ) );
14162 return True;
14164 break;
14166 case 0x3: // lxssp (Load VSX Scalar Single from memory,
14167 // store as double in register)
14168 pDIP( is_prefix, "lxssp v%u,%llu(r%u)\n", vRT, immediate_val,
14169 rA_addr );
14170 DIPp( is_prefix, ",%u", R );
14172 putVSReg( vRT+32,
14173 binop( Iop_64HLtoV128,
14174 unop( Iop_ReinterpF64asI64,
14175 unop( Iop_F32toF64,
14176 unop( Iop_ReinterpI32asF32,
14177 load( Ity_I32, mkexpr( EA ) )
14178 ) ) ),
14179 mkU64( 0 ) ) );
14180 return True;
14182 default:
14183 vex_printf("dis_fp_pair_prefix(ppc) : DS-form wrong opc2\n");
14184 return False;
14186 break;
14189 case 0x2E: // pstxsd
14191 // pstxsd (Store VSX Scalar Doubleword)
14192 UChar vRS = ifieldRegDS(theInstr);
14194 assign( EA, calculate_prefix_EA( prefix, theInstr,
14195 rA_addr, ptype, DFORM_IMMASK,
14196 &immediate_val, &R ) );
14197 pDIP( is_prefix, "stxsd v%u,%llu(r%u)\n", vRS, immediate_val, rA_addr);
14198 DIPp( is_prefix, ",%u", R );
14199 store( mkexpr(EA), unop( Iop_V128HIto64,
14200 getVSReg( vRS+32 ) ) );
14201 /* HW is clearing vector element 1. Don't see that in the ISA but
14202 * matching the HW.
14204 putVSReg( vRS+32, binop( Iop_64HLtoV128,
14205 unop( Iop_V128HIto64,
14206 getVSReg( vRS+32 ) ),
14207 mkU64( 0 ) ) );
14208 return True;
14210 break;
14212 case 0x2F:
14214 // pstxssp (Store VSX Scalar Single - store double precision
14215 // value from register into memory in single precision format)
14216 UChar vRS = ifieldRegDS(theInstr);
14217 IRTemp high64 = newTemp(Ity_F64);
14218 IRTemp val32 = newTemp(Ity_I32);
14220 assign( EA, calculate_prefix_EA( prefix, theInstr,
14221 rA_addr, ptype, DFORM_IMMASK,
14222 &immediate_val, &R ) );
14223 pDIP( is_prefix, "stxssp v%u,%llu(r%u)\n", vRS, immediate_val, rA_addr);
14224 DIPp( is_prefix, ",%u", R );
14226 assign(high64, unop( Iop_ReinterpI64asF64,
14227 unop( Iop_V128HIto64, getVSReg( vRS+32 ) ) ) );
14229 assign(val32, unop( Iop_ReinterpF32asI32,
14230 unop( Iop_TruncF64asF32,
14231 mkexpr(high64) ) ) );
14232 store( mkexpr(EA), mkexpr( val32 ) );
14234 return True;
14236 break;
14238 case 0x3d: // lxv
14240 IRExpr* irx_addr;
14241 IRTemp word[2];
14242 UInt ea_off = 8;
14243 UChar vRS = ifieldRegDS(theInstr);
14244 UInt T = IFIELD( theInstr, 21, 5);
14245 UInt TX = IFIELD( theInstr, 3, 1);
14247 opc2 = IFIELD(theInstr, 0, 3);
14249 if ( IFIELD( theInstr, 0, 3) == 1) {
14250 // lxv (Load VSX Vector)
14251 assign( EA, calculate_prefix_EA( prefix, theInstr,
14252 rA_addr, ptype, DQFORM_IMMASK,
14253 &immediate_val, &R ) );
14255 DIP("lxv v%u,%llu(r%u)\n", vRS, immediate_val, rA_addr );
14256 word[0] = newTemp(Ity_I64);
14257 assign( word[0], load( Ity_I64, mkexpr( EA ) ) );
14259 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
14260 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
14261 word[1] = newTemp(Ity_I64);
14262 assign( word[1], load( Ity_I64, irx_addr ) );
14264 if (host_endness == VexEndnessBE)
14265 putVSReg( TX*32+T, binop( Iop_64HLtoV128,
14266 mkexpr( word[0] ),
14267 mkexpr( word[1] ) ) );
14268 else
14269 putVSReg( TX*32+T, binop( Iop_64HLtoV128,
14270 mkexpr( word[1] ),
14271 mkexpr( word[0] ) ) );
14272 return True;
14274 } else if ((opc2 & 0x3) == 0x2) {
14275 // stxsd (Store VSX Scalar Doubleword)
14276 R = 0; // must be zero for word instruction
14277 assign( EA, calculate_prefix_EA( prefix, theInstr,
14278 rA_addr, ptype, DSFORM_IMMASK,
14279 &immediate_val, &R ) );
14281 DIP("stxsd v%u,%llu(r%u)\n", vRS, immediate_val, rA_addr);
14282 store( mkexpr(EA), unop( Iop_V128HIto64,
14283 getVSReg( vRS+32 ) ) );
14284 /* HW is clearing vector element 1. Don't see that in the ISA but
14285 * matching the HW.
14287 putVSReg( vRS+32, binop( Iop_64HLtoV128,
14288 unop( Iop_V128HIto64,
14289 getVSReg( vRS+32 ) ),
14290 mkU64( 0 ) ) );
14291 return True;
14293 } else if ((opc2 & 0x3) == 0x3) {
14294 // stxssp (Store VSX Scalar Single - store double precision
14295 // value from register into memory in single precision format)
14296 IRTemp high64 = newTemp(Ity_F64);
14297 IRTemp val32 = newTemp(Ity_I32);
14299 assign( EA,
14300 calculate_prefix_EA( prefix, theInstr, rA_addr, ptype,
14301 DSFORM_IMMASK, &immediate_val, &R ) );
14302 DIP("stxssp v%u,%llu(r%u)\n", vRS, immediate_val, rA_addr);
14303 assign(high64, unop( Iop_ReinterpI64asF64,
14304 unop( Iop_V128HIto64, getVSReg( vRS+32 ) ) ) );
14306 assign(val32, unop( Iop_ReinterpF32asI32,
14307 unop( Iop_TruncF64asF32,
14308 mkexpr(high64) ) ) );
14309 store( mkexpr(EA), mkexpr( val32 ) );
14311 return True;
14313 } else if (opc2 == 0x5) {
14314 // stxv (Store VSX Vector)
14315 assign( EA, calculate_prefix_EA( prefix, theInstr,
14316 rA_addr, ptype, DQFORM_IMMASK,
14317 &immediate_val, &R ) );
14318 DIP("stxv v%u,%llu(r%u)\n", vRS, immediate_val, rA_addr );
14320 if (host_endness == VexEndnessBE) {
14321 store( mkexpr(EA), unop( Iop_V128HIto64,
14322 getVSReg( TX*32+T ) ) );
14323 irx_addr
14324 = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
14325 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
14326 store( irx_addr, unop( Iop_V128to64,
14327 getVSReg( TX*32+T ) ) );
14328 } else {
14329 store( mkexpr(EA), unop( Iop_V128to64,
14330 getVSReg( TX*32+T ) ) );
14331 irx_addr
14332 = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
14333 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
14334 store( irx_addr, unop( Iop_V128HIto64,
14335 getVSReg( TX*32+T ) ) );
14337 return True;
14339 } else {
14340 vex_printf("dis_fp_pair vector load/store (ppc) : DS-form wrong opc2\n");
14341 return False;
14344 break;
14346 case 0x3A: // plxvp
14348 UChar XTp = ifieldRegXTp(theInstr);
14350 assign( EA, calculate_prefix_EA( prefix, theInstr,
14351 rA_addr, ptype, DFORM_IMMASK,
14352 &immediate_val, &R ) );
14354 /* Endian aware prefixed load */
14355 pDIP( is_prefix, "lxvp %u,%llu(%u)", XTp, immediate_val, rA_addr );
14356 DIPp( is_prefix, ",%u", R );
14358 // address of next 128bits
14359 assign( EA_16, binop( Iop_Add64, mkU64( 16 ), mkexpr( EA ) ) );
14360 if (host_endness == VexEndnessBE) {
14361 putVSReg( XTp, load( Ity_V128, mkexpr( EA ) ) );
14362 putVSReg( XTp+1, load( Ity_V128, mkexpr( EA_16 ) ) );
14363 } else {
14364 putVSReg( XTp+1, load( Ity_V128, mkexpr( EA ) ) );
14365 putVSReg( XTp, load( Ity_V128, mkexpr( EA_16 ) ) );
14367 return True;
14370 case 0x3E: // pstxvp
14372 IRTemp EA_8 = newTemp(ty);
14373 IRTemp EA_24 = newTemp(ty);
14374 UChar XTp = ifieldRegXTp(theInstr);
14376 assign( EA, calculate_prefix_EA( prefix, theInstr,
14377 rA_addr, ptype, DFORM_IMMASK,
14378 &immediate_val, &R ) );
14380 /* Endian aware prefixed load */
14381 pDIP( is_prefix, "stxvp %u,%llu(%u)\n", XTp, immediate_val, rA_addr );
14382 DIPp( is_prefix, ",%u", R );
14384 assign( EA_8, binop( Iop_Add64, mkU64( 8 ), mkexpr( EA ) ) );
14385 assign( EA_16, binop( Iop_Add64, mkU64( 16 ), mkexpr( EA ) ) );
14386 assign( EA_24, binop( Iop_Add64, mkU64( 24 ), mkexpr( EA ) ) );
14388 if (host_endness == VexEndnessBE) {
14389 store( mkexpr( EA ), unop( Iop_V128to64, getVSReg( XTp ) ) );
14390 store( mkexpr( EA_8 ), unop( Iop_V128HIto64, getVSReg( XTp ) ) );
14391 store( mkexpr( EA_16 ), unop( Iop_V128to64, getVSReg( XTp+1 ) ) );
14392 store( mkexpr( EA_24 ), unop( Iop_V128HIto64, getVSReg( XTp+1 ) ) );
14393 } else {
14394 store( mkexpr( EA ), unop( Iop_V128to64, getVSReg( XTp+1 ) ) );
14395 store( mkexpr( EA_8 ), unop( Iop_V128HIto64, getVSReg( XTp+1 ) ) );
14396 store( mkexpr( EA_16 ), unop( Iop_V128to64, getVSReg( XTp ) ) );
14397 store( mkexpr( EA_24 ), unop( Iop_V128HIto64, getVSReg( XTp ) ) );
14399 return True;
14402 case 0x36: // pstxv0
14403 case 0x37: // pstxv1, pstxv inst where bit 5 is SX
14405 // pstxv (Prefixed store VSX Vector 1 8LS:D-form)
14406 // AKA pstxv0, pstxv1
14407 UInt S = IFIELD( theInstr, 21, 5);
14408 UInt SX = IFIELD( theInstr, 26, 1);
14409 UInt XS = 32*SX+S;
14410 UChar vRS = ifieldRegDS(theInstr);
14411 IRTemp tmpV128 = newTemp(Ity_V128);
14412 IRExpr* irx_addr;
14413 UInt ea_off = 8;
14415 DIP("pstxv v%u,%llu(r%u)", vRS, immediate_val, rA_addr );
14416 DIPp( is_prefix, ",%u", R );
14418 assign( tmpV128, getVSReg( XS ) );
14420 assign( EA,
14421 calculate_prefix_EA( prefix, theInstr,
14422 rA_addr, ptype, DFORM_IMMASK,
14423 &immediate_val, &R ) );
14425 if (host_endness == VexEndnessBE) {
14426 store( mkexpr(EA), unop( Iop_V128HIto64,
14427 mkexpr( tmpV128 ) ) );
14428 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
14429 ty == Ity_I64 ? mkU64( ea_off ):
14430 mkU32( ea_off ) );
14431 store( irx_addr, unop( Iop_V128to64,
14432 mkexpr( tmpV128 ) ) );
14434 } else {
14435 store( mkexpr(EA), unop( Iop_V128to64,
14436 mkexpr( tmpV128 ) ) );
14437 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
14438 ty == Ity_I64 ? mkU64( ea_off ):
14439 mkU32( ea_off ) );
14440 store( irx_addr, unop( Iop_V128HIto64,
14441 mkexpr( tmpV128 ) ) );
14443 return True;
14446 default:
14447 vex_printf("dis_fp_pair_prefix(ppc)(instr)\n");
14448 return False;
14450 return False;
14453 static Bool dis_fp_pair ( UInt prefix, UInt theInstr )
14455 /* X-Form/DS-Form */
14456 UChar opc1 = ifieldOPC(theInstr);
14457 UChar frT_hi_addr = ifieldRegDS(theInstr);
14458 UChar frT_lo_addr = frT_hi_addr + 1;
14459 UChar rA_addr = ifieldRegA(theInstr);
14460 UChar rB_addr = ifieldRegB(theInstr);
14461 UInt uimm16 = ifieldUIMM16(theInstr);
14462 Int simm16 = extend_s_16to32(uimm16);
14463 UInt opc2 = ifieldOPClo10(theInstr);
14464 IRType ty = mode64 ? Ity_I64 : Ity_I32;
14465 IRTemp EA_hi = newTemp(ty);
14466 IRTemp EA_lo = newTemp(ty);
14467 IRTemp frT_hi = newTemp(Ity_F64);
14468 IRTemp frT_lo = newTemp(Ity_F64);
14469 UChar b0 = ifieldBIT0(theInstr);
14470 Bool is_load = 0;
14472 /* There is no prefixed version of these instructions. */
14473 PREFIX_CHECK
14475 switch (opc1) {
14476 case 0x1F: // register offset
14477 /* These instructions work on a pair of registers. The specified
14478 * register must be even.
14480 if ((frT_hi_addr %2) != 0) {
14481 vex_printf("dis_fp_pair(ppc) ldpx or stdpx: odd frT register\n");
14482 return False;
14485 switch(opc2) {
14486 case 0x317: // lfdpx (FP Load Double Pair X-form, ISA 2.05 p125)
14487 DIP("ldpx fr%u,r%u,r%u\n", frT_hi_addr, rA_addr, rB_addr);
14488 is_load = 1;
14489 break;
14490 case 0x397: // stfdpx (FP STORE Double Pair X-form, ISA 2.05 p125)
14491 DIP("stdpx fr%u,r%u,r%u\n", frT_hi_addr, rA_addr, rB_addr);
14492 break;
14493 default:
14494 vex_printf("dis_fp_pair(ppc) : X-form wrong opc2\n");
14495 return False;
14498 if (b0 != 0) {
14499 vex_printf("dis_fp_pair(ppc)(0x1F,b0)\n");
14500 return False;
14502 assign( EA_hi, ea_rAor0_idxd( rA_addr, rB_addr ) );
14503 break;
14504 case 0x39:
14506 opc2 = ifieldOPC0o2(theInstr);
14508 switch(opc2) {
14509 case 0x0: // lfdp (FP Load Double Pair DS-form, ISA 2.05 p125)
14510 /* This instruction works on a pair of registers. The specified
14511 * register must be even.
14513 if ((frT_hi_addr %2) != 0) {
14514 vex_printf("dis_fp_pair(ppc) lfdp : odd frT register\n");
14515 return False;
14518 DIP("lfdp fr%u,%d(r%u)\n", frT_hi_addr, simm16, rA_addr);
14519 assign( EA_hi, ea_rAor0_simm( rA_addr, simm16 ) );
14520 is_load = 1;
14521 break;
14523 default:
14524 vex_printf("dis_fp_pair(ppc) : DS-form wrong opc2\n");
14525 return False;
14527 break;
14529 case 0x3d:
14531 opc2 = ifieldOPC0o2(theInstr);
14533 switch(opc2) {
14534 case 0x0:
14535 // stfdp (FP Store Double Pair DS-form, ISA 2.05 p125)
14536 /* This instruction works on a pair of registers. The specified
14537 * register must be even.
14539 if ((frT_hi_addr %2) != 0) {
14540 vex_printf("dis_fp_pair(ppc) stfdp : odd frT register\n");
14541 return False;
14544 DIP("stfdp fr%u,%d(r%u)\n", frT_hi_addr, simm16, rA_addr);
14545 assign( EA_hi, ea_rAor0_simm( rA_addr, simm16 ) );
14546 break;
14548 default:
14549 vex_printf("dis_fp_pair(ppc) : DS-form wrong opc2\n");
14550 return False;
14552 break;
14554 default: // immediate offset
14555 vex_printf("dis_fp_pair(ppc)(instr)\n");
14556 return False;
14559 if (mode64)
14560 assign( EA_lo, binop(Iop_Add64, mkexpr(EA_hi), mkU64(8)) );
14561 else
14562 assign( EA_lo, binop(Iop_Add32, mkexpr(EA_hi), mkU32(8)) );
14564 assign( frT_hi, getFReg(frT_hi_addr) );
14565 assign( frT_lo, getFReg(frT_lo_addr) );
14567 if (is_load) {
14568 putFReg( frT_hi_addr, load(Ity_F64, mkexpr(EA_hi)) );
14569 putFReg( frT_lo_addr, load(Ity_F64, mkexpr(EA_lo)) );
14570 } else {
14571 store( mkexpr(EA_hi), mkexpr(frT_hi) );
14572 store( mkexpr(EA_lo), mkexpr(frT_lo) );
14575 return True;
14580 Floating Point Merge Instructions
/* Disassemble the FP merge-word instructions fmrgew / fmrgow: build a
   64-bit FP result from the even (high) or odd (low) 32-bit words of
   frA and frB.  Returns True iff the instruction was decoded. */
static Bool dis_fp_merge ( UInt prefix, UInt theInstr )
{
   /* X-Form */
   UInt  opc2     = ifieldOPClo10(theInstr);
   UChar frD_addr = ifieldRegDS(theInstr);
   UChar frA_addr = ifieldRegA(theInstr);
   UChar frB_addr = ifieldRegB(theInstr);

   IRTemp frD = newTemp(Ity_F64);
   IRTemp frA = newTemp(Ity_F64);
   IRTemp frB = newTemp(Ity_F64);

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   assign( frA, getFReg(frA_addr));
   assign( frB, getFReg(frB_addr));

   switch (opc2) {
   case 0x3c6: // fmrgew floating merge even word
      DIP("fmrgew fr%u,fr%u,fr%u\n", frD_addr, frA_addr, frB_addr);

      /* frD = frA[0:31] (high word) || frB[0:31] (high word) */
      assign( frD, unop( Iop_ReinterpI64asF64,
                         binop( Iop_32HLto64,
                                unop( Iop_64HIto32,
                                      unop( Iop_ReinterpF64asI64,
                                            mkexpr(frA) ) ),
                                unop( Iop_64HIto32,
                                      unop( Iop_ReinterpF64asI64,
                                            mkexpr(frB) ) ) ) ) );
      break;

   case 0x346: // fmrgow floating merge odd word
      DIP("fmrgow fr%u,fr%u,fr%u\n", frD_addr, frA_addr, frB_addr);

      /* frD = frA[32:63] (low word) || frB[32:63] (low word) */
      assign( frD, unop( Iop_ReinterpI64asF64,
                         binop( Iop_32HLto64,
                                unop( Iop_64to32,
                                      unop( Iop_ReinterpF64asI64,
                                            mkexpr(frA) ) ),
                                unop( Iop_64to32,
                                      unop( Iop_ReinterpF64asI64,
                                            mkexpr(frB) ) ) ) ) );
      break;

   default:
      vex_printf("dis_fp_merge(ppc)(opc2)\n");
      return False;
   }

   putFReg( frD_addr, mkexpr(frD) );
   return True;
}
14637 Floating Point Move Instructions
/* Disassemble the FP move/sign-manipulation instructions fcpsgn, fneg,
   fmr, fnabs, fabs.  These only shuffle bits; none of them changes
   FPRF.  If Rc (bit 0) is set, CR field 1 is set to zero below.
   Returns True iff the instruction was decoded. */
static Bool dis_fp_move ( UInt prefix, UInt theInstr )
{
   /* X-Form */
   UChar opc1     = ifieldOPC(theInstr);
   UChar frD_addr = ifieldRegDS(theInstr);
   UChar frA_addr = ifieldRegA(theInstr);
   UChar frB_addr = ifieldRegB(theInstr);
   UInt  opc2     = ifieldOPClo10(theInstr);
   UChar flag_rC  = ifieldBIT0(theInstr);

   IRTemp frD = newTemp(Ity_F64);
   IRTemp frB = newTemp(Ity_F64);
   /* NOTE(review): this Ity_F64 temp is discarded in the only path that
      uses itmpB (fcpsgn), which rebinds it to a fresh Ity_I64 temp. */
   IRTemp itmpB = newTemp(Ity_F64);
   IRTemp frA;
   IRTemp signA;
   IRTemp hiD;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   /* frA is only meaningful for fcpsgn (opc2 0x008); reject a nonzero
      frA field for the single-operand forms. */
   if (opc1 != 0x3F || (frA_addr != 0 && opc2 != 0x008)) {
      vex_printf("dis_fp_move(ppc)(instr)\n");
      return False;
   }

   assign( frB, getFReg(frB_addr));

   switch (opc2) {
   case 0x008: // fcpsgn (Floating Copy Sign, ISA_V2.05 p126)
      DIP("fcpsgn%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr,
          frB_addr);
      signA = newTemp(Ity_I32);
      hiD = newTemp(Ity_I32);
      itmpB = newTemp(Ity_I64);
      frA = newTemp(Ity_F64);
      assign( frA, getFReg(frA_addr) );

      /* get A's sign bit */
      assign(signA, binop(Iop_And32,
                          unop(Iop_64HIto32, unop(Iop_ReinterpF64asI64,
                                                  mkexpr(frA))),
                          mkU32(0x80000000)) );

      assign( itmpB, unop(Iop_ReinterpF64asI64, mkexpr(frB)) );

      /* mask off B's sign bit and or in A's sign bit */
      assign(hiD, binop(Iop_Or32,
                        binop(Iop_And32,
                              unop(Iop_64HIto32,
                                   mkexpr(itmpB)),  /* frB's high 32 bits */
                              mkU32(0x7fffffff)),
                        mkexpr(signA)) );

      /* combine hiD/loB into frD */
      assign( frD, unop(Iop_ReinterpI64asF64,
                        binop(Iop_32HLto64,
                              mkexpr(hiD),
                              unop(Iop_64to32,
                                   mkexpr(itmpB)))) );  /* frB's low 32 bits */
      break;

   case 0x028: // fneg (Floating Negate, PPC32 p416)
      DIP("fneg%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
      assign( frD, unop( Iop_NegF64, mkexpr(frB) ));
      break;

   case 0x048: // fmr (Floating Move Register, PPC32 p410)
      DIP("fmr%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
      assign( frD, mkexpr(frB) );
      break;

   case 0x088: // fnabs (Floating Negative Absolute Value, PPC32 p415)
      DIP("fnabs%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
      assign( frD, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr(frB) )));
      break;

   case 0x108: // fabs (Floating Absolute Value, PPC32 p399)
      DIP("fabs%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
      assign( frD, unop( Iop_AbsF64, mkexpr(frB) ));
      break;

   default:
      vex_printf("dis_fp_move(ppc)(opc2)\n");
      return False;
   }

   putFReg( frD_addr, mkexpr(frD) );

   /* None of these change FPRF.  cr1 is set in the usual way though,
      if flag_rC is set. */

   if (flag_rC) {
      putCR321( 1, mkU8(0) );
      putCR0( 1, mkU8(0) );
   }

   return True;
}
14741 Floating Point Status/Control Register Instructions
/* Disassemble the FPSCR (Floating Point Status and Control Register)
   instructions: mtfsb1, mcrfs, mtfsb0, mtfsfi, mffs and its mffsce /
   mffscdrn / mffscdrni / mffscrn / mffscrni / mffsl variants, and
   mtfsf.  GX_level indicates DFP support, in which case the FPSCR is
   handled as a 64-bit register (W bit honoured).  Returns True iff
   the instruction was decoded. */
static Bool dis_fp_scr ( UInt prefix, UInt theInstr, Bool GX_level )
{
   /* Many forms - see each switch case */
   UChar opc1    = ifieldOPC(theInstr);
   UInt  opc2    = ifieldOPClo10(theInstr);
   UChar flag_rC = ifieldBIT0(theInstr);

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   if (opc1 != 0x3F) {
      vex_printf("dis_fp_scr(ppc)(instr)\n");
      return False;
   }

   switch (opc2) {
   case 0x026: { // mtfsb1 (Move to FPSCR Bit 1, PPC32 p479)
      // Bit crbD of the FPSCR is set.
      UChar crbD    = ifieldRegDS(theInstr);
      UInt  b11to20 = IFIELD(theInstr, 11, 10);

      if (b11to20 != 0) {
         vex_printf("dis_fp_scr(ppc)(instr,mtfsb1)\n");
         return False;
      }
      DIP("mtfsb1%s crb%d \n", flag_rC ? ".":"", crbD);
      putGST_masked( PPC_GST_FPSCR, mkU64( 1 <<( 31 - crbD ) ),
                     1ULL << ( 31 - crbD ) );
      break;
   }

   case 0x040: { // mcrfs (Move to Condition Register from FPSCR, PPC32 p465)
      UChar crfD    = toUChar( IFIELD( theInstr, 23, 3 ) );
      UChar b21to22 = toUChar( IFIELD( theInstr, 21, 2 ) );
      UChar crfS    = toUChar( IFIELD( theInstr, 18, 3 ) );
      UChar b11to17 = toUChar( IFIELD( theInstr, 11, 7 ) );
      IRTemp tmp = newTemp(Ity_I32);
      IRExpr* fpscr_all;
      if (b21to22 != 0 || b11to17 != 0 || flag_rC != 0) {
         vex_printf("dis_fp_scr(ppc)(instr,mcrfs)\n");
         return False;
      }
      DIP("mcrfs crf%d,crf%d\n", crfD, crfS);
      vassert(crfD < 8);
      vassert(crfS < 8);
      fpscr_all = getGST_masked( PPC_GST_FPSCR, MASK_FPSCR_RN );
      /* Extract FPSCR field crfS (4 bits) and copy it into CR field crfD. */
      assign( tmp, binop(Iop_And32,
                         binop(Iop_Shr32,fpscr_all,mkU8(4 * (7-crfS))),
                         mkU32(0xF)) );
      putGST_field( PPC_GST_CR, mkexpr(tmp), crfD );
      break;
   }

   case 0x046: { // mtfsb0 (Move to FPSCR Bit 0, PPC32 p478)
      // Bit crbD of the FPSCR is cleared.
      UChar crbD    = ifieldRegDS(theInstr);
      UInt  b11to20 = IFIELD(theInstr, 11, 10);

      if (b11to20 != 0) {
         vex_printf("dis_fp_scr(ppc)(instr,mtfsb0)\n");
         return False;
      }
      DIP("mtfsb0%s crb%d\n", flag_rC ? ".":"", crbD);
      putGST_masked( PPC_GST_FPSCR, mkU64( 0 ), 1ULL << ( 31 - crbD ) );
      break;
   }

   case 0x086: { // mtfsfi (Move to FPSCR Field Immediate, PPC32 p481)
      UInt  crfD    = IFIELD( theInstr, 23, 3 );
      UChar b17to22 = toUChar( IFIELD( theInstr, 17, 6 ) );
      UChar IMM     = toUChar( IFIELD( theInstr, 12, 4 ) );
      UChar b11     = toUChar( IFIELD( theInstr, 11, 1 ) );
      UChar Wbit    = toUChar( IFIELD( theInstr, 16, 1 ) );

      /* The W bit is only defined at GX_level (DFP support). */
      if (b17to22 != 0 || b11 != 0 || (Wbit && !GX_level)) {
         vex_printf("dis_fp_scr(ppc)(instr,mtfsfi)\n");
         return False;
      }
      DIP("mtfsfi%s crf%u,%d%s\n", flag_rC ? ".":"", crfD, IMM, Wbit ? ",1":"");
      /* W=0 selects the upper (BFP) half of the 64-bit FPSCR fields. */
      crfD = crfD + (8 * (1 - Wbit) );
      putGST_field( PPC_GST_FPSCR, mkU32( IMM ), crfD );
      break;
   }

   case 0x247: { // mffs (Move from FPSCR, PPC32 p468)
      UChar frD_addr = ifieldRegDS(theInstr);
      UChar frB_addr = ifieldRegB(theInstr);
      IRTemp frB = newTemp(Ity_F64);
      /* Field names use IBM (big-endian) bit numbering; IFIELD shifts
         are from the little end, hence 19/16/11 below. */
      UInt b11to12 = IFIELD(theInstr, 19, 2);
      UInt b13to15 = IFIELD(theInstr, 16, 3);
      UInt RN      = IFIELD(theInstr, 11, 2);
      UInt DRN     = IFIELD(theInstr, 11, 3);

      /* The FPSCR_DRN, FPSCR_RN and FPSCR_FPCC are all stored in
       * their own 8-bit entries with distinct offsets.  The FPSCR
       * register is handled as two 32-bit values.  We need to
       * assemble the pieces into the single 64-bit value to return.
       */
      IRExpr* fpscr_lower
         = binop( Iop_Or32,
                  getGST_masked( PPC_GST_FPSCR, (MASK_FPSCR_RN | MASK_FPSCR_C_FPCC) ),
                  binop( Iop_Or32,
                         binop( Iop_Shl32,
                                getC(),
                                mkU8(63-47) ) ,
                         binop( Iop_Shl32,
                                getFPCC(),
                                mkU8(63-51) ) ) );
      IRExpr* fpscr_upper = getGST_masked_upper( PPC_GST_FPSCR, MASK_FPSCR_DRN );

      if ((b11to12 == 0) && (b13to15 == 0)) {
         DIP("mffs%s fr%u\n", flag_rC ? ".":"", frD_addr);
         putFReg( frD_addr,
                  unop( Iop_ReinterpI64asF64,
                        binop( Iop_32HLto64, fpscr_upper, fpscr_lower ) ) );

      } else if ((b11to12 == 0) && (b13to15 == 1)) {
         DIP("mffsce fr%u\n", frD_addr);
         /* Technically as of 4/5/2017 we are not tracking VE, OE, UE, ZE,
            or XE but in case that changes in the future, do the masking. */
         putFReg( frD_addr,
                  unop( Iop_ReinterpI64asF64,
                        binop( Iop_32HLto64, fpscr_upper,
                               binop( Iop_And32, fpscr_lower,
                                      mkU32( 0xFFFFFF07 ) ) ) ) );

      } else if ((b11to12 == 2) && (b13to15 == 4)) {
         IRTemp frB_int = newTemp(Ity_I64);

         DIP("mffscdrn fr%u,fr%u\n", frD_addr, frB_addr);

         assign( frB, getFReg(frB_addr));
         assign( frB_int, unop( Iop_ReinterpF64asI64, mkexpr( frB ) ) );

         /* Clear all of the FPSCR bits except for the DRN field, VE,
            OE, UE, ZE and XE bits and write the result to the frD
            register.  Note, currently the exception bits are not tracked but
            will mask anyway in case that changes in the future. */
         putFReg( frD_addr,
                  unop( Iop_ReinterpI64asF64,
                        binop( Iop_32HLto64,
                               binop( Iop_And32, mkU32(0x7), fpscr_upper ),
                               binop( Iop_And32, mkU32(0xFF), fpscr_lower ) ) ) );

         /* Put new_DRN bits into the FPSCR register */
         putGST_masked( PPC_GST_FPSCR, mkexpr( frB_int ), MASK_FPSCR_DRN );

      } else if ((b11to12 == 2) && (b13to15 == 5)) {
         DIP("mffscdrni fr%u,%u\n", frD_addr, DRN);

         /* Clear all of the FPSCR bits except for the DRN field, VE,
            OE, UE, ZE and XE bits and write the result to the frD
            register.  Note, currently the exception bits are not tracked but
            will mask anyway in case that changes in the future. */
         putFReg( frD_addr,
                  unop( Iop_ReinterpI64asF64,
                        binop( Iop_32HLto64,
                               binop( Iop_And32, mkU32(0x7), fpscr_upper ),
                               binop( Iop_And32, mkU32(0xFF), fpscr_lower ) ) ) );

         /* Put new_DRN bits into the FPSCR register */
         putGST_masked( PPC_GST_FPSCR, binop( Iop_32HLto64, mkU32( DRN ),
                                              mkU32( 0 ) ), MASK_FPSCR_DRN );

      } else if ((b11to12 == 2) && (b13to15 == 6)) {
         IRTemp frB_int = newTemp(Ity_I64);

         DIP("mffscrn fr%u,fr%u\n", frD_addr,frB_addr);

         assign( frB, getFReg(frB_addr));
         assign( frB_int, unop( Iop_ReinterpF64asI64, mkexpr( frB ) ) );

         /* Clear all of the FPSCR bits except for the DRN field, VE,
            OE, UE, ZE and XE bits and write the result to the frD
            register.  Note, currently the exception bits are not tracked but
            will mask anyway in case that changes in the future. */
         putFReg( frD_addr,
                  unop( Iop_ReinterpI64asF64,
                        binop( Iop_32HLto64,
                               binop( Iop_And32, mkU32(0x7), fpscr_upper ),
                               binop( Iop_And32, mkU32(0xFF), fpscr_lower ) ) ) );

         /* Put new_RN bits into the FPSCR register */
         putGST_masked( PPC_GST_FPSCR, mkexpr( frB_int ), MASK_FPSCR_RN );

      } else if ((b11to12 == 2) && (b13to15 == 7)) {
         DIP("mffscrni fr%u,%u\n", frD_addr, RN);

         /* Clear all of the FPSCR bits except for the DRN field, VE,
            OE, UE, ZE and XE bits and write the result to the frD
            register.  Note, currently the exception bits are not tracked but
            will mask anyway in case that changes in the future. */
         putFReg( frD_addr,
                  unop( Iop_ReinterpI64asF64,
                        binop( Iop_32HLto64,
                               binop( Iop_And32, mkU32(0x7), fpscr_upper ),
                               binop( Iop_And32, mkU32(0xFF), fpscr_lower ) ) ) );

         /* Put new_RN bits into the FPSCR register */
         putGST_masked( PPC_GST_FPSCR, binop( Iop_32HLto64, mkU32( 0 ),
                                              mkU32( RN ) ), MASK_FPSCR_RN );

      } else if ((b11to12 == 3) && (b13to15 == 0)) {
         DIP("mffsl fr%u\n", frD_addr);
         /* Technically as of 4/5/2017 we are not tracking VE, OE, UE, ZE,
            XE, FR, FI, C, FL, FG, FE, FU.  Also only track DRN in the upper
            bits but in case that changes in the future we will do the
            masking. */
         putFReg( frD_addr,
                  unop( Iop_ReinterpI64asF64,
                        binop( Iop_32HLto64,
                               binop( Iop_And32, fpscr_upper,
                                      mkU32( 0x7 ) ),
                               binop( Iop_And32, fpscr_lower,
                                      mkU32( 0x7F0FF ) ) ) ) );
      } else {
         vex_printf("dis_fp_scr(ppc)(mff**) Unrecognized instruction.\n");
         return False;
      }
      break;
   }

   case 0x2C7: { // mtfsf (Move to FPSCR Fields, PPC32 p480)
      UChar b25      = toUChar( IFIELD(theInstr, 25, 1) );
      UChar FM       = toUChar( IFIELD(theInstr, 17, 8) );
      UChar frB_addr = ifieldRegB(theInstr);
      IRTemp frB   = newTemp(Ity_F64);
      IRTemp rB_64 = newTemp( Ity_I64 );
      Int i;
      ULong mask;
      UChar Wbit;
#define BFP_MASK_SEED 0x3000000000000000ULL
#define DFP_MASK_SEED 0x7000000000000000ULL

      if (GX_level) {
         /* This implies that Decimal Floating Point is supported, and the
          * FPSCR must be managed as a 64-bit register.
          */
         Wbit = toUChar( IFIELD(theInstr, 16, 1) );
      } else {
         Wbit = 0;
      }

      if (b25 == 1) {
         /* new 64 bit move variant for power 6.  If L field (bit 25) is
          * a one do a full 64 bit move.  Note, the FPSCR is not really
          * properly modeled.  This instruction only changes the value of
          * the rounding mode bit fields RN, FPCC and DRN.  The HW exception bits
          * do not get set in the simulator.  1/12/09
          */
         DIP("mtfsf%s %d,fr%u (L=1)\n", flag_rC ? ".":"", FM, frB_addr);
         mask = 0x1F0001F003;

      } else {
         DIP("mtfsf%s %d,fr%u\n", flag_rC ? ".":"", FM, frB_addr);
         // Build 32bit mask from FM:
         mask = 0;
         /* NOTE(review): the comparisons below test (FM & bit) == 1,
            == 0x2 and == 0x4 rather than != 0, so each only matches for
            one particular i — confirm this is the intended decode. */
         for (i=0; i<8; i++) {
            if ((FM & (1<<(7-i))) == 1) {
               /* FPSCR field k is set to the contents of the corresponding
                * field of register FRB, where k = i+8x(1-W).  In the Power
                * ISA, register field numbering is from left to right, so field
                * 15 is the least significant field in a 64-bit register.  To
                * generate the mask, we set all the appropriate rounding mode
                * bits in the highest order nibble (field 0) and shift right
                * 'k x nibble length'.
                */
               if (Wbit)
                  mask |= DFP_MASK_SEED >> ( 4 * ( i + 8 * ( 1 - Wbit ) ) );
               else
                  mask |= BFP_MASK_SEED >> ( 4 * ( i + 8 * ( 1 - Wbit ) ) );
            }
            if ((FM & (1<<(7-i))) == 0x2) { //set the FPCC bits
               mask |= 0xF000;
            }
            if ((FM & (1<<(7-i))) == 0x4) { //set the Floating-Point Class Descriptor (C) bit
               mask |= 0x10000;
            }
         }
      }
      assign( frB, getFReg(frB_addr));
      assign( rB_64, unop( Iop_ReinterpF64asI64, mkexpr( frB ) ) );
      putGST_masked( PPC_GST_FPSCR, mkexpr( rB_64 ), mask );
      break;
   }

   default:
      vex_printf("dis_fp_scr(ppc)(opc2)\n");
      return False;
   }
   return True;
}
/*------------------------------------------------------------*/
/*--- Decimal Floating Point (DFP)  Helper functions       ---*/
/*------------------------------------------------------------*/
/* Constants describing the DFP long (64-bit) and extended (128-bit)
   formats: exponent biases, encoded-field masks, exponent ranges and
   significand digit counts. */
#define DFP_LONG  1
#define DFP_EXTND 2
#define DFP_LONG_BIAS   398
#define DFP_LONG_ENCODED_FIELD_MASK  0x1F00
#define DFP_EXTND_BIAS  6176
#define DFP_EXTND_ENCODED_FIELD_MASK 0x1F000
#define DFP_LONG_EXP_MSK   0XFF
#define DFP_EXTND_EXP_MSK  0XFFF

#define DFP_G_FIELD_LONG_MASK     0x7FFC0000 // upper 32-bits only
#define DFP_LONG_GFIELD_RT_SHIFT  (63 - 13 - 32) // adj for upper 32-bits
#define DFP_G_FIELD_EXTND_MASK    0x7FFFC000 // upper 32-bits only
#define DFP_EXTND_GFIELD_RT_SHIFT (63 - 17 - 32) //adj for upper 32 bits
#define DFP_T_FIELD_LONG_MASK     0x3FFFF  // mask for upper 32-bits
#define DFP_T_FIELD_EXTND_MASK    0x03FFFF // mask for upper 32-bits
#define DFP_LONG_EXP_MAX          369      // biased max
#define DFP_LONG_EXP_MIN          0        // biased min
#define DFP_EXTND_EXP_MAX         6111     // biased max
#define DFP_EXTND_EXP_MIN         0        // biased min
#define DFP_LONG_MAX_SIG_DIGITS   16
#define DFP_EXTND_MAX_SIG_DIGITS  34
#define MAX_DIGITS_IN_STRING      8

/* Shorthand for building 32-bit IR boolean/bit expressions. */
#define  AND(x, y) binop( Iop_And32, x, y )
#define AND4(w, x, y, z) AND( AND( w, x ), AND( y, z ) )
#define  OR(x, y)  binop( Iop_Or32, x, y )
#define  OR3(x, y, z)    OR( x, OR( y, z ) )
#define  OR4(w, x, y, z) OR( OR( w, x ), OR( y, z ) )
#define NOT(x) unop( Iop_1Uto32, unop( Iop_Not1, unop( Iop_32to1, mkexpr( x ) ) ) )

#define  SHL(value, by) binop( Iop_Shl32, value, mkU8( by ) )
#define  SHR(value, by) binop( Iop_Shr32, value, mkU8( by ) )

/* Assemble a 5-bit constant from individual bit values. */
#define BITS5(_b4,_b3,_b2,_b1,_b0) \
   (((_b4) << 4) | ((_b3) << 3) | ((_b2) << 2) | \
    ((_b1) << 1) | ((_b0) << 0))
/* NOTE(review): irType is declared ULong but is compared against the
   IRType enum value Ity_D128 — confirm the parameter type is intended. */
static void generate_store_DFP_FPRF_value( ULong irType, IRExpr *src,
                                           const VexAbiInfo* vbi )
{
   /* This function takes a DFP value and computes the value of the FPRF
      field in the FPCC register and stores it.  It is done as a clean helper.
      The FPRF[0:4]:
         bits[0:4] =
            0b00001  Signaling NaN (DFP only)
            0b10001  Quiet NaN
            0b01001  negative infinity
            0b01000  negative normal number
            0b11000  negative subnormal number
            0b10010  negative zero
            0b00010  positive zero
            0b10100  positive subnormal number
            0b00100  positive normal number
            0b00101  positive infinity
   */

   IRTemp sign = newTemp( Ity_I32 );
   IRTemp gfield = newTemp( Ity_I32 );
   IRTemp gfield_mask = newTemp( Ity_I32 );
   IRTemp exponent = newTemp( Ity_I64 );
   UInt exponent_bias = 0;
   IRTemp T_value_is_zero = newTemp( Ity_I32 );
   IRTemp fprf_value = newTemp( Ity_I32 );
   IRTemp lmd = newTemp( Ity_I32 );
   IRTemp lmd_zero_true = newTemp( Ity_I1 );
   Int min_norm_exp = 0;

   /* Only the D128 case is implemented. */
   vassert( irType == Ity_D128);

   if (irType == Ity_D128) {
      assign( gfield_mask, mkU32( DFP_G_FIELD_EXTND_MASK ) );
      /* The gfield bits are left justified. */
      assign( gfield, binop( Iop_And32,
                             mkexpr( gfield_mask ),
                             unop( Iop_64HIto32,
                                   unop( Iop_ReinterpD64asI64,
                                         unop( Iop_D128HItoD64, src ) ) ) ) );
      assign( exponent, unop( Iop_ExtractExpD128, src ) );
      exponent_bias = 6176;
      min_norm_exp = -6143;

      /* The significand is zero if the T field and LMD are all zeros */
      /* Check if LMD is zero */
      Get_lmd( &lmd, binop( Iop_Shr32,
                            mkexpr( gfield ), mkU8( 31 - 5 ) ) );

      assign( lmd_zero_true, binop( Iop_CmpEQ32,
                                    mkexpr( lmd ),
                                    mkU32( 0 ) ) );
      /* The T value and the LMD are the BCD value of the significand.
         If the upper and lower T value fields and the LMD are all zero
         then the significand is zero.  */
      assign( T_value_is_zero,
              unop( Iop_1Uto32,
                    mkAND1 (
                       mkexpr( lmd_zero_true ),
                       mkAND1 ( binop( Iop_CmpEQ64,
                                       binop( Iop_And64,
                                              mkU64( DFP_T_FIELD_EXTND_MASK ),
                                              unop( Iop_ReinterpD64asI64,
                                                    unop( Iop_D128HItoD64,
                                                          src ) ) ),
                                       mkU64( 0 ) ),
                                binop( Iop_CmpEQ64,
                                       unop( Iop_ReinterpD64asI64,
                                             unop( Iop_D128LOtoD64,
                                                   src ) ),
                                       mkU64( 0 ) ) ) ) ) );

      /* Sign is bit 63 of the high 64 bits. */
      assign( sign,
              unop( Iop_64to32,
                    binop( Iop_Shr64,
                           unop( Iop_ReinterpD64asI64,
                                 unop( Iop_D128HItoD64, src ) ),
                           mkU8( 63 ) ) ) );
   } else {
      /* generate_store_DFP_FPRF_value, unknown value for irType */
      vassert(0);
   }

   /* Determine what the type of the number is. */
   assign( fprf_value,
           mkIRExprCCall( Ity_I32, 0 /*regparms*/,
                          "generate_DFP_FPRF_value_helper",
                          fnptr_to_fnentry( vbi,
                                            &generate_DFP_FPRF_value_helper ),
                          mkIRExprVec_6( mkexpr( gfield ),
                                         mkexpr( exponent ),
                                         mkU32( exponent_bias ),
                                         mkU32( min_norm_exp ),
                                         mkexpr( sign ),
                                         mkexpr( T_value_is_zero ) ) ) );
   /* fprf[0:4] = (C | FPCC[0:3]) */
   putC( binop( Iop_Shr32, mkexpr( fprf_value ), mkU8( 4 ) ) );
   putFPCC( binop( Iop_And32, mkexpr( fprf_value ), mkU32 (0xF ) ) );
   return;
}
/* Build the IR for the 5-bit DFP G-field encoding of a left-most digit
   (lmd32) combined with the left-most two exponent bits (lmexp).
   Branch-free: each possible case is computed under an all-ones/all-
   zeros mask and the results are OR-ed together. */
static IRExpr * Gfield_encoding( IRExpr * lmexp, IRExpr * lmd32 )
{
   IRTemp lmd_07_mask   = newTemp( Ity_I32 );
   IRTemp lmd_8_mask    = newTemp( Ity_I32 );
   IRTemp lmd_9_mask    = newTemp( Ity_I32 );
   IRTemp lmexp_00_mask = newTemp( Ity_I32 );
   IRTemp lmexp_01_mask = newTemp( Ity_I32 );
   IRTemp lmexp_10_mask = newTemp( Ity_I32 );
   IRTemp lmd_07_val    = newTemp( Ity_I32 );
   IRTemp lmd_8_val     = newTemp( Ity_I32 );
   IRTemp lmd_9_val     = newTemp( Ity_I32 );

   /* The encoding is as follows:
    * lmd - left most digit
    * lme - left most 2-bits of the exponent
    *
    *    lmd
    *   0 - 7    (lmexp << 3) | lmd
    *     8      0b11000 (24 decimal) if lme=0b00;
    *            0b11010 (26 decimal) if lme=0b01;
    *            0b11100 (28 decimal) if lme=0b10;
    *     9      0b11001 (25 decimal) if lme=0b00;
    *            0b11011 (27 decimal) if lme=0b01;
    *            0b11101 (29 decimal) if lme=0b10;
    */

   /* Generate the masks for each condition */
   assign( lmd_07_mask,
           unop( Iop_1Sto32, binop( Iop_CmpLE32U, lmd32, mkU32( 7 ) ) ) );
   assign( lmd_8_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmd32, mkU32( 8 ) ) ) );
   assign( lmd_9_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmd32, mkU32( 9 ) ) ) );
   assign( lmexp_00_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmexp, mkU32( 0 ) ) ) );
   assign( lmexp_01_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmexp, mkU32( 1 ) ) ) );
   assign( lmexp_10_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmexp, mkU32( 2 ) ) ) );

   /* Generate the values for each LMD condition, assuming the condition
    * is TRUE.
    */
   assign( lmd_07_val,
           binop( Iop_Or32, binop( Iop_Shl32, lmexp, mkU8( 3 ) ), lmd32 ) );
   assign( lmd_8_val,
           binop( Iop_Or32,
                  binop( Iop_Or32,
                         binop( Iop_And32,
                                mkexpr( lmexp_00_mask ),
                                mkU32( 24 ) ),
                         binop( Iop_And32,
                                mkexpr( lmexp_01_mask ),
                                mkU32( 26 ) ) ),
                  binop( Iop_And32, mkexpr( lmexp_10_mask ), mkU32( 28 ) ) ) );
   assign( lmd_9_val,
           binop( Iop_Or32,
                  binop( Iop_Or32,
                         binop( Iop_And32,
                                mkexpr( lmexp_00_mask ),
                                mkU32( 25 ) ),
                         binop( Iop_And32,
                                mkexpr( lmexp_01_mask ),
                                mkU32( 27 ) ) ),
                  binop( Iop_And32, mkexpr( lmexp_10_mask ), mkU32( 29 ) ) ) );

   /* generate the result from the possible LMD values */
   return binop( Iop_Or32,
                 binop( Iop_Or32,
                        binop( Iop_And32,
                               mkexpr( lmd_07_mask ),
                               mkexpr( lmd_07_val ) ),
                        binop( Iop_And32,
                               mkexpr( lmd_8_mask ),
                               mkexpr( lmd_8_val ) ) ),
                 binop( Iop_And32, mkexpr( lmd_9_mask ), mkexpr( lmd_9_val ) ) );
}
/* Decode the left-most digit (LMD) of a DFP significand from G-field
   bits [0:4] (passed right-justified in gfield_0_4), writing the IR
   for the digit value (0..9) into *lmd.  Branch-free mask-and-OR
   selection, the inverse of Gfield_encoding above. */
static void Get_lmd( IRTemp * lmd, IRExpr * gfield_0_4 )
{
   /* Extract the exponent and the left most digit of the mantissa
    * from the G field bits [0:4].
    */
   IRTemp lmd_07_mask   = newTemp( Ity_I32 );
   IRTemp lmd_8_00_mask = newTemp( Ity_I32 );
   IRTemp lmd_8_01_mask = newTemp( Ity_I32 );
   IRTemp lmd_8_10_mask = newTemp( Ity_I32 );
   IRTemp lmd_9_00_mask = newTemp( Ity_I32 );
   IRTemp lmd_9_01_mask = newTemp( Ity_I32 );
   IRTemp lmd_9_10_mask = newTemp( Ity_I32 );

   IRTemp lmd_07_val = newTemp( Ity_I32 );
   IRTemp lmd_8_val  = newTemp( Ity_I32 );
   IRTemp lmd_9_val  = newTemp( Ity_I32 );

   /* The left most digit (LMD) encoding is as follows:
    *    lmd
    *   0 - 7    (lmexp << 3) | lmd
    *     8      0b11000 (24 decimal) if lme=0b00;
    *            0b11010 (26 decimal) if lme=0b01;
    *            0b11100 (28 decimal) if lme=0b10
    *     9      0b11001 (25 decimal) if lme=0b00;
    *            0b11011 (27 decimal) if lme=0b01;
    *            0b11101 (29 decimal) if lme=0b10;
    */

   /* Generate the masks for each condition of LMD and exponent bits */
   assign( lmd_07_mask,
           unop( Iop_1Sto32, binop( Iop_CmpLE32U,
                                    gfield_0_4,
                                    mkU32( BITS5(1,0,1,1,1) ) ) ) );
   assign( lmd_8_00_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
                                    gfield_0_4,
                                    mkU32( BITS5(1,1,0,0,0) ) ) ) );
   assign( lmd_8_01_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
                                    gfield_0_4,
                                    mkU32( BITS5(1,1,0,1,0) ) ) ) );
   assign( lmd_8_10_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
                                    gfield_0_4,
                                    mkU32( BITS5(1,1,1,0,0) ) ) ) );
   assign( lmd_9_00_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
                                    gfield_0_4,
                                    mkU32( BITS5(1,1,0,0,1) ) ) ) );
   assign( lmd_9_01_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
                                    gfield_0_4,
                                    mkU32( BITS5(1,1,0,1,1) ) ) ) );
   assign( lmd_9_10_mask,
           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
                                    gfield_0_4,
                                    mkU32( BITS5(1,1,1,0,1) ) ) ) );

   /* Generate the values for each LMD condition, assuming the condition
    * is TRUE.
    */
   assign( lmd_07_val, binop( Iop_And32, gfield_0_4, mkU32( 0x7 ) ) );
   assign( lmd_8_val, mkU32( 0x8 ) );
   assign( lmd_9_val, mkU32( 0x9 ) );

   assign( *lmd,
           OR( OR3 ( AND( mkexpr( lmd_07_mask ), mkexpr( lmd_07_val ) ),
                     AND( mkexpr( lmd_8_00_mask ), mkexpr( lmd_8_val ) ),
                     AND( mkexpr( lmd_8_01_mask ), mkexpr( lmd_8_val ) )),
               OR4( AND( mkexpr( lmd_8_10_mask ), mkexpr( lmd_8_val ) ),
                    AND( mkexpr( lmd_9_00_mask ), mkexpr( lmd_9_val ) ),
                    AND( mkexpr( lmd_9_01_mask ), mkexpr( lmd_9_val ) ),
                    AND( mkexpr( lmd_9_10_mask ), mkexpr( lmd_9_val ) )
                    ) ) );
}
/* Right-shift amounts that bring BCD digit N of a packed 32-bit BCD
   word down into the bottom 4 bits (digit 0 is already there). */
#define DIGIT1_SHR 4    // shift digit 1 to bottom 4 bits
#define DIGIT2_SHR 8    // shift digit 2 to bottom 4 bits
#define DIGIT3_SHR 12
#define DIGIT4_SHR 16
#define DIGIT5_SHR 20
#define DIGIT6_SHR 24
#define DIGIT7_SHR 28
15340 static IRExpr * bcd_digit_inval( IRExpr * bcd_u, IRExpr * bcd_l )
15342 /* 60-bit BCD string stored in two 32-bit values. Check that each,
15343 * digit is a valid BCD number, i.e. less then 9.
15345 IRTemp valid = newTemp( Ity_I32 );
15347 assign( valid,
15348 AND4( AND4 ( unop( Iop_1Sto32,
15349 binop( Iop_CmpLE32U,
15350 binop( Iop_And32,
15351 bcd_l,
15352 mkU32 ( 0xF ) ),
15353 mkU32( 0x9 ) ) ),
15354 unop( Iop_1Sto32,
15355 binop( Iop_CmpLE32U,
15356 binop( Iop_And32,
15357 binop( Iop_Shr32,
15358 bcd_l,
15359 mkU8 ( DIGIT1_SHR ) ),
15360 mkU32 ( 0xF ) ),
15361 mkU32( 0x9 ) ) ),
15362 unop( Iop_1Sto32,
15363 binop( Iop_CmpLE32U,
15364 binop( Iop_And32,
15365 binop( Iop_Shr32,
15366 bcd_l,
15367 mkU8 ( DIGIT2_SHR ) ),
15368 mkU32 ( 0xF ) ),
15369 mkU32( 0x9 ) ) ),
15370 unop( Iop_1Sto32,
15371 binop( Iop_CmpLE32U,
15372 binop( Iop_And32,
15373 binop( Iop_Shr32,
15374 bcd_l,
15375 mkU8 ( DIGIT3_SHR ) ),
15376 mkU32 ( 0xF ) ),
15377 mkU32( 0x9 ) ) ) ),
15378 AND4 ( unop( Iop_1Sto32,
15379 binop( Iop_CmpLE32U,
15380 binop( Iop_And32,
15381 binop( Iop_Shr32,
15382 bcd_l,
15383 mkU8 ( DIGIT4_SHR ) ),
15384 mkU32 ( 0xF ) ),
15385 mkU32( 0x9 ) ) ),
15386 unop( Iop_1Sto32,
15387 binop( Iop_CmpLE32U,
15388 binop( Iop_And32,
15389 binop( Iop_Shr32,
15390 bcd_l,
15391 mkU8 ( DIGIT5_SHR ) ),
15392 mkU32 ( 0xF ) ),
15393 mkU32( 0x9 ) ) ),
15394 unop( Iop_1Sto32,
15395 binop( Iop_CmpLE32U,
15396 binop( Iop_And32,
15397 binop( Iop_Shr32,
15398 bcd_l,
15399 mkU8 ( DIGIT6_SHR ) ),
15400 mkU32 ( 0xF ) ),
15401 mkU32( 0x9 ) ) ),
15402 unop( Iop_1Sto32,
15403 binop( Iop_CmpLE32U,
15404 binop( Iop_And32,
15405 binop( Iop_Shr32,
15406 bcd_l,
15407 mkU8 ( DIGIT7_SHR ) ),
15408 mkU32 ( 0xF ) ),
15409 mkU32( 0x9 ) ) ) ),
15410 AND4( unop( Iop_1Sto32,
15411 binop( Iop_CmpLE32U,
15412 binop( Iop_And32,
15413 bcd_u,
15414 mkU32 ( 0xF ) ),
15415 mkU32( 0x9 ) ) ),
15416 unop( Iop_1Sto32,
15417 binop( Iop_CmpLE32U,
15418 binop( Iop_And32,
15419 binop( Iop_Shr32,
15420 bcd_u,
15421 mkU8 ( DIGIT1_SHR ) ),
15422 mkU32 ( 0xF ) ),
15423 mkU32( 0x9 ) ) ),
15424 unop( Iop_1Sto32,
15425 binop( Iop_CmpLE32U,
15426 binop( Iop_And32,
15427 binop( Iop_Shr32,
15428 bcd_u,
15429 mkU8 ( DIGIT2_SHR ) ),
15430 mkU32 ( 0xF ) ),
15431 mkU32( 0x9 ) ) ),
15432 unop( Iop_1Sto32,
15433 binop( Iop_CmpLE32U,
15434 binop( Iop_And32,
15435 binop( Iop_Shr32,
15436 bcd_u,
15437 mkU8 ( DIGIT3_SHR ) ),
15438 mkU32 ( 0xF ) ),
15439 mkU32( 0x9 ) ) ) ),
15440 AND4( unop( Iop_1Sto32,
15441 binop( Iop_CmpLE32U,
15442 binop( Iop_And32,
15443 binop( Iop_Shr32,
15444 bcd_u,
15445 mkU8 ( DIGIT4_SHR ) ),
15446 mkU32 ( 0xF ) ),
15447 mkU32( 0x9 ) ) ),
15448 unop( Iop_1Sto32,
15449 binop( Iop_CmpLE32U,
15450 binop( Iop_And32,
15451 binop( Iop_Shr32,
15452 bcd_u,
15453 mkU8 ( DIGIT5_SHR ) ),
15454 mkU32 ( 0xF ) ),
15455 mkU32( 0x9 ) ) ),
15456 unop( Iop_1Sto32,
15457 binop( Iop_CmpLE32U,
15458 binop( Iop_And32,
15459 binop( Iop_Shr32,
15460 bcd_u,
15461 mkU8 ( DIGIT6_SHR ) ),
15462 mkU32 ( 0xF ) ),
15463 mkU32( 0x9 ) ) ),
15464 unop( Iop_1Sto32,
15465 binop( Iop_CmpLE32U,
15466 binop( Iop_And32,
15467 binop( Iop_Shr32,
15468 bcd_u,
15469 mkU8 ( DIGIT7_SHR ) ),
15470 mkU32 ( 0xF ) ),
15471 mkU32( 0x9 ) ) ) ) ) );
15473 return unop( Iop_Not32, mkexpr( valid ) );
15475 #undef DIGIT1_SHR
15476 #undef DIGIT2_SHR
15477 #undef DIGIT3_SHR
15478 #undef DIGIT4_SHR
15479 #undef DIGIT5_SHR
15480 #undef DIGIT6_SHR
15481 #undef DIGIT7_SHR
15483 static IRExpr * Generate_neg_sign_mask( IRExpr * sign )
15485 return binop( Iop_Or32,
15486 unop( Iop_1Sto32, binop( Iop_CmpEQ32, sign, mkU32( 0xB ) ) ),
15487 unop( Iop_1Sto32, binop( Iop_CmpEQ32, sign, mkU32( 0xD ) ) )
15491 static IRExpr * Generate_pos_sign_mask( IRExpr * sign )
15493 return binop( Iop_Or32,
15494 binop( Iop_Or32,
15495 unop( Iop_1Sto32,
15496 binop( Iop_CmpEQ32, sign, mkU32( 0xA ) ) ),
15497 unop( Iop_1Sto32,
15498 binop( Iop_CmpEQ32, sign, mkU32( 0xC ) ) ) ),
15499 binop( Iop_Or32,
15500 unop( Iop_1Sto32,
15501 binop( Iop_CmpEQ32, sign, mkU32( 0xE ) ) ),
15502 unop( Iop_1Sto32,
15503 binop( Iop_CmpEQ32, sign, mkU32( 0xF ) ) ) ) );
/* Produce the sign bit (bit 31) of the result: 0x80000000 when the
 * negative-sign mask is set, 0 otherwise.  The pos_sign_mask term is
 * ANDed with zero and therefore always contributes 0; it is kept only
 * for symmetry with the negative case.
 */
static IRExpr * Generate_sign_bit( IRExpr * pos_sign_mask,
                                   IRExpr * neg_sign_mask )
{
   return binop( Iop_Or32,
                 binop( Iop_And32, neg_sign_mask, mkU32( 0x80000000 ) ),
                 binop( Iop_And32, pos_sign_mask, mkU32( 0x00000000 ) ) );
}
/* Returns all 1's if the BCD string is invalid: either it contained an
 * invalid digit, or its sign nibble matched neither a valid positive
 * nor a valid negative sign code (both masks zero).
 */
static IRExpr * Generate_inv_mask( IRExpr * invalid_bcd_mask,
                                   IRExpr * pos_sign_mask,
                                   IRExpr * neg_sign_mask )
/* first argument is all 1's if the BCD string had an invalid digit in it. */
{
   return binop( Iop_Or32,
                 invalid_bcd_mask,
                 unop( Iop_1Sto32,
                       binop( Iop_CmpEQ32,
                              binop( Iop_Or32, pos_sign_mask, neg_sign_mask ),
                              mkU32( 0x0 ) ) ) );
}
/* Expand the densely-packed-decimal (DPD) significand of a DFP128
 * value, held in frBI64_hi/frBI64_lo, into an unpacked BCD digit
 * string.  The result is returned as five 32-bit pieces: the top 12
 * bits (3 digits) in *top_12_l and two 60-bit (15-digit) groups, each
 * split into upper/lower 32-bit halves.  The expansion is done 50 DPD
 * bits (-> 60 BCD bits) at a time via Iop_DPBtoBCD.
 */
static void Generate_132_bit_bcd_string( IRExpr * frBI64_hi, IRExpr * frBI64_lo,
                                         IRTemp * top_12_l, IRTemp * mid_60_u,
                                         IRTemp * mid_60_l, IRTemp * low_60_u,
                                         IRTemp * low_60_l)
{
   IRTemp tmplow60 = newTemp( Ity_I64 );
   IRTemp tmpmid60 = newTemp( Ity_I64 );
   IRTemp tmptop12 = newTemp( Ity_I64 );
   IRTemp low_50   = newTemp( Ity_I64 );
   IRTemp mid_50   = newTemp( Ity_I64 );
   IRTemp top_10   = newTemp( Ity_I64 );
   IRTemp top_12_u = newTemp( Ity_I32 ); // only needed for a dummy arg

   /* Convert the 110-bit densely packed BCD string to a 128-bit BCD string */

   /* low_50[49:0] = ((frBI64_lo[49:32] << 14) | frBI64_lo[31:0]) */
   assign( low_50,
           binop( Iop_32HLto64,
                  binop( Iop_And32,
                         unop( Iop_64HIto32, frBI64_lo ),
                         mkU32( 0x3FFFF ) ),
                  unop( Iop_64to32, frBI64_lo ) ) );

   /* Convert the 50 bit densely packed BCD string to a 60 bit
    * BCD string.
    */
   assign( tmplow60, unop( Iop_DPBtoBCD, mkexpr( low_50 ) ) );
   assign( *low_60_u, unop( Iop_64HIto32, mkexpr( tmplow60 ) ) );
   assign( *low_60_l, unop( Iop_64to32, mkexpr( tmplow60 ) ) );

   /* mid_50[49:0] = ((frBI64_hi[35:32] << 14) | frBI64_hi[31:18]) |
    *                ((frBI64_hi[17:0] << 14) | frBI64_lo[63:50])
    */
   assign( mid_50,
           binop( Iop_32HLto64,
                  binop( Iop_Or32,
                         binop( Iop_Shl32,
                                binop( Iop_And32,
                                       unop( Iop_64HIto32, frBI64_hi ),
                                       mkU32( 0xF ) ),
                                mkU8( 14 ) ),
                         binop( Iop_Shr32,
                                unop( Iop_64to32, frBI64_hi ),
                                mkU8( 18 ) ) ),
                  binop( Iop_Or32,
                         binop( Iop_Shl32,
                                unop( Iop_64to32, frBI64_hi ),
                                mkU8( 14 ) ),
                         binop( Iop_Shr32,
                                unop( Iop_64HIto32, frBI64_lo ),
                                mkU8( 18 ) ) ) ) );

   /* Convert the 50 bit densely packed BCD string to a 60 bit
    * BCD string.
    */
   assign( tmpmid60, unop( Iop_DPBtoBCD, mkexpr( mid_50 ) ) );
   assign( *mid_60_u, unop( Iop_64HIto32, mkexpr( tmpmid60 ) ) );
   assign( *mid_60_l, unop( Iop_64to32, mkexpr( tmpmid60 ) ) );

   /* top_10[49:0] = frBI64_hi[45:36] */
   assign( top_10,
           binop( Iop_32HLto64,
                  mkU32( 0 ),
                  binop( Iop_And32,
                         binop( Iop_Shr32,
                                unop( Iop_64HIto32, frBI64_hi ),
                                mkU8( 4 ) ),
                         mkU32( 0x3FF ) ) ) );

   /* Convert the 10 bit densely packed BCD string to a 12 bit
    * BCD string.
    */
   assign( tmptop12, unop( Iop_DPBtoBCD, mkexpr( top_10 ) ) );
   assign( top_12_u, unop( Iop_64HIto32, mkexpr( tmptop12 ) ) );
   assign( *top_12_l, unop( Iop_64to32, mkexpr( tmptop12 ) ) );
}
/* Count leading zero BCD digits in 'string', starting at digit
 * position 'start' (counting from the most significant digit),
 * continuing a count begun by a previous call.  init_cnt / init_flag
 * carry in the count so far and a flag that is non-zero once a
 * non-zero digit has been seen (which stops further counting);
 * final_cnt / final_flag return the updated temporaries.
 */
static void Count_zeros( int start, IRExpr * init_cnt, IRExpr * init_flag,
                         IRTemp * final_cnt, IRTemp * final_flag,
                         IRExpr * string )
{
   IRTemp cnt[MAX_DIGITS_IN_STRING + 1];IRTemp flag[MAX_DIGITS_IN_STRING+1];
   int digits = MAX_DIGITS_IN_STRING;
   int i;

   cnt[start-1] = newTemp( Ity_I8 );
   flag[start-1] = newTemp( Ity_I8 );
   assign( cnt[start-1], init_cnt);
   assign( flag[start-1], init_flag);

   for ( i = start; i <= digits; i++) {
      cnt[i] = newTemp( Ity_I8 );
      flag[i] = newTemp( Ity_I8 );
      /* Add 1 to the count only if this digit is zero AND no non-zero
         digit has been seen yet (complemented flag acts as the mask). */
      assign( cnt[i],
              binop( Iop_Add8,
                     mkexpr( cnt[i-1] ),
                     binop(Iop_And8,
                           unop( Iop_1Uto8,
                                 binop(Iop_CmpEQ32,
                                       binop(Iop_And32,
                                             string,
                                             mkU32( 0xF <<
                                                    ( ( digits - i ) * 4) ) ),
                                       mkU32( 0 ) ) ),
                           binop( Iop_Xor8, /* complement flag */
                                  mkexpr( flag[i - 1] ),
                                  mkU8( 0xFF ) ) ) ) );

      /* set flag to 1 if digit was not a zero */
      assign( flag[i],
              binop(Iop_Or8,
                    unop( Iop_1Sto8,
                          binop(Iop_CmpNE32,
                                binop(Iop_And32,
                                      string,
                                      mkU32( 0xF <<
                                             ( (digits - i) * 4) ) ),
                                mkU32( 0 ) ) ),
                    mkexpr( flag[i - 1] ) ) );
   }

   *final_cnt = cnt[digits];
   *final_flag = flag[digits];
}
/* Count the number of leading zero digits in a BCD string made up of
 * the leftmost digit 'lmd' followed by the digits held in upper_28 and
 * then low_32.  The count is chained through successive Count_zeros
 * calls; counting stops at the first non-zero digit.
 */
static IRExpr * Count_leading_zeros_60( IRExpr * lmd, IRExpr * upper_28,
                                        IRExpr * low_32 )
{
   IRTemp num_lmd    = newTemp( Ity_I8 );
   IRTemp num_upper  = newTemp( Ity_I8 );
   IRTemp num_low    = newTemp( Ity_I8 );
   IRTemp lmd_flag   = newTemp( Ity_I8 );
   IRTemp upper_flag = newTemp( Ity_I8 );
   IRTemp low_flag   = newTemp( Ity_I8 );

   /* Seed the count with the leftmost digit: count is 1 if it is zero,
      and the "seen non-zero" flag is its complement. */
   assign( num_lmd, unop( Iop_1Uto8, binop( Iop_CmpEQ32, lmd, mkU32( 0 ) ) ) );
   assign( lmd_flag, unop( Iop_Not8, mkexpr( num_lmd ) ) );

   Count_zeros( 2,
                mkexpr( num_lmd ),
                mkexpr( lmd_flag ),
                &num_upper,
                &upper_flag,
                upper_28 );

   Count_zeros( 1,
                mkexpr( num_upper ),
                mkexpr( upper_flag ),
                &num_low,
                &low_flag,
                low_32 );

   return mkexpr( num_low );
}
/* Count the number of leading zero digits in a 34-digit DFP128 BCD
 * string: the leftmost digit 'lmd' (digit 34), the 3 digits in
 * top_12_l, then the 15-digit mid and low groups, each held as
 * upper/lower 32-bit halves.  The count is chained through successive
 * Count_zeros calls and stops at the first non-zero digit.
 */
static IRExpr * Count_leading_zeros_128( IRExpr * lmd, IRExpr * top_12_l,
                                         IRExpr * mid_60_u, IRExpr * mid_60_l,
                                         IRExpr * low_60_u, IRExpr * low_60_l)
{
   IRTemp num_lmd   = newTemp( Ity_I8 );
   IRTemp num_top   = newTemp( Ity_I8 );
   IRTemp num_mid_u = newTemp( Ity_I8 );
   IRTemp num_mid_l = newTemp( Ity_I8 );
   IRTemp num_low_u = newTemp( Ity_I8 );
   IRTemp num_low_l = newTemp( Ity_I8 );

   IRTemp lmd_flag   = newTemp( Ity_I8 );
   IRTemp top_flag   = newTemp( Ity_I8 );
   IRTemp mid_u_flag = newTemp( Ity_I8 );
   IRTemp mid_l_flag = newTemp( Ity_I8 );
   IRTemp low_u_flag = newTemp( Ity_I8 );
   IRTemp low_l_flag = newTemp( Ity_I8 );

   /* Check the LMD, digit 34, to see if it is zero. */
   assign( num_lmd, unop( Iop_1Uto8, binop( Iop_CmpEQ32, lmd, mkU32( 0 ) ) ) );

   assign( lmd_flag, unop( Iop_Not8, mkexpr( num_lmd ) ) );

   Count_zeros( 6,
                mkexpr( num_lmd ),
                mkexpr( lmd_flag ),
                &num_top,
                &top_flag,
                top_12_l );

   /* The 60-bit groups straddle the 32-bit halves, so the top two
      digits of each group are reassembled before being scanned. */
   Count_zeros( 2,
                mkexpr( num_top ),
                mkexpr( top_flag ),
                &num_mid_u,
                &mid_u_flag,
                binop( Iop_Or32,
                       binop( Iop_Shl32, mid_60_u, mkU8( 2 ) ),
                       binop( Iop_Shr32, mid_60_l, mkU8( 30 ) ) ) );

   Count_zeros( 1,
                mkexpr( num_mid_u ),
                mkexpr( mid_u_flag ),
                &num_mid_l,
                &mid_l_flag,
                mid_60_l );

   Count_zeros( 2,
                mkexpr( num_mid_l ),
                mkexpr( mid_l_flag ),
                &num_low_u,
                &low_u_flag,
                binop( Iop_Or32,
                       binop( Iop_Shl32, low_60_u, mkU8( 2 ) ),
                       binop( Iop_Shr32, low_60_l, mkU8( 30 ) ) ) );

   Count_zeros( 1,
                mkexpr( num_low_u ),
                mkexpr( low_u_flag ),
                &num_low_l,
                &low_l_flag,
                low_60_l );

   return mkexpr( num_low_l );
}
/* Returns all 1's if the DFP value 'val' is a NaN (unordered for
 * comparison purposes), i.e. its G field encodes a QNaN (0x1E) or an
 * SNaN (0x1F); 0 otherwise.
 */
static IRExpr * Check_unordered(IRExpr * val)
{
   IRTemp gfield0to5 = newTemp( Ity_I32 );

   /* Extract G[0:4] */
   assign( gfield0to5,
           binop( Iop_And32,
                  binop( Iop_Shr32, unop( Iop_64HIto32, val ), mkU8( 26 ) ),
                  mkU32( 0x1F ) ) );

   /* Check for unordered, return all 1's if true */
   return binop( Iop_Or32, /* QNaN check */
                 unop( Iop_1Sto32,
                       binop( Iop_CmpEQ32,
                              mkexpr( gfield0to5 ),
                              mkU32( 0x1E ) ) ),
                 unop( Iop_1Sto32, /* SNaN check */
                       binop( Iop_CmpEQ32,
                              mkexpr( gfield0to5 ),
                              mkU32( 0x1F ) ) ) );
}
15769 #undef AND
15770 #undef AND4
15771 #undef OR
15772 #undef OR3
15773 #undef OR4
15774 #undef NOT
15775 #undef SHR
15776 #undef SHL
15777 #undef BITS5
15779 /*------------------------------------------------------------*/
15780 /*--- Decimal Floating Point (DFP) instruction translation ---*/
15781 /*------------------------------------------------------------*/
/* DFP Arithmetic instructions: dadd, dsub, dmul, ddiv.
 * Reads frA/frB, applies the current DFP rounding mode, writes frS.
 */
static Bool dis_dfp_arith( UInt prefix, UInt theInstr )
{
   UInt opc2 = ifieldOPClo10( theInstr );
   UChar frS_addr = ifieldRegDS( theInstr );
   UChar frA_addr = ifieldRegA( theInstr );
   UChar frB_addr = ifieldRegB( theInstr );
   UChar flag_rC = ifieldBIT0( theInstr );

   IRTemp frA = newTemp( Ity_D64 );
   IRTemp frB = newTemp( Ity_D64 );
   IRTemp frS = newTemp( Ity_D64 );
   IRExpr* round = get_IR_roundingmode_DFP();

   /* By default, if flag_RC is set, we will clear cr1 after the
    * operation. In reality we should set cr1 to indicate the
    * exception status of the operation, but since we're not
    * simulating exceptions, the exception status will appear to be
    * zero. Hence cr1 should be cleared if this is a . form insn.
    */
   Bool clear_CR1 = True;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   assign( frA, getDReg( frA_addr ) );
   assign( frB, getDReg( frB_addr ) );

   /* NOTE(review): no default case — presumably the caller only routes
      the four known opc2 values here; verify against the decoder. */
   switch (opc2) {
   case 0x2: // dadd
      DIP( "dadd%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      assign( frS, triop( Iop_AddD64, round, mkexpr( frA ), mkexpr( frB ) ) );
      break;
   case 0x202: // dsub
      DIP( "dsub%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      assign( frS, triop( Iop_SubD64, round, mkexpr( frA ), mkexpr( frB ) ) );
      break;
   case 0x22: // dmul
      DIP( "dmul%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      assign( frS, triop( Iop_MulD64, round, mkexpr( frA ), mkexpr( frB ) ) );
      break;
   case 0x222: // ddiv
      DIP( "ddiv%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      assign( frS, triop( Iop_DivD64, round, mkexpr( frA ), mkexpr( frB ) ) );
      break;
   }

   putDReg( frS_addr, mkexpr( frS ) );

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8( 0 ) );
      putCR0( 1, mkU8( 0 ) );
   }

   return True;
}
/* Quad DFP Arithmetic instructions: daddq, dsubq, dmulq, ddivq.
 * Same as dis_dfp_arith but operating on D128 register pairs.
 */
static Bool dis_dfp_arithq( UInt prefix, UInt theInstr )
{
   UInt opc2 = ifieldOPClo10( theInstr );
   UChar frS_addr = ifieldRegDS( theInstr );
   UChar frA_addr = ifieldRegA( theInstr );
   UChar frB_addr = ifieldRegB( theInstr );
   UChar flag_rC = ifieldBIT0( theInstr );

   IRTemp frA = newTemp( Ity_D128 );
   IRTemp frB = newTemp( Ity_D128 );
   IRTemp frS = newTemp( Ity_D128 );
   IRExpr* round = get_IR_roundingmode_DFP();

   /* By default, if flag_RC is set, we will clear cr1 after the
    * operation. In reality we should set cr1 to indicate the
    * exception status of the operation, but since we're not
    * simulating exceptions, the exception status will appear to be
    * zero. Hence cr1 should be cleared if this is a . form insn.
    */
   Bool clear_CR1 = True;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   assign( frA, getDReg_pair( frA_addr ) );
   assign( frB, getDReg_pair( frB_addr ) );

   switch (opc2) {
   case 0x2: // daddq
      DIP( "daddq%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      assign( frS, triop( Iop_AddD128, round, mkexpr( frA ), mkexpr( frB ) ) );
      break;
   case 0x202: // dsubq
      DIP( "dsubq%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      assign( frS, triop( Iop_SubD128, round, mkexpr( frA ), mkexpr( frB ) ) );
      break;
   case 0x22: // dmulq
      DIP( "dmulq%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      assign( frS, triop( Iop_MulD128, round, mkexpr( frA ), mkexpr( frB ) ) );
      break;
   case 0x222: // ddivq
      DIP( "ddivq%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      assign( frS, triop( Iop_DivD128, round, mkexpr( frA ), mkexpr( frB ) ) );
      break;
   }

   putDReg_pair( frS_addr, mkexpr( frS ) );

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8( 0 ) );
      putCR0( 1, mkU8( 0 ) );
   }

   return True;
}
/* DFP 64-bit logical shift instructions: dscli (shift significand
 * left immediate) and dscri (shift significand right immediate).
 */
static Bool dis_dfp_shift( UInt prefix, UInt theInstr ) {
   UInt opc2      = ifieldOPClo9( theInstr );
   UChar frS_addr = ifieldRegDS( theInstr );
   UChar frA_addr = ifieldRegA( theInstr );
   UChar shift_val = IFIELD(theInstr, 10, 6);
   UChar flag_rC  = ifieldBIT0( theInstr );

   IRTemp frA = newTemp( Ity_D64 );
   IRTemp frS = newTemp( Ity_D64 );
   /* cr1 is cleared (not properly modelled) for . form insns; see
      dis_dfp_arith for the rationale. */
   Bool clear_CR1 = True;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   assign( frA, getDReg( frA_addr ) );

   switch (opc2) {
   case 0x42: // dscli
      DIP( "dscli%s fr%u,fr%u,%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, shift_val );
      assign( frS, binop( Iop_ShlD64, mkexpr( frA ), mkU8( shift_val ) ) );
      break;
   case 0x62: // dscri
      DIP( "dscri%s fr%u,fr%u,%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, shift_val );
      assign( frS, binop( Iop_ShrD64, mkexpr( frA ), mkU8( shift_val ) ) );
      break;
   }

   putDReg( frS_addr, mkexpr( frS ) );

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8( 0 ) );
      putCR0( 1, mkU8( 0 ) );
   }

   return True;
}
/* Quad DFP logical shift instructions: dscliq and dscriq, the D128
 * register-pair forms of dscli/dscri.
 */
static Bool dis_dfp_shiftq( UInt prefix, UInt theInstr ) {
   UInt opc2      = ifieldOPClo9( theInstr );
   UChar frS_addr = ifieldRegDS( theInstr );
   UChar frA_addr = ifieldRegA( theInstr );
   UChar shift_val = IFIELD(theInstr, 10, 6);
   UChar flag_rC  = ifieldBIT0( theInstr );

   IRTemp frA = newTemp( Ity_D128 );
   IRTemp frS = newTemp( Ity_D128 );
   /* cr1 is cleared (not properly modelled) for . form insns; see
      dis_dfp_arith for the rationale. */
   Bool clear_CR1 = True;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   assign( frA, getDReg_pair( frA_addr ) );

   switch (opc2) {
   case 0x42: // dscliq
      DIP( "dscliq%s fr%u,fr%u,%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, shift_val );
      assign( frS, binop( Iop_ShlD128, mkexpr( frA ), mkU8( shift_val ) ) );
      break;
   case 0x62: // dscriq
      DIP( "dscriq%s fr%u,fr%u,%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, shift_val );
      assign( frS, binop( Iop_ShrD128, mkexpr( frA ), mkU8( shift_val ) ) );
      break;
   }

   putDReg_pair( frS_addr, mkexpr( frS ) );

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8( 0 ) );
      putCR0( 1, mkU8( 0 ) );
   }

   return True;
}
/* DFP 64-bit format conversion instructions: dctdp (D32->D64),
 * drsp (D64->D32), dctfix (D64->I64) and dcffix (I64->D64).
 */
static Bool dis_dfp_fmt_conv( UInt prefix, UInt theInstr ) {
   UInt opc2      = ifieldOPClo10( theInstr );
   UChar frS_addr = ifieldRegDS( theInstr );
   UChar frB_addr = ifieldRegB( theInstr );
   IRExpr* round  = get_IR_roundingmode_DFP();
   UChar flag_rC  = ifieldBIT0( theInstr );
   IRTemp frB;
   IRTemp frS;
   /* cr1 is cleared (not properly modelled) for . form insns; see
      dis_dfp_arith for the rationale. */
   Bool clear_CR1 = True;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   switch (opc2) {
   case 0x102: //dctdp
      DIP( "dctdp%s fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frB_addr );

      frB = newTemp( Ity_D32 );
      frS = newTemp( Ity_D64 );
      assign( frB, getDReg32( frB_addr ) );
      assign( frS, unop( Iop_D32toD64, mkexpr( frB ) ) );
      putDReg( frS_addr, mkexpr( frS ) );
      break;
   case 0x302: // drsp
      DIP( "drsp%s fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frB_addr );
      frB = newTemp( Ity_D64 );
      frS = newTemp( Ity_D32 );
      assign( frB, getDReg( frB_addr ) );
      assign( frS, binop( Iop_D64toD32, round, mkexpr( frB ) ) );
      putDReg32( frS_addr, mkexpr( frS ) );
      break;
   case 0x122: // dctfix
      {
         IRTemp tmp = newTemp( Ity_I64 );

         DIP( "dctfix%s fr%u,fr%u\n",
              flag_rC ? ".":"", frS_addr, frB_addr );
         frB = newTemp( Ity_D64 );
         frS = newTemp( Ity_D64 );
         assign( frB, getDReg( frB_addr ) );
         assign( tmp, binop( Iop_D64toI64S, round, mkexpr( frB ) ) );
         /* The I64 result is stored bit-for-bit in the FPR. */
         assign( frS, unop( Iop_ReinterpI64asD64, mkexpr( tmp ) ) );
         putDReg( frS_addr, mkexpr( frS ) );
      }
      break;
   case 0x322: // dcffix
      DIP( "dcffix%s fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frB_addr );
      frB = newTemp( Ity_D64 );
      frS = newTemp( Ity_D64 );
      assign( frB, getDReg( frB_addr ) );
      /* The FPR holds a raw I64; reinterpret it before converting. */
      assign( frS, binop( Iop_I64StoD64,
                          round,
                          unop( Iop_ReinterpD64asI64, mkexpr( frB ) ) ) );
      putDReg( frS_addr, mkexpr( frS ) );
      break;
   }

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8( 0 ) );
      putCR0( 1, mkU8( 0 ) );
   }

   return True;
}
/* Quad DFP format conversion instructions: dctqpq (D64->D128),
 * dctfixq (D128->I64), drdpq (D128->D64), dcffixq (I64->D128) and the
 * ISA 3.1 dcffixqq / dctfixqq forms that convert between D128 and a
 * 128-bit integer held in a vector register.
 */
static Bool dis_dfp_fmt_convq( UInt prefix, UInt theInstr,
                               const VexAbiInfo* vbi ) {
   UInt opc2      = ifieldOPClo10( theInstr );
   UChar frS_addr = ifieldRegDS( theInstr );
   UChar frB_addr = ifieldRegB( theInstr );
   IRExpr* round  = get_IR_roundingmode_DFP();
   IRTemp frB64   = newTemp( Ity_D64 );
   IRTemp frB128  = newTemp( Ity_D128 );
   IRTemp frS64   = newTemp( Ity_D64 );
   IRTemp frS128  = newTemp( Ity_D128 );
   UChar flag_rC  = ifieldBIT0( theInstr );
   /* cr1 is cleared (not properly modelled) for . form insns; see
      dis_dfp_arith for the rationale. */
   Bool clear_CR1 = True;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   switch (opc2) {
   case 0x102: // dctqpq
      DIP( "dctqpq%s fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frB_addr );
      assign( frB64, getDReg( frB_addr ) );
      assign( frS128, unop( Iop_D64toD128, mkexpr( frB64 ) ) );
      putDReg_pair( frS_addr, mkexpr( frS128 ) );
      break;
   case 0x122: // dctfixq
      {
         IRTemp tmp = newTemp( Ity_I64 );

         DIP( "dctfixq%s fr%u,fr%u\n",
              flag_rC ? ".":"", frS_addr, frB_addr );
         assign( frB128, getDReg_pair( frB_addr ) );
         assign( tmp, binop( Iop_D128toI64S, round, mkexpr( frB128 ) ) );
         /* The I64 result is stored bit-for-bit in the FPR. */
         assign( frS64, unop( Iop_ReinterpI64asD64, mkexpr( tmp ) ) );
         putDReg( frS_addr, mkexpr( frS64 ) );
      }
      break;
   case 0x302: //drdpq
      DIP( "drdpq%s fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frB_addr );
      assign( frB128, getDReg_pair( frB_addr ) );
      assign( frS64, binop( Iop_D128toD64, round, mkexpr( frB128 ) ) );
      putDReg( frS_addr, mkexpr( frS64 ) );
      break;
   case 0x322: // dcffixq
      /* Have to introduce an IOP for this instruction so it will work
       * on POWER 6 because emulating the instruction requires a POWER 7
       * DFP instruction in the emulation code.
       */
      DIP( "dcffixq%s fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frB_addr );
      assign( frB64, getDReg( frB_addr ) );
      assign( frS128, unop( Iop_I64StoD128,
                            unop( Iop_ReinterpD64asI64,
                                  mkexpr( frB64 ) ) ) );
      putDReg_pair( frS_addr, mkexpr( frS128 ) );
      break;

   case 0x3E2:
      {
         Int opc3 = IFIELD( theInstr, 16, 5 );

         flag_rC = 0; // These instructions do not set condition codes.

         if (opc3 == 0) { // dcffixqq
            IRTemp tmpD128 = newTemp( Ity_D128 );
            IRTemp vB_src = newTemp( Ity_V128 );

            DIP( "dcffixqq fr%u,v%u\n", frS_addr, frB_addr );

            assign( vB_src, getVReg( frB_addr ));
            assign( tmpD128, binop( Iop_I128StoD128, round,
                                    unop( Iop_ReinterpV128asI128,
                                          mkexpr( vB_src ) ) ) );
            /* tmp128 is a Dfp 128 value which is held in a hi/lo 64-bit values.
             */
            generate_store_DFP_FPRF_value( Ity_D128, mkexpr( tmpD128 ), vbi);
            putDReg_pair( frS_addr, mkexpr( tmpD128 ) );

         } else if (opc3 == 1) { // dctfixqq
            IRTemp tmp128 = newTemp(Ity_I128);

            DIP( "dctfixqq v%u,fr%u\n", frS_addr, frB_addr );
            assign( tmp128, binop( Iop_D128toI128S, round,
                                   getDReg_pair( frB_addr ) ) );

            putVReg( frS_addr,
                     unop( Iop_ReinterpI128asV128, mkexpr( tmp128 ) ) );

         } else {
            vex_printf("ERROR: dis_dfp_fmt_convq unknown opc3 = %d value.\n",
                       opc3);
            return False;
         }
      }
      break;
   }

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8( 0 ) );
      putCR0( 1, mkU8( 0 ) );
   }

   return True;
}
/* DFP round-to-integer instructions: drintx and drintn (D64). */
static Bool dis_dfp_round( UInt prefix, UInt theInstr ) {
   UChar frS_addr = ifieldRegDS(theInstr);
   UChar R = IFIELD(theInstr, 16, 1);
   UChar RMC = IFIELD(theInstr, 9, 2);
   UChar frB_addr = ifieldRegB( theInstr );
   UChar flag_rC = ifieldBIT0( theInstr );
   IRTemp frB = newTemp( Ity_D64 );
   IRTemp frS = newTemp( Ity_D64 );
   UInt opc2 = ifieldOPClo8( theInstr );
   /* cr1 is cleared (not properly modelled) for . form insns; see
      dis_dfp_arith for the rationale. */
   Bool clear_CR1 = True;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   switch (opc2) {
   /* drintn, is the same as drintx. The only difference is this
    * instruction does not generate an exception for an inexact operation.
    * Currently not supporting inexact exceptions.
    */
   case 0x63: // drintx
   case 0xE3: // drintn
      DIP( "drintx/drintn%s fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frB_addr );

      /* NOTE, this instruction takes a DFP value and rounds to the
       * nearest floating point integer value, i.e. fractional part
       * is zero. The result is a floating point number.
       */
      /* pass the value of R and RMC in the same field */
      assign( frB, getDReg( frB_addr ) );
      assign( frS, binop( Iop_RoundD64toInt,
                          mkU32( ( R << 3 ) | RMC ),
                          mkexpr( frB ) ) );
      putDReg( frS_addr, mkexpr( frS ) );
      break;
   default:
      vex_printf("dis_dfp_round(ppc)(opc2)\n");
      return False;
   }

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8( 0 ) );
      putCR0( 1, mkU8( 0 ) );
   }

   return True;
}
/* Quad DFP round-to-integer instructions: drintxq and drintnq (D128). */
static Bool dis_dfp_roundq( UInt prefix, UInt theInstr ) {
   UChar frS_addr = ifieldRegDS( theInstr );
   UChar frB_addr = ifieldRegB( theInstr );
   UChar R = IFIELD(theInstr, 16, 1);
   UChar RMC = IFIELD(theInstr, 9, 2);
   UChar flag_rC = ifieldBIT0( theInstr );
   IRTemp frB = newTemp( Ity_D128 );
   IRTemp frS = newTemp( Ity_D128 );
   /* cr1 is cleared (not properly modelled) for . form insns; see
      dis_dfp_arith for the rationale. */
   Bool clear_CR1 = True;
   UInt opc2 = ifieldOPClo8( theInstr );

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   switch (opc2) {
   /* drintnq, is the same as drintxq. The only difference is this
    * instruction does not generate an exception for an inexact operation.
    * Currently not supporting inexact exceptions.
    */
   case 0x63: // drintxq
   case 0xE3: // drintnq
      DIP( "drintxq/drintnq%s fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frB_addr );

      /* pass the value of R and RMC in the same field */
      assign( frB, getDReg_pair( frB_addr ) );
      assign( frS, binop( Iop_RoundD128toInt,
                          mkU32( ( R << 3 ) | RMC ),
                          mkexpr( frB ) ) );
      putDReg_pair( frS_addr, mkexpr( frS ) );
      break;
   default:
      vex_printf("dis_dfp_roundq(ppc)(opc2)\n");
      return False;
   }

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8( 0 ) );
      putCR0( 1, mkU8( 0 ) );
   }

   return True;
}
/* DFP quantize and reround instructions (D64): dquai (quantize to an
 * immediate exponent), dqua (quantize to frA's exponent) and drrnd
 * (round to a number of significant digits given by frA).
 */
static Bool dis_dfp_quantize_sig_rrnd( UInt prefix, UInt theInstr ) {
   UInt opc2 = ifieldOPClo8( theInstr );
   UChar frS_addr = ifieldRegDS( theInstr );
   UChar frA_addr = ifieldRegA( theInstr );
   UChar frB_addr = ifieldRegB( theInstr );
   UChar flag_rC = ifieldBIT0( theInstr );
   UInt TE_value = IFIELD(theInstr, 16, 4);
   UInt TE_sign  = IFIELD(theInstr, 20, 1);
   UInt RMC = IFIELD(theInstr, 9, 2);
   IRTemp frA = newTemp( Ity_D64 );
   IRTemp frB = newTemp( Ity_D64 );
   IRTemp frS = newTemp( Ity_D64 );
   /* cr1 is cleared (not properly modelled) for . form insns; see
      dis_dfp_arith for the rationale. */
   Bool clear_CR1 = True;

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   assign( frB, getDReg( frB_addr ) );

   switch (opc2) {
   case 0x43: // dquai
      DIP( "dquai%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      IRTemp TE_I64 = newTemp( Ity_I64 );

      /* Generate a reference DFP value frA with the desired exponent
       * given by TE, using a significand of 1. Need to add the bias
       * 398 to TE. TE is stored as a 2's complement number.
       */
      if (TE_sign == 1) {
         /* Take 2's complement of the 5-bit value and subtract from bias.
          * Bias is adjusted for the +1 required when taking 2's complement.
          */
         assign( TE_I64,
                 unop( Iop_32Uto64,
                       binop( Iop_Sub32, mkU32( 397 ),
                              binop( Iop_And32, mkU32( 0xF ),
                                     unop( Iop_Not32, mkU32( TE_value ) )
                                     ) ) ) );

      } else {
         assign( TE_I64,
                 unop( Iop_32Uto64,
                       binop( Iop_Add32, mkU32( 398 ), mkU32( TE_value ) )
                       ) );
      }

      assign( frA, binop( Iop_InsertExpD64, mkexpr( TE_I64 ),
                          unop( Iop_ReinterpI64asD64, mkU64( 1 ) ) ) );

      assign( frS, triop( Iop_QuantizeD64,
                          mkU32( RMC ),
                          mkexpr( frA ),
                          mkexpr( frB ) ) );
      break;

   case 0x3: // dqua
      DIP( "dqua%s fr%u,fr%u,fr%u\n",
           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
      assign( frA, getDReg( frA_addr ) );
      assign( frS, triop( Iop_QuantizeD64,
                          mkU32( RMC ),
                          mkexpr( frA ),
                          mkexpr( frB ) ) );
      break;
   case 0x23: // drrnd
      {
         IRTemp tmp = newTemp( Ity_I8 );

         DIP( "drrnd%s fr%u,fr%u,fr%u\n",
              flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
         assign( frA, getDReg( frA_addr ) );
         /* Iop_64to8 not supported in 32 bit mode, do it in two steps. */
         assign( tmp, unop( Iop_32to8,
                            unop( Iop_64to32,
                                  unop( Iop_ReinterpD64asI64,
                                        mkexpr( frA ) ) ) ) );
         assign( frS, triop( Iop_SignificanceRoundD64,
                             mkU32( RMC ),
                             mkexpr( tmp ),
                             mkexpr( frB ) ) );
      }
      break;
   default:
      vex_printf("dis_dfp_quantize_sig_rrnd(ppc)(opc2)\n");
      return False;
   }
   putDReg( frS_addr, mkexpr( frS ) );

   if (flag_rC && clear_CR1) {
      putCR321( 1, mkU8( 0 ) );
      putCR0( 1, mkU8( 0 ) );
   }

   return True;
}
16351 static Bool dis_dfp_quantize_sig_rrndq( UInt prefix, UInt theInstr ) {
16352 UInt opc2 = ifieldOPClo8( theInstr );
16353 UChar frS_addr = ifieldRegDS( theInstr );
16354 UChar frA_addr = ifieldRegA( theInstr );
16355 UChar frB_addr = ifieldRegB( theInstr );
16356 UChar flag_rC = ifieldBIT0( theInstr );
16357 UInt TE_value = IFIELD(theInstr, 16, 4);
16358 UInt TE_sign = IFIELD(theInstr, 20, 1);
16359 UInt RMC = IFIELD(theInstr, 9, 2);
16360 IRTemp frA = newTemp( Ity_D128 );
16361 IRTemp frB = newTemp( Ity_D128 );
16362 IRTemp frS = newTemp( Ity_D128 );
16363 Bool clear_CR1 = True;
16365 /* There is no prefixed version of these instructions. */
16366 PREFIX_CHECK
16368 assign( frB, getDReg_pair( frB_addr ) );
16370 switch (opc2) {
16371 case 0x43: // dquaiq
16372 DIP( "dquaiq%s fr%u,fr%u,fr%u\n",
16373 flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
16374 IRTemp TE_I64 = newTemp( Ity_I64 );
16376 /* Generate a reference DFP value frA with the desired exponent
16377 * given by TE using significand of 1. Need to add the bias
16378 * 6176 to TE.
16380 if (TE_sign == 1) {
16381 /* Take 2's complement of the 5-bit value and subtract from bias.
16382 * Bias adjusted for the +1 required when taking 2's complement.
16384 assign( TE_I64,
16385 unop( Iop_32Uto64,
16386 binop( Iop_Sub32, mkU32( 6175 ),
16387 binop( Iop_And32, mkU32( 0xF ),
16388 unop( Iop_Not32, mkU32( TE_value ) )
16389 ) ) ) );
16391 } else {
16392 assign( TE_I64,
16393 unop( Iop_32Uto64,
16394 binop( Iop_Add32,
16395 mkU32( 6176 ),
16396 mkU32( TE_value ) ) ) );
16399 assign( frA,
16400 binop( Iop_InsertExpD128, mkexpr( TE_I64 ),
16401 unop( Iop_D64toD128,
16402 unop( Iop_ReinterpI64asD64, mkU64( 1 ) ) ) ) );
16403 assign( frS, triop( Iop_QuantizeD128,
16404 mkU32( RMC ),
16405 mkexpr( frA ),
16406 mkexpr( frB ) ) );
16407 break;
16408 case 0x3: // dquaq
16409 DIP( "dquaiq%s fr%u,fr%u,fr%u\n",
16410 flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
16411 assign( frA, getDReg_pair( frA_addr ) );
16412 assign( frS, triop( Iop_QuantizeD128,
16413 mkU32( RMC ),
16414 mkexpr( frA ),
16415 mkexpr( frB ) ) );
16416 break;
16417 case 0x23: // drrndq
16419 IRTemp tmp = newTemp( Ity_I8 );
16421 DIP( "drrndq%s fr%u,fr%u,fr%u\n",
16422 flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
16423 assign( frA, getDReg_pair( frA_addr ) );
16424 assign( tmp, unop( Iop_32to8,
16425 unop( Iop_64to32,
16426 unop( Iop_ReinterpD64asI64,
16427 unop( Iop_D128HItoD64,
16428 mkexpr( frA ) ) ) ) ) );
16429 assign( frS, triop( Iop_SignificanceRoundD128,
16430 mkU32( RMC ),
16431 mkexpr( tmp ),
16432 mkexpr( frB ) ) );
16434 break;
16435 default:
16436 vex_printf("dis_dfp_quantize_sig_rrndq(ppc)(opc2)\n");
16437 return False;
16439 putDReg_pair( frS_addr, mkexpr( frS ) );
16441 if (flag_rC && clear_CR1) {
16442 putCR321( 1, mkU8( 0 ) );
16443 putCR0( 1, mkU8( 0 ) );
16446 return True;
16449 static Bool dis_dfp_extract_insert( UInt prefix, UInt theInstr ) {
16450 UInt opc2 = ifieldOPClo10( theInstr );
16451 UChar frS_addr = ifieldRegDS( theInstr );
16452 UChar frA_addr = ifieldRegA( theInstr );
16453 UChar frB_addr = ifieldRegB( theInstr );
16454 UChar flag_rC = ifieldBIT0( theInstr );
16455 Bool clear_CR1 = True;
16457 IRTemp frA = newTemp( Ity_D64 );
16458 IRTemp frB = newTemp( Ity_D64 );
16459 IRTemp frS = newTemp( Ity_D64 );
16460 IRTemp tmp = newTemp( Ity_I64 );
16462 /* There is no prefixed version of these instructions. */
16463 PREFIX_CHECK
16465 assign( frA, getDReg( frA_addr ) );
16466 assign( frB, getDReg( frB_addr ) );
16468 switch (opc2) {
16469 case 0x162: // dxex
16470 DIP( "dxex%s fr%u,fr%u,fr%u\n",
16471 flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
16472 assign( tmp, unop( Iop_ExtractExpD64, mkexpr( frB ) ) );
16473 assign( frS, unop( Iop_ReinterpI64asD64, mkexpr( tmp ) ) );
16474 break;
16475 case 0x362: // diex
16476 DIP( "diex%s fr%u,fr%u,fr%u\n",
16477 flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
16478 assign( frS, binop( Iop_InsertExpD64,
16479 unop( Iop_ReinterpD64asI64,
16480 mkexpr( frA ) ),
16481 mkexpr( frB ) ) );
16482 break;
16483 default:
16484 vex_printf("dis_dfp_extract_insert(ppc)(opc2)\n");
16485 return False;
16488 putDReg( frS_addr, mkexpr( frS ) );
16490 if (flag_rC && clear_CR1) {
16491 putCR321( 1, mkU8( 0 ) );
16492 putCR0( 1, mkU8( 0 ) );
16495 return True;
16498 static Bool dis_dfp_extract_insertq( UInt prefix, UInt theInstr ) {
16499 UInt opc2 = ifieldOPClo10( theInstr );
16500 UChar frS_addr = ifieldRegDS( theInstr );
16501 UChar frA_addr = ifieldRegA( theInstr );
16502 UChar frB_addr = ifieldRegB( theInstr );
16503 UChar flag_rC = ifieldBIT0( theInstr );
16505 IRTemp frA = newTemp( Ity_D64 );
16506 IRTemp frB = newTemp( Ity_D128 );
16507 IRTemp frS64 = newTemp( Ity_D64 );
16508 IRTemp frS = newTemp( Ity_D128 );
16509 IRTemp tmp = newTemp( Ity_I64 );
16510 Bool clear_CR1 = True;
16512 /* There is no prefixed version of these instructions. */
16513 PREFIX_CHECK
16515 assign( frB, getDReg_pair( frB_addr ) );
16517 switch (opc2) {
16518 case 0x162: // dxexq
16519 DIP( "dxexq%s fr%u,fr%u\n",
16520 flag_rC ? ".":"", frS_addr, frB_addr );
16521 /* Instruction actually returns a 64-bit result. So as to be
16522 * consistent and not have to add a new struct, the emulation returns
16523 * the 64-bit result in the upper and lower register.
16525 assign( tmp, unop( Iop_ExtractExpD128, mkexpr( frB ) ) );
16526 assign( frS64, unop( Iop_ReinterpI64asD64, mkexpr( tmp ) ) );
16527 putDReg( frS_addr, mkexpr( frS64 ) );
16528 break;
16529 case 0x362: // diexq
16530 DIP( "diexq%s fr%u,fr%u,fr%u\n",
16531 flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
16532 assign( frA, getDReg( frA_addr ) );
16533 assign( frS, binop( Iop_InsertExpD128,
16534 unop( Iop_ReinterpD64asI64, mkexpr( frA ) ),
16535 mkexpr( frB ) ) );
16536 putDReg_pair( frS_addr, mkexpr( frS ) );
16537 break;
16538 default:
16539 vex_printf("dis_dfp_extract_insertq(ppc)(opc2)\n");
16540 return False;
16543 if (flag_rC && clear_CR1) {
16544 putCR321( 1, mkU8( 0 ) );
16545 putCR0( 1, mkU8( 0 ) );
16548 return True;
16551 /* DFP 64-bit comparison instructions */
16552 static Bool dis_dfp_compare( UInt prefix, UInt theInstr ) {
16553 /* X-Form */
16554 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) ); // AKA BF
16555 UChar frA_addr = ifieldRegA( theInstr );
16556 UChar frB_addr = ifieldRegB( theInstr );
16557 UInt opc1 = ifieldOPC( theInstr );
16558 IRTemp frA;
16559 IRTemp frB;
16561 IRTemp ccIR = newTemp( Ity_I32 );
16562 IRTemp ccPPC32 = newTemp( Ity_I32 );
16564 /* There is no prefixed version of these instructions. */
16565 PREFIX_CHECK
16567 /* Note: Differences between dcmpu and dcmpo are only in exception
16568 flag settings, which aren't supported anyway. */
16569 switch (opc1) {
16570 case 0x3B: /* dcmpo and dcmpu, DFP 64-bit */
16571 DIP( "dcmpo %u,fr%u,fr%u\n", crfD, frA_addr, frB_addr );
16572 frA = newTemp( Ity_D64 );
16573 frB = newTemp( Ity_D64 );
16575 assign( frA, getDReg( frA_addr ) );
16576 assign( frB, getDReg( frB_addr ) );
16578 assign( ccIR, binop( Iop_CmpD64, mkexpr( frA ), mkexpr( frB ) ) );
16579 break;
16580 case 0x3F: /* dcmpoq and dcmpuq,DFP 128-bit */
16581 DIP( "dcmpoq %u,fr%u,fr%u\n", crfD, frA_addr, frB_addr );
16582 frA = newTemp( Ity_D128 );
16583 frB = newTemp( Ity_D128 );
16585 assign( frA, getDReg_pair( frA_addr ) );
16586 assign( frB, getDReg_pair( frB_addr ) );
16587 assign( ccIR, binop( Iop_CmpD128, mkexpr( frA ), mkexpr( frB ) ) );
16588 break;
16589 default:
16590 vex_printf("dis_dfp_compare(ppc)(opc2)\n");
16591 return False;
16594 /* Map compare result from IR to PPC32 */
16596 FP cmp result | PPC | IR
16597 --------------------------
16598 UN | 0x1 | 0x45
16599 EQ | 0x2 | 0x40
16600 GT | 0x4 | 0x00
16601 LT | 0x8 | 0x01
16604 assign( ccPPC32,
16605 binop( Iop_Shl32,
16606 mkU32( 1 ),
16607 unop( Iop_32to8,
16608 binop( Iop_Or32,
16609 binop( Iop_And32,
16610 unop( Iop_Not32,
16611 binop( Iop_Shr32,
16612 mkexpr( ccIR ),
16613 mkU8( 5 ) ) ),
16614 mkU32( 2 ) ),
16615 binop( Iop_And32,
16616 binop( Iop_Xor32,
16617 mkexpr( ccIR ),
16618 binop( Iop_Shr32,
16619 mkexpr( ccIR ),
16620 mkU8( 6 ) ) ),
16621 mkU32( 1 ) ) ) ) ) );
16623 putGST_field( PPC_GST_CR, mkexpr( ccPPC32 ), crfD );
16624 putFPCC( mkexpr( ccPPC32 ) );
16625 return True;
16628 /* Test class/group/exponent/significance instructions. */
16629 static Bool dis_dfp_exponent_test ( UInt prefix, UInt theInstr )
16631 UChar frA_addr = ifieldRegA( theInstr );
16632 UChar frB_addr = ifieldRegB( theInstr );
16633 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
16634 IRTemp frA = newTemp( Ity_D64 );
16635 IRTemp frB = newTemp( Ity_D64 );
16636 IRTemp frA128 = newTemp( Ity_D128 );
16637 IRTemp frB128 = newTemp( Ity_D128 );
16638 UInt opc1 = ifieldOPC( theInstr );
16639 IRTemp gfield_A = newTemp( Ity_I32 );
16640 IRTemp gfield_B = newTemp( Ity_I32 );
16641 IRTemp gfield_mask = newTemp( Ity_I32 );
16642 IRTemp exponent_A = newTemp( Ity_I32 );
16643 IRTemp exponent_B = newTemp( Ity_I32 );
16644 IRTemp A_NaN_true = newTemp( Ity_I32 );
16645 IRTemp B_NaN_true = newTemp( Ity_I32 );
16646 IRTemp A_inf_true = newTemp( Ity_I32 );
16647 IRTemp B_inf_true = newTemp( Ity_I32 );
16648 IRTemp A_equals_B = newTemp( Ity_I32 );
16649 IRTemp finite_number = newTemp( Ity_I32 );
16650 IRTemp cc0 = newTemp( Ity_I32 );
16651 IRTemp cc1 = newTemp( Ity_I32 );
16652 IRTemp cc2 = newTemp( Ity_I32 );
16653 IRTemp cc3 = newTemp( Ity_I32 );
16654 IRTemp cc = newTemp( Ity_I32 );
16656 /* There is no prefixed version of these instructions. */
16657 PREFIX_CHECK
16659 /* The dtstex and dtstexg instructions only differ in the size of the
16660 * exponent field. The following switch statement takes care of the size
16661 * specific setup. Once the value of the exponents, the G-field shift
16662 * and mask is setup the remaining code is identical.
16664 switch (opc1) {
16665 case 0x3b: // dtstex Extended instruction setup
16666 DIP("dtstex %u,r%u,r%d\n", crfD, frA_addr, frB_addr);
16667 assign( frA, getDReg( frA_addr ) );
16668 assign( frB, getDReg( frB_addr ) );
16669 assign( gfield_mask, mkU32( DFP_G_FIELD_LONG_MASK ) );
16670 assign(exponent_A, unop( Iop_64to32,
16671 unop( Iop_ExtractExpD64,
16672 mkexpr( frA ) ) ) );
16673 assign(exponent_B, unop( Iop_64to32,
16674 unop( Iop_ExtractExpD64,
16675 mkexpr( frB ) ) ) );
16676 break;
16678 case 0x3F: // dtstexq Quad instruction setup
16679 DIP("dtstexq %u,r%u,r%d\n", crfD, frA_addr, frB_addr);
16680 assign( frA128, getDReg_pair( frA_addr ) );
16681 assign( frB128, getDReg_pair( frB_addr ) );
16682 assign( frA, unop( Iop_D128HItoD64, mkexpr( frA128 ) ) );
16683 assign( frB, unop( Iop_D128HItoD64, mkexpr( frB128 ) ) );
16684 assign( gfield_mask, mkU32( DFP_G_FIELD_EXTND_MASK ) );
16685 assign( exponent_A, unop( Iop_64to32,
16686 unop( Iop_ExtractExpD128,
16687 mkexpr( frA128 ) ) ) );
16688 assign( exponent_B, unop( Iop_64to32,
16689 unop( Iop_ExtractExpD128,
16690 mkexpr( frB128 ) ) ) );
16691 break;
16692 default:
16693 vex_printf("dis_dfp_exponent_test(ppc)(opc2)\n");
16694 return False;
16697 /* Extract the Gfield */
16698 assign( gfield_A, binop( Iop_And32,
16699 mkexpr( gfield_mask ),
16700 unop( Iop_64HIto32,
16701 unop( Iop_ReinterpD64asI64,
16702 mkexpr(frA) ) ) ) );
16704 assign( gfield_B, binop( Iop_And32,
16705 mkexpr( gfield_mask ),
16706 unop( Iop_64HIto32,
16707 unop( Iop_ReinterpD64asI64,
16708 mkexpr(frB) ) ) ) );
16710 /* check for NAN */
16711 assign( A_NaN_true, binop(Iop_Or32,
16712 unop( Iop_1Sto32,
16713 binop( Iop_CmpEQ32,
16714 mkexpr( gfield_A ),
16715 mkU32( 0x7C000000 ) ) ),
16716 unop( Iop_1Sto32,
16717 binop( Iop_CmpEQ32,
16718 mkexpr( gfield_A ),
16719 mkU32( 0x7E000000 ) )
16720 ) ) );
16721 assign( B_NaN_true, binop(Iop_Or32,
16722 unop( Iop_1Sto32,
16723 binop( Iop_CmpEQ32,
16724 mkexpr( gfield_B ),
16725 mkU32( 0x7C000000 ) ) ),
16726 unop( Iop_1Sto32,
16727 binop( Iop_CmpEQ32,
16728 mkexpr( gfield_B ),
16729 mkU32( 0x7E000000 ) )
16730 ) ) );
16732 /* check for infinity */
16733 assign( A_inf_true,
16734 unop( Iop_1Sto32,
16735 binop( Iop_CmpEQ32,
16736 mkexpr( gfield_A ),
16737 mkU32( 0x78000000 ) ) ) );
16739 assign( B_inf_true,
16740 unop( Iop_1Sto32,
16741 binop( Iop_CmpEQ32,
16742 mkexpr( gfield_B ),
16743 mkU32( 0x78000000 ) ) ) );
16745 assign( finite_number,
16746 unop( Iop_Not32,
16747 binop( Iop_Or32,
16748 binop( Iop_Or32,
16749 mkexpr( A_NaN_true ),
16750 mkexpr( B_NaN_true ) ),
16751 binop( Iop_Or32,
16752 mkexpr( A_inf_true ),
16753 mkexpr( B_inf_true ) ) ) ) );
16755 /* Calculate the condition code bits
16756 * If QNaN,SNaN, +infinity, -infinity then cc0, cc1 and cc2 are zero
16757 * regardless of the value of the comparisons and cc3 is 1. Otherwise,
16758 * cc0, cc1 and cc0 reflect the results of the comparisons.
16760 assign( A_equals_B,
16761 binop( Iop_Or32,
16762 unop( Iop_1Uto32,
16763 binop( Iop_CmpEQ32,
16764 mkexpr( exponent_A ),
16765 mkexpr( exponent_B ) ) ),
16766 binop( Iop_Or32,
16767 binop( Iop_And32,
16768 mkexpr( A_inf_true ),
16769 mkexpr( B_inf_true ) ),
16770 binop( Iop_And32,
16771 mkexpr( A_NaN_true ),
16772 mkexpr( B_NaN_true ) ) ) ) );
16774 assign( cc0, binop( Iop_And32,
16775 mkexpr( finite_number ),
16776 binop( Iop_Shl32,
16777 unop( Iop_1Uto32,
16778 binop( Iop_CmpLT32U,
16779 mkexpr( exponent_A ),
16780 mkexpr( exponent_B ) ) ),
16781 mkU8( 3 ) ) ) );
16783 assign( cc1, binop( Iop_And32,
16784 mkexpr( finite_number ),
16785 binop( Iop_Shl32,
16786 unop( Iop_1Uto32,
16787 binop( Iop_CmpLT32U,
16788 mkexpr( exponent_B ),
16789 mkexpr( exponent_A ) ) ),
16790 mkU8( 2 ) ) ) );
16792 assign( cc2, binop( Iop_Shl32,
16793 binop( Iop_And32,
16794 mkexpr( A_equals_B ),
16795 mkU32( 1 ) ),
16796 mkU8( 1 ) ) );
16798 assign( cc3, binop( Iop_And32,
16799 unop( Iop_Not32, mkexpr( A_equals_B ) ),
16800 binop( Iop_And32,
16801 mkU32( 0x1 ),
16802 binop( Iop_Or32,
16803 binop( Iop_Or32,
16804 mkexpr ( A_inf_true ),
16805 mkexpr ( B_inf_true ) ),
16806 binop( Iop_Or32,
16807 mkexpr ( A_NaN_true ),
16808 mkexpr ( B_NaN_true ) ) )
16809 ) ) );
16811 /* store the condition code */
16812 assign( cc, binop( Iop_Or32,
16813 mkexpr( cc0 ),
16814 binop( Iop_Or32,
16815 mkexpr( cc1 ),
16816 binop( Iop_Or32,
16817 mkexpr( cc2 ),
16818 mkexpr( cc3 ) ) ) ) );
16819 putGST_field( PPC_GST_CR, mkexpr( cc ), crfD );
16820 putFPCC( mkexpr( cc ) );
16821 return True;
16824 /* Test class/group/exponent/significance instructions. */
16825 static Bool dis_dfp_class_test ( UInt prefix, UInt theInstr )
16827 UChar frA_addr = ifieldRegA( theInstr );
16828 IRTemp frA = newTemp( Ity_D64 );
16829 IRTemp abs_frA = newTemp( Ity_D64 );
16830 IRTemp frAI64_hi = newTemp( Ity_I64 );
16831 IRTemp frAI64_lo = newTemp( Ity_I64 );
16832 UInt opc1 = ifieldOPC( theInstr );
16833 UInt opc2 = ifieldOPClo9( theInstr );
16834 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) ); // AKA BF
16835 UInt DCM = IFIELD( theInstr, 10, 6 );
16836 IRTemp DCM_calc = newTemp( Ity_I32 );
16837 UInt max_exp = 0;
16838 UInt min_exp = 0;
16839 IRTemp min_subnormalD64 = newTemp( Ity_D64 );
16840 IRTemp min_subnormalD128 = newTemp( Ity_D128 );
16841 IRTemp significand64 = newTemp( Ity_D64 );
16842 IRTemp significand128 = newTemp( Ity_D128 );
16843 IRTemp exp_min_normal = newTemp( Ity_I64 );
16844 IRTemp exponent = newTemp( Ity_I32 );
16846 IRTemp infinity_true = newTemp( Ity_I32 );
16847 IRTemp SNaN_true = newTemp( Ity_I32 );
16848 IRTemp QNaN_true = newTemp( Ity_I32 );
16849 IRTemp subnormal_true = newTemp( Ity_I32 );
16850 IRTemp normal_true = newTemp( Ity_I32 );
16851 IRTemp extreme_true = newTemp( Ity_I32 );
16852 IRTemp lmd = newTemp( Ity_I32 );
16853 IRTemp lmd_zero_true = newTemp( Ity_I32 );
16854 IRTemp zero_true = newTemp( Ity_I32 );
16855 IRTemp sign = newTemp( Ity_I32 );
16856 IRTemp field = newTemp( Ity_I32 );
16857 IRTemp ccIR_zero = newTemp( Ity_I32 );
16858 IRTemp ccIR_subnormal = newTemp( Ity_I32 );
16860 /* UInt size = DFP_LONG; JRS:unused */
16861 IRTemp gfield = newTemp( Ity_I32 );
16862 IRTemp gfield_0_4_shift = newTemp( Ity_I8 );
16863 IRTemp gfield_mask = newTemp( Ity_I32 );
16864 IRTemp dcm0 = newTemp( Ity_I32 );
16865 IRTemp dcm1 = newTemp( Ity_I32 );
16866 IRTemp dcm2 = newTemp( Ity_I32 );
16867 IRTemp dcm3 = newTemp( Ity_I32 );
16868 IRTemp dcm4 = newTemp( Ity_I32 );
16869 IRTemp dcm5 = newTemp( Ity_I32 );
16871 /* There is no prefixed version of these instructions. */
16872 PREFIX_CHECK
16874 /* The only difference between the dtstdc and dtstdcq instructions is
16875 * size of the T and G fields. The calculation of the 4 bit field
16876 * is the same. Setup the parameters and values that are DFP size
16877 * specific. The rest of the code is independent of the DFP size.
16879 * The Io_CmpD64 is used below. The instruction sets the ccIR values.
16880 * The interpretation of the ccIR values is as follows:
16882 * DFP cmp result | IR
16883 * --------------------------
16884 * UN | 0x45
16885 * EQ | 0x40
16886 * GT | 0x00
16887 * LT | 0x01
16890 assign( frA, getDReg( frA_addr ) );
16891 assign( frAI64_hi, unop( Iop_ReinterpD64asI64, mkexpr( frA ) ) );
16893 assign( abs_frA, unop( Iop_ReinterpI64asD64,
16894 binop( Iop_And64,
16895 unop( Iop_ReinterpD64asI64,
16896 mkexpr( frA ) ),
16897 mkU64( 0x7FFFFFFFFFFFFFFFULL ) ) ) );
16898 assign( gfield_0_4_shift, mkU8( 31 - 5 ) ); // G-field[0:4]
16899 switch (opc1) {
16900 case 0x3b: // dtstdc, dtstdg
16901 DIP("dtstd%s %u,r%u,%u\n", opc2 == 0xc2 ? "c" : "g",
16902 crfD, frA_addr, DCM);
16903 /* setup the parameters for the long format of the two instructions */
16904 assign( frAI64_lo, mkU64( 0 ) );
16905 assign( gfield_mask, mkU32( DFP_G_FIELD_LONG_MASK ) );
16906 max_exp = DFP_LONG_EXP_MAX;
16907 min_exp = DFP_LONG_EXP_MIN;
16909 assign( exponent, unop( Iop_64to32,
16910 unop( Iop_ExtractExpD64,
16911 mkexpr( frA ) ) ) );
16912 assign( significand64,
16913 unop( Iop_ReinterpI64asD64,
16914 mkU64( 0x2234000000000001ULL ) ) ); // dfp 1.0
16915 assign( exp_min_normal,mkU64( 398 - 383 ) );
16916 assign( min_subnormalD64,
16917 binop( Iop_InsertExpD64,
16918 mkexpr( exp_min_normal ),
16919 mkexpr( significand64 ) ) );
16921 assign( ccIR_subnormal,
16922 binop( Iop_CmpD64,
16923 mkexpr( abs_frA ),
16924 mkexpr( min_subnormalD64 ) ) );
16926 /* compare absolute value of frA with zero */
16927 assign( ccIR_zero,
16928 binop( Iop_CmpD64,
16929 mkexpr( abs_frA ),
16930 unop( Iop_ReinterpI64asD64,
16931 mkU64( 0x2238000000000000ULL ) ) ) );
16933 /* size = DFP_LONG; JRS: unused */
16934 break;
16936 case 0x3F: // dtstdcq, dtstdgq
16937 DIP("dtstd%sq %u,r%u,%u\n", opc2 == 0xc2 ? "c" : "g",
16938 crfD, frA_addr, DCM);
16939 /* setup the parameters for the extended format of the
16940 * two instructions
16942 assign( frAI64_lo, unop( Iop_ReinterpD64asI64,
16943 getDReg( frA_addr+1 ) ) );
16945 assign( gfield_mask, mkU32( DFP_G_FIELD_EXTND_MASK ) );
16946 max_exp = DFP_EXTND_EXP_MAX;
16947 min_exp = DFP_EXTND_EXP_MIN;
16948 assign( exponent, unop( Iop_64to32,
16949 unop( Iop_ExtractExpD128,
16950 getDReg_pair( frA_addr) ) ) );
16952 /* create quand exponent for minimum normal number */
16953 assign( exp_min_normal, mkU64( 6176 - 6143 ) );
16954 assign( significand128,
16955 unop( Iop_D64toD128,
16956 unop( Iop_ReinterpI64asD64,
16957 mkU64( 0x2234000000000001ULL ) ) ) ); // dfp 1.0
16959 assign( min_subnormalD128,
16960 binop( Iop_InsertExpD128,
16961 mkexpr( exp_min_normal ),
16962 mkexpr( significand128 ) ) );
16964 assign( ccIR_subnormal,
16965 binop( Iop_CmpD128,
16966 binop( Iop_D64HLtoD128,
16967 unop( Iop_ReinterpI64asD64,
16968 binop( Iop_And64,
16969 unop( Iop_ReinterpD64asI64,
16970 mkexpr( frA ) ),
16971 mkU64( 0x7FFFFFFFFFFFFFFFULL ) ) ),
16972 getDReg( frA_addr+1 ) ),
16973 mkexpr( min_subnormalD128 ) ) );
16974 assign( ccIR_zero,
16975 binop( Iop_CmpD128,
16976 binop( Iop_D64HLtoD128,
16977 mkexpr( abs_frA ),
16978 getDReg( frA_addr+1 ) ),
16979 unop( Iop_D64toD128,
16980 unop( Iop_ReinterpI64asD64,
16981 mkU64( 0x0ULL ) ) ) ) );
16983 /* size = DFP_EXTND; JRS:unused */
16984 break;
16985 default:
16986 vex_printf("dis_dfp_class_test(ppc)(opc2)\n");
16987 return False;
16990 /* The G-field is in the upper 32-bits. The I64 logical operations
16991 * do not seem to be supported in 32-bit mode so keep things as 32-bit
16992 * operations.
16994 assign( gfield, binop( Iop_And32,
16995 mkexpr( gfield_mask ),
16996 unop( Iop_64HIto32,
16997 mkexpr(frAI64_hi) ) ) );
16999 /* There is a lot of code that is the same to do the class and group
17000 * instructions. Later there is an if statement to handle the specific
17001 * instruction.
17003 * Will be using I32 values, compares, shifts and logical operations for
17004 * this code as the 64-bit compare, shifts, logical operations are not
17005 * supported in 32-bit mode.
17008 /* Check the bits for Infinity, QNaN or Signaling NaN */
17009 assign( infinity_true,
17010 unop( Iop_1Sto32,
17011 binop( Iop_CmpEQ32,
17012 binop( Iop_And32,
17013 mkU32( 0x7C000000 ),
17014 mkexpr( gfield ) ),
17015 mkU32( 0x78000000 ) ) ) );
17017 assign( SNaN_true,
17018 unop( Iop_1Sto32,
17019 binop( Iop_CmpEQ32,
17020 binop( Iop_And32,
17021 mkU32( 0x7E000000 ),
17022 mkexpr( gfield ) ),
17023 mkU32( 0x7E000000 ) ) ) );
17025 assign( QNaN_true,
17026 binop( Iop_And32,
17027 unop( Iop_1Sto32,
17028 binop( Iop_CmpEQ32,
17029 binop( Iop_And32,
17030 mkU32( 0x7E000000 ),
17031 mkexpr( gfield ) ),
17032 mkU32( 0x7C000000 ) ) ),
17033 unop( Iop_Not32,
17034 mkexpr( SNaN_true ) ) ) );
17036 assign( zero_true,
17037 binop( Iop_And32,
17038 unop(Iop_1Sto32,
17039 binop( Iop_CmpEQ32,
17040 mkexpr( ccIR_zero ),
17041 mkU32( 0x40 ) ) ), // ccIR code for Equal
17042 unop( Iop_Not32,
17043 binop( Iop_Or32,
17044 mkexpr( infinity_true ),
17045 binop( Iop_Or32,
17046 mkexpr( QNaN_true ),
17047 mkexpr( SNaN_true ) ) ) ) ) );
17049 /* Do compare of frA the minimum normal value. Comparison is size
17050 * depenent and was done above to get the ccIR value.
17052 assign( subnormal_true,
17053 binop( Iop_And32,
17054 binop( Iop_Or32,
17055 unop( Iop_1Sto32,
17056 binop( Iop_CmpEQ32,
17057 mkexpr( ccIR_subnormal ),
17058 mkU32( 0x40 ) ) ), // ccIR code for Equal
17059 unop( Iop_1Sto32,
17060 binop( Iop_CmpEQ32,
17061 mkexpr( ccIR_subnormal ),
17062 mkU32( 0x1 ) ) ) ), // ccIR code for LT
17063 unop( Iop_Not32,
17064 binop( Iop_Or32,
17065 binop( Iop_Or32,
17066 mkexpr( infinity_true ),
17067 mkexpr( zero_true) ),
17068 binop( Iop_Or32,
17069 mkexpr( QNaN_true ),
17070 mkexpr( SNaN_true ) ) ) ) ) );
17072 /* Normal number is not subnormal, infinity, NaN or Zero */
17073 assign( normal_true,
17074 unop( Iop_Not32,
17075 binop( Iop_Or32,
17076 binop( Iop_Or32,
17077 mkexpr( infinity_true ),
17078 mkexpr( zero_true ) ),
17079 binop( Iop_Or32,
17080 mkexpr( subnormal_true ),
17081 binop( Iop_Or32,
17082 mkexpr( QNaN_true ),
17083 mkexpr( SNaN_true ) ) ) ) ) );
17085 /* Calculate the DCM bit field based on the tests for the specific
17086 * instruction
17088 if (opc2 == 0xC2) { // dtstdc, dtstdcq
17089 /* DCM[0:5] Bit Data Class definition
17090 * 0 Zero
17091 * 1 Subnormal
17092 * 2 Normal
17093 * 3 Infinity
17094 * 4 Quiet NaN
17095 * 5 Signaling NaN
17098 assign( dcm0, binop( Iop_Shl32,
17099 mkexpr( zero_true ),
17100 mkU8( 5 ) ) );
17101 assign( dcm1, binop( Iop_Shl32,
17102 binop( Iop_And32,
17103 mkexpr( subnormal_true ),
17104 mkU32( 1 ) ),
17105 mkU8( 4 ) ) );
17106 assign( dcm2, binop( Iop_Shl32,
17107 binop( Iop_And32,
17108 mkexpr( normal_true ),
17109 mkU32( 1 ) ),
17110 mkU8( 3 ) ) );
17111 assign( dcm3, binop( Iop_Shl32,
17112 binop( Iop_And32,
17113 mkexpr( infinity_true),
17114 mkU32( 1 ) ),
17115 mkU8( 2 ) ) );
17116 assign( dcm4, binop( Iop_Shl32,
17117 binop( Iop_And32,
17118 mkexpr( QNaN_true ),
17119 mkU32( 1 ) ),
17120 mkU8( 1 ) ) );
17121 assign( dcm5, binop( Iop_And32, mkexpr( SNaN_true), mkU32( 1 ) ) );
17123 } else if (opc2 == 0xE2) { // dtstdg, dtstdgq
17124 /* check if the exponent is extreme */
17125 assign( extreme_true, binop( Iop_Or32,
17126 unop( Iop_1Sto32,
17127 binop( Iop_CmpEQ32,
17128 mkexpr( exponent ),
17129 mkU32( max_exp ) ) ),
17130 unop( Iop_1Sto32,
17131 binop( Iop_CmpEQ32,
17132 mkexpr( exponent ),
17133 mkU32( min_exp ) ) ) ) );
17135 /* Check if LMD is zero */
17136 Get_lmd( &lmd, binop( Iop_Shr32,
17137 mkexpr( gfield ), mkU8( 31 - 5 ) ) );
17139 assign( lmd_zero_true, unop( Iop_1Sto32,
17140 binop( Iop_CmpEQ32,
17141 mkexpr( lmd ),
17142 mkU32( 0 ) ) ) );
17144 /* DCM[0:5] Bit Data Class definition
17145 * 0 Zero with non-extreme exponent
17146 * 1 Zero with extreme exponent
17147 * 2 Subnormal or (Normal with extreme exponent)
17148 * 3 Normal with non-extreme exponent and
17149 * leftmost zero digit in significand
17150 * 4 Normal with non-extreme exponent and
17151 * leftmost nonzero digit in significand
17152 * 5 Special symbol (Infinity, QNaN, or SNaN)
17154 assign( dcm0, binop( Iop_Shl32,
17155 binop( Iop_And32,
17156 binop( Iop_And32,
17157 unop( Iop_Not32,
17158 mkexpr( extreme_true ) ),
17159 mkexpr( zero_true ) ),
17160 mkU32( 0x1 ) ),
17161 mkU8( 5 ) ) );
17163 assign( dcm1, binop( Iop_Shl32,
17164 binop( Iop_And32,
17165 binop( Iop_And32,
17166 mkexpr( extreme_true ),
17167 mkexpr( zero_true ) ),
17168 mkU32( 0x1 ) ),
17169 mkU8( 4 ) ) );
17171 assign( dcm2, binop( Iop_Shl32,
17172 binop( Iop_And32,
17173 binop( Iop_Or32,
17174 binop( Iop_And32,
17175 mkexpr( extreme_true ),
17176 mkexpr( normal_true ) ),
17177 mkexpr( subnormal_true ) ),
17178 mkU32( 0x1 ) ),
17179 mkU8( 3 ) ) );
17181 assign( dcm3, binop( Iop_Shl32,
17182 binop( Iop_And32,
17183 binop( Iop_And32,
17184 binop( Iop_And32,
17185 unop( Iop_Not32,
17186 mkexpr( extreme_true ) ),
17187 mkexpr( normal_true ) ),
17188 unop( Iop_1Sto32,
17189 binop( Iop_CmpEQ32,
17190 mkexpr( lmd ),
17191 mkU32( 0 ) ) ) ),
17192 mkU32( 0x1 ) ),
17193 mkU8( 2 ) ) );
17195 assign( dcm4, binop( Iop_Shl32,
17196 binop( Iop_And32,
17197 binop( Iop_And32,
17198 binop( Iop_And32,
17199 unop( Iop_Not32,
17200 mkexpr( extreme_true ) ),
17201 mkexpr( normal_true ) ),
17202 unop( Iop_1Sto32,
17203 binop( Iop_CmpNE32,
17204 mkexpr( lmd ),
17205 mkU32( 0 ) ) ) ),
17206 mkU32( 0x1 ) ),
17207 mkU8( 1 ) ) );
17209 assign( dcm5, binop( Iop_And32,
17210 binop( Iop_Or32,
17211 mkexpr( SNaN_true),
17212 binop( Iop_Or32,
17213 mkexpr( QNaN_true),
17214 mkexpr( infinity_true) ) ),
17215 mkU32( 0x1 ) ) );
17218 /* create DCM field */
17219 assign( DCM_calc,
17220 binop( Iop_Or32,
17221 mkexpr( dcm0 ),
17222 binop( Iop_Or32,
17223 mkexpr( dcm1 ),
17224 binop( Iop_Or32,
17225 mkexpr( dcm2 ),
17226 binop( Iop_Or32,
17227 mkexpr( dcm3 ),
17228 binop( Iop_Or32,
17229 mkexpr( dcm4 ),
17230 mkexpr( dcm5 ) ) ) ) ) ) );
17232 /* Get the sign of the DFP number, ignore sign for QNaN */
17233 assign( sign,
17234 unop( Iop_1Uto32,
17235 binop( Iop_CmpEQ32,
17236 binop( Iop_Shr32,
17237 unop( Iop_64HIto32, mkexpr( frAI64_hi ) ),
17238 mkU8( 63 - 32 ) ),
17239 mkU32( 1 ) ) ) );
17241 /* This instruction generates a four bit field to be stored in the
17242 * condition code register. The condition code register consists of 7
17243 * fields. The field to be written to is specified by the BF (AKA crfD)
17244 * field.
17246 * The field layout is as follows:
17248 * Field Meaning
17249 * 0000 Operand positive with no match
17250 * 0100 Operand positive with at least one match
17251 * 0001 Operand negative with no match
17252 * 0101 Operand negative with at least one match
17254 assign( field, binop( Iop_Or32,
17255 binop( Iop_Shl32,
17256 mkexpr( sign ),
17257 mkU8( 3 ) ),
17258 binop( Iop_Shl32,
17259 unop( Iop_1Uto32,
17260 binop( Iop_CmpNE32,
17261 binop( Iop_And32,
17262 mkU32( DCM ),
17263 mkexpr( DCM_calc ) ),
17264 mkU32( 0 ) ) ),
17265 mkU8( 1 ) ) ) );
17267 putGST_field( PPC_GST_CR, mkexpr( field ), crfD );
17268 putFPCC( mkexpr( field ) );
17269 return True;
17272 static Bool dis_dfp_bcd( UInt prefix, UInt theInstr ) {
17273 UInt opc2 = ifieldOPClo10( theInstr );
17274 ULong sp = IFIELD(theInstr, 19, 2);
17275 ULong s = IFIELD(theInstr, 20, 1);
17276 UChar frT_addr = ifieldRegDS( theInstr );
17277 UChar frB_addr = ifieldRegB( theInstr );
17278 IRTemp frB = newTemp( Ity_D64 );
17279 IRTemp frBI64 = newTemp( Ity_I64 );
17280 IRTemp result = newTemp( Ity_I64 );
17281 IRTemp resultD64 = newTemp( Ity_D64 );
17282 IRTemp bcd64 = newTemp( Ity_I64 );
17283 IRTemp bcd_u = newTemp( Ity_I32 );
17284 IRTemp bcd_l = newTemp( Ity_I32 );
17285 IRTemp dbcd_u = newTemp( Ity_I32 );
17286 IRTemp dbcd_l = newTemp( Ity_I32 );
17287 IRTemp lmd = newTemp( Ity_I32 );
17289 /* There is no prefixed version of these instructions. */
17290 PREFIX_CHECK
17292 assign( frB, getDReg( frB_addr ) );
17293 assign( frBI64, unop( Iop_ReinterpD64asI64, mkexpr( frB ) ) );
17295 switch ( opc2 ) {
17296 case 0x142: // ddedpd DFP Decode DPD to BCD
17297 DIP( "ddedpd %llu,r%u,r%u\n", sp, frT_addr, frB_addr );
17299 assign( bcd64, unop( Iop_DPBtoBCD, mkexpr( frBI64 ) ) );
17300 assign( bcd_u, unop( Iop_64HIto32, mkexpr( bcd64 ) ) );
17301 assign( bcd_l, unop( Iop_64to32, mkexpr( bcd64 ) ) );
17303 if ( ( sp == 0 ) || ( sp == 1 ) ) {
17304 /* Unsigned BCD string */
17305 Get_lmd( &lmd,
17306 binop( Iop_Shr32,
17307 unop( Iop_64HIto32, mkexpr( frBI64 ) ),
17308 mkU8( 31 - 5 ) ) ); // G-field[0:4]
17310 assign( result,
17311 binop( Iop_32HLto64,
17312 binop( Iop_Or32,
17313 binop( Iop_Shl32, mkexpr( lmd ), mkU8( 28 ) ),
17314 mkexpr( bcd_u ) ),
17315 mkexpr( bcd_l ) ) );
17317 } else {
17318 /* Signed BCD string, the cases for sp 2 and 3 only differ in how
17319 * the positive and negative values are encoded in the least
17320 * significant bits.
17322 IRTemp sign = newTemp( Ity_I32 );
17324 if (sp == 2) {
17325 /* Positive sign = 0xC, negative sign = 0xD */
17327 assign( sign,
17328 binop( Iop_Or32,
17329 binop( Iop_Shr32,
17330 unop( Iop_64HIto32, mkexpr( frBI64 ) ),
17331 mkU8( 31 ) ),
17332 mkU32( 0xC ) ) );
17334 } else if ( sp == 3 ) {
17335 /* Positive sign = 0xF, negative sign = 0xD */
17336 IRTemp tmp32 = newTemp( Ity_I32 );
17338 /* Complement sign bit then OR into bit position 1 */
17339 assign( tmp32,
17340 binop( Iop_Xor32,
17341 binop( Iop_Shr32,
17342 unop( Iop_64HIto32, mkexpr( frBI64 ) ),
17343 mkU8( 30 ) ),
17344 mkU32( 0x2 ) ) );
17346 assign( sign, binop( Iop_Or32, mkexpr( tmp32 ), mkU32( 0xD ) ) );
17348 } else {
17349 vpanic( "The impossible happened: dis_dfp_bcd(ppc), undefined SP field" );
17352 /* Put sign in bottom 4 bits, move most significant 4-bits from
17353 * bcd_l to bcd_u.
17355 assign( result,
17356 binop( Iop_32HLto64,
17357 binop( Iop_Or32,
17358 binop( Iop_Shr32,
17359 mkexpr( bcd_l ),
17360 mkU8( 28 ) ),
17361 binop( Iop_Shl32,
17362 mkexpr( bcd_u ),
17363 mkU8( 4 ) ) ),
17364 binop( Iop_Or32,
17365 mkexpr( sign ),
17366 binop( Iop_Shl32,
17367 mkexpr( bcd_l ),
17368 mkU8( 4 ) ) ) ) );
17371 putDReg( frT_addr, unop( Iop_ReinterpI64asD64, mkexpr( result ) ) );
17372 break;
17374 case 0x342: // denbcd DFP Encode BCD to DPD
17376 IRTemp valid_mask = newTemp( Ity_I32 );
17377 IRTemp invalid_mask = newTemp( Ity_I32 );
17378 IRTemp without_lmd = newTemp( Ity_I64 );
17379 IRTemp tmp64 = newTemp( Ity_I64 );
17380 IRTemp dbcd64 = newTemp( Ity_I64 );
17381 IRTemp left_exp = newTemp( Ity_I32 );
17382 IRTemp g0_4 = newTemp( Ity_I32 );
17384 DIP( "denbcd %llu,r%u,r%u\n", s, frT_addr, frB_addr );
17386 if ( s == 0 ) {
17387 /* Unsigned BCD string */
17388 assign( dbcd64, unop( Iop_BCDtoDPB, mkexpr(frBI64 ) ) );
17389 assign( dbcd_u, unop( Iop_64HIto32, mkexpr( dbcd64 ) ) );
17390 assign( dbcd_l, unop( Iop_64to32, mkexpr( dbcd64 ) ) );
17392 assign( lmd,
17393 binop( Iop_Shr32,
17394 binop( Iop_And32,
17395 unop( Iop_64HIto32, mkexpr( frBI64 ) ),
17396 mkU32( 0xF0000000 ) ),
17397 mkU8( 28 ) ) );
17399 assign( invalid_mask,
17400 bcd_digit_inval( unop( Iop_64HIto32, mkexpr( frBI64 ) ),
17401 unop( Iop_64to32, mkexpr( frBI64 ) ) ) );
17402 assign( valid_mask, unop( Iop_Not32, mkexpr( invalid_mask ) ) );
17404 assign( without_lmd,
17405 unop( Iop_ReinterpD64asI64,
17406 binop( Iop_InsertExpD64,
17407 mkU64( DFP_LONG_BIAS ),
17408 unop( Iop_ReinterpI64asD64,
17409 binop( Iop_32HLto64,
17410 mkexpr( dbcd_u ),
17411 mkexpr( dbcd_l ) ) ) ) ) );
17412 assign( left_exp,
17413 binop( Iop_Shr32,
17414 binop( Iop_And32,
17415 unop( Iop_64HIto32, mkexpr( without_lmd ) ),
17416 mkU32( 0x60000000 ) ),
17417 mkU8( 29 ) ) );
17419 assign( g0_4,
17420 binop( Iop_Shl32,
17421 Gfield_encoding( mkexpr( left_exp ), mkexpr( lmd ) ),
17422 mkU8( 26 ) ) );
17424 assign( tmp64,
17425 binop( Iop_32HLto64,
17426 binop( Iop_Or32,
17427 binop( Iop_And32,
17428 unop( Iop_64HIto32,
17429 mkexpr( without_lmd ) ),
17430 mkU32( 0x83FFFFFF ) ),
17431 mkexpr( g0_4 ) ),
17432 unop( Iop_64to32, mkexpr( without_lmd ) ) ) );
17434 } else if ( s == 1 ) {
17435 IRTemp sign = newTemp( Ity_I32 );
17436 IRTemp sign_bit = newTemp( Ity_I32 );
17437 IRTemp pos_sign_mask = newTemp( Ity_I32 );
17438 IRTemp neg_sign_mask = newTemp( Ity_I32 );
17439 IRTemp tmp = newTemp( Ity_I64 );
17441 /* Signed BCD string, least significant 4 bits are sign bits
17442 * positive sign = 0xC, negative sign = 0xD
17444 assign( tmp, unop( Iop_BCDtoDPB,
17445 binop( Iop_32HLto64,
17446 binop( Iop_Shr32,
17447 unop( Iop_64HIto32,
17448 mkexpr( frBI64 ) ),
17449 mkU8( 4 ) ),
17450 binop( Iop_Or32,
17451 binop( Iop_Shr32,
17452 unop( Iop_64to32,
17453 mkexpr( frBI64 ) ),
17454 mkU8( 4 ) ),
17455 binop( Iop_Shl32,
17456 unop( Iop_64HIto32,
17457 mkexpr( frBI64 ) ),
17458 mkU8( 28 ) ) ) ) ) );
17460 assign( dbcd_u, unop( Iop_64HIto32, mkexpr( tmp ) ) );
17461 assign( dbcd_l, unop( Iop_64to32, mkexpr( tmp ) ) );
17463 /* Get the sign of the BCD string. */
17464 assign( sign,
17465 binop( Iop_And32,
17466 unop( Iop_64to32, mkexpr( frBI64 ) ),
17467 mkU32( 0xF ) ) );
17469 assign( neg_sign_mask, Generate_neg_sign_mask( mkexpr( sign ) ) );
17470 assign( pos_sign_mask, Generate_pos_sign_mask( mkexpr( sign ) ) );
17471 assign( sign_bit,
17472 Generate_sign_bit( mkexpr( pos_sign_mask ),
17473 mkexpr( neg_sign_mask ) ) );
17475 /* Check for invalid sign and BCD digit. Don't check the bottom
17476 * four bits of bcd_l as that is the sign value.
17478 assign( invalid_mask,
17479 Generate_inv_mask(
17480 bcd_digit_inval( unop( Iop_64HIto32,
17481 mkexpr( frBI64 ) ),
17482 binop( Iop_Shr32,
17483 unop( Iop_64to32,
17484 mkexpr( frBI64 ) ),
17485 mkU8( 4 ) ) ),
17486 mkexpr( pos_sign_mask ),
17487 mkexpr( neg_sign_mask ) ) );
17489 assign( valid_mask, unop( Iop_Not32, mkexpr( invalid_mask ) ) );
17491 /* Generate the result assuming the sign value was valid. */
17492 assign( tmp64,
17493 unop( Iop_ReinterpD64asI64,
17494 binop( Iop_InsertExpD64,
17495 mkU64( DFP_LONG_BIAS ),
17496 unop( Iop_ReinterpI64asD64,
17497 binop( Iop_32HLto64,
17498 binop( Iop_Or32,
17499 mkexpr( dbcd_u ),
17500 mkexpr( sign_bit ) ),
17501 mkexpr( dbcd_l ) ) ) ) ) );
17504 /* Generate the value to store depending on the validity of the
17505 * sign value and the validity of the BCD digits.
17507 assign( resultD64,
17508 unop( Iop_ReinterpI64asD64,
17509 binop( Iop_32HLto64,
17510 binop( Iop_Or32,
17511 binop( Iop_And32,
17512 mkexpr( valid_mask ),
17513 unop( Iop_64HIto32,
17514 mkexpr( tmp64 ) ) ),
17515 binop( Iop_And32,
17516 mkU32( 0x7C000000 ),
17517 mkexpr( invalid_mask ) ) ),
17518 binop( Iop_Or32,
17519 binop( Iop_And32,
17520 mkexpr( valid_mask ),
17521 unop( Iop_64to32, mkexpr( tmp64 ) ) ),
17522 binop( Iop_And32,
17523 mkU32( 0x0 ),
17524 mkexpr( invalid_mask ) ) ) ) ) );
17525 putDReg( frT_addr, mkexpr( resultD64 ) );
17527 break;
17528 default:
17529 vpanic( "ERROR: dis_dfp_bcd(ppc), undefined opc2 case " );
17530 return False;
17532 return True;
/* Decimal Floating-Point BCD conversions for the extended (quad, 128-bit)
 * DFP format: ddedpdq (Decode DPD to BCD) and denbcdq (Encode BCD to DPD).
 * The source and result each occupy an even/odd pair of FP registers
 * (frB_addr/frB_addr+1 and frT_addr/frT_addr+1).
 */
static Bool dis_dfp_bcdq( UInt prefix, UInt theInstr )
{
   UInt opc2        = ifieldOPClo10( theInstr );
   ULong sp         = IFIELD(theInstr, 19, 2);
   ULong s          = IFIELD(theInstr, 20, 1);
   IRTemp frB_hi    = newTemp( Ity_D64 );
   IRTemp frB_lo    = newTemp( Ity_D64 );
   IRTemp frBI64_hi = newTemp( Ity_I64 );
   IRTemp frBI64_lo = newTemp( Ity_I64 );
   UChar frT_addr   = ifieldRegDS( theInstr );
   UChar frB_addr   = ifieldRegB( theInstr );

   IRTemp lmd       = newTemp( Ity_I32 );
   IRTemp result_hi = newTemp( Ity_I64 );
   IRTemp result_lo = newTemp( Ity_I64 );

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   assign( frB_hi, getDReg( frB_addr ) );
   assign( frB_lo, getDReg( frB_addr + 1 ) );
   assign( frBI64_hi, unop( Iop_ReinterpD64asI64, mkexpr( frB_hi ) ) );
   assign( frBI64_lo, unop( Iop_ReinterpD64asI64, mkexpr( frB_lo ) ) );

   switch ( opc2 ) {
   case 0x142: // ddedpdq   DFP Decode DPD to BCD
      {
         IRTemp low_60_u = newTemp( Ity_I32 );
         IRTemp low_60_l = newTemp( Ity_I32 );
         IRTemp mid_60_u = newTemp( Ity_I32 );
         IRTemp mid_60_l = newTemp( Ity_I32 );
         IRTemp top_12_l = newTemp( Ity_I32 );

         DIP( "ddedpdq %llu,r%u,r%u\n", sp, frT_addr, frB_addr );

         /* Note, instruction only stores the lower 32 BCD digits in
          * the result
          */
         Generate_132_bit_bcd_string( mkexpr( frBI64_hi ),
                                      mkexpr( frBI64_lo ),
                                      &top_12_l,
                                      &mid_60_u,
                                      &mid_60_l,
                                      &low_60_u,
                                      &low_60_l );

         if ( ( sp == 0 ) || ( sp == 1 ) ) {
            /* Unsigned BCD string */
            assign( result_hi,
                    binop( Iop_32HLto64,
                           binop( Iop_Or32,
                                  binop( Iop_Shl32,
                                         mkexpr( top_12_l ),
                                         mkU8( 24 ) ),
                                  binop( Iop_Shr32,
                                         mkexpr( mid_60_u ),
                                         mkU8( 4 ) ) ),
                           binop( Iop_Or32,
                                  binop( Iop_Shl32,
                                         mkexpr( mid_60_u ),
                                         mkU8( 28 ) ),
                                  binop( Iop_Shr32,
                                         mkexpr( mid_60_l ),
                                         mkU8( 4 ) ) ) ) );

            assign( result_lo,
                    binop( Iop_32HLto64,
                           binop( Iop_Or32,
                                  binop( Iop_Shl32,
                                         mkexpr( mid_60_l ),
                                         mkU8( 28 ) ),
                                  mkexpr( low_60_u ) ),
                           mkexpr( low_60_l ) ) );

         } else {
            /* Signed BCD string, the cases for sp 2 and 3 only differ in how
             * the positive and negative values are encoded in the least
             * significant bits.
             */
            IRTemp sign = newTemp( Ity_I32 );

            if ( sp == 2 ) {
               /* Positive sign = 0xC, negative sign = 0xD */
               assign( sign,
                       binop( Iop_Or32,
                              binop( Iop_Shr32,
                                     unop( Iop_64HIto32, mkexpr( frBI64_hi ) ),
                                     mkU8( 31 ) ),
                              mkU32( 0xC ) ) );

            } else if ( sp == 3 ) {
               IRTemp tmp32 = newTemp( Ity_I32 );

               /* Positive sign = 0xF, negative sign = 0xD.
                * Need to complement sign bit then OR into bit position 1.
                */
               assign( tmp32,
                       binop( Iop_Xor32,
                              binop( Iop_Shr32,
                                     unop( Iop_64HIto32, mkexpr( frBI64_hi ) ),
                                     mkU8( 30 ) ),
                              mkU32( 0x2 ) ) );

               assign( sign, binop( Iop_Or32, mkexpr( tmp32 ), mkU32( 0xD ) ) );

            } else {
               vpanic( "The impossible happened: dis_dfp_bcd(ppc), undefined SP field" );
            }

            assign( result_hi,
                    binop( Iop_32HLto64,
                           binop( Iop_Or32,
                                  binop( Iop_Shl32,
                                         mkexpr( top_12_l ),
                                         mkU8( 28 ) ),
                                  mkexpr( mid_60_u ) ),
                           mkexpr( mid_60_l ) ) );

            assign( result_lo,
                    binop( Iop_32HLto64,
                           binop( Iop_Or32,
                                  binop( Iop_Shl32,
                                         mkexpr( low_60_u ),
                                         mkU8( 4 ) ),
                                  binop( Iop_Shr32,
                                         mkexpr( low_60_l ),
                                         mkU8( 28 ) ) ),
                           binop( Iop_Or32,
                                  binop( Iop_Shl32,
                                         mkexpr( low_60_l ),
                                         mkU8( 4 ) ),
                                  mkexpr( sign ) ) ) );
         }

         putDReg( frT_addr, unop( Iop_ReinterpI64asD64, mkexpr( result_hi ) ) );
         putDReg( frT_addr + 1,
                  unop( Iop_ReinterpI64asD64, mkexpr( result_lo ) ) );
      }
      break;
   case 0x342: // denbcdq   DFP Encode BCD to DPD
      {
         IRTemp valid_mask      = newTemp( Ity_I32 );
         IRTemp invalid_mask    = newTemp( Ity_I32 );
         IRTemp result128       = newTemp( Ity_D128 );
         IRTemp dfp_significand = newTemp( Ity_D128 );
         IRTemp tmp_hi          = newTemp( Ity_I64 );
         IRTemp tmp_lo          = newTemp( Ity_I64 );
         IRTemp dbcd_top_l      = newTemp( Ity_I32 );
         IRTemp dbcd_mid_u      = newTemp( Ity_I32 );
         IRTemp dbcd_mid_l      = newTemp( Ity_I32 );
         IRTemp dbcd_low_u      = newTemp( Ity_I32 );
         IRTemp dbcd_low_l      = newTemp( Ity_I32 );
         IRTemp bcd_top_8       = newTemp( Ity_I64 );
         IRTemp bcd_mid_60      = newTemp( Ity_I64 );
         IRTemp bcd_low_60      = newTemp( Ity_I64 );
         IRTemp sign_bit        = newTemp( Ity_I32 );
         IRTemp tmptop10        = newTemp( Ity_I64 );
         IRTemp tmpmid50        = newTemp( Ity_I64 );
         IRTemp tmplow50        = newTemp( Ity_I64 );
         IRTemp inval_bcd_digit_mask = newTemp( Ity_I32 );

         /* NOTE(review): mnemonic printed is "denbcd" although this case is
          * denbcdq -- confirm against the disassembly expected by tests. */
         DIP( "denbcd %llu,r%u,r%u\n", s, frT_addr, frB_addr );

         if ( s == 0 ) {
            /* Unsigned BCD string */
            assign( sign_bit, mkU32( 0 ) ); // set to zero for unsigned string

            assign( bcd_top_8,
                    binop( Iop_32HLto64,
                           mkU32( 0 ),
                           binop( Iop_And32,
                                  binop( Iop_Shr32,
                                         unop( Iop_64HIto32,
                                               mkexpr( frBI64_hi ) ),
                                         mkU8( 24 ) ),
                                  mkU32( 0xFF ) ) ) );
            assign( bcd_mid_60,
                    binop( Iop_32HLto64,
                           binop( Iop_Or32,
                                  binop( Iop_Shr32,
                                         unop( Iop_64to32,
                                               mkexpr( frBI64_hi ) ),
                                         mkU8( 28 ) ),
                                  binop( Iop_Shl32,
                                         unop( Iop_64HIto32,
                                               mkexpr( frBI64_hi ) ),
                                         mkU8( 4 ) ) ),
                           binop( Iop_Or32,
                                  binop( Iop_Shl32,
                                         unop( Iop_64to32,
                                               mkexpr( frBI64_hi ) ),
                                         mkU8( 4 ) ),
                                  binop( Iop_Shr32,
                                         unop( Iop_64HIto32,
                                               mkexpr( frBI64_lo ) ),
                                         mkU8( 28 ) ) ) ) );

            /* Note, the various helper functions ignore the top 4-bits */
            assign( bcd_low_60, mkexpr( frBI64_lo ) );

            assign( tmptop10, unop( Iop_BCDtoDPB, mkexpr( bcd_top_8 ) ) );
            assign( dbcd_top_l, unop( Iop_64to32, mkexpr( tmptop10 ) ) );

            assign( tmpmid50, unop( Iop_BCDtoDPB, mkexpr( bcd_mid_60 ) ) );
            assign( dbcd_mid_u, unop( Iop_64HIto32, mkexpr( tmpmid50 ) ) );
            assign( dbcd_mid_l, unop( Iop_64to32, mkexpr( tmpmid50 ) ) );

            assign( tmplow50, unop( Iop_BCDtoDPB, mkexpr( bcd_low_60 ) ) );
            assign( dbcd_low_u, unop( Iop_64HIto32, mkexpr( tmplow50 ) ) );
            assign( dbcd_low_l, unop( Iop_64to32, mkexpr( tmplow50 ) ) );

            /* The entire BCD string fits in lower 110-bits.  The LMD = 0,
             * value is not part of the final result.  Only the right most
             * BCD digits are stored.
             */
            assign( lmd, mkU32( 0 ) );

            assign( invalid_mask,
                    binop( Iop_Or32,
                           bcd_digit_inval( mkU32( 0 ),
                                            unop( Iop_64to32,
                                                  mkexpr( bcd_top_8 ) ) ),
                           binop( Iop_Or32,
                                  bcd_digit_inval( unop( Iop_64HIto32,
                                                         mkexpr( bcd_mid_60 ) ),
                                                   unop( Iop_64to32,
                                                         mkexpr( bcd_mid_60 ) ) ),
                                  bcd_digit_inval( unop( Iop_64HIto32,
                                                         mkexpr( bcd_low_60 ) ),
                                                   unop( Iop_64to32,
                                                         mkexpr( bcd_low_60 ) )
                                                 ) ) ) );

         } else if ( s == 1 ) {
            IRTemp sign          = newTemp( Ity_I32 );
            IRTemp zero          = newTemp( Ity_I32 );
            IRTemp pos_sign_mask = newTemp( Ity_I32 );
            IRTemp neg_sign_mask = newTemp( Ity_I32 );

            /* The sign of the BCD string is stored in lower 4 bits */
            assign( sign,
                    binop( Iop_And32,
                           unop( Iop_64to32, mkexpr( frBI64_lo ) ),
                           mkU32( 0xF ) ) );
            assign( neg_sign_mask, Generate_neg_sign_mask( mkexpr( sign ) ) );
            assign( pos_sign_mask, Generate_pos_sign_mask( mkexpr( sign ) ) );
            assign( sign_bit,
                    Generate_sign_bit( mkexpr( pos_sign_mask ),
                                       mkexpr( neg_sign_mask ) ) );

            /* Generate the value assuming the sign and BCD digits are valid */
            assign( bcd_top_8,
                    binop( Iop_32HLto64,
                           mkU32( 0x0 ),
                           binop( Iop_Shr32,
                                  unop( Iop_64HIto32, mkexpr( frBI64_hi ) ),
                                  mkU8( 28 ) ) ) );

            /* The various helper routines ignore the upper 4-bits */
            assign( bcd_mid_60, mkexpr( frBI64_hi ) );

            /* Remove bottom four sign bits */
            assign( bcd_low_60,
                    binop( Iop_32HLto64,
                           binop( Iop_Shr32,
                                  unop( Iop_64HIto32,
                                        mkexpr( frBI64_lo ) ),
                                  mkU8( 4 ) ),
                           binop( Iop_Or32,
                                  binop( Iop_Shl32,
                                         unop( Iop_64HIto32,
                                               mkexpr( frBI64_lo ) ),
                                         mkU8( 28 ) ),
                                  binop( Iop_Shr32,
                                         unop( Iop_64to32,
                                               mkexpr( frBI64_lo ) ),
                                         mkU8( 4 ) ) ) ) );
            assign( tmptop10, unop( Iop_BCDtoDPB, mkexpr(bcd_top_8 ) ) );
            assign( dbcd_top_l, unop( Iop_64to32, mkexpr( tmptop10 ) ) );

            assign( tmpmid50, unop( Iop_BCDtoDPB, mkexpr(bcd_mid_60 ) ) );
            assign( dbcd_mid_u, unop( Iop_64HIto32, mkexpr( tmpmid50 ) ) );
            assign( dbcd_mid_l, unop( Iop_64to32, mkexpr( tmpmid50 ) ) );

            assign( tmplow50, unop( Iop_BCDtoDPB, mkexpr( bcd_low_60 ) ) );
            assign( dbcd_low_u, unop( Iop_64HIto32, mkexpr( tmplow50 ) ) );
            assign( dbcd_low_l, unop( Iop_64to32, mkexpr( tmplow50 ) ) );

            /* The entire BCD string fits in lower 110-bits. The LMD value
             * is not stored in the final result for the DFP Long instruction.
             */
            assign( lmd, mkU32( 0 ) );

            /* Check for invalid sign and invalid BCD digit.  Don't check the
             * bottom four bits of frBI64_lo as that is the sign value.
             */
            assign( zero, mkU32( 0 ) );
            assign( inval_bcd_digit_mask,
                    binop( Iop_Or32,
                           bcd_digit_inval( mkexpr( zero ),
                                            unop( Iop_64to32,
                                                  mkexpr( bcd_top_8 ) ) ),
                           binop( Iop_Or32,
                                  bcd_digit_inval( unop( Iop_64HIto32,
                                                         mkexpr( bcd_mid_60 ) ),
                                                   unop( Iop_64to32,
                                                         mkexpr( bcd_mid_60 ) ) ),
                                  bcd_digit_inval( unop( Iop_64HIto32,
                                                         mkexpr( frBI64_lo ) ),
                                                   binop( Iop_Shr32,
                                                          unop( Iop_64to32,
                                                                mkexpr( frBI64_lo ) ),
                                                          mkU8( 4 ) ) ) ) ) );
            assign( invalid_mask,
                    Generate_inv_mask( mkexpr( inval_bcd_digit_mask ),
                                       mkexpr( pos_sign_mask ),
                                       mkexpr( neg_sign_mask ) ) );
         }

         assign( valid_mask, unop( Iop_Not32, mkexpr( invalid_mask ) ) );

         /* Calculate the value of the result assuming sign and BCD digits
          * are all valid.
          */
         assign( dfp_significand,
                 binop( Iop_D64HLtoD128,
                        unop( Iop_ReinterpI64asD64,
                              binop( Iop_32HLto64,
                                     binop( Iop_Or32,
                                            mkexpr( sign_bit ),
                                            mkexpr( dbcd_top_l ) ),
                                     binop( Iop_Or32,
                                            binop( Iop_Shl32,
                                                   mkexpr( dbcd_mid_u ),
                                                   mkU8( 18 ) ),
                                            binop( Iop_Shr32,
                                                   mkexpr( dbcd_mid_l ),
                                                   mkU8( 14 ) ) ) ) ),
                        unop( Iop_ReinterpI64asD64,
                              binop( Iop_32HLto64,
                                     binop( Iop_Or32,
                                            mkexpr( dbcd_low_u ),
                                            binop( Iop_Shl32,
                                                   mkexpr( dbcd_mid_l ),
                                                   mkU8( 18 ) ) ),
                                     mkexpr( dbcd_low_l ) ) ) ) );

         /* Break the result back down to 32-bit chunks and replace chunks.
          * If there was an invalid BCD digit or invalid sign value, replace
          * the calculated result with the invalid bit string.
          */
         assign( result128,
                 binop( Iop_InsertExpD128,
                        mkU64( DFP_EXTND_BIAS ),
                        mkexpr( dfp_significand ) ) );

         assign( tmp_hi,
                 unop( Iop_ReinterpD64asI64,
                       unop( Iop_D128HItoD64, mkexpr( result128 ) ) ) );

         assign( tmp_lo,
                 unop( Iop_ReinterpD64asI64,
                       unop( Iop_D128LOtoD64, mkexpr( result128 ) ) ) );

         assign( result_hi,
                 binop( Iop_32HLto64,
                        binop( Iop_Or32,
                               binop( Iop_And32,
                                      mkexpr( valid_mask ),
                                      unop( Iop_64HIto32, mkexpr( tmp_hi ) ) ),
                               binop( Iop_And32,
                                      mkU32( 0x7C000000 ),
                                      mkexpr( invalid_mask ) ) ),
                        binop( Iop_Or32,
                               binop( Iop_And32,
                                      mkexpr( valid_mask ),
                                      unop( Iop_64to32, mkexpr( tmp_hi ) ) ),
                               binop( Iop_And32,
                                      mkU32( 0x0 ),
                                      mkexpr( invalid_mask ) ) ) ) );

         assign( result_lo,
                 binop( Iop_32HLto64,
                        binop( Iop_Or32,
                               binop( Iop_And32,
                                      mkexpr( valid_mask ),
                                      unop( Iop_64HIto32, mkexpr( tmp_lo ) ) ),
                               binop( Iop_And32,
                                      mkU32( 0x0 ),
                                      mkexpr( invalid_mask ) ) ),
                        binop( Iop_Or32,
                               binop( Iop_And32,
                                      mkexpr( valid_mask ),
                                      unop( Iop_64to32, mkexpr( tmp_lo ) ) ),
                               binop( Iop_And32,
                                      mkU32( 0x0 ),
                                      mkexpr( invalid_mask ) ) ) ) );

         putDReg( frT_addr, unop( Iop_ReinterpI64asD64, mkexpr( result_hi ) ) );
         putDReg( frT_addr + 1,
                  unop( Iop_ReinterpI64asD64, mkexpr( result_lo ) ) );
      }
      break;
   default:
      vpanic( "ERROR: dis_dfp_bcdq(ppc), undefined opc2 case " );
      break;
   }
   return True;
}
/* DFP Test Significance instructions (dtstsf/dtstsfq and the immediate forms
 * dtstsfi/dtstsfiq): compare the reference significance K against the number
 * of significant digits in frB and set CR field crfD and the FPCC.
 */
static Bool dis_dfp_significant_digits( UInt prefix, UInt theInstr )
{
   UInt opc1      = ifieldOPC( theInstr );
   UInt opc2      = ifieldOPClo10(theInstr);
   UChar frA_addr = ifieldRegA( theInstr );
   UChar frB_addr = ifieldRegB( theInstr );
   IRTemp frA     = newTemp( Ity_D64 );
   IRTemp B_sig   = newTemp( Ity_I8 );
   IRTemp K       = newTemp( Ity_I8 );
   IRTemp lmd_B   = newTemp( Ity_I32 );
   IRTemp field   = newTemp( Ity_I32 );
   UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) ); // AKA BF
   IRTemp Unordered_true     = newTemp( Ity_I32 );
   IRTemp Eq_true_mask       = newTemp( Ity_I32 );
   IRTemp Lt_true_mask       = newTemp( Ity_I32 );
   IRTemp Gt_true_mask       = newTemp( Ity_I32 );
   IRTemp KisZero_true_mask  = newTemp( Ity_I32 );
   IRTemp KisZero_false_mask = newTemp( Ity_I32 );
   IRTemp cc = newTemp( Ity_I32 );
   UChar UIM = toUChar( IFIELD( theInstr, 16, 6 ) );
   IRTemp BCD_valid = newTemp( Ity_I32 );

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   if (opc2 == 0x2A2) {        // dtstsf   DFP Test Significance
                               // dtstsfq  DFP Test Significance Quad
      /* Get the reference significance stored in frA */
      assign( frA, getDReg( frA_addr ) );

      /* Convert from 64 bit to 8 bits in two steps.  The Iop_64to8 is not
       * supported in 32-bit mode.
       */
      assign( K, unop( Iop_32to8,
                       binop( Iop_And32,
                              unop( Iop_64to32,
                                    unop( Iop_ReinterpD64asI64,
                                          mkexpr( frA ) ) ),
                              mkU32( 0x3F ) ) ) );

   } else if (opc2 == 0x2A3) { // dtstsfi  DFP Test Significance Immediate
                               // dtstsfiq DFP Test Significance Quad Immediate
      /* get the significance from the immediate field */
      assign( K, mkU8( UIM) );

   } else {
      vex_printf("dis_dfp_significant_digits(ppc)(opc2) wrong\n");
      return False;
   }

   switch ( opc1 ) {
   case 0x3b: // dtstsf   DFP Test Significance
              // dtstsfi  DFP Test Significance Immediate
      {
         IRTemp frB     = newTemp( Ity_D64 );
         IRTemp frBI64  = newTemp( Ity_I64 );
         IRTemp B_bcd_u = newTemp( Ity_I32 );
         IRTemp B_bcd_l = newTemp( Ity_I32 );
         IRTemp tmp64   = newTemp( Ity_I64 );

         if (opc2 == 0x2A2) {
            DIP( "dtstsf %u,r%u,r%u\n", crfD, frA_addr, frB_addr );
         } else {
            DIP( "dtstsfi %u,%u,r%u\n", crfD, UIM, frB_addr );
         }

         assign( frB, getDReg( frB_addr ) );
         assign( frBI64, unop( Iop_ReinterpD64asI64, mkexpr( frB ) ) );

         /* Get the BCD string for the value stored in a series of I32 values.
          * Count the number of leading zeros.  Subtract the number of leading
          * zeros from 16 (maximum number of significant digits in DFP
          * Long).
          */
         Get_lmd( &lmd_B,
                  binop( Iop_Shr32,
                         unop( Iop_64HIto32, mkexpr( frBI64 ) ),
                         mkU8( 31 - 5 ) ) ); // G-field[0:4]

         assign( tmp64, unop( Iop_DPBtoBCD, mkexpr( frBI64 ) ) );
         assign( B_bcd_u, unop( Iop_64HIto32, mkexpr( tmp64 ) ) );
         assign( B_bcd_l, unop( Iop_64to32, mkexpr( tmp64 ) ) );

         assign( B_sig,
                 binop( Iop_Sub8,
                        mkU8( DFP_LONG_MAX_SIG_DIGITS ),
                        Count_leading_zeros_60( mkexpr( lmd_B ),
                                                mkexpr( B_bcd_u ),
                                                mkexpr( B_bcd_l ) ) ) );

         assign( BCD_valid,
                 binop( Iop_Or32,
                        bcd_digit_inval( mkexpr( B_bcd_u), mkexpr( B_bcd_l) ),
                        bcd_digit_inval( mkexpr( lmd_B), mkU32( 0 ) ) ) );

         /* Set unordered to True if the number is NaN, Inf or an invalid
          * digit.
          */
         assign( Unordered_true,
                 binop( Iop_Or32,
                        Check_unordered( mkexpr( frBI64 ) ),
                        mkexpr( BCD_valid) ) );
      }
      break;
   case 0x3F: // dtstsfq  DFP Test Significance
              // dtstsfqi DFP Test Significance Immediate
      {
         IRTemp frB_hi     = newTemp( Ity_D64 );
         IRTemp frB_lo     = newTemp( Ity_D64 );
         IRTemp frBI64_hi  = newTemp( Ity_I64 );
         IRTemp frBI64_lo  = newTemp( Ity_I64 );
         IRTemp B_low_60_u = newTemp( Ity_I32 );
         IRTemp B_low_60_l = newTemp( Ity_I32 );
         IRTemp B_mid_60_u = newTemp( Ity_I32 );
         IRTemp B_mid_60_l = newTemp( Ity_I32 );
         IRTemp B_top_12_l = newTemp( Ity_I32 );

         if (opc2 == 0x2A2) {
            DIP( "dtstsfq %u,r%u,r%u\n", crfD, frA_addr, frB_addr );
         } else {
            DIP( "dtstsfiq %u,%u,r%u\n", crfD, UIM, frB_addr );
         }

         assign( frB_hi, getDReg( frB_addr ) );
         assign( frB_lo, getDReg( frB_addr + 1 ) );

         assign( frBI64_hi, unop( Iop_ReinterpD64asI64, mkexpr( frB_hi ) ) );
         assign( frBI64_lo, unop( Iop_ReinterpD64asI64, mkexpr( frB_lo ) ) );

         /* Get the BCD string for the value stored in a series of I32 values.
          * Count the number of leading zeros.  Subtract the number of leading
          * zeros from 32 (maximum number of significant digits in DFP
          * extended).
          */
         Get_lmd( &lmd_B,
                  binop( Iop_Shr32,
                         unop( Iop_64HIto32, mkexpr( frBI64_hi ) ),
                         mkU8( 31 - 5 ) ) ); // G-field[0:4]

         Generate_132_bit_bcd_string( mkexpr( frBI64_hi ),
                                      mkexpr( frBI64_lo ),
                                      &B_top_12_l,
                                      &B_mid_60_u,
                                      &B_mid_60_l,
                                      &B_low_60_u,
                                      &B_low_60_l );

         assign( BCD_valid,
                 binop( Iop_Or32,
                        binop( Iop_Or32,
                               bcd_digit_inval( mkexpr( lmd_B ),
                                                mkexpr( B_top_12_l ) ),
                               bcd_digit_inval( mkexpr( B_mid_60_u ),
                                                mkexpr( B_mid_60_l ) ) ),
                        bcd_digit_inval( mkexpr( B_low_60_u ),
                                         mkexpr( B_low_60_l ) ) ) );

         assign( B_sig,
                 binop( Iop_Sub8,
                        mkU8( DFP_EXTND_MAX_SIG_DIGITS ),
                        Count_leading_zeros_128( mkexpr( lmd_B ),
                                                 mkexpr( B_top_12_l ),
                                                 mkexpr( B_mid_60_u ),
                                                 mkexpr( B_mid_60_l ),
                                                 mkexpr( B_low_60_u ),
                                                 mkexpr( B_low_60_l ) ) ) );

         /* Set unordered to True if the number is NaN, Inf or an invalid
          * digit.
          */
         assign( Unordered_true,
                 binop( Iop_Or32,
                        Check_unordered( mkexpr( frBI64_hi ) ),
                        mkexpr( BCD_valid) ) );
      }
      break;
   }

   /* Compare (16 - cnt[0]) against K and set the condition code field
    * accordingly.
    *
    * The field layout is as follows:
    *
    * bit[3:0]    Description
    *    3     K != 0 and K < Number of significant digits in FRB
    *    2     K != 0 and K > Number of significant digits in FRB OR K = 0
    *    1     K != 0 and K = Number of significant digits in FRB
    *    0     K ? Number of significant digits in FRB
    */
   assign( Eq_true_mask,
           unop( Iop_1Sto32,
                 binop( Iop_CmpEQ32,
                        unop( Iop_8Uto32, mkexpr( K ) ),
                        unop( Iop_8Uto32, mkexpr( B_sig ) ) ) ) );
   assign( Lt_true_mask,
           unop( Iop_1Sto32,
                 binop( Iop_CmpLT32U,
                        unop( Iop_8Uto32, mkexpr( K ) ),
                        unop( Iop_8Uto32, mkexpr( B_sig ) ) ) ) );
   assign( Gt_true_mask,
           unop( Iop_1Sto32,
                 binop( Iop_CmpLT32U,
                        unop( Iop_8Uto32, mkexpr( B_sig ) ),
                        unop( Iop_8Uto32, mkexpr( K ) ) ) ) );

   assign( KisZero_true_mask,
           unop( Iop_1Sto32,
                 binop( Iop_CmpEQ32,
                        unop( Iop_8Uto32, mkexpr( K ) ),
                        mkU32( 0 ) ) ) );
   assign( KisZero_false_mask,
           unop( Iop_1Sto32,
                 binop( Iop_CmpNE32,
                        unop( Iop_8Uto32, mkexpr( K ) ),
                        mkU32( 0 ) ) ) );

   assign( field,
           binop( Iop_Or32,
                  binop( Iop_And32,
                         mkexpr( KisZero_false_mask ),
                         binop( Iop_Or32,
                                binop( Iop_And32,
                                       mkexpr( Lt_true_mask ),
                                       mkU32( 0x8 ) ),
                                binop( Iop_Or32,
                                       binop( Iop_And32,
                                              mkexpr( Gt_true_mask ),
                                              mkU32( 0x4 ) ),
                                       binop( Iop_And32,
                                              mkexpr( Eq_true_mask ),
                                              mkU32( 0x2 ) ) ) ) ),
                  binop( Iop_And32,
                         mkexpr( KisZero_true_mask ),
                         mkU32( 0x4 ) ) ) );

   assign( cc, binop( Iop_Or32,
                      binop( Iop_And32,
                             mkexpr( Unordered_true ),
                             mkU32( 0x1 ) ),
                      binop( Iop_And32,
                             unop( Iop_Not32, mkexpr( Unordered_true ) ),
                             mkexpr( field ) ) ) );

   putGST_field( PPC_GST_CR, mkexpr( cc ), crfD );
   putFPCC( mkexpr( cc ) );

   return True;
}
18195 /*------------------------------------------------------------*/
18196 /*--- AltiVec Instruction Translation ---*/
18197 /*------------------------------------------------------------*/
18200 Altivec Cache Control Instructions (Data Streams)
/* AltiVec Cache Control Instructions (Data Streams): dst, dstst, dss/dssall.
 * These are decoded and checked only; no IR is generated for them here
 * (the stream hints have no architectural effect that needs modelling).
 */
static Bool dis_av_datastream ( UInt prefix, UInt theInstr )
{
   /* X-Form */
   UChar opc1     = ifieldOPC(theInstr);
   UChar flag_T   = toUChar( IFIELD( theInstr, 25, 1 ) );
   UChar flag_A   = flag_T;
   UChar b23to24  = toUChar( IFIELD( theInstr, 23, 2 ) );
   UChar STRM     = toUChar( IFIELD( theInstr, 21, 2 ) );
   UChar rA_addr  = ifieldRegA(theInstr);
   UChar rB_addr  = ifieldRegB(theInstr);
   UInt  opc2     = ifieldOPClo10(theInstr);
   UChar b0       = ifieldBIT0(theInstr);

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   if (opc1 != 0x1F || b23to24 != 0 || b0 != 0) {
      vex_printf("dis_av_datastream(ppc)(instr)\n");
      return False;
   }

   switch (opc2) {
   case 0x156: // dst (Data Stream Touch, AV p115)
      DIP("dst%s r%u,r%u,%d\n", flag_T ? "t" : "",
                                rA_addr, rB_addr, STRM);
      break;

   case 0x176: // dstst (Data Stream Touch for Store, AV p117)
      DIP("dstst%s r%u,r%u,%d\n", flag_T ? "t" : "",
                                  rA_addr, rB_addr, STRM);
      break;

   case 0x336: // dss (Data Stream Stop, AV p114)
      if (rA_addr != 0 || rB_addr != 0) {
         vex_printf("dis_av_datastream(ppc)(opc2,dst)\n");
         return False;
      }
      if (flag_A == 0) {
         DIP("dss %d\n", STRM);
      } else {
         DIP("dssall\n");
      }
      break;

   default:
      vex_printf("dis_av_datastream(ppc)(opc2)\n");
      return False;
   }
   return True;
}
18254 AltiVec Processor Control Instructions
/* AltiVec Processor Control Instructions: mfvscr (copy VSCR into vD) and
 * mtvscr (copy low word of vB into VSCR).
 */
static Bool dis_av_procctl ( UInt prefix, UInt theInstr )
{
   /* VX-Form */
   UChar opc1    = ifieldOPC(theInstr);
   UChar vD_addr = ifieldRegDS(theInstr);
   UChar vA_addr = ifieldRegA(theInstr);
   UChar vB_addr = ifieldRegB(theInstr);
   UInt  opc2    = IFIELD( theInstr, 0, 11 );

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   if (opc1 != 0x4) {
      vex_printf("dis_av_procctl(ppc)(instr)\n");
      return False;
   }

   switch (opc2) {
   case 0x604: // mfvscr (Move from VSCR, AV p129)
      if (vA_addr != 0 || vB_addr != 0) {
         vex_printf("dis_av_procctl(ppc)(opc2,dst)\n");
         return False;
      }
      DIP("mfvscr v%d\n", vD_addr);
      putVReg( vD_addr, unop(Iop_32UtoV128, getGST( PPC_GST_VSCR )) );
      break;

   case 0x644: { // mtvscr (Move to VSCR, AV p130)
      IRTemp vB = newTemp(Ity_V128);
      if (vD_addr != 0 || vA_addr != 0) {
         vex_printf("dis_av_procctl(ppc)(opc2,dst)\n");
         return False;
      }
      DIP("mtvscr v%d\n", vB_addr);
      assign( vB, getVReg(vB_addr));
      putGST( PPC_GST_VSCR, unop(Iop_V128to32, mkexpr(vB)) );
      break;
   }
   default:
      vex_printf("dis_av_procctl(ppc)(opc2)\n");
      return False;
   }
   return True;
}
18302 Vector Extend Sign Instructions
18304 static Bool dis_av_extend_sign_count_zero ( UInt prefix, UInt theInstr,
18305 UInt allow_isa_3_0 )
18307 /* VX-Form, sort of, the A register field is used to select the specific
18308 * sign extension instruction or count leading/trailing zero LSB
18309 * instruction.
18312 UChar opc1 = ifieldOPC( theInstr );
18313 UChar rT_addr = ifieldRegDS (theInstr );
18314 UChar rA_addr = ifieldRegA( theInstr );
18315 UChar vB_addr = ifieldRegB( theInstr );
18316 UInt opc2 = IFIELD( theInstr, 0, 11 );
18318 IRTemp vB = newTemp( Ity_V128 );
18319 IRTemp vT = newTemp( Ity_V128 );
18321 /* There is no prefixed version of these instructions. */
18322 PREFIX_CHECK
18324 assign( vB, getVReg ( vB_addr ) );
18326 if ( ( opc1 != 0x4 ) && ( opc2 != 0x602 ) ) {
18327 vex_printf("dis_av_extend_sign(ppc)(instr)\n");
18328 return False;
18331 switch ( rA_addr ) {
18332 case 0:
18333 case 1:
18335 UInt i;
18336 IRTemp count[17];
18337 IRTemp bit_zero[16];
18338 IRTemp byte_mask[17];
18340 /* These instructions store the result in the general purpose
18341 * register in the rT_addr field.
18344 byte_mask[0] = newTemp( Ity_I32 );
18345 count[0] = newTemp( Ity_I32 );
18346 assign( count[0], mkU32( 0 ) );
18347 assign( byte_mask[0], mkU32( 0x1 ) );
18349 if ( rA_addr == 0 ) {
18350 // vclzlsbb (Vector Count Leading Zero Least-Significant Bits Byte)
18351 DIP("vclzlsbb %d,v%d\n", rT_addr, vB_addr);
18353 } else {
18354 // vctzlsbb (Vector Count Trailing Zero Least-Significant Bits Byte)
18355 DIP("vctzlsbb %d,v%d\n", rT_addr, vB_addr);
18358 for( i = 0; i < 16; i++ ) {
18359 byte_mask[i+1] = newTemp( Ity_I32 );
18360 count[i+1] = newTemp( Ity_I32 );
18361 bit_zero[i] = newTemp( Ity_I1 );
18363 /* bit_zero[i] = 0x0 until the first 1 bit is found in lsb of
18364 * byte. When the first 1 bit is found it causes the byte_mask
18365 * to change from 0x1 to 0x0. Thus the AND of the lsb and byte_mask
18366 * will be zero which will be equal to the zero byte_mask causing
18367 * the value of bit_zero[i] to be equal to 0x1 for all remaining bits.
18370 if ( rA_addr == 0 )
18371 /* leading zero bit in byte count,
18372 work bytes from left to right
18374 assign( bit_zero[i],
18375 binop( Iop_CmpEQ32,
18376 binop( Iop_And32,
18377 unop( Iop_V128to32,
18378 binop( Iop_ShrV128,
18379 mkexpr( vB ),
18380 mkU8( ( 15 - i) * 8 ) ) ),
18381 mkexpr( byte_mask[i] ) ),
18382 mkexpr( byte_mask[i] ) ) );
18384 else if ( rA_addr == 1 )
18385 /* trailing zero bit in byte count,
18386 * work bytes from right to left
18388 assign( bit_zero[i],
18389 binop( Iop_CmpEQ32,
18390 binop( Iop_And32,
18391 unop( Iop_V128to32,
18392 binop( Iop_ShrV128,
18393 mkexpr( vB ),
18394 mkU8( i * 8 ) ) ),
18395 mkexpr( byte_mask[i] ) ),
18396 mkexpr( byte_mask[i] ) ) );
18398 /* Increment count as long as bit_zero = 0 */
18399 assign( count[i+1], binop( Iop_Add32,
18400 mkexpr( count[i] ),
18401 unop( Iop_1Uto32,
18402 unop( Iop_Not1,
18403 mkexpr( bit_zero[i] ) ) ) ) );
18405 /* If comparison fails to find a zero bit, set the byte_mask to zero
18406 * for all future comparisons so there will be no more matches.
18408 assign( byte_mask[i+1],
18409 binop( Iop_And32,
18410 unop( Iop_1Uto32,
18411 unop( Iop_Not1,
18412 mkexpr( bit_zero[i] ) ) ),
18413 mkexpr( byte_mask[i] ) ) );
18415 putIReg( rT_addr, unop( Iop_32Uto64, mkexpr( count[16] ) ) );
18416 return True;
18419 case 6: // vnegw, Vector Negate Word
18420 DIP("vnegw v%u,v%u", rT_addr, vB_addr);
18422 /* multiply each word by -1 */
18423 assign( vT, binop( Iop_Mul32x4, mkexpr( vB ), mkV128( 0xFFFF ) ) );
18424 break;
18426 case 7: // vnegd, Vector Negate Doubleword
18427 DIP("vnegd v%u,v%u", rT_addr, vB_addr);
/* multiply each doubleword by -1 */
18430 assign( vT, binop( Iop_64HLtoV128,
18431 binop( Iop_Mul64,
18432 unop( Iop_V128HIto64,
18433 mkexpr( vB ) ),
18434 mkU64( 0xFFFFFFFFFFFFFFFF ) ),
18435 binop( Iop_Mul64,
18436 unop( Iop_V128to64,
18437 mkexpr( vB ) ),
18438 mkU64( 0xFFFFFFFFFFFFFFFF ) ) ) );
18439 break;
18441 case 8: // vprtybw, Vector Parity Byte Word
18442 case 9: // vprtybd, Vector Parity Byte Doubleword
18443 case 10: // vprtybq, Vector Parity Byte Quadword
18445 UInt i;
18446 IRTemp bit_in_byte[16];
18447 IRTemp word_parity[4];
18449 for( i = 0; i < 16; i++ ) {
18450 bit_in_byte[i] = newTemp( Ity_I32 );
18451 assign( bit_in_byte[i],
18452 binop( Iop_And32,
18453 unop( Iop_V128to32,
18454 binop( Iop_ShrV128,
18455 mkexpr( vB ),
18456 mkU8( ( 15 - i ) * 8 ) ) ),
18457 mkU32( 0x1 ) ) );
18460 for( i = 0; i < 4; i++ ) {
18461 word_parity[i] = newTemp(Ity_I32);
18462 assign( word_parity[i],
18463 mkXOr4_32( bit_in_byte[0 + i * 4],
18464 bit_in_byte[1 + i * 4],
18465 bit_in_byte[2 + i * 4],
18466 bit_in_byte[3 + i * 4] ) );
18469 if ( rA_addr == 8 ) {
18470 DIP("vprtybw v%d,v%d", rT_addr, vB_addr);
18472 assign( vT, mkV128from32( word_parity[0], word_parity[1],
18473 word_parity[2], word_parity[3] ) );
18475 } else if ( rA_addr == 9 ) {
18476 DIP("vprtybd v%d,v%d", rT_addr, vB_addr);
18478 assign( vT,
18479 binop( Iop_64HLtoV128,
18480 binop( Iop_32HLto64,
18481 mkU32( 0 ),
18482 binop( Iop_Xor32,
18483 mkexpr( word_parity[0] ),
18484 mkexpr( word_parity[1] ) ) ),
18485 binop( Iop_32HLto64,
18486 mkU32( 0 ),
18487 binop( Iop_Xor32,
18488 mkexpr( word_parity[2] ),
18489 mkexpr( word_parity[3] ) ) ) ) );
18491 } else if ( rA_addr == 10 ) {
18492 DIP("vprtybq v%d,v%d", rT_addr, vB_addr);
18494 assign( vT,
18495 binop( Iop_64HLtoV128,
18496 mkU64( 0 ),
18497 unop( Iop_32Uto64,
18498 mkXOr4_32( word_parity[0],
18499 word_parity[1],
18500 word_parity[2],
18501 word_parity[3] ) ) ) );
18504 break;
18506 case 16: // vextsb2w, Vector Extend Sign Byte to Word
18507 DIP("vextsb2w v%u,v%u", rT_addr, vB_addr);
18509 /* Iop_MullEven8Sx16 does a signed widening multiplication of byte to
18510 * two byte sign extended result. Then do a two byte to four byte sign
18511 * extended multiply. Note contents of upper three bytes in word are
18512 * "over written". So just take source and multiply by 1.
18514 assign( vT, binop( Iop_MullEven16Sx8,
18515 binop( Iop_64HLtoV128,
18516 mkU64( 0x0000000100000001 ),
18517 mkU64( 0x0000000100000001 ) ),
18518 binop( Iop_MullEven8Sx16,
18519 mkexpr( vB ),
18520 binop( Iop_64HLtoV128,
18521 mkU64( 0x0001000100010001 ),
18522 mkU64( 0x0001000100010001 ) ) ) ) );
18523 break;
18525 case 17: // vextsh2w, Vector Extend Sign Halfword to Word
18526 DIP("vextsh2w v%u,v%u", rT_addr, vB_addr);
18528 /* Iop_MullEven16Sx8 does a signed widening multiply of four byte
18529 * 8 bytes. Note contents of upper two bytes in word are
18530 * "over written". So just take source and multiply by 1.
18532 assign( vT, binop( Iop_MullEven16Sx8,
18533 binop( Iop_64HLtoV128,
18534 mkU64( 0x0000000100000001 ),
18535 mkU64( 0x0000000100000001 ) ),
18536 mkexpr( vB ) ) );
18538 break;
18540 case 24: // vextsb2d, Vector Extend Sign Byte to Doubleword
18541 DIP("vextsb2d v%u,v%u", rT_addr, vB_addr);
18543 /* Iop_MullEven8Sx16 does a signed widening multiplication of byte to
18544 * two byte sign extended result. Then do a two byte to four byte sign
18545 * extended multiply. Then do four byte to eight byte multiply.
18547 assign( vT, binop( Iop_MullEven32Sx4,
18548 binop( Iop_64HLtoV128,
18549 mkU64( 0x0000000000000001 ),
18550 mkU64( 0x0000000000000001 ) ),
18551 binop( Iop_MullEven16Sx8,
18552 binop( Iop_64HLtoV128,
18553 mkU64( 0x0000000100000001 ),
18554 mkU64( 0x0000000100000001 ) ),
18555 binop( Iop_MullEven8Sx16,
18556 binop( Iop_64HLtoV128,
18557 mkU64( 0x0001000100010001 ),
18558 mkU64( 0x0001000100010001 ) ),
18559 mkexpr( vB ) ) ) ) );
18560 break;
18562 case 25: // vextsh2d, Vector Extend Sign Halfword to Doubleword
18563 DIP("vextsh2d v%u,v%u", rT_addr, vB_addr);
18565 assign( vT, binop( Iop_MullEven32Sx4,
18566 binop( Iop_64HLtoV128,
18567 mkU64( 0x0000000000000001 ),
18568 mkU64( 0x0000000000000001 ) ),
18569 binop( Iop_MullEven16Sx8,
18570 binop( Iop_64HLtoV128,
18571 mkU64( 0x0000000100000001 ),
18572 mkU64( 0x0000000100000001 ) ),
18573 mkexpr( vB ) ) ) );
18574 break;
18576 case 26: // vextsw2d, Vector Extend Sign Word to Doubleword
18577 DIP("vextsw2d v%u,v%u", rT_addr, vB_addr);
18579 assign( vT, binop( Iop_MullEven32Sx4,
18580 binop( Iop_64HLtoV128,
18581 mkU64( 0x0000000000000001 ),
18582 mkU64( 0x0000000000000001 ) ),
18583 mkexpr( vB ) ) );
18584 break;
18585 case 27: // vextsd2q Vector Extend Sign Doubleword to Quadword
18587 IRTemp sb = newTemp(Ity_I64); // sign bit extended
18588 IRTemp tmp = newTemp(Ity_I64);
18590 DIP("vextsd2q v%u,v%u\n", rT_addr, vB_addr);
18591 assign( tmp, unop( Iop_V128to64, mkexpr( vB ) ) );
18592 assign( sb, unop( Iop_1Sto64,
18593 unop( Iop_64to1,
18594 binop( Iop_Shr64,
18595 mkexpr( tmp ),
18596 mkU8( 63 ) ) ) ) );
18598 assign( vT, binop( Iop_64HLtoV128, mkexpr( sb ), mkexpr( tmp ) ) );
18600 break;
18603 case 28: // vctzb, Vector Count Trailing Zeros Byte
18605 DIP("vctzb v%d,v%d", rT_addr, vB_addr);
18607 /* This instruction is only available in the ISA 3.0 */
18608 if ( !mode64 || !allow_isa_3_0 ) {
18609 vex_printf("\n vctzb instruction not supported on non ISA 3.0 platform\n\n");
18610 return False;
18612 assign( vT, unop( Iop_Ctz8x16, mkexpr( vB ) ) );
18614 break;
18616 case 29: // vctzh, Vector Count Trailing Zeros Halfword
18618 DIP("vctzh v%d,v%d", rT_addr, vB_addr);
18620 /* This instruction is only available in the ISA 3.0 */
18621 if ( !mode64 || !allow_isa_3_0 ) {
18622 vex_printf("\n vctzh instruction not supported on non ISA 3.0 platform\n\n");
18623 return False;
18625 assign( vT, unop( Iop_Ctz16x8, mkexpr( vB ) ) );
18627 break;
18629 case 30: // vctzw, Vector Count Trailing Zeros Word
18631 DIP("vctzw v%d,v%d", rT_addr, vB_addr);
18633 /* This instruction is only available in the ISA 3.0 */
18634 if ( !mode64 || !allow_isa_3_0 ) {
18635 vex_printf("\n vctzw instruction not supported on non ISA 3.0 platform\n\n");
18636 return False;
18638 assign( vT, unop( Iop_Ctz32x4, mkexpr( vB ) ) );
18640 break;
18642 case 31: // vctzd, Vector Count Trailing Zeros Double word
18644 DIP("vctzd v%d,v%d", rT_addr, vB_addr);
18646 /* This instruction is only available in the ISA 3.0 */
18647 if ( !mode64 || !allow_isa_3_0 ) {
18648 vex_printf("\n vctzd instruction not supported on non ISA 3.0 platform\n\n");
18649 return False;
18651 assign( vT, unop( Iop_Ctz64x2, mkexpr( vB ) ) );
18653 break;
18655 default:
18656 vex_printf("dis_av_extend_sign(ppc)(Unsupported vector extend sign instruction)\n");
18657 return False;
18660 putVReg( rT_addr, mkexpr( vT ) );
18661 return True;
18665 Vector Rotate Instructions
18667 static Bool dis_av_rotate ( UInt prefix, UInt theInstr )
18669 /* VX-Form */
18671 UChar opc1 = ifieldOPC( theInstr );
18672 UChar vT_addr = ifieldRegDS( theInstr );
18673 UChar vA_addr = ifieldRegA( theInstr );
18674 UChar vB_addr = ifieldRegB( theInstr );
18675 UInt opc2 = IFIELD( theInstr, 0, 11 );
18677 IRTemp vA = newTemp( Ity_V128 );
18678 IRTemp vB = newTemp( Ity_V128 );
18679 IRTemp src3 = newTemp( Ity_V128 );
18680 IRTemp vT = newTemp( Ity_V128 );
18681 IRTemp field_mask = newTemp( Ity_V128 );
18682 IRTemp mask128 = newTemp( Ity_V128 );
18683 IRTemp vA_word[4];
18684 IRTemp left_bits[4];
18685 IRTemp right_bits[4];
18686 IRTemp mb[4];
18687 IRTemp me[4];
18688 IRTemp shift[4];
18689 IRTemp mask[4];
18690 IRTemp tmp_mask[4];
18691 IRTemp invert_mask[4];
18692 IRTemp tmp128[4];
18693 UInt i;
18694 UInt num_words;
18695 UInt word_size;
18696 unsigned long long word_mask;
18698 /* There is no prefixed version of these instructions. */
18699 PREFIX_CHECK
18701 if ( opc1 != 0x4 ) {
18702 vex_printf("dis_av_rotate(ppc)(instr)\n");
18703 return False;
18706 assign( vA, getVReg( vA_addr ) );
18707 assign( vB, getVReg( vB_addr ) );
18709 switch (opc2) {
18710 case 0x85: // vrlwmi, Vector Rotate Left Word then Mask Insert
18711 case 0x185: // vrlwnm, Vector Rotate Left Word then AND with Mask
18712 num_words = 4;
18713 word_size = 32;
18714 assign( field_mask, binop( Iop_64HLtoV128,
18715 mkU64( 0 ),
18716 mkU64( 0x1F ) ) );
18717 word_mask = 0xFFFFFFFF;
18718 break;
18720 case 0x0C5: // vrldmi, Vector Rotate Left Doubleword then Mask Insert
18721 case 0x1C5: // vrldnm, Vector Rotate Left Doubleword then AND with Mask
18722 num_words = 2;
18723 word_size = 64;
18724 assign( field_mask, binop( Iop_64HLtoV128,
18725 mkU64( 0 ),
18726 mkU64( 0x3F ) ) );
18727 word_mask = 0xFFFFFFFFFFFFFFFFULL;
18728 break;
18729 default:
18730 vex_printf("dis_av_rotate(ppc)(opc2)\n");
18731 return False;
18734 for( i = 0; i < num_words; i++ ) {
18735 left_bits[i] = newTemp( Ity_I8 );
18736 right_bits[i] = newTemp( Ity_I8 );
18737 shift[i] = newTemp( Ity_I8 );
18738 mb[i] = newTemp( Ity_I64 );
18739 me[i] = newTemp( Ity_I64 );
18740 tmp_mask[i] = newTemp( Ity_I64 );
18741 invert_mask[i] = newTemp( Ity_I64 );
18742 mask[i] = newTemp( Ity_V128 );
18743 tmp128[i] = newTemp( Ity_V128 );
18744 vA_word[i] = newTemp( Ity_V128 );
18746 assign( shift[i],
18747 unop( Iop_64to8,
18748 unop( Iop_V128to64,
18749 binop( Iop_AndV128,
18750 binop( Iop_ShrV128,
18751 mkexpr( vB ),
18752 mkU8( (num_words - 1 - i )
18753 * word_size ) ),
18754 mkexpr( field_mask ) ) ) ) );
18756 assign( mb[i], unop( Iop_V128to64,
18757 binop( Iop_AndV128,
18758 binop( Iop_ShrV128,
18759 mkexpr( vB ),
18760 mkU8( ( num_words - 1 - i )
18761 * word_size + 16 ) ),
18762 mkexpr( field_mask ) ) ) );
18764 assign( me[i], unop( Iop_V128to64,
18765 binop( Iop_AndV128,
18766 binop( Iop_ShrV128,
18767 mkexpr( vB ),
18768 mkU8( ( num_words - 1 - i )
18769 * word_size + 8 ) ),
18770 mkexpr( field_mask ) ) ) );
18772 /* If me < mb, we have to flip things around and invert the mask */
18773 assign( invert_mask[i],
18774 unop( Iop_1Sto64, binop( Iop_CmpLT64U,
18775 mkexpr( me[i] ), mkexpr( mb[i] ) ) ) );
18777 /* left_bits = 63 - mb. Tells us how many bits to the left
18778 * of mb to clear. Note for a word left_bits = 32+mb, for a double
18779 * word left_bits = mb
18781 assign( left_bits[i],
18782 unop( Iop_64to8,
18783 binop( Iop_Or64,
18784 binop( Iop_And64, // mb < me
18785 unop( Iop_Not64, mkexpr( invert_mask[i] ) ),
18786 binop( Iop_Add64,
18787 mkU64( 64 - word_size ),
18788 mkexpr( mb[i] ) ) ),
18789 binop( Iop_And64, // me < mb
18790 mkexpr( invert_mask[i] ),
18791 binop( Iop_Add64,
18792 mkU64( 64 + 1 - word_size ),
18793 mkexpr( me[i] ) ) ) ) ) );
18795 /* right_bits = 63 - me. Tells us how many bits to the right
18796 * of me to clear. Note for a word, left_bits = me+32, for a double
18797 * word left_bits = me
18799 assign( right_bits[i],
18800 unop( Iop_64to8,
18801 binop( Iop_Or64,
18802 binop( Iop_And64, // mb < me
18803 unop( Iop_Not64, mkexpr( invert_mask[i] ) ),
18804 binop( Iop_Sub64,
18805 mkU64( word_size - 1 ),
18806 mkexpr( me[i] ) ) ),
18807 binop( Iop_And64, // me < mb
18808 mkexpr( invert_mask[i] ),
18809 binop( Iop_Sub64,
18810 mkU64( word_size - 1 + 1),
18811 mkexpr( mb[i] ) ) ) ) ) );
18813 /* create mask for 32-bit word or 64-bit word */
18814 assign( tmp_mask[i],
18815 binop( Iop_Shl64,
18816 binop( Iop_Shr64,
18817 binop( Iop_Shr64,
18818 binop( Iop_Shl64,
18819 mkU64( 0xFFFFFFFFFFFFFFFF ),
18820 mkexpr( left_bits[i] ) ),
18821 mkexpr( left_bits[i] ) ),
18822 mkexpr( right_bits[i] ) ),
18823 mkexpr( right_bits[i] ) ) );
18825 assign( mask[i],
18826 binop( Iop_64HLtoV128,
18827 mkU64( 0 ),
18828 binop( Iop_Or64,
18829 binop( Iop_And64,
18830 unop( Iop_Not64, mkexpr( invert_mask[i] ) ),
18831 mkexpr( tmp_mask[i] ) ),
18832 binop( Iop_And64,
18833 mkexpr( invert_mask[i] ),
18834 /* Need to make sure mask is only the size
18835 desired word.*/
18836 binop( Iop_And64,
18837 mkU64( word_mask ),
18838 unop( Iop_Not64,
18839 mkexpr( tmp_mask[i] ) ) ) ))));
18841 /* Need to rotate vA using a left and right shift of vA OR'd together
18842 * then ANDed with the mask.
18844 assign( vA_word[i], binop( Iop_AndV128,
18845 mkexpr( vA ),
18846 binop( Iop_ShlV128,
18847 binop( Iop_64HLtoV128,
18848 mkU64( 0 ),
18849 mkU64( word_mask ) ),
18850 mkU8( ( num_words - 1 - i )
18851 * word_size ) ) ) );
18852 assign( tmp128[i],
18853 binop( Iop_AndV128,
18854 binop( Iop_ShlV128,
18855 mkexpr( mask[i] ),
18856 mkU8( ( num_words - 1 - i) * word_size ) ),
18857 binop( Iop_OrV128,
18858 binop( Iop_ShlV128,
18859 mkexpr( vA_word[i] ),
18860 mkexpr( shift[i] ) ),
18861 binop( Iop_ShrV128,
18862 mkexpr( vA_word[i] ),
18863 unop( Iop_32to8,
18864 binop(Iop_Sub32,
18865 mkU32( word_size ),
18866 unop( Iop_8Uto32,
18867 mkexpr( shift[i] ) ) )
18868 ) ) ) ) );
18871 switch (opc2) {
18872 case 0x85: // vrlwmi, Vector Rotate Left Word then Mask Insert
18873 DIP("vrlwmi %d,%d,v%d", vT_addr, vA_addr, vB_addr);
18875 assign( src3, getVReg( vT_addr ) );
18876 assign( mask128, unop( Iop_NotV128,
18877 mkOr4_V128_expr( binop( Iop_ShlV128,
18878 mkexpr( mask[0] ),
18879 mkU8( 96 ) ),
18880 binop( Iop_ShlV128,
18881 mkexpr( mask[1] ),
18882 mkU8( 64 ) ),
18883 binop( Iop_ShlV128,
18884 mkexpr( mask[2] ),
18885 mkU8( 32 ) ),
18886 mkexpr( mask[3] ) ) ) );
18887 assign( vT, binop( Iop_OrV128,
18888 binop( Iop_AndV128,
18889 mkexpr( src3 ),
18890 mkexpr( mask128 ) ),
18891 mkOr4_V128( tmp128[0], tmp128[1],
18892 tmp128[2], tmp128[3] ) ) );
18893 break;
18895 case 0xC5: // vrldmi, Vector Rotate Left Double word then Mask Insert
18896 DIP("vrldmi %d,%d,v%d", vT_addr, vA_addr, vB_addr);
18898 assign( src3, getVReg( vT_addr ) );
18899 assign( mask128, unop( Iop_NotV128,
18900 binop( Iop_OrV128,
18901 binop( Iop_ShlV128,
18902 mkexpr( mask[0] ),
18903 mkU8( 64 ) ),
18904 mkexpr( mask[1] ) ) ) );
18906 assign( vT, binop( Iop_OrV128,
18907 binop( Iop_AndV128,
18908 mkexpr( src3 ),
18909 mkexpr( mask128 ) ),
18910 binop( Iop_OrV128,
18911 mkexpr( tmp128[0] ),
18912 mkexpr( tmp128[1] ) ) ) );
18913 break;
18915 case 0x185: // vrlwnm, Vector Rotate Left Word then AND with Mask
18916 DIP("vrlwnm %d,%d,v%d", vT_addr, vA_addr, vB_addr);
18917 assign( vT, mkOr4_V128( tmp128[0], tmp128[1], tmp128[2], tmp128[3] ) );
18918 break;
18920 case 0x1C5: // vrldnm, Vector Rotate Left Doubleword then AND with Mask
18921 DIP("vrldnm %d,%d,v%d", vT_addr, vA_addr, vB_addr);
18922 assign( vT, binop( Iop_OrV128,
18923 mkexpr( tmp128[0] ),
18924 mkexpr( tmp128[1] ) ) );
18925 break;
18928 putVReg( vT_addr, mkexpr( vT ) );
18929 return True;
18933 AltiVec Vector Insert Element Instructions
18935 static Bool dis_av_insert_element ( UInt prefix, UInt theInstr )
18937 /* VX-Form,
18938 * Source, index and value are GPR, destination is a vector register.
18940 UChar opc1 = ifieldOPC( theInstr );
18941 UChar VRT = ifieldRegDS( theInstr );
18942 UChar rA_addr = ifieldRegA( theInstr );
18943 UChar VRB = ifieldRegB( theInstr );
18944 UInt opc2 = IFIELD( theInstr, 0, 11 );
18945 UChar rVT_addr = VRT;
18946 UChar rVB_addr = VRB;
18948 IRTemp rA = newTemp( Ity_I64 );
18949 IRTemp vTmp = newTemp( Ity_V128 );
18950 IRTemp index = newTemp( Ity_I64 );
18951 UInt max_index_in_src = 15;
18953 /* There is no prefixed version of these instructions. */
18954 vassert( !prefix_instruction( prefix ) );
18956 assign( vTmp, getVReg( rVT_addr ) );
18957 assign( rA, getIReg( rA_addr ) );
18958 assign ( index, binop( Iop_Sub64,
18959 mkU64( 15 ),
18960 mkexpr( rA ) ) );
18962 if ( opc1 != 0x4 ) {
18963 vex_printf("dis_av_insert_element(ppc)(instr)\n");
18964 return False;
18967 switch ( opc2 ) {
18968 case 0x00F: // vinsbvlx, vector insert Byte from VSR Left-indexed VX form
18970 IRTemp src = newTemp( Ity_I64 );
18971 IRTemp adj_index = newTemp( Ity_I64 );
18972 IRTemp rVB = newTemp( Ity_V128 );
18974 DIP("vinsbvlx v%d,%d,v%d", VRT, rA_addr, VRB);
18976 assign( rVB, getVReg( rVB_addr ) );
18977 assign( adj_index, binop( Iop_Sub64,
18978 mkU64( max_index_in_src ),
18979 binop( Iop_And64,
18980 mkU64( 0xF),
18981 mkexpr( rA ) ) ) );
18983 /* Extract byte in rVB[56:63], that is byte 8 counting from the right */
18984 assign( src, extract_field_from_vector( rVB, mkU64( 8 ), 0xFF ) );
18985 putVReg( rVT_addr,
18986 insert_field_into_vector( vTmp, mkexpr( adj_index ),
18987 mkexpr( src), mkU64( 0xFF ) ) );
18989 break;
18991 case 0x10F: // vinsbvrx, vector insert Byte from VSR Right-indexed VX form
18993 IRTemp src = newTemp( Ity_I64 );
18994 IRTemp rVB = newTemp( Ity_V128 );
18995 IRTemp adj_index = newTemp( Ity_I64 );
18997 DIP("vinsbvrx v%d,%d,v%d", VRT, rA_addr, VRB);
18999 assign( rVB, getVReg( rVB_addr ) );
19001 assign( adj_index, binop( Iop_And64, mkexpr( rA ), mkU64( 0xF ) ) );
19002 /* Extract byte in rVB[56:63], that is byte 8 counting from the right */
19003 assign( src, extract_field_from_vector( rVB, mkU64( 8 ), 0xFF ) );
19004 putVReg( rVT_addr,
19005 insert_field_into_vector( vTmp, mkexpr( rA ),
19006 mkexpr( src), mkU64( 0xFF ) ) );
19008 break;
19010 case 0x04F:
19011 // vinshvlx, vector insert Halfword from VSR Left-indexed VX form
19013 IRTemp src = newTemp( Ity_I64 );
19014 IRTemp adj_index = newTemp( Ity_I64 );
19015 IRTemp rVB = newTemp( Ity_V128 );
19017 DIP("vinshvlx v%d,%d,v%d", VRT, rA_addr, VRB);
19019 assign( rVB, getVReg( rVB_addr ) );
19020 assign( adj_index, binop( Iop_Sub64,
19021 mkU64( max_index_in_src - 1 ),
19022 binop( Iop_And64,
19023 mkexpr( rA ),
19024 mkU64( 0xF ) ) ) );
19026 /* Extract half word rVB[48:63], bytes [9:8] counting from the right */
19027 assign( src, extract_field_from_vector( rVB, mkU64( 8 ), 0xFFFF ) );
19028 putVReg( rVT_addr,
19029 insert_field_into_vector( vTmp, mkexpr( adj_index ),
19030 mkexpr( src), mkU64( 0xFFFF ) ) );
19032 break;
19034 case 0x14F:
19035 // vinshvrx, vector insert Halfword from VSR Right-indexed VX form
19037 IRTemp src = newTemp( Ity_I64 );
19038 IRTemp rVB = newTemp( Ity_V128 );
19039 IRTemp adj_index = newTemp( Ity_I64 );
19041 DIP("vinshvrx v%d,%d,v%d", VRT, rA_addr, VRB);
19043 assign( rVB, getVReg( rVB_addr ) );
19045 assign( adj_index, binop( Iop_And64, mkexpr( rA ), mkU64( 0xF ) ) );
19047 /* Extract half word rVB[48:63], bytes [9:8] counting from the right */
19048 assign( src, extract_field_from_vector( rVB, mkU64( 8 ), 0xFFFF ) );
19049 putVReg( rVT_addr,
19050 insert_field_into_vector( vTmp, mkexpr( rA ), mkexpr( src),
19051 mkU64( 0xFFFF ) ) );
19053 break;
19055 case 0x08F:
19056 // vinswvlx, vector insert Word from VSR Left-indexed VX form
19058 IRTemp src = newTemp( Ity_I64 );
19059 IRTemp adj_index = newTemp( Ity_I64 );
19060 IRTemp rVB = newTemp( Ity_V128 );
19062 DIP("vinswvlx v%u,%u,v%u", VRT, rA_addr, VRB);
19064 assign( rVB, getVReg( rVB_addr ) );
19065 assign( adj_index, binop( Iop_Sub64,
19066 mkU64( max_index_in_src - 3 ),
19067 binop( Iop_And64,
19068 mkU64( 0xF ),
19069 mkexpr( rA ) ) ) );
19071 /* Extract word rVB[32:63], bytes [15:8] counting from the right */
19072 assign( src, extract_field_from_vector( rVB, mkU64( 8 ), 0xFFFFFFFF ) );
19073 putVReg( rVT_addr,
19074 insert_field_into_vector( vTmp, mkexpr( adj_index ),
19075 mkexpr( src), mkU64( 0xFFFFFFFF ) ) );
19077 break;
19079 case 0x18F:
19080 // vinswvrx, vector insert Word from VSR Right-indexed VX form
19082 IRTemp src = newTemp( Ity_I64 );
19083 IRTemp rVB = newTemp( Ity_V128 );
19084 IRTemp adj_index = newTemp( Ity_I64 );
19086 DIP("vinswvrx v%u,%u,v%u", VRT, rA_addr, VRB);
19088 assign( rVB, getVReg( rVB_addr ) );
19090 assign( adj_index, binop( Iop_And64, mkexpr( rA ), mkU64( 0xF ) ) );
19091 /* Extract word in rVB[32:63], bytes [15:8] counting from the right */
19092 assign( src, extract_field_from_vector( rVB, mkU64( 8 ), 0xFFFFFFFF ) );
19094 putVReg( rVT_addr,
19095 insert_field_into_vector( vTmp, mkexpr( rA ),
19096 mkexpr( src), mkU64( 0xFFFFFFFF ) ) );
19098 break;
19100 case 0x0CF:
19101 // vinsw, vector insert Word from GPR VX form
19103 IRTemp rB = newTemp( Ity_I64 );
19104 UChar rB_addr = ifieldRegB( theInstr );
19105 UInt UIM = IFIELD( theInstr, 16, 4 );
19106 UInt max_bytes_in_src = 15;
19108 DIP("vinsw v%u,%u,%u", VRT, rB_addr, UIM);
19110 assign( rB, getIReg( rB_addr ) );
19112 putVReg( rVT_addr,
19113 insert_field_into_vector( vTmp,
19114 mkU64( max_bytes_in_src - 3 - UIM ),
19115 mkexpr( rB), mkU64( 0xFFFFFFFF ) ) );
19117 break;
19119 case 0x1CF:
19120 // vinsd, vector insert Doubleword from GPR VX form
19122 IRTemp rB = newTemp( Ity_I64 );
19123 UChar rB_addr = ifieldRegB( theInstr );
19124 UInt UIM = IFIELD( theInstr, 16, 4 );
19125 UInt max_bytes_in_src = 15;
19127 DIP("vinsd v%u,%u,%u", VRT, rB_addr, UIM);
19129 assign( rB, getIReg( rB_addr ) );
19131 putVReg( rVT_addr,
19132 insert_field_into_vector( vTmp,
19133 mkU64( max_bytes_in_src - 7 - UIM ),
19134 mkexpr( rB ),
19135 mkU64( 0xFFFFFFFFFFFFFFFFULL ) ) );
19137 break;
19139 case 0x20F: // vinsblx, vector insert Byte from GPR Left-indexed VX form
19141 IRTemp rB = newTemp( Ity_I64 );
19142 UChar rB_addr = ifieldRegB( theInstr );
19144 DIP("vinsblx v%u,%u,%u", VRT, rA_addr, rB_addr);
19146 assign( rB, getIReg( rB_addr ) );
19147 putVReg( rVT_addr,
19148 insert_field_into_vector( vTmp,
19149 binop( Iop_Sub64,
19150 mkU64( max_index_in_src ),
19151 mkexpr( rA ) ),
19152 mkexpr( rB ), mkU64( 0xFF ) ) );
19153 break;
19155 case 0x30F: // vinsbrx, vector insert Byte from GPR Right-indexed VX form
19157 IRTemp rB = newTemp( Ity_I64 );
19158 UChar rB_addr = ifieldRegB( theInstr );
19160 DIP("vinsbrx v%u,%u,%u", VRT, rA_addr, rB_addr);
19162 assign( rB, getIReg( rB_addr ) );
19163 putVReg( rVT_addr,
19164 insert_field_into_vector( vTmp, mkexpr( rA ),
19165 mkexpr( rB ), mkU64( 0xFF ) ) );
19166 break;
19168 case 0x24F: // vinshlx, vector insert Halfword from GPR Left-indexed VX form
19170 IRTemp rB = newTemp( Ity_I64 );
19171 UChar rB_addr = ifieldRegB( theInstr );
19173 DIP("vinshlx v%u,%u,%u", VRT, rA_addr, rB_addr);
19175 /* insert_field_into_vector assumes right-indexed, convert argument */
19176 assign( rB, getIReg( rB_addr ) );
19177 putVReg( rVT_addr,
19178 insert_field_into_vector( vTmp,
19179 binop( Iop_Sub64,
19180 mkU64( max_index_in_src-1 ),
19181 mkexpr( rA ) ),
19182 mkexpr( rB ), mkU64( 0xFFFF ) ) );
19183 break;
19185 case 0x34F:// vinshrx, vector insert Halfword from GPR Right-indexed VX form
19187 IRTemp rB = newTemp( Ity_I64 );
19188 UChar rB_addr = ifieldRegB( theInstr );
19190 DIP("vinshrx v%u,%u,%u", VRT, rA_addr, rB_addr);
19192 assign( rB, getIReg( rB_addr ) );
19193 putVReg( rVT_addr,
19194 insert_field_into_vector( vTmp, mkexpr( rA ),
19195 mkexpr( rB ), mkU64( 0xFFFF ) ) );
19196 break;
19198 case 0x28F: // vinswlx, vector insert Word from GPR Left-indexed VX form
19200 IRTemp rB = newTemp( Ity_I64 );
19201 UChar rB_addr = ifieldRegB( theInstr );
19203 DIP("vinswlx v%u,%u,%u", VRT, rA_addr, rB_addr);
19205 /* insert_field_into_vector assumes right-indexed, convert argument */
19206 assign( rB, getIReg( rB_addr ) );
19207 putVReg( rVT_addr,
19208 insert_field_into_vector( vTmp,
19209 binop( Iop_Sub64,
19210 mkU64( max_index_in_src-3 ),
19211 mkexpr( rA ) ),
19212 mkexpr( rB ), mkU64( 0xFFFFFFFF ) ) );
19213 break;
19215 case 0x38F:// vinswrx, vector insert Word from GPR Right-indexed VX form
19217 IRTemp rB = newTemp( Ity_I64 );
19218 UChar rB_addr = ifieldRegB( theInstr );
19220 DIP("vinswrx v%u,%u,%u", VRT, rA_addr, rB_addr);
19222 assign( rB, getIReg( rB_addr ) );
19223 putVReg( rVT_addr,
19224 insert_field_into_vector( vTmp, mkexpr( rA ),
19225 mkexpr( rB ), mkU64( 0xFFFFFFFF ) ) );
19226 break;
19228 case 0x2CF:
19230 // vinsdlx, vector insert Doubleword from GPR Left-indexed VX form
19231 IRTemp rB = newTemp( Ity_I64 );
19232 UChar rB_addr = ifieldRegB( theInstr );
19234 DIP("vinsdlx v%u,%u,%u", VRT, rA_addr, rB_addr);
19236 /* insert_field_into_vector assumes right-indexed, convert argument */
19237 assign( rB, getIReg( rB_addr ) );
19238 putVReg( rVT_addr,
19239 insert_field_into_vector( vTmp,
19240 binop( Iop_Sub64,
19241 mkU64( max_index_in_src-7 ),
19242 mkexpr( rA ) ),
19243 mkexpr( rB ),
19244 mkU64( 0xFFFFFFFFFFFFFFFFULL ) ) );
19245 break;
19247 case 0x3CF:
19249 // vinsdrx, vector insert Doubleword from GPR Right-indexed VX form
19250 IRTemp rB = newTemp( Ity_I64 );
19251 UChar rB_addr = ifieldRegB( theInstr );
19253 DIP("vinsdrx v%u,%u,%u", VRT, rA_addr, rB_addr);
19255 assign( rB, getIReg( rB_addr ) );
19256 putVReg( rVT_addr,
19257 insert_field_into_vector( vTmp, mkexpr( rA ),
19258 mkexpr( rB ),
19259 mkU64( 0xFFFFFFFFFFFFFFFFULL ) ) );
19260 break;
19262 default:
19263 vex_printf("dis_av_extract_element(ppc)(opc2)\n");
19264 return False;
19266 return True;
19270 AltiVec Vector Extract Element Instructions
19272 static Bool dis_av_extract_element ( UInt prefix, UInt theInstr )
19274 /* VX-Form,
19275 * sorta destination and first source are GPR not vector registers
19278 UChar opc1 = ifieldOPC( theInstr );
19279 UChar rT_addr = ifieldRegDS( theInstr );
19280 UChar rA_addr = ifieldRegA( theInstr );
19281 UChar vB_addr = ifieldRegB( theInstr );
19282 UInt opc2 = IFIELD( theInstr, 0, 11 );
19284 IRTemp vB = newTemp( Ity_V128 );
19285 IRTemp rA = newTemp( Ity_I64 );
19286 IRTemp rT = newTemp( Ity_I64 );
19288 /* There is no prefixed version of these instructions. */
19289 PREFIX_CHECK
19291 assign( vB, getVReg( vB_addr ) );
19292 assign( rA, getIReg( rA_addr ) );
19294 if ( opc1 != 0x4 ) {
19295 vex_printf("dis_av_extract_element(ppc)(instr)\n");
19296 return False;
19299 switch ( opc2 ) {
19300 case 0x60D: // vextublx, vector extract unsigned Byte Left-indexed
19301 DIP("vextublx %d,%d,v%d", rT_addr, rA_addr, vB_addr);
19303 assign( rT, extract_field_from_vector( vB,
19304 binop( Iop_Sub64,
19305 mkU64( 15 ),
19306 mkexpr( rA ) ),
19307 0xFF ) );
19309 break;
19311 case 0x64D: // vextuhlx, vector extract unsigned Halfword Left-indexed
19312 DIP("vextuhlx %d,%d,v%d", rT_addr, rA_addr, vB_addr);
19314 assign( rT, extract_field_from_vector( vB,
19315 binop( Iop_Sub64,
19316 mkU64( 14 ),
19317 mkexpr( rA ) ),
19318 0xFFFF ) );
19319 break;
19321 case 0x68D: // vextuwlx, vector extract unsigned Word Left-indexed
19322 DIP("vextuwlx %d,%d,v%d", rT_addr, rA_addr, vB_addr);
19324 assign( rT, extract_field_from_vector( vB,
19325 binop( Iop_Sub64,
19326 mkU64( 12 ),
19327 mkexpr( rA ) ),
19328 0xFFFFFFFF ) );
19329 break;
19331 case 0x70D: // vextubrx, vector extract unsigned Byte Right-indexed
19332 DIP("vextubrx %d,%d,v%d", rT_addr, rA_addr, vB_addr);
19334 assign( rT, extract_field_from_vector( vB, mkexpr( rA ), 0xFF ) );
19335 break;
19337 case 0x74D: // vextuhrx, vector extract unsigned Halfword Right-indexed
19338 DIP("vextuhrx %d,%d,v%d", rT_addr, rA_addr, vB_addr);
19340 assign( rT, extract_field_from_vector( vB, mkexpr( rA ), 0xFFFF ) );
19341 break;
19343 case 0x78D: // vextuwrx, vector extract unsigned Word Right-indexed
19344 DIP("vextuwrx %d,%d,v%d", rT_addr, rA_addr, vB_addr);
19346 assign( rT, extract_field_from_vector( vB, mkexpr( rA ), 0xFFFFFFFF ) );
19347 break;
19349 default:
19350 vex_printf("dis_av_extract_element(ppc)(opc2)\n");
19351 return False;
19353 putIReg( rT_addr, mkexpr( rT ) );
19354 return True;
19358 * VSX scalar and vector convert instructions
19360 static Bool
19361 dis_vx_conv ( UInt prefix, UInt theInstr, UInt opc2 )
19363 /* XX2-Form */
19364 UChar opc1 = ifieldOPC( theInstr );
19365 UChar XT = ifieldRegXT( theInstr );
19366 UChar XB = ifieldRegXB( theInstr );
19367 IRTemp xB, xB2;
19368 IRTemp b3, b2, b1, b0;
19370 /* There is no prefixed version of these instructions. */
19371 PREFIX_CHECK
19373 xB = xB2 = IRTemp_INVALID;
19375 if (opc1 != 0x3C) {
19376 vex_printf( "dis_vx_conv(ppc)(instr)\n" );
19377 return False;
19380 /* Create and assign temps only as needed for the given instruction. */
19381 switch (opc2) {
19382 // scalar double-precision floating point argument
19383 case 0x2B0: case 0x0b0: case 0x290: case 0x212: case 0x216: case 0x090:
19384 xB = newTemp(Ity_F64);
19385 assign( xB,
19386 unop( Iop_ReinterpI64asF64,
19387 unop( Iop_V128HIto64, getVSReg( XB ) ) ) );
19388 break;
19389 // vector double-precision floating point arguments
19390 case 0x1b0: case 0x312: case 0x390: case 0x190: case 0x3B0:
19392 xB = newTemp(Ity_F64);
19393 xB2 = newTemp(Ity_F64);
19394 assign( xB,
19395 unop( Iop_ReinterpI64asF64,
19396 unop( Iop_V128HIto64, getVSReg( XB ) ) ) );
19397 assign( xB2,
19398 unop( Iop_ReinterpI64asF64,
19399 unop( Iop_V128to64, getVSReg( XB ) ) ) );
19400 break;
19401 // vector single precision or [un]signed integer word arguments
19402 case 0x130: case 0x392: case 0x330: case 0x310: case 0x110:
19403 case 0x1f0: case 0x1d0:
19404 b3 = b2 = b1 = b0 = IRTemp_INVALID;
19405 breakV128to4x32(getVSReg(XB), &b3, &b2, &b1, &b0);
19406 break;
19407 // vector [un]signed integer doubleword argument
19408 case 0x3f0: case 0x370: case 0x3d0: case 0x350:
19409 xB = newTemp(Ity_I64);
19410 assign( xB, unop( Iop_V128HIto64, getVSReg( XB ) ) );
19411 xB2 = newTemp(Ity_I64);
19412 assign( xB2, unop( Iop_V128to64, getVSReg( XB ) ) );
19413 break;
19414 // scalar [un]signed integer doubleword argument
19415 case 0x250: case 0x270: case 0x2D0: case 0x2F0:
19416 xB = newTemp(Ity_I64);
19417 assign( xB, unop( Iop_V128HIto64, getVSReg( XB ) ) );
19418 break;
19419 // scalar single precision argument
19420 case 0x292: // xscvspdp
19421 xB = newTemp(Ity_I32);
19423 assign( xB, handle_SNaN_to_QNaN_32(unop( Iop_64HIto32,
19424 unop( Iop_V128HIto64,
19425 getVSReg( XB ) ) ) ) );
19426 break;
19427 case 0x296: // xscvspdpn (non signaling version of xscvpdp)
19428 xB = newTemp(Ity_I32);
19429 assign( xB,
19430 unop( Iop_64HIto32, unop( Iop_V128HIto64, getVSReg( XB ) ) ) );
19431 break;
19433 /* Certain instructions have their complete implementation in the main switch statement
19434 * that follows this one; thus we have a "do nothing" case for those instructions here.
19436 case 0x170: case 0x150:
19437 break; // do nothing
19439 default:
19440 vex_printf( "dis_vx_conv(ppc)(opc2)\n" );
19441 return False;
19445 switch (opc2) {
19446 case 0x2B0:
19447 // xscvdpsxds (VSX Scalar truncate Double-Precision to integer and Convert
19448 // to Signed Integer Doubleword format with Saturate)
19449 DIP("xscvdpsxds v%u,v%u\n", XT, XB);
19450 putVSReg( XT,
19451 binop( Iop_64HLtoV128, binop( Iop_F64toI64S,
19452 mkU32( Irrm_ZERO ),
19453 mkexpr( xB ) ), mkU64( 0 ) ) );
19454 break;
19455 case 0x0b0: // xscvdpsxws (VSX Scalar truncate Double-Precision to integer and
19456 // Convert to Signed Integer Word format with Saturate)
19457 DIP("xscvdpsxws v%u,v%u\n", XT, XB);
19458 putVSReg( XT,
19459 binop( Iop_64HLtoV128,
19460 unop( Iop_32Sto64,
19461 binop( Iop_F64toI32S,
19462 mkU32( Irrm_ZERO ),
19463 mkexpr( xB ) ) ),
19464 mkU64( 0ULL ) ) );
19465 break;
19466 case 0x290: // xscvdpuxds (VSX Scalar truncate Double-Precision integer and Convert
19467 // to Unsigned Integer Doubleword format with Saturate)
19468 DIP("xscvdpuxds v%u,v%u\n", XT, XB);
19469 putVSReg( XT,
19470 binop( Iop_64HLtoV128,
19471 binop( Iop_F64toI64U,
19472 mkU32( Irrm_ZERO ),
19473 mkexpr( xB ) ),
19474 mkU64( 0ULL ) ) );
19475 break;
19476 case 0x270:
19477 // xscvsxdsp (VSX Scalar Convert and round Signed Integer Doubleword
19478 // to Single-Precision format)
19479 DIP("xscvsxdsp v%u,v%u\n", XT, XB);
19480 putVSReg( XT,
19481 binop( Iop_64HLtoV128,
19482 unop( Iop_ReinterpF64asI64,
19483 binop( Iop_RoundF64toF32,
19484 get_IR_roundingmode(),
19485 binop( Iop_I64StoF64,
19486 get_IR_roundingmode(),
19487 mkexpr( xB ) ) ) ),
19488 mkU64( 0 ) ) );
19489 break;
19490 case 0x2F0:
19491 // xscvsxddp (VSX Scalar Convert and round Signed Integer Doubleword to
19492 // Double-Precision format)
19493 DIP("xscvsxddp v%u,v%u\n", XT, XB);
19494 putVSReg( XT,
19495 binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
19496 binop( Iop_I64StoF64, get_IR_roundingmode(),
19497 mkexpr( xB ) ) ),
19498 mkU64( 0 ) ) );
19499 break;
19500 case 0x250:
19501 // xscvuxdsp (VSX Scalar Convert and round Unsigned Integer
19502 // Doubleword to Single-Precision format)
19503 DIP("xscvuxdsp v%u,v%u\n", XT, XB);
19504 putVSReg( XT,
19505 binop( Iop_64HLtoV128,
19506 unop( Iop_ReinterpF64asI64,
19507 binop( Iop_RoundF64toF32,
19508 get_IR_roundingmode(),
19509 binop( Iop_I64UtoF64,
19510 get_IR_roundingmode(),
19511 mkexpr( xB ) ) ) ),
19512 mkU64( 0 ) ) );
19513 break;
19514 case 0x2D0:
19515 // xscvuxddp (VSX Scalar Convert and round Unsigned Integer Doubleword to
19516 // Double-Precision format)
19517 DIP("xscvuxddp v%u,v%u\n", XT, XB);
19518 putVSReg( XT,
19519 binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
19520 binop( Iop_I64UtoF64, get_IR_roundingmode(),
19521 mkexpr( xB ) ) ),
19522 mkU64( 0 ) ) );
19523 break;
19524 case 0x1b0: // xvcvdpsxws (VSX Vector truncate Double-Precision to integer and Convert
19525 // to Signed Integer Word format with Saturate)
19526 case 0x190: // xvcvdpuxws (VSX Vector truncate Double-Precision to integer and
19527 // Convert to Unsigned Integer Word format with Saturate)
19529 IRTemp value_f64[2];
19530 IRTemp Result_32[2];
19531 IRTemp Result_32_tmp[2];
19532 IRTemp nan_mask[2];
19533 IRTemp underflow_mask[2];
19534 IRTemp overflow_mask[2];
19535 IRTemp error_mask[2];
19536 IRTemp error_value[2];
19537 IRTemp tmp_64[2];
19539 Int i;
19540 Int underflow_value;
19541 Int overflow_value;
19542 IRExpr* rmZero = mkU32(Irrm_ZERO);
19544 value_f64[0] = newTemp(Ity_F64);
19545 assign( value_f64[0], mkexpr( xB ) );
19547 value_f64[1] = newTemp(Ity_F64);
19548 assign( value_f64[1], mkexpr( xB2 ) );
19550 for ( i = 0; i < 2; i++) {
19551 Result_32[i] = newTemp(Ity_I32);
19552 Result_32_tmp[i] = newTemp(Ity_I32);
19553 nan_mask[i] = newTemp(Ity_I32);
19554 underflow_mask[i] = newTemp(Ity_I32);
19555 overflow_mask[i] = newTemp(Ity_I32);
19556 error_mask[i] = newTemp(Ity_I32);
19557 error_value[i] = newTemp(Ity_I32);
19558 tmp_64[i] = newTemp(Ity_I64);
19560 if ( opc2 == 0x1b0 ) { // xvcvdpsxws
19561 assign(Result_32_tmp[i], binop(Iop_F64toI32S,
19562 rmZero, mkexpr( value_f64[i] ) ) );
19564 /* result of Iop_CmpF64 is 0x01 if A < -2^31. */
19565 assign( underflow_mask[i],
19566 unop( Iop_1Sto32,
19567 unop( Iop_32to1,
19568 binop( Iop_CmpF64,
19569 mkexpr( value_f64[i] ),
19570 unop( Iop_ReinterpI64asF64,
19571 mkU64( 0xC1E0000000000000 ))))));
19572 overflow_value = 0x7FFFFFFF;
19573 underflow_value = 0x80000000;
19575 } else { // xvcvdpuxws
19576 assign( Result_32_tmp[i],
19577 binop( Iop_F64toI32U,
19578 mkU32( Irrm_ZERO ),
19579 mkexpr( value_f64[i] ) ) );
19581 /* result of Iop_CmpF64 is 0x01 if A < 0. */
19582 assign( underflow_mask[i],
19583 unop( Iop_1Sto32,
19584 unop( Iop_32to1,
19585 binop( Iop_CmpF64,
19586 mkexpr( value_f64[i] ),
19587 unop( Iop_ReinterpI64asF64,
19588 mkU64( 0x0 ) ) ) ) ) );
19589 overflow_value = 0xFFFFFFFF;
19590 underflow_value = 0;
19593 /* Check if input is NaN, output is 0x80000000.
19594 if input < -2^31, output is 0x80000000.
19595 if input > 2^31 - 1, output is 0x7FFFFFFF */
19596 assign( tmp_64[i], unop (Iop_ReinterpF64asI64,
19597 mkexpr( value_f64[i] ) ) );
19599 assign( nan_mask[i], unop( Iop_1Sto32,
19600 is_NaN( Ity_I64, tmp_64[i] ) ) );
19602 /* result of Iop_CmpF64 is 0x00 if A > 2^31 - 1. */
19603 assign( overflow_mask[i],
19604 unop( Iop_1Sto32,
19605 binop( Iop_CmpEQ32,
19606 mkU32( 0 ),
19607 binop( Iop_CmpF64,
19608 mkexpr( value_f64[i] ),
19609 unop( Iop_ReinterpI64asF64,
19610 mkU64( 0x41DFFFFFFFC00000 ))))));
19612 assign( error_mask[i], binop( Iop_Or32, mkexpr( overflow_mask[i] ),
19613 binop( Iop_Or32,
19614 mkexpr( underflow_mask[i] ),
19615 mkexpr( nan_mask[i] ) ) ) );
19617 if ( opc2 == 0x1b0 ) { // xvcvdpsxws
19618 /* NaN takes precedence over underflow/overflow for xvcvdpsxws */
19619 assign( error_value[i],
19620 binop( Iop_Or32,
19621 binop( Iop_And32,
19622 unop( Iop_Not32, mkexpr( nan_mask[i] ) ),
19623 binop( Iop_Or32,
19624 binop( Iop_And32,
19625 mkexpr( overflow_mask[i] ),
19626 mkU32( overflow_value ) ),
19627 binop( Iop_And32,
19628 mkexpr( underflow_mask[i] ),
19629 mkU32( underflow_value ) ) ) ),
19630 binop( Iop_And32,
19631 mkexpr( nan_mask[i] ),
19632 mkU32( 0x80000000 ) ) ) );
19633 } else {
19634 /* Less than zero takes precedence over NaN/overflow
19635 for xvcvdpuxws in the hardware. Matching the HW here
19636 but it does not appear to match ISA. */
19637 assign( error_value[i],
19638 binop( Iop_Or32,
19639 binop( Iop_And32,
19640 unop( Iop_Not32,
19641 mkexpr( underflow_mask[i] ) ),
19642 binop( Iop_Or32,
19643 binop( Iop_And32,
19644 mkexpr( overflow_mask[i] ),
19645 mkU32( overflow_value ) ),
19646 binop( Iop_And32,
19647 mkexpr( nan_mask[i] ),
19648 mkU32( 0x80000000 ) ) ) ),
19649 binop( Iop_And32,
19650 mkexpr( underflow_mask[i] ),
19651 mkU32( underflow_value ) ) ) );
19654 assign( Result_32[i], binop( Iop_Or32,
19655 binop( Iop_And32,
19656 mkexpr( Result_32_tmp[i] ),
19657 unop( Iop_Not32,
19658 mkexpr( error_mask[i] ) ) ),
19659 binop( Iop_And32,
19660 mkexpr( error_value[i] ),
19661 mkexpr( error_mask[i] ) ) ) );
19664 if ( opc2 == 0x1b0 ) {
19665 DIP("xvcvdpsxws v%u,v%u\n", XT, XB);
19667 } else {
19668 DIP("xvcvdpuxws v%u,v%u", XT, XB);
19671 /* Result is put in the hi and low 32-bits of the double word result. */
19672 putVSReg( XT,
19673 binop( Iop_64HLtoV128,
19674 binop( Iop_32HLto64,
19675 mkexpr( Result_32[0] ),
19676 mkexpr( Result_32[0] ) ),
19677 binop( Iop_32HLto64,
19678 mkexpr( Result_32[1] ),
19679 mkexpr( Result_32[1] ) ) ) );
19680 break;
19682 case 0x130: case 0x110: // xvcvspsxws, xvcvspuxws
19683 // (VSX Vector truncate Single-Precision to integer and
19684 // Convert to [Un]signed Integer Word format with Saturate)
19686 IRExpr * b0_result, * b1_result, * b2_result, * b3_result;
19687 IRTemp tempResult = newTemp(Ity_V128);
19688 IRTemp res0 = newTemp(Ity_I32);
19689 IRTemp res1 = newTemp(Ity_I32);
19690 IRTemp res2 = newTemp(Ity_I32);
19691 IRTemp res3 = newTemp(Ity_I32);
19692 IRTemp hi64 = newTemp(Ity_I64);
19693 IRTemp lo64 = newTemp(Ity_I64);
19694 Bool un_signed = (opc2 == 0x110);
19695 IROp op = un_signed ? Iop_QF32toI32Ux4_RZ : Iop_QF32toI32Sx4_RZ;
19697 DIP("xvcvsp%sxws v%u,v%u\n", un_signed ? "u" : "s", XT, XB);
19698 /* The xvcvsp{s|u}xws instruction is similar to vct{s|u}xs, except if src is a NaN,
19699 * then result is set to 0x80000000. */
19700 assign(tempResult, unop(op, getVSReg(XB)));
19701 assign( hi64, unop(Iop_V128HIto64, mkexpr(tempResult)) );
19702 assign( lo64, unop(Iop_V128to64, mkexpr(tempResult)) );
19703 assign( res3, unop(Iop_64HIto32, mkexpr(hi64)) );
19704 assign( res2, unop(Iop_64to32, mkexpr(hi64)) );
19705 assign( res1, unop(Iop_64HIto32, mkexpr(lo64)) );
19706 assign( res0, unop(Iop_64to32, mkexpr(lo64)) );
19708 b3_result = IRExpr_ITE(is_NaN(Ity_I32, b3),
19709 // then: result is 0x{8|0}80000000
19710 mkU32(un_signed ? 0x00000000 : 0x80000000),
19711 // else: result is from the Iop_QFtoI32{s|u}x4_RZ
19712 mkexpr(res3));
19713 b2_result = IRExpr_ITE(is_NaN(Ity_I32, b2),
19714 // then: result is 0x{8|0}80000000
19715 mkU32(un_signed ? 0x00000000 : 0x80000000),
19716 // else: result is from the Iop_QFtoI32{s|u}x4_RZ
19717 mkexpr(res2));
19718 b1_result = IRExpr_ITE(is_NaN(Ity_I32, b1),
19719 // then: result is 0x{8|0}80000000
19720 mkU32(un_signed ? 0x00000000 : 0x80000000),
19721 // else: result is from the Iop_QFtoI32{s|u}x4_RZ
19722 mkexpr(res1));
19723 b0_result = IRExpr_ITE(is_NaN(Ity_I32, b0),
19724 // then: result is 0x{8|0}80000000
19725 mkU32(un_signed ? 0x00000000 : 0x80000000),
19726 // else: result is from the Iop_QFtoI32{s|u}x4_RZ
19727 mkexpr(res0));
19729 putVSReg( XT,
19730 binop( Iop_64HLtoV128,
19731 binop( Iop_32HLto64, b3_result, b2_result ),
19732 binop( Iop_32HLto64, b1_result, b0_result ) ) );
19733 break;
19735 case 0x212: // xscvdpsp (VSX Scalar round Double-Precision to single-precision and
19736 // Convert to Single-Precision format
19737 // Apr 2019 update - write the result to both halves of the
19738 // target VSR. (see bug 401827,401828).
19739 DIP("xscvdpsp v%u,v%u\n", XT, XB);
19740 IRTemp ResultI32a = newTemp(Ity_I32);
19741 assign(ResultI32a, unop( Iop_ReinterpF32asI32,
19742 unop( Iop_TruncF64asF32,
19743 binop( Iop_RoundF64toF32,
19744 get_IR_roundingmode(),
19745 mkexpr( xB ) ) ) ) );
19746 putVSReg( XT,
19747 binop( Iop_64HLtoV128,
19748 binop( Iop_32HLto64,
19749 mkexpr(ResultI32a ),
19750 mkexpr(ResultI32a ) ),
19751 mkU64( 0ULL ) ) );
19752 break;
19753 case 0x216: /* xscvdpspn (VSX Scalar convert scalar Double-Precision to
19754 vector Single-Precision non-signalling */
19755 // Apr 2019 update - write the result to both halves of the
19756 // target VSR. (see bug 401827,401828).
19757 DIP("xscvdpspn v%u,v%u\n", XT, XB);
19758 IRTemp ResultI32b = newTemp(Ity_I32);
19759 assign(ResultI32b, unop( Iop_ReinterpF32asI32,
19760 unop( Iop_TruncF64asF32,
19761 mkexpr( xB ) ) ) );
19762 putVSReg( XT,
19763 binop( Iop_64HLtoV128,
19764 binop( Iop_32HLto64,
19765 mkexpr(ResultI32b ),
19766 mkexpr(ResultI32b ) ),
19767 mkU64( 0ULL ) ) );
19768 break;
19769 case 0x090: // xscvdpuxws (VSX Scalar truncate Double-Precision to integer
19770 // and Convert to Unsigned Integer Word format with Saturate)
19771 DIP("xscvdpuxws v%u,v%u\n", XT, XB);
19772 putVSReg( XT,
19773 binop( Iop_64HLtoV128,
19774 binop( Iop_32HLto64,
19775 mkU32( 0 ),
19776 binop( Iop_F64toI32U,
19777 mkU32( Irrm_ZERO ),
19778 mkexpr( xB ) ) ),
19779 mkU64( 0ULL ) ) );
19780 break;
19781 case 0x292: // xscvspdp (VSX Scalar Convert Single-Precision to Double-Precision format, signaling)
19782 DIP("xscvspdp v%u,v%u\n", XT, XB);
19783 putVSReg( XT,
19784 binop( Iop_64HLtoV128,
19785 unop( Iop_ReinterpF64asI64,
19786 unop( Iop_F32toF64,
19787 unop( Iop_ReinterpI32asF32, mkexpr( xB ) ) ) ),
19788 mkU64( 0ULL ) ) );
19789 break;
19790 case 0x296: // xscvspdpn (VSX Scalar Convert Single-Precision to Double-Precision format Non signaling)
19791 DIP("xscvspdpn v%u,v%u\n", XT, XB);
19792 putVSReg( XT,
19793 binop( Iop_64HLtoV128,
19794 unop( Iop_ReinterpF64asI64,
19795 unop( Iop_F32toF64,
19796 unop( Iop_ReinterpI32asF32, mkexpr( xB ) ) ) ),
19797 mkU64( 0ULL ) ) );
19798 break;
19799 case 0x312: // xvcvdpsp (VSX Vector round Double-Precision to single-precision
19800 // and Convert to Single-Precision format)
19801 DIP("xvcvdpsp v%u,v%u\n", XT, XB);
19803 /* Note, the 32-bit result is put into the upper and lower bits of the
19804 doubleword result. */
19805 putVSReg( XT,
19806 binop( Iop_64HLtoV128,
19807 binop( Iop_32HLto64,
19808 unop( Iop_ReinterpF32asI32,
19809 unop( Iop_TruncF64asF32,
19810 binop( Iop_RoundF64toF32,
19811 get_IR_roundingmode(),
19812 mkexpr( xB ) ) ) ),
19813 unop( Iop_ReinterpF32asI32,
19814 unop( Iop_TruncF64asF32,
19815 binop( Iop_RoundF64toF32,
19816 get_IR_roundingmode(),
19817 mkexpr( xB ) ) ) ) ),
19818 binop( Iop_32HLto64,
19819 unop( Iop_ReinterpF32asI32,
19820 unop( Iop_TruncF64asF32,
19821 binop( Iop_RoundF64toF32,
19822 get_IR_roundingmode(),
19823 mkexpr( xB2 ) ) ) ),
19824 unop( Iop_ReinterpF32asI32,
19825 unop( Iop_TruncF64asF32,
19826 binop( Iop_RoundF64toF32,
19827 get_IR_roundingmode(),
19828 mkexpr( xB2 ) ) ) ) ) ) );
19829 break;
19830 case 0x390: // xvcvdpuxds (VSX Vector truncate Double-Precision to integer
19831 // and Convert to Unsigned Integer Doubleword format
19832 // with Saturate)
19833 DIP("xvcvdpuxds v%u,v%u\n", XT, XB);
19834 putVSReg( XT,
19835 binop( Iop_64HLtoV128,
19836 binop( Iop_F64toI64U, mkU32( Irrm_ZERO ), mkexpr( xB ) ),
19837 binop( Iop_F64toI64U, mkU32( Irrm_ZERO ), mkexpr( xB2 ) ) ) );
19838 break;
19839 case 0x392: // xvcvspdp (VSX Vector Convert Single-Precision to Double-Precision format)
19840 DIP("xvcvspdp v%u,v%u\n", XT, XB);
19841 putVSReg( XT,
19842 binop( Iop_64HLtoV128,
19843 unop( Iop_ReinterpF64asI64,
19844 unop( Iop_F32toF64,
19845 unop( Iop_ReinterpI32asF32,
19846 handle_SNaN_to_QNaN_32( mkexpr( b3 ) ) ) ) ),
19847 unop( Iop_ReinterpF64asI64,
19848 unop( Iop_F32toF64,
19849 unop( Iop_ReinterpI32asF32,
19850 handle_SNaN_to_QNaN_32( mkexpr( b1 ) ) ) ) ) ) );
19851 break;
19852 case 0x330: // xvcvspsxds (VSX Vector truncate Single-Precision to integer and
19853 // Convert to Signed Integer Doubleword format with Saturate)
19854 DIP("xvcvspsxds v%u,v%u\n", XT, XB);
19855 putVSReg( XT,
19856 binop( Iop_64HLtoV128,
19857 binop( Iop_F64toI64S,
19858 mkU32( Irrm_ZERO ),
19859 unop( Iop_F32toF64,
19860 unop( Iop_ReinterpI32asF32, mkexpr( b3 ) ) ) ),
19861 binop( Iop_F64toI64S,
19862 mkU32( Irrm_ZERO ),
19863 unop( Iop_F32toF64,
19864 unop( Iop_ReinterpI32asF32, mkexpr( b1 ) ) ) ) ) );
19865 break;
19866 case 0x310: // xvcvspuxds (VSX Vector truncate Single-Precision to integer and
19867 // Convert to Unsigned Integer Doubleword format with Saturate)
19868 DIP("xvcvspuxds v%u,v%u\n", XT, XB);
19869 putVSReg( XT,
19870 binop( Iop_64HLtoV128,
19871 binop( Iop_F64toI64U,
19872 mkU32( Irrm_ZERO ),
19873 unop( Iop_F32toF64,
19874 unop( Iop_ReinterpI32asF32, mkexpr( b3 ) ) ) ),
19875 binop( Iop_F64toI64U,
19876 mkU32( Irrm_ZERO ),
19877 unop( Iop_F32toF64,
19878 unop( Iop_ReinterpI32asF32, mkexpr( b1 ) ) ) ) ) );
19879 break;
19880 case 0x3B0: // xvcvdpsxds (VSX Vector truncate Double-Precision to integer and
19881 // Convert to Signed Integer Doubleword format with Saturate)
19882 DIP("xvcvdpsxds v%u,v%u\n", XT, XB);
19883 putVSReg( XT,
19884 binop( Iop_64HLtoV128,
19885 binop( Iop_F64toI64S, mkU32( Irrm_ZERO ), mkexpr( xB ) ),
19886 binop( Iop_F64toI64S, mkU32( Irrm_ZERO ), mkexpr( xB2 ) ) ) );
19887 break;
19888 case 0x3f0: // xvcvsxddp (VSX Vector Convert and round Signed Integer Doubleword
19889 // to Double-Precision format)
19890 DIP("xvcvsxddp v%u,v%u\n", XT, XB);
19891 putVSReg( XT,
19892 binop( Iop_64HLtoV128,
19893 unop( Iop_ReinterpF64asI64,
19894 binop( Iop_I64StoF64,
19895 get_IR_roundingmode(),
19896 mkexpr( xB ) ) ),
19897 unop( Iop_ReinterpF64asI64,
19898 binop( Iop_I64StoF64,
19899 get_IR_roundingmode(),
19900 mkexpr( xB2 ) ) ) ) );
19901 break;
19902 case 0x3d0: // xvcvuxddp (VSX Vector Convert and round Unsigned Integer Doubleword
19903 // to Double-Precision format)
19904 DIP("xvcvuxddp v%u,v%u\n", XT, XB);
19905 putVSReg( XT,
19906 binop( Iop_64HLtoV128,
19907 unop( Iop_ReinterpF64asI64,
19908 binop( Iop_I64UtoF64,
19909 get_IR_roundingmode(),
19910 mkexpr( xB ) ) ),
19911 unop( Iop_ReinterpF64asI64,
19912 binop( Iop_I64UtoF64,
19913 get_IR_roundingmode(),
19914 mkexpr( xB2 ) ) ) ) );
19916 break;
19917 case 0x370: // xvcvsxdsp (VSX Vector Convert and round Signed Integer Doubleword
19918 // to Single-Precision format)
19920 IRTemp result32hi = newTemp(Ity_I32);
19921 IRTemp result32lo = newTemp(Ity_I32);
19923 DIP("xvcvsxdsp v%u,v%u\n", XT, XB);
19924 assign( result32hi,
19925 unop( Iop_ReinterpF32asI32,
19926 unop( Iop_TruncF64asF32,
19927 binop( Iop_RoundF64toF32,
19928 get_IR_roundingmode(),
19929 binop( Iop_I64StoF64,
19930 get_IR_roundingmode(),
19931 mkexpr( xB ) ) ) ) ) );
19932 assign( result32lo,
19933 unop( Iop_ReinterpF32asI32,
19934 unop( Iop_TruncF64asF32,
19935 binop( Iop_RoundF64toF32,
19936 get_IR_roundingmode(),
19937 binop( Iop_I64StoF64,
19938 get_IR_roundingmode(),
19939 mkexpr( xB2 ) ) ) ) ) );
19941 putVSReg( XT,
19942 binop( Iop_64HLtoV128,
19943 binop( Iop_32HLto64,
19944 mkexpr( result32hi ),
19945 mkexpr( result32hi ) ),
19946 binop( Iop_32HLto64,
19947 mkexpr( result32lo ),
19948 mkexpr( result32lo ) ) ) );
19950 break;
19951 case 0x350: // xvcvuxdsp (VSX Vector Convert and round Unsigned Integer Doubleword
19952 // to Single-Precision format)
19954 IRTemp result32hi = newTemp(Ity_I32);
19955 IRTemp result32lo = newTemp(Ity_I32);
19957 DIP("xvcvuxdsp v%u,v%u\n", XT, XB);
19958 assign( result32hi,
19959 unop( Iop_ReinterpF32asI32,
19960 unop( Iop_TruncF64asF32,
19961 binop( Iop_RoundF64toF32,
19962 get_IR_roundingmode(),
19963 binop( Iop_I64UtoF64,
19964 get_IR_roundingmode(),
19965 mkexpr( xB ) ) ) ) ) );
19966 assign( result32lo,
19967 unop( Iop_ReinterpF32asI32,
19968 unop( Iop_TruncF64asF32,
19969 binop( Iop_RoundF64toF32,
19970 get_IR_roundingmode(),
19971 binop( Iop_I64UtoF64,
19972 get_IR_roundingmode(),
19973 mkexpr( xB2 ) ) ) ) ) );
19974 putVSReg( XT,
19975 binop( Iop_64HLtoV128,
19976 binop( Iop_32HLto64,
19977 mkexpr( result32hi ),
19978 mkexpr( result32hi ) ),
19979 binop( Iop_32HLto64,
19980 mkexpr( result32lo ),
19981 mkexpr( result32lo ) ) ) );
19983 break;
19985 case 0x1f0: // xvcvsxwdp (VSX Vector Convert Signed Integer Word to Double-Precision format)
19986 DIP("xvcvsxwdp v%u,v%u\n", XT, XB);
19987 putVSReg( XT,
19988 binop( Iop_64HLtoV128,
19989 unop( Iop_ReinterpF64asI64,
19990 binop( Iop_I64StoF64, get_IR_roundingmode(),
19991 unop( Iop_32Sto64, mkexpr( b3 ) ) ) ),
19992 unop( Iop_ReinterpF64asI64,
19993 binop( Iop_I64StoF64, get_IR_roundingmode(),
19994 unop( Iop_32Sto64, mkexpr( b1 ) ) ) ) ) );
19995 break;
19996 case 0x1d0: // xvcvuxwdp (VSX Vector Convert Unsigned Integer Word to Double-Precision format)
19997 DIP("xvcvuxwdp v%u,v%u\n", XT, XB);
19998 putVSReg( XT,
19999 binop( Iop_64HLtoV128,
20000 unop( Iop_ReinterpF64asI64,
20001 binop( Iop_I64UtoF64, get_IR_roundingmode(),
20002 unop( Iop_32Uto64, mkexpr( b3 ) ) ) ),
20003 unop( Iop_ReinterpF64asI64,
20004 binop( Iop_I64UtoF64, get_IR_roundingmode(),
20005 unop( Iop_32Uto64, mkexpr( b1 ) ) ) ) ) );
20006 break;
20007 case 0x170: // xvcvsxwsp (VSX Vector Convert Signed Integer Word to Single-Precision format)
20008 DIP("xvcvsxwsp v%u,v%u\n", XT, XB);
20009 putVSReg( XT, unop( Iop_I32StoF32x4_DEP, getVSReg( XB ) ) );
20010 break;
20011 case 0x150: // xvcvuxwsp (VSX Vector Convert Unsigned Integer Word to Single-Precision format)
20012 DIP("xvcvuxwsp v%u,v%u\n", XT, XB);
20013 putVSReg( XT, unop( Iop_I32UtoF32x4_DEP, getVSReg( XB ) ) );
20014 break;
20016 default:
20017 vex_printf( "dis_vx_conv(ppc)(opc2)\n" );
20018 return False;
20020 return True;
20024 * VSX vector Double Precision Floating Point Arithmetic Instructions
20026 static Bool
20027 dis_vxv_dp_arith ( UInt prefix, UInt theInstr, UInt opc2 )
20029 /* XX3-Form */
20030 UChar opc1 = ifieldOPC( theInstr );
20031 UChar XT = ifieldRegXT( theInstr );
20032 UChar XA = ifieldRegXA( theInstr );
20033 UChar XB = ifieldRegXB( theInstr );
20034 IRExpr* rm = get_IR_roundingmode();
20035 IRTemp frA = newTemp(Ity_F64);
20036 IRTemp frB = newTemp(Ity_F64);
20037 IRTemp frA2 = newTemp(Ity_F64);
20038 IRTemp frB2 = newTemp(Ity_F64);
20040 /* There is no prefixed version of these instructions. */
20041 PREFIX_CHECK
20043 if (opc1 != 0x3C) {
20044 vex_printf( "dis_vxv_dp_arith(ppc)(instr)\n" );
20045 return False;
20048 assign(frA, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XA ))));
20049 assign(frB, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
20050 assign(frA2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg( XA ))));
20051 assign(frB2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg( XB ))));
20053 switch (opc2) {
20054 case 0x1E0: // xvdivdp (VSX Vector Divide Double-Precision)
20055 case 0x1C0: // xvmuldp (VSX Vector Multiply Double-Precision)
20056 case 0x180: // xvadddp (VSX Vector Add Double-Precision)
20057 case 0x1A0: // xvsubdp (VSX Vector Subtract Double-Precision)
20059 IROp mOp;
20060 const HChar * oper_name;
20061 switch (opc2) {
20062 case 0x1E0:
20063 mOp = Iop_DivF64;
20064 oper_name = "div";
20065 break;
20066 case 0x1C0:
20067 mOp = Iop_MulF64;
20068 oper_name = "mul";
20069 break;
20070 case 0x180:
20071 mOp = Iop_AddF64;
20072 oper_name = "add";
20073 break;
20074 case 0x1A0:
20075 mOp = Iop_SubF64;
20076 oper_name = "sub";
20077 break;
20079 default:
20080 vpanic("The impossible happened: dis_vxv_dp_arith(ppc)");
20082 IRTemp hiResult = newTemp(Ity_I64);
20083 IRTemp loResult = newTemp(Ity_I64);
20084 DIP("xv%sdp v%d,v%d,v%d\n", oper_name, XT, XA, XB);
20086 assign( hiResult,
20087 unop( Iop_ReinterpF64asI64,
20088 triop( mOp, rm, mkexpr( frA ), mkexpr( frB ) ) ) );
20089 assign( loResult,
20090 unop( Iop_ReinterpF64asI64,
20091 triop( mOp, rm, mkexpr( frA2 ), mkexpr( frB2 ) ) ) );
20092 putVSReg( XT,
20093 binop( Iop_64HLtoV128, mkexpr( hiResult ), mkexpr( loResult ) ) );
20094 break;
20096 case 0x196: // xvsqrtdp
20098 IRTemp hiResult = newTemp(Ity_I64);
20099 IRTemp loResult = newTemp(Ity_I64);
20100 DIP("xvsqrtdp v%d,v%d\n", XT, XB);
20102 assign( hiResult,
20103 unop( Iop_ReinterpF64asI64,
20104 binop( Iop_SqrtF64, rm, mkexpr( frB ) ) ) );
20105 assign( loResult,
20106 unop( Iop_ReinterpF64asI64,
20107 binop( Iop_SqrtF64, rm, mkexpr( frB2 ) ) ) );
20108 putVSReg( XT,
20109 binop( Iop_64HLtoV128, mkexpr( hiResult ), mkexpr( loResult ) ) );
20110 break;
20112 case 0x184: case 0x1A4: // xvmaddadp, xvmaddmdp (VSX Vector Multiply-Add Double-Precision)
20113 case 0x1C4: case 0x1E4: // xvmsubadp, xvmsubmdp (VSX Vector Multiply-Subtract Double-Precision)
20114 case 0x384: case 0x3A4: // xvnmaddadp, xvnmaddmdp (VSX Vector Negate Multiply-Add Double-Precision)
20115 case 0x3C4: case 0x3E4: // xvnmsubadp, xvnmsubmdp (VSX Vector Negate Multiply-Subtract Double-Precision)
20117 /* xvm{add|sub}mdp XT,XA,XB is element-wise equivalent to fm{add|sub} FRT,FRA,FRC,FRB with . . .
20118 * XT == FRC
20119 * XA == FRA
20120 * XB == FRB
20122 * and for xvm{add|sub}adp . . .
20123 * XT == FRB
20124 * XA == FRA
20125 * XB == FRC
20127 Bool negate;
20128 IROp mOp = Iop_INVALID;
20129 const HChar * oper_name = NULL;
20130 Bool mdp = False;
20132 switch (opc2) {
20133 case 0x184: case 0x1A4:
20134 case 0x384: case 0x3A4:
20135 mOp = Iop_MAddF64;
20136 oper_name = "add";
20137 mdp = (opc2 & 0x0FF) == 0x0A4;
20138 break;
20140 case 0x1C4: case 0x1E4:
20141 case 0x3C4: case 0x3E4:
20142 mOp = Iop_MSubF64;
20143 oper_name = "sub";
20144 mdp = (opc2 & 0x0FF) == 0x0E4;
20145 break;
20147 default:
20148 vpanic("The impossible happened: dis_vxv_sp_arith(ppc)");
20151 switch (opc2) {
20152 case 0x384: case 0x3A4:
20153 case 0x3C4: case 0x3E4:
20154 negate = True;
20155 break;
20156 default:
20157 negate = False;
20159 IRTemp hiResult = newTemp(Ity_I64);
20160 IRTemp loResult = newTemp(Ity_I64);
20161 IRTemp frT = newTemp(Ity_F64);
20162 IRTemp frT2 = newTemp(Ity_F64);
20163 DIP("xv%sm%s%s v%d,v%d,v%d\n", negate ? "n" : "", oper_name, mdp ? "mdp" : "adp",
20164 XT, XA, XB);
20165 assign(frT, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XT ) ) ) );
20166 assign(frT2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg( XT ) ) ) );
20168 assign( hiResult,
20169 unop( Iop_ReinterpF64asI64,
20170 qop( mOp,
20172 mkexpr( frA ),
20173 mkexpr( mdp ? frT : frB ),
20174 mkexpr( mdp ? frB : frT ) ) ) );
20175 assign( loResult,
20176 unop( Iop_ReinterpF64asI64,
20177 qop( mOp,
20179 mkexpr( frA2 ),
20180 mkexpr( mdp ? frT2 : frB2 ),
20181 mkexpr( mdp ? frB2 : frT2 ) ) ) );
20182 putVSReg( XT,
20183 binop( Iop_64HLtoV128,
20184 mkexpr( negate ? getNegatedResult( hiResult )
20185 : hiResult ),
20186 mkexpr( negate ? getNegatedResult( loResult )
20187 : loResult ) ) );
20188 break;
20190 case 0x1D4: // xvtsqrtdp (VSX Vector Test for software Square Root Double-Precision)
20192 IRTemp frBHi_I64 = newTemp(Ity_I64);
20193 IRTemp frBLo_I64 = newTemp(Ity_I64);
20194 IRTemp flagsHi = newTemp(Ity_I32);
20195 IRTemp flagsLo = newTemp(Ity_I32);
20196 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
20197 IRTemp fe_flagHi, fg_flagHi, fe_flagLo, fg_flagLo;
20198 fe_flagHi = fg_flagHi = fe_flagLo = fg_flagLo = IRTemp_INVALID;
20200 DIP("xvtsqrtdp cr%d,v%d\n", crfD, XB);
20201 assign( frBHi_I64, unop(Iop_V128HIto64, getVSReg( XB )) );
20202 assign( frBLo_I64, unop(Iop_V128to64, getVSReg( XB )) );
20203 do_fp_tsqrt(frBHi_I64, False /*not single precision*/, &fe_flagHi, &fg_flagHi);
20204 do_fp_tsqrt(frBLo_I64, False /*not single precision*/, &fe_flagLo, &fg_flagLo);
20205 /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
20206 * where fl_flag == 1 on ppc64.
20208 assign( flagsHi,
20209 binop( Iop_Or32,
20210 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20211 binop( Iop_Shl32, mkexpr(fg_flagHi), mkU8( 2 ) ) ),
20212 binop( Iop_Shl32, mkexpr(fe_flagHi), mkU8( 1 ) ) ) );
20213 assign( flagsLo,
20214 binop( Iop_Or32,
20215 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20216 binop( Iop_Shl32, mkexpr(fg_flagLo), mkU8( 2 ) ) ),
20217 binop( Iop_Shl32, mkexpr(fe_flagLo), mkU8( 1 ) ) ) );
20218 putGST_field( PPC_GST_CR,
20219 binop( Iop_Or32, mkexpr( flagsHi ), mkexpr( flagsLo ) ),
20220 crfD );
20221 break;
20223 case 0x1F4: // xvtdivdp (VSX Vector Test for software Divide Double-Precision)
20225 IRTemp frBHi_I64 = newTemp(Ity_I64);
20226 IRTemp frBLo_I64 = newTemp(Ity_I64);
20227 IRTemp frAHi_I64 = newTemp(Ity_I64);
20228 IRTemp frALo_I64 = newTemp(Ity_I64);
20229 IRTemp flagsHi = newTemp(Ity_I32);
20230 IRTemp flagsLo = newTemp(Ity_I32);
20231 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
20232 IRTemp fe_flagHi, fg_flagHi, fe_flagLo, fg_flagLo;
20233 fe_flagHi = fg_flagHi = fe_flagLo = fg_flagLo = IRTemp_INVALID;
20235 DIP("xvtdivdp cr%d,v%d,v%d\n", crfD, XA, XB);
20236 assign( frAHi_I64, unop(Iop_V128HIto64, getVSReg( XA )) );
20237 assign( frALo_I64, unop(Iop_V128to64, getVSReg( XA )) );
20238 assign( frBHi_I64, unop(Iop_V128HIto64, getVSReg( XB )) );
20239 assign( frBLo_I64, unop(Iop_V128to64, getVSReg( XB )) );
20241 _do_fp_tdiv(frAHi_I64, frBHi_I64, False/*dp*/, &fe_flagHi, &fg_flagHi);
20242 _do_fp_tdiv(frALo_I64, frBLo_I64, False/*dp*/, &fe_flagLo, &fg_flagLo);
20243 /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
20244 * where fl_flag == 1 on ppc64.
20246 assign( flagsHi,
20247 binop( Iop_Or32,
20248 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20249 binop( Iop_Shl32, mkexpr(fg_flagHi), mkU8( 2 ) ) ),
20250 binop( Iop_Shl32, mkexpr(fe_flagHi), mkU8( 1 ) ) ) );
20251 assign( flagsLo,
20252 binop( Iop_Or32,
20253 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20254 binop( Iop_Shl32, mkexpr(fg_flagLo), mkU8( 2 ) ) ),
20255 binop( Iop_Shl32, mkexpr(fe_flagLo), mkU8( 1 ) ) ) );
20256 putGST_field( PPC_GST_CR,
20257 binop( Iop_Or32, mkexpr( flagsHi ), mkexpr( flagsLo ) ),
20258 crfD );
20259 break;
20262 default:
20263 vex_printf( "dis_vxv_dp_arith(ppc)(opc2)\n" );
20264 return False;
20266 return True;
20270 * VSX vector Single Precision Floating Point Arithmetic Instructions
20272 static Bool
20273 dis_vxv_sp_arith ( UInt prefix, UInt theInstr, UInt opc2 )
20275 /* XX3-Form */
20276 UChar opc1 = ifieldOPC( theInstr );
20277 UChar XT = ifieldRegXT( theInstr );
20278 UChar XA = ifieldRegXA( theInstr );
20279 UChar XB = ifieldRegXB( theInstr );
20280 IRExpr* rm = get_IR_roundingmode();
20281 IRTemp a3, a2, a1, a0;
20282 IRTemp b3, b2, b1, b0;
20283 IRTemp res0 = newTemp(Ity_I32);
20284 IRTemp res1 = newTemp(Ity_I32);
20285 IRTemp res2 = newTemp(Ity_I32);
20286 IRTemp res3 = newTemp(Ity_I32);
20288 /* There is no prefixed version of these instructions. */
20289 PREFIX_CHECK
20291 a3 = a2 = a1 = a0 = IRTemp_INVALID;
20292 b3 = b2 = b1 = b0 = IRTemp_INVALID;
20294 if (opc1 != 0x3C) {
20295 vex_printf( "dis_vxv_sp_arith(ppc)(instr)\n" );
20296 return False;
20299 switch (opc2) {
20300 case 0x100: // xvaddsp (VSX Vector Add Single-Precision)
20301 DIP("xvaddsp v%d,v%d,v%d\n", XT, XA, XB);
20302 // WARNING: BOGUS! The backend ignores rm on Iop_Add32Fx4
20303 putVSReg( XT, triop(Iop_Add32Fx4, rm,
20304 getVSReg( XA ), getVSReg( XB )) );
20305 break;
20307 case 0x140: // xvmulsp (VSX Vector Multiply Single-Precision)
20308 DIP("xvmulsp v%d,v%d,v%d\n", XT, XA, XB);
20309 // WARNING: BOGUS! The backend ignores rm on Iop_Mul32Fx4
20310 putVSReg( XT, triop(Iop_Mul32Fx4, rm,
20311 getVSReg( XA ), getVSReg( XB )) );
20312 break;
20314 case 0x120: // xvsubsp (VSX Vector Subtract Single-Precision)
20315 DIP("xvsubsp v%d,v%d,v%d\n", XT, XA, XB);
20316 // WARNING: BOGUS! The backend ignores rm on Iop_Sub32Fx4
20317 putVSReg( XT, triop(Iop_Sub32Fx4, rm,
20318 getVSReg( XA ), getVSReg( XB )) );
20319 break;
20321 case 0x160: // xvdivsp (VSX Vector Divide Single-Precision)
20323 /* Iop_Div32Fx4 is not implemented for ppc64 (in host_ppc_{isel|defs}.c).
20324 * So there are two choices:
20325 * 1. Implement the xvdivsp with a native insn; or
20326 * 2. Extract the 4 single precision floats from each vector
20327 * register inputs and perform fdivs on each pair
20328 * I will do the latter, due to the general philosophy of
20329 * reusing existing implementations when practical.
20331 DIP("xvdivsp v%d,v%d,v%d\n", XT, XA, XB);
20332 breakV128to4xF64( getVSReg( XA ), &a3, &a2, &a1, &a0 );
20333 breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
20335 assign( res0,
20336 unop( Iop_ReinterpF32asI32,
20337 unop( Iop_TruncF64asF32,
20338 triop( Iop_DivF64r32, rm, mkexpr( a0 ), mkexpr( b0 ) ) ) ) );
20339 assign( res1,
20340 unop( Iop_ReinterpF32asI32,
20341 unop( Iop_TruncF64asF32,
20342 triop( Iop_DivF64r32, rm, mkexpr( a1 ), mkexpr( b1 ) ) ) ) );
20343 assign( res2,
20344 unop( Iop_ReinterpF32asI32,
20345 unop( Iop_TruncF64asF32,
20346 triop( Iop_DivF64r32, rm, mkexpr( a2 ), mkexpr( b2 ) ) ) ) );
20347 assign( res3,
20348 unop( Iop_ReinterpF32asI32,
20349 unop( Iop_TruncF64asF32,
20350 triop( Iop_DivF64r32, rm, mkexpr( a3 ), mkexpr( b3 ) ) ) ) );
20352 putVSReg( XT,
20353 binop( Iop_64HLtoV128,
20354 binop( Iop_32HLto64, mkexpr( res3 ), mkexpr( res2 ) ),
20355 binop( Iop_32HLto64, mkexpr( res1 ), mkexpr( res0 ) ) ) );
20356 break;
20358 case 0x116: // xvsqrtsp (VSX Vector Square Root Single-Precision)
20360 DIP("xvsqrtsp v%d,v%d\n", XT, XB);
20361 breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
20362 /* Note: The native xvsqrtsp instruction does not always give the same precision
20363 * as what we get with Iop_SqrtF64. But it doesn't seem worthwhile to implement
20364 * an Iop_SqrtF32 that would give us a lower precision result, albeit more true
20365 * to the actual instruction.
20368 assign( res0,
20369 unop( Iop_ReinterpF32asI32,
20370 unop( Iop_TruncF64asF32,
20371 binop(Iop_SqrtF64, rm, mkexpr( b0 ) ) ) ) );
20372 assign( res1,
20373 unop( Iop_ReinterpF32asI32,
20374 unop( Iop_TruncF64asF32,
20375 binop(Iop_SqrtF64, rm, mkexpr( b1 ) ) ) ) );
20376 assign( res2,
20377 unop( Iop_ReinterpF32asI32,
20378 unop( Iop_TruncF64asF32,
20379 binop(Iop_SqrtF64, rm, mkexpr( b2) ) ) ) );
20380 assign( res3,
20381 unop( Iop_ReinterpF32asI32,
20382 unop( Iop_TruncF64asF32,
20383 binop(Iop_SqrtF64, rm, mkexpr( b3 ) ) ) ) );
20385 putVSReg( XT,
20386 binop( Iop_64HLtoV128,
20387 binop( Iop_32HLto64, mkexpr( res3 ), mkexpr( res2 ) ),
20388 binop( Iop_32HLto64, mkexpr( res1 ), mkexpr( res0 ) ) ) );
20389 break;
20392 case 0x104: case 0x124: // xvmaddasp, xvmaddmsp (VSX Vector Multiply-Add Single-Precision)
20393 case 0x144: case 0x164: // xvmsubasp, xvmsubmsp (VSX Vector Multiply-Subtract Single-Precision)
20394 case 0x304: case 0x324: // xvnmaddasp, xvnmaddmsp (VSX Vector Negate Multiply-Add Single-Precision)
20395 case 0x344: case 0x364: // xvnmsubasp, xvnmsubmsp (VSX Vector Negate Multiply-Subtract Single-Precision)
20397 IRTemp t3, t2, t1, t0;
20398 Bool msp = False;
20399 Bool negate;
20400 const HChar * oper_name = NULL;
20401 IROp mOp = Iop_INVALID;
20402 switch (opc2) {
20403 case 0x104: case 0x124:
20404 case 0x304: case 0x324:
20405 msp = (opc2 & 0x0FF) == 0x024;
20406 mOp = Iop_MAddF64r32;
20407 oper_name = "madd";
20408 break;
20410 case 0x144: case 0x164:
20411 case 0x344: case 0x364:
20412 msp = (opc2 & 0x0FF) == 0x064;
20413 mOp = Iop_MSubF64r32;
20414 oper_name = "sub";
20415 break;
20417 default:
20418 vpanic("The impossible happened: dis_vxv_sp_arith(ppc)");
20421 switch (opc2) {
20422 case 0x304: case 0x324:
20423 case 0x344: case 0x364:
20424 negate = True;
20425 break;
20427 default:
20428 negate = False;
20431 DIP("xv%sm%s%s v%d,v%d,v%d\n", negate ? "n" : "", oper_name,
20432 msp ? "msp" : "asp", XT, XA, XB);
20434 t3 = t2 = t1 = t0 = IRTemp_INVALID;
20435 breakV128to4xF64( getVSReg( XA ), &a3, &a2, &a1, &a0 );
20436 breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
20437 breakV128to4xF64( getVSReg( XT ), &t3, &t2, &t1, &t0 );
20439 assign( res0,
20440 unop( Iop_ReinterpF32asI32,
20441 unop( Iop_TruncF64asF32,
20442 qop( mOp,
20444 mkexpr( a0 ),
20445 mkexpr( msp ? t0 : b0 ),
20446 mkexpr( msp ? b0 : t0 ) ) ) ) );
20447 assign( res1,
20448 unop( Iop_ReinterpF32asI32,
20449 unop( Iop_TruncF64asF32,
20450 qop( mOp,
20452 mkexpr( a1 ),
20453 mkexpr( msp ? t1 : b1 ),
20454 mkexpr( msp ? b1 : t1 ) ) ) ) );
20455 assign( res2,
20456 unop( Iop_ReinterpF32asI32,
20457 unop( Iop_TruncF64asF32,
20458 qop( mOp,
20460 mkexpr( a2 ),
20461 mkexpr( msp ? t2 : b2 ),
20462 mkexpr( msp ? b2 : t2 ) ) ) ) );
20463 assign( res3,
20464 unop( Iop_ReinterpF32asI32,
20465 unop( Iop_TruncF64asF32,
20466 qop( mOp,
20468 mkexpr( a3 ),
20469 mkexpr( msp ? t3 : b3 ),
20470 mkexpr( msp ? b3 : t3 ) ) ) ) );
20472 putVSReg( XT,
20473 binop( Iop_64HLtoV128,
20474 binop( Iop_32HLto64, mkexpr( negate ? getNegatedResult_32( res3 ) : res3 ),
20475 mkexpr( negate ? getNegatedResult_32( res2 ) : res2 ) ),
20476 binop( Iop_32HLto64, mkexpr( negate ? getNegatedResult_32( res1 ) : res1 ),
20477 mkexpr( negate ? getNegatedResult_32( res0 ) : res0 ) ) ) );
20479 break;
20481 case 0x154: // xvtsqrtsp (VSX Vector Test for software Square Root Single-Precision)
20483 IRTemp flags0 = newTemp(Ity_I32);
20484 IRTemp flags1 = newTemp(Ity_I32);
20485 IRTemp flags2 = newTemp(Ity_I32);
20486 IRTemp flags3 = newTemp(Ity_I32);
20487 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
20488 IRTemp fe_flag0, fg_flag0, fe_flag1, fg_flag1;
20489 IRTemp fe_flag2, fg_flag2, fe_flag3, fg_flag3;
20490 fe_flag0 = fg_flag0 = fe_flag1 = fg_flag1 = IRTemp_INVALID;
20491 fe_flag2 = fg_flag2 = fe_flag3 = fg_flag3 = IRTemp_INVALID;
20492 DIP("xvtsqrtsp cr%d,v%d\n", crfD, XB);
20494 breakV128to4x32( getVSReg( XB ), &b3, &b2, &b1, &b0 );
20495 do_fp_tsqrt(b0, True /* single precision*/, &fe_flag0, &fg_flag0);
20496 do_fp_tsqrt(b1, True /* single precision*/, &fe_flag1, &fg_flag1);
20497 do_fp_tsqrt(b2, True /* single precision*/, &fe_flag2, &fg_flag2);
20498 do_fp_tsqrt(b3, True /* single precision*/, &fe_flag3, &fg_flag3);
20500 /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
20501 * where fl_flag == 1 on ppc64.
20503 assign( flags0,
20504 binop( Iop_Or32,
20505 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20506 binop( Iop_Shl32, mkexpr(fg_flag0), mkU8( 2 ) ) ),
20507 binop( Iop_Shl32, mkexpr(fe_flag0), mkU8( 1 ) ) ) );
20508 assign( flags1,
20509 binop( Iop_Or32,
20510 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20511 binop( Iop_Shl32, mkexpr(fg_flag1), mkU8( 2 ) ) ),
20512 binop( Iop_Shl32, mkexpr(fe_flag1), mkU8( 1 ) ) ) );
20513 assign( flags2,
20514 binop( Iop_Or32,
20515 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20516 binop( Iop_Shl32, mkexpr(fg_flag2), mkU8( 2 ) ) ),
20517 binop( Iop_Shl32, mkexpr(fe_flag2), mkU8( 1 ) ) ) );
20518 assign( flags3,
20519 binop( Iop_Or32,
20520 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20521 binop( Iop_Shl32, mkexpr(fg_flag3), mkU8( 2 ) ) ),
20522 binop( Iop_Shl32, mkexpr(fe_flag3), mkU8( 1 ) ) ) );
20523 putGST_field( PPC_GST_CR,
20524 binop( Iop_Or32,
20525 mkexpr( flags0 ),
20526 binop( Iop_Or32,
20527 mkexpr( flags1 ),
20528 binop( Iop_Or32,
20529 mkexpr( flags2 ),
20530 mkexpr( flags3 ) ) ) ),
20531 crfD );
20532 break;
20535 case 0x174: // xvtdivsp (VSX Vector Test for software Divide Single-Precision)
20537 IRTemp flags0 = newTemp(Ity_I32);
20538 IRTemp flags1 = newTemp(Ity_I32);
20539 IRTemp flags2 = newTemp(Ity_I32);
20540 IRTemp flags3 = newTemp(Ity_I32);
20541 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
20542 IRTemp fe_flag0, fg_flag0, fe_flag1, fg_flag1;
20543 IRTemp fe_flag2, fg_flag2, fe_flag3, fg_flag3;
20544 fe_flag0 = fg_flag0 = fe_flag1 = fg_flag1 = IRTemp_INVALID;
20545 fe_flag2 = fg_flag2 = fe_flag3 = fg_flag3 = IRTemp_INVALID;
20546 DIP("xvtdivsp cr%d,v%d,v%d\n", crfD, XA, XB);
20548 breakV128to4x32( getVSReg( XA ), &a3, &a2, &a1, &a0 );
20549 breakV128to4x32( getVSReg( XB ), &b3, &b2, &b1, &b0 );
20550 _do_fp_tdiv(a0, b0, True /* single precision*/, &fe_flag0, &fg_flag0);
20551 _do_fp_tdiv(a1, b1, True /* single precision*/, &fe_flag1, &fg_flag1);
20552 _do_fp_tdiv(a2, b2, True /* single precision*/, &fe_flag2, &fg_flag2);
20553 _do_fp_tdiv(a3, b3, True /* single precision*/, &fe_flag3, &fg_flag3);
20555 /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
20556 * where fl_flag == 1 on ppc64.
20558 assign( flags0,
20559 binop( Iop_Or32,
20560 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20561 binop( Iop_Shl32, mkexpr(fg_flag0), mkU8( 2 ) ) ),
20562 binop( Iop_Shl32, mkexpr(fe_flag0), mkU8( 1 ) ) ) );
20563 assign( flags1,
20564 binop( Iop_Or32,
20565 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20566 binop( Iop_Shl32, mkexpr(fg_flag1), mkU8( 2 ) ) ),
20567 binop( Iop_Shl32, mkexpr(fe_flag1), mkU8( 1 ) ) ) );
20568 assign( flags2,
20569 binop( Iop_Or32,
20570 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20571 binop( Iop_Shl32, mkexpr(fg_flag2), mkU8( 2 ) ) ),
20572 binop( Iop_Shl32, mkexpr(fe_flag2), mkU8( 1 ) ) ) );
20573 assign( flags3,
20574 binop( Iop_Or32,
20575 binop( Iop_Or32, mkU32( 8 ), // fl_flag
20576 binop( Iop_Shl32, mkexpr(fg_flag3), mkU8( 2 ) ) ),
20577 binop( Iop_Shl32, mkexpr(fe_flag3), mkU8( 1 ) ) ) );
20578 putGST_field( PPC_GST_CR,
20579 binop( Iop_Or32,
20580 mkexpr( flags0 ),
20581 binop( Iop_Or32,
20582 mkexpr( flags1 ),
20583 binop( Iop_Or32,
20584 mkexpr( flags2 ),
20585 mkexpr( flags3 ) ) ) ),
20586 crfD );
20588 break;
20591 default:
20592 vex_printf( "dis_vxv_sp_arith(ppc)(opc2)\n" );
20593 return False;
20595 return True;
20599 * Vector Population Count/bit matrix transpose
20601 static Bool
20602 dis_av_count_bitTranspose ( UInt prefix, UInt theInstr, UInt opc2 )
20604 UChar vRB_addr = ifieldRegB(theInstr);
20605 UChar vRT_addr = ifieldRegDS(theInstr);
20606 UChar opc1 = ifieldOPC( theInstr );
20607 IRTemp vB = newTemp(Ity_V128);
20609 /* There is no prefixed version of these instructions. */
20610 PREFIX_CHECK
20612 assign( vB, getVReg(vRB_addr));
20614 if (opc1 != 0x4) {
20615 vex_printf( "dis_av_count_bitTranspose(ppc)(instr)\n" );
20616 return False;
20619 switch (opc2) {
20620 case 0x702: // vclzb
20621 DIP("vclzb v%d,v%d\n", vRT_addr, vRB_addr);
20622 putVReg( vRT_addr, unop(Iop_Clz8x16, mkexpr( vB ) ) );
20623 break;
20625 case 0x742: // vclzh
20626 DIP("vclzh v%d,v%d\n", vRT_addr, vRB_addr);
20627 putVReg( vRT_addr, unop(Iop_Clz16x8, mkexpr( vB ) ) );
20628 break;
20630 case 0x782: // vclzw
20631 DIP("vclzw v%d,v%d\n", vRT_addr, vRB_addr);
20632 putVReg( vRT_addr, unop(Iop_Clz32x4, mkexpr( vB ) ) );
20633 break;
20635 case 0x7C2: // vclzd
20636 DIP("vclzd v%d,v%d\n", vRT_addr, vRB_addr);
20637 putVReg( vRT_addr, unop(Iop_Clz64x2, mkexpr( vB ) ) );
20638 break;
20640 case 0x703: // vpopcntb
20642 /* Break vector into 32-bit words and do the population count
20643 * on byte in the words
20645 IRType ty = Ity_I32;
20646 IRTemp bits0_31, bits32_63, bits64_95, bits96_127;
20647 bits0_31 = bits32_63 = bits64_95 = bits96_127 = IRTemp_INVALID;
20648 IRTemp cnt_bits0_31, cnt_bits32_63, cnt_bits64_95, cnt_bits96_127;
20649 cnt_bits0_31 = cnt_bits32_63 = cnt_bits64_95 = cnt_bits96_127 = IRTemp_INVALID;
20651 DIP("vpopcntb v%d,v%d\n", vRT_addr, vRB_addr);
20652 breakV128to4x32(mkexpr( vB), &bits96_127, &bits64_95, &bits32_63, &bits0_31 );
20653 cnt_bits0_31 = gen_POPCOUNT(ty, bits0_31, BYTE);
20654 cnt_bits32_63 = gen_POPCOUNT(ty, bits32_63, BYTE);
20655 cnt_bits64_95 = gen_POPCOUNT(ty, bits64_95, BYTE);
20656 cnt_bits96_127 = gen_POPCOUNT(ty, bits96_127, BYTE);
20658 putVReg( vRT_addr, mkV128from32(cnt_bits96_127, cnt_bits64_95,
20659 cnt_bits32_63, cnt_bits0_31) );
20660 break;
20663 case 0x743: // vpopcnth
20665 /* Break vector into 32-bit words and do the population count
20666 * for each half word
20668 IRType ty = Ity_I32;
20669 IRTemp bits0_31, bits32_63, bits64_95, bits96_127;
20670 bits0_31 = bits32_63 = bits64_95 = bits96_127 = IRTemp_INVALID;
20671 IRTemp cnt_bits0_31, cnt_bits32_63, cnt_bits64_95, cnt_bits96_127;
20672 cnt_bits0_31 = cnt_bits32_63 = cnt_bits64_95 = cnt_bits96_127 = IRTemp_INVALID;
20674 DIP("vpopcnth v%d,v%d\n", vRT_addr, vRB_addr);
20675 breakV128to4x32(mkexpr( vB), &bits96_127, &bits64_95, &bits32_63, &bits0_31 );
20677 cnt_bits0_31 = gen_POPCOUNT(ty, bits0_31, HWORD);
20678 cnt_bits32_63 = gen_POPCOUNT(ty, bits32_63, HWORD);
20679 cnt_bits64_95 = gen_POPCOUNT(ty, bits64_95, HWORD);
20680 cnt_bits96_127 = gen_POPCOUNT(ty, bits96_127, HWORD);
20682 putVReg( vRT_addr, mkV128from32(cnt_bits96_127, cnt_bits64_95,
20683 cnt_bits32_63, cnt_bits0_31) );
20684 break;
20687 case 0x783: // vpopcntw
20689 /* Break vector into 32-bit words and do the population count
20690 * on each word.
20692 IRType ty = Ity_I32;
20693 IRTemp bits0_31, bits32_63, bits64_95, bits96_127;
20694 bits0_31 = bits32_63 = bits64_95 = bits96_127 = IRTemp_INVALID;
20695 IRTemp cnt_bits0_31, cnt_bits32_63, cnt_bits64_95, cnt_bits96_127;
20696 cnt_bits0_31 = cnt_bits32_63 = cnt_bits64_95 = cnt_bits96_127 = IRTemp_INVALID;
20698 DIP("vpopcntw v%d,v%d\n", vRT_addr, vRB_addr);
20699 breakV128to4x32(mkexpr( vB), &bits96_127, &bits64_95, &bits32_63, &bits0_31 );
20701 cnt_bits0_31 = gen_POPCOUNT(ty, bits0_31, WORD);
20702 cnt_bits32_63 = gen_POPCOUNT(ty, bits32_63, WORD);
20703 cnt_bits64_95 = gen_POPCOUNT(ty, bits64_95, WORD);
20704 cnt_bits96_127 = gen_POPCOUNT(ty, bits96_127, WORD);
20706 putVReg( vRT_addr, mkV128from32(cnt_bits96_127, cnt_bits64_95,
20707 cnt_bits32_63, cnt_bits0_31) );
20708 break;
20711 case 0x7C3: // vpopcntd
20713 if (mode64) {
20714 /* Break vector into 64-bit double words and do the population
20715 count on each double word.
20717 IRType ty = Ity_I64;
20718 IRTemp bits0_63 = newTemp(Ity_I64);
20719 IRTemp bits64_127 = newTemp(Ity_I64);
20720 IRTemp cnt_bits0_63 = newTemp(Ity_I64);
20721 IRTemp cnt_bits64_127 = newTemp(Ity_I64);
20723 DIP("vpopcntd v%d,v%d\n", vRT_addr, vRB_addr);
20725 assign(bits0_63, unop( Iop_V128to64, mkexpr( vB ) ) );
20726 assign(bits64_127, unop( Iop_V128HIto64, mkexpr( vB ) ) );
20727 cnt_bits0_63 = gen_POPCOUNT(ty, bits0_63, DWORD);
20728 cnt_bits64_127 = gen_POPCOUNT(ty, bits64_127, DWORD);
20730 putVReg( vRT_addr, binop( Iop_64HLtoV128,
20731 mkexpr( cnt_bits64_127 ),
20732 mkexpr( cnt_bits0_63 ) ) );
20733 } else {
20734 /* Break vector into 32-bit words and do the population count
20735 on each 32-bit word.
20737 IRTemp bits0_31, bits32_63, bits64_95, bits96_127;
20738 bits0_31 = bits32_63 = bits64_95 = bits96_127 = IRTemp_INVALID;
20739 IRTemp cnt_bits0_63 = newTemp(Ity_I64);
20740 IRTemp cnt_bits64_127 = newTemp(Ity_I64);
20742 DIP("vpopcntd v%d,v%d\n", vRT_addr, vRB_addr);
20743 breakV128to4x32(mkexpr( vB), &bits96_127, &bits64_95,
20744 &bits32_63, &bits0_31 );
20746 cnt_bits0_63 = gen_vpopcntd_mode32(bits0_31, bits32_63);
20747 cnt_bits64_127 = gen_vpopcntd_mode32(bits64_95, bits96_127);
20749 putVReg( vRT_addr, binop( Iop_64HLtoV128,
20750 mkexpr( cnt_bits64_127 ),
20751 mkexpr( cnt_bits0_63 ) ) );
20753 break;
20756 case 0x50C: // vgbbd Vector Gather Bits by Bytes by Doubleword
20757 DIP("vgbbd v%d,v%d\n", vRT_addr, vRB_addr);
20758 putVReg( vRT_addr, unop( Iop_PwBitMtxXpose64x2, mkexpr( vB ) ) );
20759 break;
20761 case 0x5CC: // vbpermd Vector Bit Permute Doubleword
20763 UChar vRA_addr = ifieldRegA( theInstr );
20764 IRTemp vA = newTemp( Ity_V128 );
20765 UInt j;
20766 IRTemp index_dword_hi[8]; // index in double word
20767 IRTemp index_dword_lo[8];
20768 IRTemp index_dword_hi_valid[8];
20769 IRTemp index_dword_lo_valid[8];
20770 IRTemp pb_dword_hi[8]; // permute bit
20771 IRTemp pb_dword_lo[8];
20772 IRTemp tmp_hi[9];
20773 IRTemp tmp_lo[9];
20775 DIP("vbpermd v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
20777 tmp_hi[0] = newTemp( Ity_I64 );
20778 tmp_lo[0] = newTemp( Ity_I64 );
20780 assign( vA, getVReg(vRA_addr) );
20781 assign( tmp_hi[0], mkU64( 0 ) );
20782 assign( tmp_lo[0], mkU64( 0 ) );
20784 for (j=0; j<8; j++) {
20785 index_dword_hi[j] = newTemp( Ity_I64 );
20786 index_dword_lo[j] = newTemp( Ity_I64 );
20787 index_dword_hi_valid[j] = newTemp( Ity_I64 );
20788 index_dword_lo_valid[j] = newTemp( Ity_I64 );
20789 pb_dword_hi[j] = newTemp( Ity_I64 );
20790 pb_dword_lo[j] = newTemp( Ity_I64 );
20791 tmp_hi[j+1] = newTemp( Ity_I64 );
20792 tmp_lo[j+1] = newTemp( Ity_I64 );
20794 assign( index_dword_hi[j],
20795 binop( Iop_And64,
20796 binop( Iop_Shr64,
20797 unop( Iop_V128HIto64,
20798 mkexpr( vB ) ),
20799 mkU8( ( 7 - j ) * 8 ) ),
20800 mkU64( 0xFF ) ) );
20802 assign( index_dword_lo[j],
20803 binop( Iop_And64,
20804 binop( Iop_Shr64,
20805 unop( Iop_V128to64,
20806 mkexpr( vB ) ),
20807 mkU8( ( 7 - j ) * 8 ) ),
20808 mkU64( 0xFF ) ) );
20810 assign( index_dword_hi_valid[j],
20811 unop( Iop_1Sto64,
20812 binop( Iop_CmpLT64U,
20813 mkexpr( index_dword_hi[j] ),
20814 mkU64( 64 ) ) ) );
20816 assign( index_dword_lo_valid[j],
20817 unop( Iop_1Sto64,
20818 binop( Iop_CmpLT64U,
20819 mkexpr( index_dword_lo[j] ),
20820 mkU64( 64 ) ) ) );
20821 assign( pb_dword_hi[j],
20822 binop( Iop_And64,
20823 binop( Iop_Shr64,
20824 unop( Iop_V128HIto64,
20825 mkexpr( vA ) ),
20826 unop( Iop_64to8,
20827 binop( Iop_Sub64,
20828 mkU64( 63 ),
20829 mkexpr( index_dword_hi[j] )
20830 ) ) ),
20831 mkU64( 0x1 ) ) );
20833 assign( pb_dword_lo[j],
20834 binop( Iop_And64,
20835 binop( Iop_Shr64,
20836 unop( Iop_V128to64,
20837 mkexpr( vA ) ),
20838 unop( Iop_64to8,
20839 binop( Iop_Sub64,
20840 mkU64( 63 ),
20841 mkexpr( index_dword_lo[j] )
20842 ) ) ),
20843 mkU64( 0x1 ) ) );
20845 assign( tmp_hi[j+1],
20846 binop( Iop_Or64,
20847 binop( Iop_And64,
20848 mkexpr( index_dword_hi_valid[j] ),
20849 binop( Iop_Shl64,
20850 mkexpr( pb_dword_hi[j] ),
20851 mkU8( 7 - j ) ) ),
20852 mkexpr( tmp_hi[j] ) ) );
20854 assign( tmp_lo[j+1],
20855 binop( Iop_Or64,
20856 binop( Iop_And64,
20857 mkexpr( index_dword_lo_valid[j] ),
20858 binop( Iop_Shl64,
20859 mkexpr( pb_dword_lo[j] ),
20860 mkU8( 7 - j ) ) ),
20861 mkexpr( tmp_lo[j] ) ) );
20864 putVReg( vRT_addr,
20865 binop( Iop_64HLtoV128,
20866 mkexpr( tmp_hi[8] ),
20867 mkexpr( tmp_lo[8] ) ) );
20869 break;
20871 default:
20872 vex_printf("dis_av_count_bitTranspose(ppc)(opc2)\n");
20873 return False;
20874 break;
20876 return True;
20880 * Scalar / Vector logical instructions under bit mask
/* Disassemble the scalar bit-manipulation-under-mask X-form instructions
   cntlzdm, pextd, cfuged, cnttzdm and pdepd.  Reads rS and rB, writes rA.
   Returns True if the instruction was decoded, False otherwise. */
20882 static Bool dis_logical_mask_bits ( UInt prefix, UInt theInstr,
20883 const VexAbiInfo* vbi )
20885 UChar opc1 = ifieldOPC(theInstr);
20886 UInt opc2 = ifieldOPClo10(theInstr);
20887 UChar rS_addr = ifieldRegDS(theInstr);
20888 UChar rA_addr = ifieldRegA(theInstr);
20889 UChar rB_addr = ifieldRegB(theInstr);
20891 IRTemp rS = newTemp( Ity_I64 );
20892 IRTemp rA = newTemp( Ity_I64 );
20893 IRTemp rB = newTemp( Ity_I64 );
20895 /* There are no prefixed versions of these instructions. */
20896 vassert( !prefix_instruction( prefix ) );
20898 assign( rS, getIReg(rS_addr) );
20899 assign( rB, getIReg(rB_addr) );
20901 if (opc1 != 0x1F) {
20902 vex_printf( "dis_logical_mask_bits(ppc)(instr)\n" );
20903 return False;
20906 switch (opc2) {
20908 /* X-form instructions */
20909 case 0x03B: // cntlzdm, Count Leading Zeros Doubleword Under bit mask
20910 case 0x0BC: // pextd, Parallel Bits Extract Doubleword
20911 case 0x0DC: // cfuged, Centrifuge Doubleword
20912 case 0x23B: // cnttzdm, Count Trailing Zeros Doubleword Under bit mask
20914 UInt max_bits = mode64 ? 64 : 32;
20915 IRTemp ones = newTemp( Ity_I64 );
20916 IRTemp all_ones = newTemp( Ity_I64 );
20918 /* Get the bits corresponding to 1's in the mask */
20919 assign( ones, extract_bits_under_mask ( vbi,
20920 mkexpr( rS ),
20921 mkexpr( rB ),
20922 mkU64( 1 ) ) );
20924 if ( opc2 == 0x03b ) { // cntlzdm
20925 IRTemp cnt = newTemp( Ity_I64 );
20927 DIP("cntlzdm r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
20928 assign( cnt, popcnt64( vbi, mkexpr( rB ) ) );
/* Left-justify the extracted bits, filling the low positions with 1's
   (all_ones) so the leading-zero count only sees the extracted field. */
20930 assign( all_ones, binop( Iop_Shr64,
20931 mkU64( 0xFFFFFFFFFFFFFFFF ),
20932 unop( Iop_64to8, mkexpr( cnt ) ) ) );
20934 assign( rA,
20935 unop( Iop_ClzNat64,
20936 binop( Iop_Or64,
20937 binop( Iop_Shl64,
20938 mkexpr( ones ),
20939 binop( Iop_Sub8,
20940 mkU8( max_bits ),
20941 unop( Iop_64to8,
20942 mkexpr( cnt ) ) ) ),
20943 mkexpr( all_ones ) ) ) );
20945 } else if ( opc2 == 0x0BC ) { // pextd
20946 DIP("pextd r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
20947 assign( rA, mkexpr( ones ) );
20949 } else if ( opc2 == 0x0DC ) { // cfuged
20950 IRTemp zeros = newTemp( Ity_I64 );
20951 IRTemp cnt = newTemp( Ity_I64 );
20953 DIP("cfuged r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
20954 assign( cnt, popcnt64( vbi, mkexpr( rB ) ) );
20956 /* Get the bits corresponding to 0's in the mask */
20957 assign( zeros, extract_bits_under_mask ( vbi,
20958 mkexpr( rS ),
20959 mkexpr( rB ),
20960 mkU64( 0 ) ) );
/* Result: zero-mask bits shifted left of the one-mask bits. */
20962 assign( rA,
20963 binop( Iop_Or64,
20964 binop( Iop_Shl64,
20965 mkexpr( zeros ),
20966 unop( Iop_64to8,
20967 mkexpr( cnt ) ) ),
20968 mkexpr( ones ) ) );
20970 } else if ( opc2 == 0x23B ) { //cnttzdm
20971 DIP("cnttzdm r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
20972 assign( all_ones, binop( Iop_Shl64,
20973 mkU64( 0xFFFFFFFFFFFFFFFF ),
20974 unop( Iop_64to8,
20975 popcnt64( vbi,
20976 mkexpr( rB ) ) ) ) );
20978 assign( rA,
20979 unop( Iop_CtzNat64,
20980 binop( Iop_Or64,
20981 mkexpr( all_ones ), mkexpr( ones ) ) ) );
/* NOTE(review): this final branch is unreachable — the switch admits
   only the four opc2 values handled above.  "pexld" looks like a typo
   for "pextd". */
20983 } else { //pexld
20984 DIP("pexld r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
20985 assign( rA, mkexpr( ones ) );
20987 break;
20990 case 0x09C: // pdepd, Parallel Bits Deposit Doubleword X-form
20992 IRTemp ones = newTemp( Ity_I64 );
20994 DIP("pdepd r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
20995 assign( ones, deposit_bits_under_mask ( vbi, mkexpr( rS ),
20996 mkexpr( rB ) ) );
20997 assign( rA, mkexpr( ones ) );
20998 break;
21001 default:
/* NOTE(review): stray ')' after the function name in the message below
   — looks like a typo. */
21002 vex_printf("dis_logical_mask_bits)(ppc)\n");
21003 return False;
21006 putIReg( rA_addr, mkexpr( rA ) );
21007 return True;
/* Disassemble the vector bit-manipulation-under-mask VX-form instructions
   vgnb, vcfuged, vpextd, vpdepd, vclzdm and vctzdm.  Reads vA/vB; writes
   vRT (or, for vgnb, the GPR named by the RT field).  Returns True if
   the instruction was decoded, False otherwise. */
21010 static Bool
21011 dis_vector_logical_mask_bits ( UInt prefix, UInt theInstr, UInt opc2,
21012 const VexAbiInfo* vbi )
21014 UChar vRA_addr = ifieldRegA(theInstr);
21015 UChar vRB_addr = ifieldRegB(theInstr);
21016 UChar vRT_addr = ifieldRegDS(theInstr);
21017 UChar opc1 = ifieldOPC( theInstr );
21018 IRTemp vA = newTemp(Ity_V128);
21019 IRTemp vB = newTemp(Ity_V128);
21021 /* There are no prefixed versions of these instructions. */
21022 vassert( !prefix_instruction( prefix ) );
21024 if (opc1 != 4) {
21025 vex_printf( "dis_vector_logical_mask_bits(ppc)(instr)\n" );
21026 return False;
21029 assign( vA, getVReg(vRA_addr));
21030 assign( vB, getVReg(vRB_addr));
21032 switch (opc2) {
21033 case 0x4CC: // vgnb, Vector Gather every Nth Bit VX-form
21035 IRTemp vB_hi = newTemp( Ity_I64 );
21036 IRTemp vB_lo = newTemp( Ity_I64 );
21037 IRTemp ones_hi, ones_lo;
21038 UChar N = toUChar( IFIELD( theInstr, 16, 3 ) );
21039 ULong extract_mask_hi, extract_mask_lo, byte_mask;
21040 UInt i, num_bits_hi, num_bits_lo;
21042 /* Note, the return register number is actually for a GPR not a
21043 vector register. */
21044 DIP("vgnb %u,v%u,%u\n", vRT_addr, vRB_addr, N);
21046 if ((N < 2) || (N>7)) {
21047 /* The value of N can be any value between 2 and 7, inclusive. */
21048 vex_printf("\nERROR: vgnb RT,VRB,N; N is out of range.\n\n");
21049 return False;
21052 /* Create 32-bit extract mask, starting with bit 0 (IBM numbering),
21053 every Nth bit going right will be a 1. */
21054 extract_mask_hi = 0;
21055 extract_mask_lo = 0;
21057 byte_mask = 1;
/* num_bits_hi/num_bits_lo count how many bits get extracted from each
   64-bit half; they are used below to left-align the result. */
21059 i = 0;
21060 num_bits_hi = 0;
21061 while( i < 64) {
21062 extract_mask_hi = extract_mask_hi | (byte_mask << (63 - i));
21063 i = i + N;
21064 num_bits_hi++;
21067 num_bits_lo = 0;
21068 while( i < 128) {
21069 extract_mask_lo = extract_mask_lo | (byte_mask << (127 - i));
21070 i = i + N;
21071 num_bits_lo++;
21074 ones_hi = newTemp( Ity_I64 );
21075 ones_lo = newTemp( Ity_I64 );
21077 assign( vB_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
21078 assign( vB_lo, unop( Iop_V128to64, mkexpr( vB ) ) );
21080 assign( ones_hi, extract_bits_under_mask ( vbi, mkexpr( vB_hi ),
21081 mkU64( extract_mask_hi ),
21082 mkU64( 1 ) ) );
21083 assign( ones_lo, extract_bits_under_mask ( vbi, mkexpr( vB_lo ),
21084 mkU64( extract_mask_lo ),
21085 mkU64( 1 ) ) );
21087 /* Concatenate the extracted bits from ones_hi and ones_lo and
21088 store in GPR. Make sure the hi and low bits are left aligned per
21089 IBM numbering */
21090 putIReg( vRT_addr, binop( Iop_Or64,
21091 binop( Iop_Shl64,
21092 mkexpr( ones_hi ),
21093 mkU8( 64 - num_bits_hi ) ),
21094 binop( Iop_Shl64,
21095 mkexpr( ones_lo ),
21096 mkU8( 64 - num_bits_hi
21097 - num_bits_lo ) ) ) );
21099 return True;
21101 case 0x54D: // vcfuged, Centrifuge Doubleword VX-form
21103 IRTemp vA_hi = newTemp( Ity_I64 );
21104 IRTemp vA_lo = newTemp( Ity_I64 );
21105 IRTemp vB_hi = newTemp( Ity_I64 );
21106 IRTemp vB_lo = newTemp( Ity_I64 );
21107 IRTemp zeros[2];
21108 IRTemp ones[2];
21109 IRTemp count[2];
21111 DIP("vcfuged v%u,v%u,v%u\n", vRT_addr, vRA_addr, vRB_addr);
21113 zeros[0] = newTemp( Ity_I64 );
21114 zeros[1] = newTemp( Ity_I64 );
21115 ones[0] = newTemp( Ity_I64 );
21116 ones[1] = newTemp( Ity_I64 );
21117 count[0] = newTemp( Ity_I64 );
21118 count[1] = newTemp( Ity_I64 );
21120 assign( vA_hi, unop( Iop_V128HIto64, mkexpr( vA ) ) );
21121 assign( vB_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
21122 assign( vA_lo, unop( Iop_V128to64, mkexpr( vA ) ) );
21123 assign( vB_lo, unop( Iop_V128to64, mkexpr( vB ) ) );
21125 assign( count[0], popcnt64( vbi, mkexpr( vB_hi ) ) );
21126 assign( count[1], popcnt64( vbi, mkexpr( vB_lo ) ) );
21128 assign( ones[0], extract_bits_under_mask ( vbi, mkexpr( vA_hi ),
21129 mkexpr( vB_hi ),
21130 mkU64( 1 ) ) );
21131 assign( ones[1], extract_bits_under_mask ( vbi, mkexpr( vA_lo ),
21132 mkexpr( vB_lo ),
21133 mkU64( 1 ) ) );
21134 assign( zeros[0], extract_bits_under_mask ( vbi, mkexpr( vA_hi ),
21135 mkexpr( vB_hi ),
21136 mkU64( 0 ) ) );
21137 assign( zeros[1], extract_bits_under_mask ( vbi, mkexpr( vA_lo ),
21138 mkexpr( vB_lo ),
21139 mkU64( 0 ) ) );
21141 /* Put the bits corresponding to zero mask bits to the left of the
21142 bits corresponding to one mask bits for the upper and lower 64-bit
21143 words. */
21144 putVReg( vRT_addr, binop( Iop_64HLtoV128,
21145 binop( Iop_Or64,
21146 binop( Iop_Shl64,
21147 mkexpr( zeros[0] ),
21148 unop( Iop_64to8,
21149 mkexpr( count[0] ) ) ),
21150 mkexpr( ones[0] ) ),
21151 binop( Iop_Or64,
21152 binop( Iop_Shl64,
21153 mkexpr( zeros[1] ),
21154 unop( Iop_64to8,
21155 mkexpr( count[1] ) ) ),
21156 mkexpr( ones[1] ) ) ) );
21158 break;
21160 case 0x58D: // vpextd, Vector Parallel Bits Extract Doubleword VX-form
21162 IRTemp vA_hi = newTemp( Ity_I64 );
21163 IRTemp vA_lo = newTemp( Ity_I64 );
21164 IRTemp vB_hi = newTemp( Ity_I64 );
21165 IRTemp vB_lo = newTemp( Ity_I64 );
21166 IRTemp ones[2];
21168 DIP("vpextd v%u,v%u,v%u\n", vRT_addr, vRA_addr, vRB_addr);
21170 ones[0] = newTemp( Ity_I64 );
21171 ones[1] = newTemp( Ity_I64 );
21173 assign( vA_hi, unop( Iop_V128HIto64, mkexpr( vA ) ) );
21174 assign( vB_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
21175 assign( vA_lo, unop( Iop_V128to64, mkexpr( vA ) ) );
21176 assign( vB_lo, unop( Iop_V128to64, mkexpr( vB ) ) );
21178 assign( ones[0], extract_bits_under_mask ( vbi, mkexpr( vA_hi ),
21179 mkexpr( vB_hi ),
21180 mkU64( 1 ) ) );
21181 assign( ones[1], extract_bits_under_mask ( vbi, mkexpr( vA_lo ),
21182 mkexpr( vB_lo ),
21183 mkU64( 1 ) ) );
21184 putVReg( vRT_addr, binop( Iop_64HLtoV128,
21185 mkexpr( ones[0] ), mkexpr( ones[1] ) ) );
21187 break;
21189 case 0x5CD: // vpdepd, Vector Parallel Bits Deposit Doubleword VX-form
21191 IRTemp vA_hi = newTemp( Ity_I64 );
21192 IRTemp vA_lo = newTemp( Ity_I64 );
21193 IRTemp vB_hi = newTemp( Ity_I64 );
21194 IRTemp vB_lo = newTemp( Ity_I64 );
21195 IRTemp ones[2];
21197 DIP("vpdepd v%u,v%u,v%u\n", vRT_addr, vRA_addr, vRB_addr);
21199 ones[0] = newTemp( Ity_I64 );
21200 ones[1] = newTemp( Ity_I64 );
21202 assign( vA_hi, unop( Iop_V128HIto64, mkexpr( vA ) ) );
21203 assign( vB_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
21204 assign( vA_lo, unop( Iop_V128to64, mkexpr( vA ) ) );
21205 assign( vB_lo, unop( Iop_V128to64, mkexpr( vB ) ) );
21207 assign( ones[0], deposit_bits_under_mask ( vbi, mkexpr( vA_hi ),
21208 mkexpr( vB_hi ) ) );
21209 assign( ones[1], deposit_bits_under_mask ( vbi, mkexpr( vA_lo ),
21210 mkexpr( vB_lo ) ) );
21211 putVReg( vRT_addr, binop( Iop_64HLtoV128,
21212 mkexpr( ones[0] ), mkexpr( ones[1] ) ) );
21214 break;
21216 case 0x784: // vclzdm,
21218 /* Vector Count Leading Zeros Doubleword under bit mask */
21220 IRTemp extracted_bits[2];
21221 IRTemp clz[2];
21222 IRTemp ones[2];
21223 IRTemp cnt_extract_bits[2];
21224 UInt max_bits = 64;
21225 IRTemp vA_hi = newTemp( Ity_I64 );
21226 IRTemp vA_lo = newTemp( Ity_I64 );
21227 IRTemp vB_hi = newTemp( Ity_I64 );
21228 IRTemp vB_lo = newTemp( Ity_I64 );
21230 DIP("vclzdm v%u,v%u,v%u\n", vRT_addr, vRA_addr, vRB_addr);
21232 ones[0] = newTemp( Ity_I64 );
21233 ones[1] = newTemp( Ity_I64 );
21234 clz[0] = newTemp( Ity_I64 );
21235 clz[1] = newTemp( Ity_I64 );
21236 extracted_bits[0] = newTemp( Ity_I64 );
21237 extracted_bits[1] = newTemp( Ity_I64 );
21238 cnt_extract_bits[0] = newTemp( Ity_I8 );
21239 cnt_extract_bits[1] = newTemp( Ity_I8 );
21241 /* Gather bits in each vector element, then count leading zeros. */
21242 assign( vA_hi, unop( Iop_V128HIto64, mkexpr( vA ) ) );
21243 assign( vB_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
21244 assign( vA_lo, unop( Iop_V128to64, mkexpr( vA ) ) );
21245 assign( vB_lo, unop( Iop_V128to64, mkexpr( vB ) ) );
21247 assign( ones[0], extract_bits_under_mask ( vbi,
21248 mkexpr( vA_hi ),
21249 mkexpr( vB_hi ),
21250 mkU64( 1 ) ) );
21252 assign( ones[1], extract_bits_under_mask ( vbi,
21253 mkexpr( vA_lo ),
21254 mkexpr( vB_lo ),
21255 mkU64( 1 ) ) );
21257 assign( cnt_extract_bits[0],
21258 unop( Iop_16to8,
21259 unop( Iop_32to16,
21260 count_bits_under_mask ( vbi,
21261 mkexpr( vA_hi ),
21262 mkexpr( vB_hi ),
21263 mkU64( 1 ) ) ) ) );
21265 assign( cnt_extract_bits[1],
21266 unop( Iop_16to8,
21267 unop( Iop_32to16,
21268 count_bits_under_mask ( vbi,
21269 mkexpr( vA_lo ),
21270 mkexpr( vB_lo ),
21271 mkU64( 1 ) ) ) ) );
21273 /* Shift extracted bits to High order bits, filling lower order bits
21274 with 1's so we only count zeros in extracted bits. */
21275 assign( extracted_bits[0],
21276 binop( Iop_Or64,
21277 binop( Iop_Shr64,
21278 mkU64( 0xFFFFFFFFFFFFFFFF ),
21279 mkexpr( cnt_extract_bits[0] ) ),
21280 binop( Iop_Shl64,
21281 mkexpr( ones[0] ),
21282 binop( Iop_Sub8,
21283 mkU8( max_bits ),
21284 mkexpr( cnt_extract_bits[0] )
21285 ) ) ) );
21287 assign( clz[0],
21288 unop( Iop_Clz64,
21289 mkexpr( extracted_bits[0] ) ) );
21291 assign( extracted_bits[1],
21292 binop( Iop_Or64,
21293 binop( Iop_Shr64,
21294 mkU64( 0xFFFFFFFFFFFFFFFF ),
21295 mkexpr( cnt_extract_bits[1] ) ),
21296 binop( Iop_Shl64,
21297 mkexpr( ones[1] ),
21298 binop( Iop_Sub8,
21299 mkU8( max_bits ),
21300 mkexpr( cnt_extract_bits[1] )
21301 ) ) ) );
21302 assign( clz[1],
21303 unop( Iop_Clz64,
21304 mkexpr( extracted_bits[1] ) ) );
21306 putVReg( vRT_addr, binop( Iop_64HLtoV128,
21307 mkexpr( clz[0] ), mkexpr( clz[1] ) ) );
21308 break;
21311 case 0x7C4: // vctzdm
21313 /* Vector Count Trailing Zeros Doubleword under bit mask */
21314 IRTemp ctz[2];
21315 IRTemp ones[2];
21316 IRTemp all_ones_hi = newTemp( Ity_I64 );
21317 IRTemp all_ones_lo = newTemp( Ity_I64 );
21318 IRTemp vA_hi = newTemp( Ity_I64 );
21319 IRTemp vA_lo = newTemp( Ity_I64 );
21320 IRTemp vB_hi = newTemp( Ity_I64 );
21321 IRTemp vB_lo = newTemp( Ity_I64 );
21323 DIP("vctzdm v%u,v%u,v%u\n", vRT_addr, vRA_addr, vRB_addr);
21325 ones[0] = newTemp( Ity_I64 );
21326 ones[1] = newTemp( Ity_I64 );
21327 ctz[0] = newTemp( Ity_I64 );
21328 ctz[1] = newTemp( Ity_I64 );
21330 /* Gather bits in each vector element, then count trailing zeros. */
21331 assign( vA_hi, unop( Iop_V128HIto64, mkexpr( vA ) ) );
21332 assign( vB_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
21333 assign( vA_lo, unop( Iop_V128to64, mkexpr( vA ) ) );
21334 assign( vB_lo, unop( Iop_V128to64, mkexpr( vB ) ) );
21336 /* Shift all 1's value left by the count of the number of bits in the
21337 mask. OR this with the extracted bits so the trailing zero count
21338 will only count zeros in extracted field. */
21339 assign( all_ones_hi,
21340 binop( Iop_Shl64,
21341 mkU64( 0xFFFFFFFFFFFFFFFF ),
21342 unop( Iop_64to8,
21343 popcnt64( vbi, mkexpr( vB_hi ) ) ) ) );
21344 assign( all_ones_lo,
21345 binop( Iop_Shl64,
21346 mkU64( 0xFFFFFFFFFFFFFFFF ),
21347 unop( Iop_64to8,
21348 popcnt64( vbi, mkexpr( vB_lo ) ) ) ) );
21350 assign( ones[0],
21351 binop( Iop_Or64,
21352 mkexpr( all_ones_hi ),
21353 extract_bits_under_mask ( vbi,
21354 mkexpr( vA_hi ),
21355 mkexpr( vB_hi ),
21356 mkU64( 1 ) ) ) );
21358 assign( ones[1],
21359 binop( Iop_Or64,
21360 mkexpr( all_ones_lo ),
21361 extract_bits_under_mask ( vbi,
21362 mkexpr( vA_lo ),
21363 mkexpr( vB_lo ),
21364 mkU64( 1 ) ) ) );
21366 assign( ctz[0], unop( Iop_CtzNat64, mkexpr( ones[0] ) ) );
21367 assign( ctz[1], unop( Iop_CtzNat64, mkexpr( ones[1] ) ) );
21369 putVReg( vRT_addr, binop( Iop_64HLtoV128,
21370 mkexpr( ctz[0] ), mkexpr( ctz[1] ) ) );
21371 break;
21374 default:
21375 vex_printf("dis_vector_logical_mask_bits(ppc)(opc2)\n");
21376 return False;
21378 return True;
/* Encodings of a floating-point compare result as placed in a 4-bit
   PPC CR field: bit 3 = LT, bit 2 = GT, bit 1 = EQ (see the mapping
   table in the comment preceding get_fp_cmp_CR_val). */
21381 typedef enum {
21382 PPC_CMP_EQ = 2,
21383 PPC_CMP_GT = 4,
21384 PPC_CMP_GE = 6, // GT | EQ
21385 PPC_CMP_LT = 8
21386 } ppc_cmp_t;
21390 This helper function takes as input the IRExpr returned
21391 from a binop( Iop_CmpF64, fpA, fpB), whose result is returned
21392 in IR form. This helper function converts it to PPC form.
21394 Map compare result from IR to PPC
21396 FP cmp result | PPC | IR
21397 --------------------------
21398 UN | 0x1 | 0x45
21399 EQ | 0x2 | 0x40
21400 GT | 0x4 | 0x00
21401 LT | 0x8 | 0x01
21403 condcode = Shl(1, (~(ccIR>>5) & 2)
21404 | ((ccIR ^ (ccIR>>6)) & 1))
21406 static IRTemp
21407 get_fp_cmp_CR_val (IRExpr * ccIR_expr)
21409 IRTemp condcode = newTemp( Ity_I32 );
21410 IRTemp ccIR = newTemp( Ity_I32 );
21412 assign(ccIR, ccIR_expr);
21413 assign( condcode,
21414 binop( Iop_Shl32,
21415 mkU32( 1 ),
21416 unop( Iop_32to8,
21417 binop( Iop_Or32,
21418 binop( Iop_And32,
21419 unop( Iop_Not32,
21420 binop( Iop_Shr32,
21421 mkexpr( ccIR ),
21422 mkU8( 5 ) ) ),
21423 mkU32( 2 ) ),
21424 binop( Iop_And32,
21425 binop( Iop_Xor32,
21426 mkexpr( ccIR ),
21427 binop( Iop_Shr32,
21428 mkexpr( ccIR ),
21429 mkU8( 6 ) ) ),
21430 mkU32( 1 ) ) ) ) ) );
21431 return condcode;
21435 * Helper function for get_max_min_fp for ascertaining the max or min between two doubles
21436 * following these special rules:
21437 * - The max/min of a QNaN and any value is that value
21438 * (When two QNaNs are being compared, the frA QNaN is the return value.)
21439 * - The max/min of any value and an SNaN is that SNaN converted to a QNaN
21440 * (When two SNaNs are being compared, the frA SNaN is converted to a QNaN.)
21442 static IRExpr * _get_maxmin_fp_NaN(IRTemp frA_I64, IRTemp frB_I64)
21444 IRTemp frA_isNaN = newTemp(Ity_I1);
21445 IRTemp frB_isNaN = newTemp(Ity_I1);
21446 IRTemp frA_isSNaN = newTemp(Ity_I1);
21447 IRTemp frB_isSNaN = newTemp(Ity_I1);
21448 IRTemp frA_isQNaN = newTemp(Ity_I1);
21449 IRTemp frB_isQNaN = newTemp(Ity_I1);
21451 assign( frA_isNaN, is_NaN( Ity_I64, frA_I64 ) );
21452 assign( frB_isNaN, is_NaN( Ity_I64, frB_I64 ) );
21453 // If operand is a NAN and bit 12 is '0', then it's an SNaN
21454 assign( frA_isSNaN,
21455 mkAND1( mkexpr(frA_isNaN),
21456 binop( Iop_CmpEQ32,
21457 binop( Iop_And32,
21458 unop( Iop_64HIto32, mkexpr( frA_I64 ) ),
21459 mkU32( 0x00080000 ) ),
21460 mkU32( 0 ) ) ) );
21461 assign( frB_isSNaN,
21462 mkAND1( mkexpr(frB_isNaN),
21463 binop( Iop_CmpEQ32,
21464 binop( Iop_And32,
21465 unop( Iop_64HIto32, mkexpr( frB_I64 ) ),
21466 mkU32( 0x00080000 ) ),
21467 mkU32( 0 ) ) ) );
21468 assign( frA_isQNaN,
21469 mkAND1( mkexpr( frA_isNaN ), unop( Iop_Not1, mkexpr( frA_isSNaN ) ) ) );
21470 assign( frB_isQNaN,
21471 mkAND1( mkexpr( frB_isNaN ), unop( Iop_Not1, mkexpr( frB_isSNaN ) ) ) );
21473 /* Based on the rules specified in the function prologue, the algorithm is as follows:
21474 * <<<<<<<<<>>>>>>>>>>>>>>>>>>
21475 * if frA is a SNaN
21476 * result = frA converted to QNaN
21477 * else if frB is a SNaN
21478 * if (frA is QNan)
21479 * result = frA
21480 * else
21481 * result = frB converted to QNaN
21482 * else if frB is a QNaN
21483 * result = frA
21484 * // One of frA or frB was a NaN in order for this function to be called, so
21485 * // if we get to this point, we KNOW that frA must be a QNaN.
21486 * else // frA is a QNaN
21487 * result = frB
21488 * <<<<<<<<<>>>>>>>>>>>>>>>>>>
/* OR-ing this bit into a NaN's fraction converts an SNaN to a QNaN; it
   is the same bit tested above via 0x00080000 in the high 32 bits. */
21491 #define SNAN_MASK 0x0008000000000000ULL
21493 return
21494 IRExpr_ITE(mkexpr(frA_isSNaN),
21495 /* then: result = frA converted to QNaN */
21496 binop(Iop_Or64, mkexpr(frA_I64), mkU64(SNAN_MASK)),
21497 /* else: if frB is a SNaN */
21498 IRExpr_ITE(mkexpr(frB_isSNaN),
21499 IRExpr_ITE(mkexpr(frA_isQNaN),
21500 /* then: result = frA */
21501 mkexpr(frA_I64),
21502 /* else: result = frB converted to QNaN */
21503 binop(Iop_Or64, mkexpr(frB_I64),
21504 mkU64(SNAN_MASK))),
21505 /* else: if frB is a QNaN */
21506 IRExpr_ITE(mkexpr(frB_isQNaN),
21507 /* then: result = frA */
21508 mkexpr(frA_I64),
21509 /* else: frA is a QNaN, so result = frB */
21510 mkexpr(frB_I64))));
21514 * Helper function for get_max_min_fp.
21516 static IRExpr * _get_maxmin_fp_cmp(IRTemp src1, IRTemp src2, Bool isMin)
21518 IRTemp src1cmpsrc2 = get_fp_cmp_CR_val( binop( Iop_CmpF64,
21519 unop( Iop_ReinterpI64asF64,
21520 mkexpr( src1 ) ),
21521 unop( Iop_ReinterpI64asF64,
21522 mkexpr( src2 ) ) ) );
21524 return IRExpr_ITE( binop( Iop_CmpEQ32,
21525 mkexpr( src1cmpsrc2 ),
21526 mkU32( isMin ? PPC_CMP_LT : PPC_CMP_GT ) ),
21527 /* then: use src1 */
21528 mkexpr( src1 ),
21529 /* else: use src2 */
21530 mkexpr( src2 ) );
21534 * Helper function for "Maximum/Minimum Double Precision" operations.
21535 * Arguments: frA and frB are Ity_I64
21536 * Returns Ity_I64 IRExpr that answers the "which is Maximum/Minimum" question
21538 static IRExpr * get_max_min_fp(IRTemp frA_I64, IRTemp frB_I64, Bool isMin)
21540 /* There are three special cases where get_fp_cmp_CR_val is not helpful
21541 * for ascertaining the maximum between two doubles:
21542 * 1. The max/min of +0 and -0 is +0.
21543 * 2. The max/min of a QNaN and any value is that value.
21544 * 3. The max/min of any value and an SNaN is that SNaN converted to a QNaN.
21545 * We perform the check for [+/-]0 here in this function and use the
21546 * _get_maxmin_fp_NaN helper for the two NaN cases; otherwise we call _get_maxmin_fp_cmp
21547 * to do the standard comparison function.
21549 IRTemp anyNaN = newTemp(Ity_I1);
21550 IRTemp frA_isZero = newTemp(Ity_I1);
21551 IRTemp frB_isZero = newTemp(Ity_I1);
21552 assign( frA_isZero, is_Zero( Ity_I64, frA_I64 ) );
21553 assign( frB_isZero, is_Zero( Ity_I64, frB_I64 ) );
21554 assign( anyNaN, mkOR1( is_NaN( Ity_I64, frA_I64 ),
21555 is_NaN(Ity_I64, frB_I64 ) ) );
21556 #define MINUS_ZERO 0x8000000000000000ULL
21558 return IRExpr_ITE( /* If both arguments are zero . . . */
21559 mkAND1( mkexpr( frA_isZero ), mkexpr( frB_isZero ) ),
21560 /* then: if frA is -0 and isMin==True, return -0;
21561 * else if frA is +0 and isMin==False; return +0;
21562 * otherwise, simply return frB. */
21563 IRExpr_ITE( binop( Iop_CmpEQ32,
21564 unop( Iop_64HIto32,
21565 mkexpr( frA_I64 ) ),
21566 mkU32( isMin ? 0x80000000 : 0 ) ),
21567 mkU64( isMin ? MINUS_ZERO : 0ULL ),
21568 mkexpr( frB_I64 ) ),
21569 /* else: check if either input is a NaN*/
21570 IRExpr_ITE( mkexpr( anyNaN ),
21571 /* then: use "NaN helper" */
21572 _get_maxmin_fp_NaN( frA_I64, frB_I64 ),
21573 /* else: use "comparison helper" */
/* NOTE(review): operands are passed (frB, frA); the helper returns its
   second argument when the strict compare fails, so on EQ the result
   is frA. */
21574 _get_maxmin_fp_cmp( frB_I64, frA_I64, isMin ) ));
/* Map the low seven bits of opc2 for an x{s|v}rdpi{m|p|c|z} round-to-integer
 * instruction to the mnemonic suffix used when disassembling it
 * ("" = round-to-nearest-away, the unsuffixed base form). */
21577 static const HChar * _get_vsx_rdpi_suffix(UInt opc2)
21579 switch (opc2 & 0x7F) {
21580 case 0x72:
21581 return "m";
21582 case 0x52:
21583 return "p";
21584 case 0x56:
21585 return "c";
21586 case 0x32:
21587 return "z";
21588 case 0x12:
21589 return "";
21591 default: // Impossible to get here
21592 vex_printf("Unrecognized opcode %x\n", opc2);
21593 vpanic("_get_vsx_rdpi_suffix(ppc)(opc2)");
21598 * Helper function for vector/scalar double precision fp round to integer instructions.
/* Round the F64 value held (as an I64 bit image) in frB_I64 to an integral
 * F64, using the rounding mode selected by opc2; returns the rounded value
 * as an Ity_F64 IRExpr. */
21600 static IRExpr * _do_vsx_fp_roundToInt(IRTemp frB_I64, UInt opc2)
21603 /* The same rules apply for x{s|v}rdpi{m|p|c|z} as for floating point round operations (fri{m|n|p|z}). */
21604 IRTemp frB = newTemp(Ity_F64);
21605 IRTemp frD = newTemp(Ity_F64);
21606 IRTemp intermediateResult = newTemp(Ity_I64);
21607 IRTemp is_SNAN = newTemp(Ity_I1);
21608 IRExpr * hi32;
21609 IRExpr * rxpi_rm;
/* Select the IR rounding mode from the opcode (same decoding as
 * _get_vsx_rdpi_suffix). */
21610 switch (opc2 & 0x7F) {
21611 case 0x72:
21612 rxpi_rm = mkU32(Irrm_NegINF);
21613 break;
21614 case 0x52:
21615 rxpi_rm = mkU32(Irrm_PosINF);
21616 break;
21617 case 0x56:
21618 rxpi_rm = get_IR_roundingmode();
21619 break;
21620 case 0x32:
21621 rxpi_rm = mkU32(Irrm_ZERO);
21622 break;
21623 case 0x12:
21624 rxpi_rm = mkU32(Irrm_NEAREST);
21625 break;
21627 default: // Impossible to get here
21628 vex_printf("Unrecognized opcode %x\n", opc2);
21629 vpanic("_do_vsx_fp_roundToInt(ppc)(opc2)");
21631 assign(frB, unop(Iop_ReinterpI64asF64, mkexpr(frB_I64)));
21632 assign( intermediateResult,
21633 binop( Iop_F64toI64S, rxpi_rm,
21634 mkexpr( frB ) ) );
21636 /* don't use the rounded integer if frB is outside -9e18..9e18 */
21637 /* F64 has only log10(2**52) significant digits anyway */
21638 /* need to preserve sign of zero */
21639 /* frD = (fabs(frB) > 9e18) ? frB :
21640 (sign(frB)) ? -fabs((double)intermediateResult) : (double)intermediateResult */
21641 assign( frD,
21642 IRExpr_ITE(
21643 binop( Iop_CmpNE8,
21644 unop( Iop_32to8,
21645 binop( Iop_CmpF64,
21646 IRExpr_Const( IRConst_F64( 9e18 ) ),
21647 unop( Iop_AbsF64, mkexpr( frB ) ) ) ),
21648 mkU8(0) ),
21649 mkexpr( frB ),
21650 IRExpr_ITE(
/* Sign bit of frB is bit 31 of the high word of its I64 image. */
21651 binop( Iop_CmpNE32,
21652 binop( Iop_Shr32,
21653 unop( Iop_64HIto32,
21654 mkexpr( frB_I64 ) ),
21655 mkU8( 31 ) ),
21656 mkU32(0) ),
21657 unop( Iop_NegF64,
21658 unop( Iop_AbsF64,
21659 binop( Iop_I64StoF64,
21660 mkU32( 0 ),
21661 mkexpr( intermediateResult ) ) ) ),
21662 binop( Iop_I64StoF64,
21663 mkU32( 0 ),
21664 mkexpr( intermediateResult ) )
21669 /* See Appendix "Floating-Point Round to Integer Model" in ISA doc.
21670 * If frB is a SNAN, then frD <- frB, with bit 12 set to '1'.
21672 #define SNAN_MASK 0x0008000000000000ULL
21673 hi32 = unop( Iop_64HIto32, mkexpr(frB_I64) );
/* is_SNAN: NaN with the quiet bit (SNAN_MASK bit, i.e. bit 12) clear. */
21674 assign( is_SNAN,
21675 mkAND1( is_NaN( Ity_I64, frB_I64 ),
21676 binop( Iop_CmpEQ32,
21677 binop( Iop_And32, hi32, mkU32( 0x00080000 ) ),
21678 mkU32( 0 ) ) ) );
/* For an SNaN input, return the input quieted: XOR with SNAN_MASK sets the
 * quiet bit, which is known to be clear here (see is_SNAN above). */
21680 return IRExpr_ITE( mkexpr( is_SNAN ),
21681 unop( Iop_ReinterpI64asF64,
21682 binop( Iop_Xor64,
21683 mkU64( SNAN_MASK ),
21684 mkexpr( frB_I64 ) ) ),
21685 mkexpr( frD ));
21689 * Miscellaneous VSX vector instructions
/* Decode and translate miscellaneous VSX vector instructions (reciprocal /
 * reciprocal-sqrt estimates, max/min, copy-sign, abs/neg, and
 * round-to-integer forms). Returns True if the instruction was recognized
 * and translated, False otherwise. */
21691 static Bool
21692 dis_vxv_misc ( UInt prefix, UInt theInstr, UInt opc2 )
21694 /* XX3-Form */
21695 UChar opc1 = ifieldOPC( theInstr );
21696 UChar XT = ifieldRegXT( theInstr );
21697 UChar XB = ifieldRegXB( theInstr );
21699 /* There is no prefixed version of these instructions. */
21700 PREFIX_CHECK
21702 if (opc1 != 0x3C) {
21703 vex_printf( "dis_vxv_misc(ppc)(instr)\n" );
21704 return False;
21707 switch (opc2) {
21708 case 0x1B4: // xvredp (VSX Vector Reciprocal Estimate Double-Precision)
21709 case 0x194: // xvrsqrtedp (VSX Vector Reciprocal Square Root Estimate
21710 // Double-Precision)
21712 IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
21713 IRExpr* rm = get_IR_roundingmode();
21714 IRTemp frB = newTemp(Ity_I64);
21715 IRTemp frB2 = newTemp(Ity_I64);
21716 Bool redp = opc2 == 0x1B4;
21717 IRTemp sqrtHi = newTemp(Ity_F64);
21718 IRTemp sqrtLo = newTemp(Ity_F64);
21719 assign(frB, unop(Iop_V128HIto64, getVSReg( XB )));
21720 assign(frB2, unop(Iop_V128to64, getVSReg( XB )));
21722 DIP("%s v%d,v%d\n", redp ? "xvredp" : "xvrsqrtedp", XT, XB);
/* Estimate is computed as 1.0 / x (or 1.0 / sqrt(x)), per element. */
21724 if (!redp) {
21725 assign( sqrtHi,
21726 binop( Iop_SqrtF64,
21728 unop( Iop_ReinterpI64asF64, mkexpr( frB ) ) ) );
21729 assign( sqrtLo,
21730 binop( Iop_SqrtF64,
21732 unop( Iop_ReinterpI64asF64, mkexpr( frB2 ) ) ) );
21734 putVSReg( XT,
21735 binop( Iop_64HLtoV128,
21736 unop( Iop_ReinterpF64asI64,
21737 triop( Iop_DivF64,
21739 ieee_one,
21740 redp ? unop( Iop_ReinterpI64asF64,
21741 mkexpr( frB ) )
21742 : mkexpr( sqrtHi ) ) ),
21743 unop( Iop_ReinterpF64asI64,
21744 triop( Iop_DivF64,
21746 ieee_one,
21747 redp ? unop( Iop_ReinterpI64asF64,
21748 mkexpr( frB2 ) )
21749 : mkexpr( sqrtLo ) ) ) ) );
21750 break;
21753 case 0x134: // xvresp (VSX Vector Reciprocal Estimate Single-Precision)
21754 case 0x114: // xvrsqrtesp (VSX Vector Reciprocal Square Root Estimate Single-Precision)
21756 IRTemp b3, b2, b1, b0;
21757 IRTemp res0 = newTemp(Ity_I32);
21758 IRTemp res1 = newTemp(Ity_I32);
21759 IRTemp res2 = newTemp(Ity_I32);
21760 IRTemp res3 = newTemp(Ity_I32);
21761 IRTemp sqrt3 = newTemp(Ity_F64);
21762 IRTemp sqrt2 = newTemp(Ity_F64);
21763 IRTemp sqrt1 = newTemp(Ity_F64);
21764 IRTemp sqrt0 = newTemp(Ity_F64);
21765 IRExpr* rm = get_IR_roundingmode();
21766 Bool resp = opc2 == 0x134;
21768 IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
21770 b3 = b2 = b1 = b0 = IRTemp_INVALID;
21771 DIP("%s v%d,v%d\n", resp ? "xvresp" : "xvrsqrtesp", XT, XB);
/* Widen the four F32 lanes to F64, compute in double, truncate back. */
21772 breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
21774 if (!resp) {
21775 assign( sqrt3, binop( Iop_SqrtF64, rm, mkexpr( b3 ) ) );
21776 assign( sqrt2, binop( Iop_SqrtF64, rm, mkexpr( b2 ) ) );
21777 assign( sqrt1, binop( Iop_SqrtF64, rm, mkexpr( b1 ) ) );
21778 assign( sqrt0, binop( Iop_SqrtF64, rm, mkexpr( b0 ) ) );
21781 assign( res0,
21782 unop( Iop_ReinterpF32asI32,
21783 unop( Iop_TruncF64asF32,
21784 triop( Iop_DivF64r32,
21786 ieee_one,
21787 resp ? mkexpr( b0 ) : mkexpr( sqrt0 ) ) ) ) );
21788 assign( res1,
21789 unop( Iop_ReinterpF32asI32,
21790 unop( Iop_TruncF64asF32,
21791 triop( Iop_DivF64r32,
21793 ieee_one,
21794 resp ? mkexpr( b1 ) : mkexpr( sqrt1 ) ) ) ) );
21795 assign( res2,
21796 unop( Iop_ReinterpF32asI32,
21797 unop( Iop_TruncF64asF32,
21798 triop( Iop_DivF64r32,
21800 ieee_one,
21801 resp ? mkexpr( b2 ) : mkexpr( sqrt2 ) ) ) ) );
21802 assign( res3,
21803 unop( Iop_ReinterpF32asI32,
21804 unop( Iop_TruncF64asF32,
21805 triop( Iop_DivF64r32,
21807 ieee_one,
21808 resp ? mkexpr( b3 ) : mkexpr( sqrt3 ) ) ) ) );
21809 putVSReg( XT,
21810 binop( Iop_64HLtoV128,
21811 binop( Iop_32HLto64, mkexpr( res3 ), mkexpr( res2 ) ),
21812 binop( Iop_32HLto64, mkexpr( res1 ), mkexpr( res0 ) ) ) );
21813 break;
21815 case 0x300: // xvmaxsp (VSX Vector Maximum Single-Precision)
21816 case 0x320: // xvminsp (VSX Vector Minimum Single-Precision)
21818 UChar XA = ifieldRegXA( theInstr );
21819 IRTemp a3, a2, a1, a0;
21820 IRTemp b3, b2, b1, b0;
21821 IRTemp res0 = newTemp( Ity_I32 );
21822 IRTemp res1 = newTemp( Ity_I32 );
21823 IRTemp res2 = newTemp( Ity_I32 );
21824 IRTemp res3 = newTemp( Ity_I32 );
21825 IRTemp a0_I64 = newTemp( Ity_I64 );
21826 IRTemp a1_I64 = newTemp( Ity_I64 );
21827 IRTemp a2_I64 = newTemp( Ity_I64 );
21828 IRTemp a3_I64 = newTemp( Ity_I64 );
21829 IRTemp b0_I64 = newTemp( Ity_I64 );
21830 IRTemp b1_I64 = newTemp( Ity_I64 );
21831 IRTemp b2_I64 = newTemp( Ity_I64 );
21832 IRTemp b3_I64 = newTemp( Ity_I64 );
21834 Bool isMin = opc2 == 0x320 ? True : False;
21836 a3 = a2 = a1 = a0 = IRTemp_INVALID;
21837 b3 = b2 = b1 = b0 = IRTemp_INVALID;
21838 DIP("%s v%d,v%d v%d\n", isMin ? "xvminsp" : "xvmaxsp", XT, XA, XB);
/* Widen each F32 lane to F64 so get_max_min_fp (an F64 helper) can be
 * reused, then truncate each result back to F32. */
21839 breakV128to4xF64( getVSReg( XA ), &a3, &a2, &a1, &a0 );
21840 breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
21841 assign( a0_I64, unop( Iop_ReinterpF64asI64, mkexpr( a0 ) ) );
21842 assign( b0_I64, unop( Iop_ReinterpF64asI64, mkexpr( b0 ) ) );
21843 assign( a1_I64, unop( Iop_ReinterpF64asI64, mkexpr( a1 ) ) );
21844 assign( b1_I64, unop( Iop_ReinterpF64asI64, mkexpr( b1 ) ) );
21845 assign( a2_I64, unop( Iop_ReinterpF64asI64, mkexpr( a2 ) ) );
21846 assign( b2_I64, unop( Iop_ReinterpF64asI64, mkexpr( b2 ) ) );
21847 assign( a3_I64, unop( Iop_ReinterpF64asI64, mkexpr( a3 ) ) );
21848 assign( b3_I64, unop( Iop_ReinterpF64asI64, mkexpr( b3 ) ) );
21849 assign( res0,
21850 unop( Iop_ReinterpF32asI32,
21851 unop( Iop_TruncF64asF32,
21852 unop( Iop_ReinterpI64asF64,
21853 get_max_min_fp( a0_I64, b0_I64, isMin ) ) ) ) );
21854 assign( res1,
21855 unop( Iop_ReinterpF32asI32,
21856 unop( Iop_TruncF64asF32,
21857 unop( Iop_ReinterpI64asF64,
21858 get_max_min_fp( a1_I64, b1_I64, isMin ) ) ) ) );
21859 assign( res2,
21860 unop( Iop_ReinterpF32asI32,
21861 unop( Iop_TruncF64asF32,
21862 unop( Iop_ReinterpI64asF64,
21863 get_max_min_fp( a2_I64, b2_I64, isMin ) ) ) ) );
21864 assign( res3,
21865 unop( Iop_ReinterpF32asI32,
21866 unop( Iop_TruncF64asF32,
21867 unop( Iop_ReinterpI64asF64,
21868 get_max_min_fp( a3_I64, b3_I64, isMin ) ) ) ) );
21869 putVSReg( XT,
21870 binop( Iop_64HLtoV128,
21871 binop( Iop_32HLto64, mkexpr( res3 ), mkexpr( res2 ) ),
21872 binop( Iop_32HLto64, mkexpr( res1 ), mkexpr( res0 ) ) ) );
21873 break;
21875 case 0x380: // xvmaxdp (VSX Vector Maximum Double-Precision)
21876 case 0x3A0: // xvmindp (VSX Vector Minimum Double-Precision)
21878 UChar XA = ifieldRegXA( theInstr );
21879 IRTemp frA = newTemp(Ity_I64);
21880 IRTemp frB = newTemp(Ity_I64);
21881 IRTemp frA2 = newTemp(Ity_I64);
21882 IRTemp frB2 = newTemp(Ity_I64);
21883 Bool isMin = opc2 == 0x3A0 ? True : False;
21885 assign(frA, unop(Iop_V128HIto64, getVSReg( XA )));
21886 assign(frB, unop(Iop_V128HIto64, getVSReg( XB )));
21887 assign(frA2, unop(Iop_V128to64, getVSReg( XA )));
21888 assign(frB2, unop(Iop_V128to64, getVSReg( XB )));
21889 DIP("%s v%d,v%d v%d\n", isMin ? "xvmindp" : "xvmaxdp", XT, XA, XB);
21890 putVSReg( XT, binop( Iop_64HLtoV128, get_max_min_fp(frA, frB, isMin), get_max_min_fp(frA2, frB2, isMin) ) );
21892 break;
21894 case 0x3c0: // xvcpsgndp (VSX Vector Copy Sign Double-Precision)
21896 UChar XA = ifieldRegXA( theInstr );
21897 IRTemp frA = newTemp(Ity_I64);
21898 IRTemp frB = newTemp(Ity_I64);
21899 IRTemp frA2 = newTemp(Ity_I64);
21900 IRTemp frB2 = newTemp(Ity_I64);
21901 assign(frA, unop(Iop_V128HIto64, getVSReg( XA )));
21902 assign(frB, unop(Iop_V128HIto64, getVSReg( XB )));
21903 assign(frA2, unop(Iop_V128to64, getVSReg( XA )));
21904 assign(frB2, unop(Iop_V128to64, getVSReg( XB )));
21906 DIP("xvcpsgndp v%d,v%d,v%d\n", XT, XA, XB);
/* Per element: result = sign bit of frA | magnitude bits of frB. */
21907 putVSReg( XT,
21908 binop( Iop_64HLtoV128,
21909 binop( Iop_Or64,
21910 binop( Iop_And64,
21911 mkexpr( frA ),
21912 mkU64( SIGN_BIT ) ),
21913 binop( Iop_And64,
21914 mkexpr( frB ),
21915 mkU64( SIGN_MASK ) ) ),
21916 binop( Iop_Or64,
21917 binop( Iop_And64,
21918 mkexpr( frA2 ),
21919 mkU64( SIGN_BIT ) ),
21920 binop( Iop_And64,
21921 mkexpr( frB2 ),
21922 mkU64( SIGN_MASK ) ) ) ) );
21923 break;
21925 case 0x340: // xvcpsgnsp
21927 UChar XA = ifieldRegXA( theInstr );
21928 IRTemp a3_I64, a2_I64, a1_I64, a0_I64;
21929 IRTemp b3_I64, b2_I64, b1_I64, b0_I64;
21930 IRTemp resHi = newTemp(Ity_I64);
21931 IRTemp resLo = newTemp(Ity_I64);
21933 a3_I64 = a2_I64 = a1_I64 = a0_I64 = IRTemp_INVALID;
21934 b3_I64 = b2_I64 = b1_I64 = b0_I64 = IRTemp_INVALID;
21935 DIP("xvcpsgnsp v%d,v%d v%d\n",XT, XA, XB);
21936 breakV128to4x64U( getVSReg( XA ), &a3_I64, &a2_I64, &a1_I64, &a0_I64 );
21937 breakV128to4x64U( getVSReg( XB ), &b3_I64, &b2_I64, &b1_I64, &b0_I64 );
/* Per 32-bit lane: result = sign bit of A-lane | magnitude of B-lane. */
21939 assign( resHi,
21940 binop( Iop_32HLto64,
21941 binop( Iop_Or32,
21942 binop( Iop_And32,
21943 unop(Iop_64to32, mkexpr( a3_I64 ) ),
21944 mkU32( SIGN_BIT32 ) ),
21945 binop( Iop_And32,
21946 unop(Iop_64to32, mkexpr( b3_I64 ) ),
21947 mkU32( SIGN_MASK32) ) ),
21949 binop( Iop_Or32,
21950 binop( Iop_And32,
21951 unop(Iop_64to32, mkexpr( a2_I64 ) ),
21952 mkU32( SIGN_BIT32 ) ),
21953 binop( Iop_And32,
21954 unop(Iop_64to32, mkexpr( b2_I64 ) ),
21955 mkU32( SIGN_MASK32 ) ) ) ) );
21956 assign( resLo,
21957 binop( Iop_32HLto64,
21958 binop( Iop_Or32,
21959 binop( Iop_And32,
21960 unop(Iop_64to32, mkexpr( a1_I64 ) ),
21961 mkU32( SIGN_BIT32 ) ),
21962 binop( Iop_And32,
21963 unop(Iop_64to32, mkexpr( b1_I64 ) ),
21964 mkU32( SIGN_MASK32 ) ) ),
21966 binop( Iop_Or32,
21967 binop( Iop_And32,
21968 unop(Iop_64to32, mkexpr( a0_I64 ) ),
21969 mkU32( SIGN_BIT32 ) ),
21970 binop( Iop_And32,
21971 unop(Iop_64to32, mkexpr( b0_I64 ) ),
21972 mkU32( SIGN_MASK32 ) ) ) ) );
21973 putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( resHi ), mkexpr( resLo ) ) );
21974 break;
21976 case 0x3B2: // xvabsdp (VSX Vector Absolute Value Double-Precision)
21977 case 0x3D2: // xvnabsdp (VSX Vector Negative Absolute Value Double-Precision)
21979 IRTemp frB = newTemp(Ity_F64);
21980 IRTemp frB2 = newTemp(Ity_F64);
21981 IRTemp abs_resultHi = newTemp(Ity_F64);
21982 IRTemp abs_resultLo = newTemp(Ity_F64);
21983 Bool make_negative = (opc2 == 0x3D2) ? True : False;
21984 assign(frB, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
21985 assign(frB2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg(XB))));
21987 DIP("xv%sabsdp v%d,v%d\n", make_negative ? "n" : "", XT, XB);
21988 if (make_negative) {
21989 assign(abs_resultHi, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr( frB ) ) ) );
21990 assign(abs_resultLo, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr( frB2 ) ) ) );
21992 } else {
21993 assign(abs_resultHi, unop( Iop_AbsF64, mkexpr( frB ) ) );
21994 assign(abs_resultLo, unop( Iop_AbsF64, mkexpr( frB2 ) ) );
21996 putVSReg( XT, binop( Iop_64HLtoV128,
21997 unop( Iop_ReinterpF64asI64, mkexpr( abs_resultHi ) ),
21998 unop( Iop_ReinterpF64asI64, mkexpr( abs_resultLo ) ) ) );
21999 break;
22001 case 0x332: // xvabssp (VSX Vector Absolute Value Single-Precision)
22002 case 0x352: // xvnabssp (VSX Vector Negative Absolute Value Single-Precision)
22005 * The Iop_AbsF32 IRop is not implemented for ppc64 since, up until introduction
22006 * of xvabssp, there has not been an abs(sp) type of instruction. But since emulation
22007 * of this function is so easy using shifts, I choose to emulate this instruction that
22008 * way versus a native instruction method of implementation.
22010 Bool make_negative = (opc2 == 0x352) ? True : False;
22011 IRTemp shiftVector = newTemp(Ity_V128);
22012 IRTemp absVal_vector = newTemp(Ity_V128);
/* Shift each lane left then right by 1 bit to clear the sign bits. */
22013 assign( shiftVector,
22014 binop( Iop_64HLtoV128,
22015 binop( Iop_32HLto64, mkU32( 1 ), mkU32( 1 ) ),
22016 binop( Iop_32HLto64, mkU32( 1 ), mkU32( 1 ) ) ) );
22017 assign( absVal_vector,
22018 binop( Iop_Shr32x4,
22019 binop( Iop_Shl32x4,
22020 getVSReg( XB ),
22021 mkexpr( shiftVector ) ),
22022 mkexpr( shiftVector ) ) );
22023 if (make_negative) {
22024 IRTemp signBit_vector = newTemp(Ity_V128);
22025 assign( signBit_vector,
22026 binop( Iop_64HLtoV128,
22027 binop( Iop_32HLto64,
22028 mkU32( 0x80000000 ),
22029 mkU32( 0x80000000 ) ),
22030 binop( Iop_32HLto64,
22031 mkU32( 0x80000000 ),
22032 mkU32( 0x80000000 ) ) ) );
22033 putVSReg( XT,
22034 binop( Iop_OrV128,
22035 mkexpr( absVal_vector ),
22036 mkexpr( signBit_vector ) ) );
22037 } else {
22038 putVSReg( XT, mkexpr( absVal_vector ) );
22040 break;
22042 case 0x372: // xvnegsp (VSX Vector Negate Single-Precision)
22044 IRTemp B0 = newTemp(Ity_I32);
22045 IRTemp B1 = newTemp(Ity_I32);
22046 IRTemp B2 = newTemp(Ity_I32);
22047 IRTemp B3 = newTemp(Ity_I32);
22049 DIP("xvnegsp v%d,v%d\n", XT, XB);
22051 /* Don't support NegF32, so just XOR the sign bit in the int value */
22052 assign(B0, unop( Iop_64HIto32,
22053 unop( Iop_V128HIto64, getVSReg( XB ) ) ) );
22054 assign(B1, unop( Iop_64to32,
22055 unop( Iop_V128HIto64, getVSReg( XB ) ) ) );
22056 assign(B2, unop( Iop_64HIto32,
22057 unop( Iop_V128to64, getVSReg( XB ) ) ) );
22058 assign(B3, unop( Iop_64to32,
22059 unop( Iop_V128to64, getVSReg( XB ) ) ) );
22061 putVSReg( XT,
22062 binop( Iop_64HLtoV128,
22063 binop( Iop_32HLto64,
22064 binop( Iop_Xor32, mkexpr( B0 ), mkU32( 0x80000000 ) ),
22065 binop( Iop_Xor32, mkexpr( B1 ), mkU32( 0x80000000 ) ) ),
22066 binop( Iop_32HLto64,
22067 binop( Iop_Xor32, mkexpr( B2 ), mkU32( 0x80000000 ) ),
22068 binop( Iop_Xor32, mkexpr( B3 ), mkU32( 0x80000000 ) ) ) ) );
22069 break;
22071 case 0x3F2: // xvnegdp (VSX Vector Negate Double-Precision)
22073 IRTemp frB = newTemp(Ity_F64);
22074 IRTemp frB2 = newTemp(Ity_F64);
22075 assign(frB, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
22076 assign(frB2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg(XB))));
22077 DIP("xvnegdp v%d,v%d\n", XT, XB);
22078 putVSReg( XT,
22079 binop( Iop_64HLtoV128,
22080 unop( Iop_ReinterpF64asI64,
22081 unop( Iop_NegF64, mkexpr( frB ) ) ),
22082 unop( Iop_ReinterpF64asI64,
22083 unop( Iop_NegF64, mkexpr( frB2 ) ) ) ) );
22084 break;
22086 case 0x192: // xvrdpi (VSX Vector Round to Double-Precision Integer using round toward Nearest Away)
22087 case 0x1D6: // xvrdpic (VSX Vector Round to Double-Precision Integer using Current rounding mode)
22088 case 0x1F2: // xvrdpim (VSX Vector Round to Double-Precision Integer using round toward -Infinity)
22089 case 0x1D2: // xvrdpip (VSX Vector Round to Double-Precision Integer using round toward +Infinity)
22090 case 0x1B2: // xvrdpiz (VSX Vector Round to Double-Precision Integer using round toward Zero)
22092 IRTemp frBHi_I64 = newTemp(Ity_I64);
22093 IRTemp frBLo_I64 = newTemp(Ity_I64);
22094 IRExpr * frD_fp_roundHi = NULL;
22095 IRExpr * frD_fp_roundLo = NULL;
22097 assign( frBHi_I64, unop( Iop_V128HIto64, getVSReg( XB ) ) );
22098 frD_fp_roundHi = _do_vsx_fp_roundToInt(frBHi_I64, opc2);
22099 assign( frBLo_I64, unop( Iop_V128to64, getVSReg( XB ) ) );
22100 frD_fp_roundLo = _do_vsx_fp_roundToInt(frBLo_I64, opc2);
22102 DIP("xvrdpi%s v%d,v%d\n", _get_vsx_rdpi_suffix(opc2), XT, XB);
22103 putVSReg( XT,
22104 binop( Iop_64HLtoV128,
22105 unop( Iop_ReinterpF64asI64, frD_fp_roundHi ),
22106 unop( Iop_ReinterpF64asI64, frD_fp_roundLo ) ) );
22107 break;
22109 case 0x112: // xvrspi (VSX Vector Round to Single-Precision Integer using round toward Nearest Away)
22110 case 0x156: // xvrspic (VSX Vector Round to Single-Precision Integer using Current rounding mode)
22111 case 0x172: // xvrspim (VSX Vector Round to Single-Precision Integer using round toward -Infinity)
22112 case 0x152: // xvrspip (VSX Vector Round to Single-Precision Integer using round toward +Infinity)
22113 case 0x132: // xvrspiz (VSX Vector Round to Single-Precision Integer using round toward Zero)
22115 const HChar * insn_suffix = NULL;
22116 IROp op;
22117 if (opc2 != 0x156) {
22118 // Use pre-defined IRop's for vrfi{m|n|p|z}
22119 switch (opc2) {
22120 case 0x112:
22121 insn_suffix = "";
22122 op = Iop_RoundF32x4_RN;
22123 break;
22124 case 0x172:
22125 insn_suffix = "m";
22126 op = Iop_RoundF32x4_RM;
22127 break;
22128 case 0x152:
22129 insn_suffix = "p";
22130 op = Iop_RoundF32x4_RP;
22131 break;
22132 case 0x132:
22133 insn_suffix = "z";
22134 op = Iop_RoundF32x4_RZ;
22135 break;
22137 default:
22138 vex_printf("Unrecognized opcode %x\n", opc2);
22139 vpanic("dis_vxv_misc(ppc)(vrspi<x>)(opc2)\n");
22141 DIP("xvrspi%s v%d,v%d\n", insn_suffix, XT, XB);
22142 putVSReg( XT, unop( op, getVSReg(XB) ) );
22143 } else {
22144 // Handle xvrspic. Unfortunately there is no corresponding "vfric" instruction.
22145 IRExpr * frD_fp_roundb3, * frD_fp_roundb2, * frD_fp_roundb1, * frD_fp_roundb0;
22146 IRTemp b3_F64, b2_F64, b1_F64, b0_F64;
22147 IRTemp b3_I64 = newTemp(Ity_I64);
22148 IRTemp b2_I64 = newTemp(Ity_I64);
22149 IRTemp b1_I64 = newTemp(Ity_I64);
22150 IRTemp b0_I64 = newTemp(Ity_I64);
22152 b3_F64 = b2_F64 = b1_F64 = b0_F64 = IRTemp_INVALID;
22153 frD_fp_roundb3 = frD_fp_roundb2 = frD_fp_roundb1 = frD_fp_roundb0 = NULL;
/* Widen each F32 lane to F64, round with the double helper, truncate. */
22154 breakV128to4xF64( getVSReg(XB), &b3_F64, &b2_F64, &b1_F64, &b0_F64);
22155 assign(b3_I64, unop(Iop_ReinterpF64asI64, mkexpr(b3_F64)));
22156 assign(b2_I64, unop(Iop_ReinterpF64asI64, mkexpr(b2_F64)));
22157 assign(b1_I64, unop(Iop_ReinterpF64asI64, mkexpr(b1_F64)));
22158 assign(b0_I64, unop(Iop_ReinterpF64asI64, mkexpr(b0_F64)));
22159 frD_fp_roundb3 = unop(Iop_TruncF64asF32,
22160 _do_vsx_fp_roundToInt(b3_I64, opc2));
22161 frD_fp_roundb2 = unop(Iop_TruncF64asF32,
22162 _do_vsx_fp_roundToInt(b2_I64, opc2));
22163 frD_fp_roundb1 = unop(Iop_TruncF64asF32,
22164 _do_vsx_fp_roundToInt(b1_I64, opc2));
22165 frD_fp_roundb0 = unop(Iop_TruncF64asF32,
22166 _do_vsx_fp_roundToInt(b0_I64, opc2));
22167 DIP("xvrspic v%d,v%d\n", XT, XB);
22168 putVSReg( XT,
22169 binop( Iop_64HLtoV128,
22170 binop( Iop_32HLto64,
22171 unop( Iop_ReinterpF32asI32, frD_fp_roundb3 ),
22172 unop( Iop_ReinterpF32asI32, frD_fp_roundb2 ) ),
22173 binop( Iop_32HLto64,
22174 unop( Iop_ReinterpF32asI32, frD_fp_roundb1 ),
22175 unop( Iop_ReinterpF32asI32, frD_fp_roundb0 ) ) ) );
22177 break;
22180 default:
22181 vex_printf( "dis_vxv_misc(ppc)(opc2)\n" );
22182 return False;
22184 return True;
22189 * VSX Scalar Floating Point Arithmetic Instructions
/* Decode and translate VSX scalar floating-point arithmetic instructions
 * (add/sub/mul/div/sqrt, fused multiply-add/subtract and their negated
 * forms, and the software-divide/sqrt test instructions). Returns True if
 * the instruction was recognized and translated, False otherwise. */
22191 static Bool
22192 dis_vxs_arith ( UInt prefix, UInt theInstr, UInt opc2 )
22194 /* XX3-Form */
22195 UChar opc1 = ifieldOPC( theInstr );
22196 UChar XT = ifieldRegXT( theInstr );
22197 UChar XA = ifieldRegXA( theInstr );
22198 UChar XB = ifieldRegXB( theInstr );
22199 IRExpr* rm = get_IR_roundingmode();
22200 IRTemp frA = newTemp(Ity_F64);
22201 IRTemp frB = newTemp(Ity_F64);
22203 /* There is no prefixed version of these instructions. */
22204 PREFIX_CHECK
22206 if (opc1 != 0x3C) {
22207 vex_printf( "dis_vxs_arith(ppc)(instr)\n" );
22208 return False;
22211 assign(frA, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XA ))));
22212 assign(frB, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
22214 /* For all the VSX scalar arithmetic instructions, the contents of doubleword element 1
22215 * of VSX[XT] are undefined after the operation; therefore, we can simply set the
22216 * element to zero where it makes sense to do so.
22218 switch (opc2) {
22219 case 0x000: // xsaddsp (VSX Scalar Add Single-Precision)
22220 DIP("xsaddsp v%d,v%d,v%d\n", XT, XA, XB);
22221 putVSReg( XT, binop( Iop_64HLtoV128,
22222 unop( Iop_ReinterpF64asI64,
22223 binop( Iop_RoundF64toF32, rm,
22224 triop( Iop_AddF64, rm,
22225 mkexpr( frA ),
22226 mkexpr( frB ) ) ) ),
22227 mkU64( 0 ) ) );
22228 break;
22229 case 0x020: // xssubsp (VSX Scalar Subtract Single-Precision)
22230 DIP("xssubsp v%d,v%d,v%d\n", XT, XA, XB);
22231 putVSReg( XT, binop( Iop_64HLtoV128,
22232 unop( Iop_ReinterpF64asI64,
22233 binop( Iop_RoundF64toF32, rm,
22234 triop( Iop_SubF64, rm,
22235 mkexpr( frA ),
22236 mkexpr( frB ) ) ) ),
22237 mkU64( 0 ) ) );
22238 break;
22239 case 0x080: // xsadddp (VSX scalar add double-precision)
22240 DIP("xsadddp v%d,v%d,v%d\n", XT, XA, XB);
22241 putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
22242 triop( Iop_AddF64, rm,
22243 mkexpr( frA ),
22244 mkexpr( frB ) ) ),
22245 mkU64( 0 ) ) );
22246 break;
22247 case 0x060: // xsdivsp (VSX scalar divide single-precision)
22248 DIP("xsdivsp v%d,v%d,v%d\n", XT, XA, XB);
22249 putVSReg( XT, binop( Iop_64HLtoV128,
22250 unop( Iop_ReinterpF64asI64,
22251 binop( Iop_RoundF64toF32, rm,
22252 triop( Iop_DivF64, rm,
22253 mkexpr( frA ),
22254 mkexpr( frB ) ) ) ),
22255 mkU64( 0 ) ) );
22256 break;
22257 case 0x0E0: // xsdivdp (VSX scalar divide double-precision)
22258 DIP("xsdivdp v%d,v%d,v%d\n", XT, XA, XB);
22259 putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
22260 triop( Iop_DivF64, rm,
22261 mkexpr( frA ),
22262 mkexpr( frB ) ) ),
22263 mkU64( 0 ) ) );
22264 break;
22265 case 0x004: case 0x024: /* xsmaddasp, xsmaddmsp (VSX scalar multiply-add
22266 * single-precision)
22269 IRTemp frT = newTemp(Ity_F64);
22270 Bool mdp = opc2 == 0x024;
22271 DIP("xsmadd%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
22272 assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
22273 getVSReg( XT ) ) ) );
/* For the "m" form the operand roles of frT and frB are swapped. */
22274 putVSReg( XT,
22275 binop( Iop_64HLtoV128,
22276 unop( Iop_ReinterpF64asI64,
22277 binop( Iop_RoundF64toF32, rm,
22278 qop( Iop_MAddF64, rm,
22279 mkexpr( frA ),
22280 mkexpr( mdp ? frT : frB ),
22281 mkexpr( mdp ? frB : frT ) ) ) ),
22282 mkU64( 0 ) ) );
22283 break;
22285 case 0x084: case 0x0A4: // xsmaddadp, xsmaddmdp (VSX scalar multiply-add double-precision)
22287 IRTemp frT = newTemp(Ity_F64);
22288 Bool mdp = opc2 == 0x0A4;
22289 DIP("xsmadd%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
22290 assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
22291 getVSReg( XT ) ) ) );
22292 putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
22293 qop( Iop_MAddF64, rm,
22294 mkexpr( frA ),
22295 mkexpr( mdp ? frT : frB ),
22296 mkexpr( mdp ? frB : frT ) ) ),
22297 mkU64( 0 ) ) );
22298 break;
22300 case 0x044: case 0x064: /* xsmsubasp, xsmsubmsp (VSX scalar
22301 * multiply-subtract single-precision)
22304 IRTemp frT = newTemp(Ity_F64);
22305 Bool mdp = opc2 == 0x064;
22306 DIP("xsmsub%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
22307 assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
22308 getVSReg( XT ) ) ) );
22309 putVSReg( XT,
22310 binop( Iop_64HLtoV128,
22311 unop( Iop_ReinterpF64asI64,
22312 binop( Iop_RoundF64toF32, rm,
22313 qop( Iop_MSubF64, rm,
22314 mkexpr( frA ),
22315 mkexpr( mdp ? frT : frB ),
22316 mkexpr( mdp ? frB : frT ) ) ) ),
22317 mkU64( 0 ) ) );
22318 break;
22320 case 0x0C4: case 0x0E4: // xsmsubadp, xsmsubmdp (VSX scalar multiply-subtract double-precision)
22322 IRTemp frT = newTemp(Ity_F64);
22323 Bool mdp = opc2 == 0x0E4;
22324 DIP("xsmsub%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
22325 assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
22326 getVSReg( XT ) ) ) );
22327 putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
22328 qop( Iop_MSubF64, rm,
22329 mkexpr( frA ),
22330 mkexpr( mdp ? frT : frB ),
22331 mkexpr( mdp ? frB : frT ) ) ),
22332 mkU64( 0 ) ) );
22333 break;
22335 case 0x284: case 0x2A4: // xsnmaddadp, xsnmaddmdp (VSX scalar negative multiply-add double-precision)
22337 /* TODO: mpj -- Naturally, I expected to be able to leverage the implementation
22338 * of fnmadd and use pretty much the same code. However, that code has a bug in the
22339 * way it blindly negates the signbit, even if the floating point result is a NaN.
22340 * So, the TODO is to fix fnmadd (which I'll do in a different patch).
22341 * FIXED 7/1/2012: carll fnmadd and fnmsubs fixed to not negate sign
22342 * bit for NaN result.
22344 Bool mdp = opc2 == 0x2A4;
22345 IRTemp frT = newTemp(Ity_F64);
22346 IRTemp maddResult = newTemp(Ity_I64);
22348 DIP("xsnmadd%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
22349 assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
22350 getVSReg( XT ) ) ) );
22351 assign( maddResult, unop( Iop_ReinterpF64asI64, qop( Iop_MAddF64, rm,
22352 mkexpr( frA ),
22353 mkexpr( mdp ? frT : frB ),
22354 mkexpr( mdp ? frB : frT ) ) ) );
22356 putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( getNegatedResult(maddResult) ),
22357 mkU64( 0 ) ) );
22358 break;
22360 case 0x204: case 0x224: /* xsnmaddasp, xsnmaddmsp (VSX scalar
22361 * negative multiply-add single-precision)
22364 Bool mdp = opc2 == 0x224;
22365 IRTemp frT = newTemp(Ity_F64);
22366 IRTemp maddResult = newTemp(Ity_I64);
22368 DIP("xsnmadd%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
22369 assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
22370 getVSReg( XT ) ) ) );
22371 assign( maddResult,
22372 unop( Iop_ReinterpF64asI64,
22373 binop( Iop_RoundF64toF32, rm,
22374 qop( Iop_MAddF64, rm,
22375 mkexpr( frA ),
22376 mkexpr( mdp ? frT : frB ),
22377 mkexpr( mdp ? frB : frT ) ) ) ) );
22379 putVSReg( XT, binop( Iop_64HLtoV128,
22380 mkexpr( getNegatedResult(maddResult) ),
22381 mkU64( 0 ) ) );
22382 break;
22384 case 0x244: case 0x264: /* xsnmsubasp, xsnmsubmsp (VSX Scalar Negative
22385 * Multiply-Subtract Single-Precision)
22388 IRTemp frT = newTemp(Ity_F64);
22389 Bool mdp = opc2 == 0x264;
22390 IRTemp msubResult = newTemp(Ity_I64);
22392 DIP("xsnmsub%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
22393 assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
22394 getVSReg( XT ) ) ) );
22395 assign( msubResult,
22396 unop( Iop_ReinterpF64asI64,
22397 binop( Iop_RoundF64toF32, rm,
22398 qop( Iop_MSubF64, rm,
22399 mkexpr( frA ),
22400 mkexpr( mdp ? frT : frB ),
22401 mkexpr( mdp ? frB : frT ) ) ) ) );
22403 putVSReg( XT, binop( Iop_64HLtoV128,
22404 mkexpr( getNegatedResult(msubResult) ),
22405 mkU64( 0 ) ) );
22407 break;
22410 case 0x2C4: case 0x2E4: // xsnmsubadp, xsnmsubmdp (VSX Scalar Negative Multiply-Subtract Double-Precision)
22412 IRTemp frT = newTemp(Ity_F64);
22413 Bool mdp = opc2 == 0x2E4;
22414 IRTemp msubResult = newTemp(Ity_I64);
22416 DIP("xsnmsub%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
22417 assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
22418 getVSReg( XT ) ) ) );
22419 assign(msubResult, unop( Iop_ReinterpF64asI64,
22420 qop( Iop_MSubF64,
22422 mkexpr( frA ),
22423 mkexpr( mdp ? frT : frB ),
22424 mkexpr( mdp ? frB : frT ) ) ));
22426 putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( getNegatedResult(msubResult) ), mkU64( 0 ) ) );
22428 break;
22431 case 0x040: // xsmulsp (VSX Scalar Multiply Single-Precision)
22432 DIP("xsmulsp v%d,v%d,v%d\n", XT, XA, XB);
22433 putVSReg( XT, binop( Iop_64HLtoV128,
22434 unop( Iop_ReinterpF64asI64,
22435 binop( Iop_RoundF64toF32, rm,
22436 triop( Iop_MulF64, rm,
22437 mkexpr( frA ),
22438 mkexpr( frB ) ) ) ),
22439 mkU64( 0 ) ) );
22440 break;
22442 case 0x0C0: // xsmuldp (VSX Scalar Multiply Double-Precision)
22443 DIP("xsmuldp v%d,v%d,v%d\n", XT, XA, XB);
22444 putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
22445 triop( Iop_MulF64, rm,
22446 mkexpr( frA ),
22447 mkexpr( frB ) ) ),
22448 mkU64( 0 ) ) );
22449 break;
22450 case 0x0A0: // xssubdp (VSX Scalar Subtract Double-Precision)
22451 DIP("xssubdp v%d,v%d,v%d\n", XT, XA, XB);
22452 putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
22453 triop( Iop_SubF64, rm,
22454 mkexpr( frA ),
22455 mkexpr( frB ) ) ),
22456 mkU64( 0 ) ) );
22457 break;
22459 case 0x016: // xssqrtsp (VSX Scalar Square Root Single-Precision)
22460 DIP("xssqrtsp v%d,v%d\n", XT, XB);
22461 putVSReg( XT,
22462 binop( Iop_64HLtoV128,
22463 unop( Iop_ReinterpF64asI64,
22464 binop( Iop_RoundF64toF32, rm,
22465 binop( Iop_SqrtF64, rm,
22466 mkexpr( frB ) ) ) ),
22467 mkU64( 0 ) ) );
22468 break;
22470 case 0x096: // xssqrtdp (VSX Scalar Square Root Double-Precision)
22471 DIP("xssqrtdp v%d,v%d\n", XT, XB);
22472 putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
22473 binop( Iop_SqrtF64, rm,
22474 mkexpr( frB ) ) ),
22475 mkU64( 0 ) ) );
22476 break;
22478 case 0x0F4: // xstdivdp (VSX Scalar Test for software Divide Double-Precision)
22480 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
22481 IRTemp frA_I64 = newTemp(Ity_I64);
22482 IRTemp frB_I64 = newTemp(Ity_I64);
22483 DIP("xstdivdp crf%d,v%d,v%d\n", crfD, XA, XB);
22484 assign( frA_I64, unop( Iop_ReinterpF64asI64, mkexpr( frA ) ) );
22485 assign( frB_I64, unop( Iop_ReinterpF64asI64, mkexpr( frB ) ) );
22486 putGST_field( PPC_GST_CR, do_fp_tdiv(frA_I64, frB_I64), crfD );
22487 break;
22489 case 0x0D4: // xstsqrtdp (VSX Vector Test for software Square Root Double-Precision)
22491 IRTemp frB_I64 = newTemp(Ity_I64);
22492 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
22493 IRTemp flags = newTemp(Ity_I32);
22494 IRTemp fe_flag, fg_flag;
22495 fe_flag = fg_flag = IRTemp_INVALID;
/* NOTE(review): DIP prints XT here although the result is written to crfD
 * (compare xstdivdp above, which prints crfD) -- confirm intended. */
22496 DIP("xstsqrtdp v%d,v%d\n", XT, XB);
22497 assign( frB_I64, unop(Iop_V128HIto64, getVSReg( XB )) );
22498 do_fp_tsqrt(frB_I64, False /*not single precision*/, &fe_flag, &fg_flag);
22499 /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
22500 * where fl_flag == 1 on ppc64.
22502 assign( flags,
22503 binop( Iop_Or32,
22504 binop( Iop_Or32, mkU32( 8 ), // fl_flag
22505 binop( Iop_Shl32, mkexpr(fg_flag), mkU8( 2 ) ) ),
22506 binop( Iop_Shl32, mkexpr(fe_flag), mkU8( 1 ) ) ) );
22507 putGST_field( PPC_GST_CR, mkexpr(flags), crfD );
22508 break;
22511 default:
22512 vex_printf( "dis_vxs_arith(ppc)(opc2)\n" );
22513 return False;
22516 return True;
22521 * VSX Floating Point Compare Instructions
22523 static Bool
22524 dis_vx_cmp( UInt prefix, UInt theInstr, UInt opc2 )
22526 /* XX3-Form and XX2-Form */
22527 UChar opc1 = ifieldOPC( theInstr );
22528 UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
22529 IRTemp ccPPC32;
22530 UChar XA = ifieldRegXA ( theInstr );
22531 UChar XB = ifieldRegXB ( theInstr );
22532 IRTemp frA = newTemp(Ity_F64);
22533 IRTemp frB = newTemp(Ity_F64);
22535 /* There is no prefixed version of these instructions. */
22536 PREFIX_CHECK
22538 if (opc1 != 0x3C) {
22539 vex_printf( "dis_vx_cmp(ppc)(instr)\n" );
22540 return False;
22543 assign(frA, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XA ))));
22544 assign(frB, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
22545 switch (opc2) {
22546 case 0x08C: case 0x0AC: // xscmpudp, xscmpodp
22547 /* Note: Differences between xscmpudp and xscmpodp are only in
22548 * exception flag settings, which aren't supported anyway. */
22549 DIP("xscmp%sdp crf%d,fr%u,fr%u\n", opc2 == 0x08c ? "u" : "o",
22550 crfD, XA, XB);
22551 ccPPC32 = get_fp_cmp_CR_val( binop(Iop_CmpF64, mkexpr(frA), mkexpr(frB)));
22552 putGST_field( PPC_GST_CR, mkexpr(ccPPC32), crfD );
22553 putFPCC( mkexpr( ccPPC32 ) );
22554 break;
22556 default:
22557 vex_printf( "dis_vx_cmp(ppc)(opc2)\n" );
22558 return False;
22560 return True;
22563 static void
22564 do_vvec_fp_cmp ( IRTemp vA, IRTemp vB, UChar XT, UChar flag_rC,
22565 ppc_cmp_t cmp_type )
22567 IRTemp frA_hi = newTemp(Ity_F64);
22568 IRTemp frB_hi = newTemp(Ity_F64);
22569 IRTemp frA_lo = newTemp(Ity_F64);
22570 IRTemp frB_lo = newTemp(Ity_F64);
22571 IRTemp ccPPC32 = newTemp(Ity_I32);
22572 IRTemp ccIR_hi;
22573 IRTemp ccIR_lo;
22575 IRTemp hiResult = newTemp(Ity_I64);
22576 IRTemp loResult = newTemp(Ity_I64);
22577 IRTemp hiEQlo = newTemp(Ity_I1);
22578 IRTemp all_elem_true = newTemp(Ity_I32);
22579 IRTemp all_elem_false = newTemp(Ity_I32);
22581 assign(frA_hi, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, mkexpr( vA ))));
22582 assign(frB_hi, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, mkexpr( vB ))));
22583 assign(frA_lo, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, mkexpr( vA ))));
22584 assign(frB_lo, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, mkexpr( vB ))));
22586 ccIR_hi = get_fp_cmp_CR_val( binop( Iop_CmpF64,
22587 mkexpr( frA_hi ),
22588 mkexpr( frB_hi ) ) );
22589 ccIR_lo = get_fp_cmp_CR_val( binop( Iop_CmpF64,
22590 mkexpr( frA_lo ),
22591 mkexpr( frB_lo ) ) );
22593 if (cmp_type != PPC_CMP_GE) {
22594 assign( hiResult,
22595 unop( Iop_1Sto64,
22596 binop( Iop_CmpEQ32, mkexpr( ccIR_hi ), mkU32( cmp_type ) ) ) );
22597 assign( loResult,
22598 unop( Iop_1Sto64,
22599 binop( Iop_CmpEQ32, mkexpr( ccIR_lo ), mkU32( cmp_type ) ) ) );
22600 } else {
22601 // For PPC_CMP_GE, one element compare may return "4" (for "greater than") and
22602 // the other element compare may return "2" (for "equal to").
22603 IRTemp lo_GE = newTemp(Ity_I1);
22604 IRTemp hi_GE = newTemp(Ity_I1);
22606 assign(hi_GE, mkOR1( binop( Iop_CmpEQ32, mkexpr( ccIR_hi ), mkU32( 2 ) ),
22607 binop( Iop_CmpEQ32, mkexpr( ccIR_hi ), mkU32( 4 ) ) ) );
22608 assign( hiResult,unop( Iop_1Sto64, mkexpr( hi_GE ) ) );
22610 assign(lo_GE, mkOR1( binop( Iop_CmpEQ32, mkexpr( ccIR_lo ), mkU32( 2 ) ),
22611 binop( Iop_CmpEQ32, mkexpr( ccIR_lo ), mkU32( 4 ) ) ) );
22612 assign( loResult, unop( Iop_1Sto64, mkexpr( lo_GE ) ) );
22615 // The [hi/lo]Result will be all 1's or all 0's. We just look at the lower word.
22616 assign( hiEQlo,
22617 binop( Iop_CmpEQ32,
22618 unop( Iop_64to32, mkexpr( hiResult ) ),
22619 unop( Iop_64to32, mkexpr( loResult ) ) ) );
22620 putVSReg( XT,
22621 binop( Iop_64HLtoV128, mkexpr( hiResult ), mkexpr( loResult ) ) );
22623 assign( all_elem_true,
22624 unop( Iop_1Uto32,
22625 mkAND1( mkexpr( hiEQlo ),
22626 binop( Iop_CmpEQ32,
22627 mkU32( 0xffffffff ),
22628 unop( Iop_64to32,
22629 mkexpr( hiResult ) ) ) ) ) );
22631 assign( all_elem_false,
22632 unop( Iop_1Uto32,
22633 mkAND1( mkexpr( hiEQlo ),
22634 binop( Iop_CmpEQ32,
22635 mkU32( 0 ),
22636 unop( Iop_64to32,
22637 mkexpr( hiResult ) ) ) ) ) );
22638 assign( ccPPC32,
22639 binop( Iop_Or32,
22640 binop( Iop_Shl32, mkexpr( all_elem_false ), mkU8( 1 ) ),
22641 binop( Iop_Shl32, mkexpr( all_elem_true ), mkU8( 3 ) ) ) );
22643 if (flag_rC) {
22644 putGST_field( PPC_GST_CR, mkexpr(ccPPC32), 6 );
22649 * VSX Vector Compare Instructions
22651 static Bool
22652 dis_vvec_cmp( UInt prefix, UInt theInstr, UInt opc2 )
22654 /* XX3-Form */
22655 UChar opc1 = ifieldOPC( theInstr );
22656 UChar XT = ifieldRegXT ( theInstr );
22657 UChar XA = ifieldRegXA ( theInstr );
22658 UChar XB = ifieldRegXB ( theInstr );
22659 UChar flag_rC = ifieldBIT10(theInstr);
22660 IRTemp vA = newTemp( Ity_V128 );
22661 IRTemp vB = newTemp( Ity_V128 );
22663 /* There is no prefixed version of these instructions. */
22664 PREFIX_CHECK
22666 if (opc1 != 0x3C) {
22667 vex_printf( "dis_vvec_cmp(ppc)(instr)\n" );
22668 return False;
22671 assign( vA, getVSReg( XA ) );
22672 assign( vB, getVSReg( XB ) );
22674 switch (opc2) {
22675 case 0x18C: // xvcmpeqdp[.] (VSX Vector Compare Equal To Double-Precision [ & Record ])
22677 DIP("xvcmpeqdp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
22678 XT, XA, XB);
22679 do_vvec_fp_cmp(vA, vB, XT, flag_rC, PPC_CMP_EQ);
22680 break;
22683 case 0x1CC: // xvcmpgedp[.] (VSX Vector Compare Greater Than or Equal To Double-Precision [ & Record ])
22685 DIP("xvcmpgedp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
22686 XT, XA, XB);
22687 do_vvec_fp_cmp(vA, vB, XT, flag_rC, PPC_CMP_GE);
22688 break;
22691 case 0x1AC: // xvcmpgtdp[.] (VSX Vector Compare Greater Than Double-Precision [ & Record ])
22693 DIP("xvcmpgtdp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
22694 XT, XA, XB);
22695 do_vvec_fp_cmp(vA, vB, XT, flag_rC, PPC_CMP_GT);
22696 break;
22699 case 0x10C: // xvcmpeqsp[.] (VSX Vector Compare Equal To Single-Precision [ & Record ])
22701 IRTemp vD = newTemp(Ity_V128);
22703 DIP("xvcmpeqsp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
22704 XT, XA, XB);
22705 assign( vD, binop(Iop_CmpEQ32Fx4, mkexpr(vA), mkexpr(vB)) );
22706 putVSReg( XT, mkexpr(vD) );
22707 if (flag_rC) {
22708 set_AV_CR6( mkexpr(vD), True );
22710 break;
22713 case 0x14C: // xvcmpgesp[.] (VSX Vector Compare Greater Than or Equal To Single-Precision [ & Record ])
22715 IRTemp vD = newTemp(Ity_V128);
22717 DIP("xvcmpgesp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
22718 XT, XA, XB);
22719 assign( vD, binop(Iop_CmpGE32Fx4, mkexpr(vA), mkexpr(vB)) );
22720 putVSReg( XT, mkexpr(vD) );
22721 if (flag_rC) {
22722 set_AV_CR6( mkexpr(vD), True );
22724 break;
22727 case 0x12C: //xvcmpgtsp[.] (VSX Vector Compare Greater Than Single-Precision [ & Record ])
22729 IRTemp vD = newTemp(Ity_V128);
22731 DIP("xvcmpgtsp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
22732 XT, XA, XB);
22733 assign( vD, binop(Iop_CmpGT32Fx4, mkexpr(vA), mkexpr(vB)) );
22734 putVSReg( XT, mkexpr(vD) );
22735 if (flag_rC) {
22736 set_AV_CR6( mkexpr(vD), True );
22738 break;
22741 default:
22742 vex_printf( "dis_vvec_cmp(ppc)(opc2)\n" );
22743 return False;
22745 return True;
22748 * Miscellaneous VSX Scalar Instructions
22750 static Bool
22751 dis_load_vector_special( UInt prefix, UInt theInstr,
22752 const VexAbiInfo* vbi, UInt opc2, int allow_isa_3_0 )
22754 UChar opc1 = ifieldOPC( theInstr );
22755 UChar XT = ifieldRegXT ( theInstr );
22756 UInt uim = IFIELD( theInstr, 11, 5 ); // inst[16:20]
22758 if (opc1 != 0x3C) {
22759 vex_printf( "dis_load_special(ppc)(instr)\n" );
22760 return False;
22763 DIP("lxvkq v%u,%u\n", (UInt)XT, uim);
22765 switch( uim ) {
22766 case 0b00001: putVSReg( XT, binop( Iop_64HLtoV128,
22767 mkU64( 0x3FFF000000000000 ),
22768 mkU64( 0x0000000000000000 ) ) );
22769 break;
22770 case 0b00010: putVSReg( XT, binop( Iop_64HLtoV128,
22771 mkU64( 0x4000000000000000 ),
22772 mkU64( 0x0000000000000000 ) ) );
22773 break;
22774 case 0b00011: putVSReg( XT, binop( Iop_64HLtoV128,
22775 mkU64( 0x4000800000000000 ),
22776 mkU64( 0x0000000000000000 ) ) );
22777 break;
22778 case 0b00100: putVSReg( XT, binop( Iop_64HLtoV128,
22779 mkU64( 0x4001000000000000 ),
22780 mkU64( 0x0000000000000000 ) ) );
22781 break;
22782 case 0b00101: putVSReg( XT, binop( Iop_64HLtoV128,
22783 mkU64( 0x4001400000000000 ),
22784 mkU64( 0x0000000000000000 ) ) );
22785 break;
22786 case 0b00110: putVSReg( XT, binop( Iop_64HLtoV128,
22787 mkU64( 0x4001800000000000 ),
22788 mkU64( 0x0000000000000000 ) ) );
22789 break;
22790 case 0b00111: putVSReg( XT, binop( Iop_64HLtoV128,
22791 mkU64( 0x4001C00000000000 ),
22792 mkU64( 0x0000000000000000 ) ) );
22793 break;
22794 case 0b01000: putVSReg( XT, binop( Iop_64HLtoV128,
22795 mkU64( 0x7FFF000000000000 ),
22796 mkU64( 0x0000000000000000 ) ) );
22797 break;
22798 case 0b01001: putVSReg( XT, binop( Iop_64HLtoV128,
22799 mkU64( 0x7FFF800000000000 ),
22800 mkU64( 0x0000000000000000 ) ) );
22801 break;
22802 case 0b10000: putVSReg( XT, binop( Iop_64HLtoV128,
22803 mkU64( 0x8000000000000000 ),
22804 mkU64( 0x0000000000000000 ) ) );
22805 break;
22806 case 0b10001: putVSReg( XT, binop( Iop_64HLtoV128,
22807 mkU64( 0xBFFF000000000000 ),
22808 mkU64( 0x0000000000000000 ) ) );
22809 break;
22810 case 0b10010: putVSReg( XT, binop( Iop_64HLtoV128,
22811 mkU64( 0xC000000000000000 ),
22812 mkU64( 0x0000000000000000 ) ) );
22813 break;
22814 case 0b10011: putVSReg( XT, binop( Iop_64HLtoV128,
22815 mkU64( 0xC000800000000000 ),
22816 mkU64( 0x0000000000000000 ) ) );
22817 break;
22818 case 0b10100: putVSReg( XT, binop( Iop_64HLtoV128,
22819 mkU64( 0xC001000000000000 ),
22820 mkU64( 0x0000000000000000 ) ) );
22821 break;
22822 case 0b10101: putVSReg( XT, binop( Iop_64HLtoV128,
22823 mkU64( 0xC001400000000000 ),
22824 mkU64( 0x0000000000000000 ) ) );
22825 break;
22826 case 0b10110: putVSReg( XT, binop( Iop_64HLtoV128,
22827 mkU64( 0xC001800000000000 ),
22828 mkU64( 0x0000000000000000 ) ) );
22829 break;
22830 case 0b10111: putVSReg( XT, binop( Iop_64HLtoV128,
22831 mkU64( 0xC001C00000000000 ),
22832 mkU64( 0x0000000000000000 ) ) );
22833 break;
22834 case 0b11000: putVSReg( XT, binop( Iop_64HLtoV128,
22835 mkU64( 0xFFFF000000000000 ),
22836 mkU64( 0x0000000000000000 ) ) );
22837 break;
22838 default: vex_printf( "dis_load_special(ppc)(lxvkq XT, UIM not valid)\n" );
22839 putVSReg( XT, binop( Iop_64HLtoV128,
22840 mkU64( 0x0000000000000000 ),
22841 mkU64( 0x0000000000000000 ) ) );
22842 return True; /* print message, continue */
22844 return True;
22847 static Bool
22848 dis_vxs_misc( UInt prefix, UInt theInstr, const VexAbiInfo* vbi, UInt opc2,
22849 int allow_isa_3_0 )
22851 #define VG_PPC_SIGN_MASK 0x7fffffffffffffffULL
22852 /* XX3-Form and XX2-Form */
22853 UChar opc1 = ifieldOPC( theInstr );
22854 UChar XT = ifieldRegXT ( theInstr );
22855 UChar XA = ifieldRegXA ( theInstr );
22856 UChar XB = ifieldRegXB ( theInstr );
22857 IRTemp vA = newTemp( Ity_V128 );
22858 IRTemp vB = newTemp( Ity_V128 );
22860 /* There is no prefixed version of these instructions. */
22861 PREFIX_CHECK
22863 if (opc1 != 0x3C) {
22864 vex_printf( "dis_vxs_misc(ppc)(instr)\n" );
22865 return False;
22868 assign( vA, getVSReg( XA ) );
22869 assign( vB, getVSReg( XB ) );
22871 /* For all the VSX move instructions, the contents of doubleword element 1
22872 * of VSX[XT] are undefined after the operation; therefore, we can simply
22873 * move the entire array element where it makes sense to do so.
22875 if (( opc2 == 0x168 ) && ( IFIELD( theInstr, 19, 2 ) == 0 ) )
22877 /* Special case of XX1-Form with immediate value
22878 * xxspltib (VSX Vector Splat Immediate Byte)
22880 UInt uim = IFIELD( theInstr, 11, 8 );
22881 UInt word_value = ( uim << 24 ) | ( uim << 16 ) | ( uim << 8 ) | uim;
22883 DIP("xxspltib v%u,%u\n", (UInt)XT, uim);
22884 putVSReg(XT, binop( Iop_64HLtoV128,
22885 binop( Iop_32HLto64,
22886 mkU32( word_value ),
22887 mkU32( word_value ) ),
22888 binop( Iop_32HLto64,
22889 mkU32( word_value ),
22890 mkU32( word_value ) ) ) );
22891 return True;
22894 switch ( opc2 ) {
22895 case 0x0ec: // xscmpexpdp (VSX Scalar Compare Exponents Double-Precision)
22897 /* Compare 64-bit data, 128-bit layout:
22898 src1[0:63] is double word, src1[64:127] is unused
22899 src2[0:63] is double word, src2[64:127] is unused
22901 IRExpr *bit4, *bit5, *bit6, *bit7;
22902 UInt BF = IFIELD( theInstr, 23, 3 );
22903 IRTemp eq_lt_gt = newTemp( Ity_I32 );
22904 IRTemp CC = newTemp( Ity_I32 );
22905 IRTemp vA_hi = newTemp( Ity_I64 );
22906 IRTemp vB_hi = newTemp( Ity_I64 );
22907 IRExpr *mask = mkU64( 0x7FF0000000000000 );
22909 DIP("xscmpexpdp %u,v%u,v%u\n", BF, XA, XB);
22911 assign( vA_hi, unop( Iop_V128HIto64, mkexpr( vA ) ) );
22912 assign( vB_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
22914 /* A exp < B exp */
22915 bit4 = binop( Iop_CmpLT64U,
22916 binop( Iop_And64,
22917 mkexpr( vA_hi ),
22918 mask ),
22919 binop( Iop_And64,
22920 mkexpr( vB_hi ),
22921 mask ) );
22922 /* A exp > B exp */
22923 bit5 = binop( Iop_CmpLT64U,
22924 binop( Iop_And64,
22925 mkexpr( vB_hi ),
22926 mask ),
22927 binop( Iop_And64,
22928 mkexpr( vA_hi ),
22929 mask ) );
22930 /* test equal */
22931 bit6 = binop( Iop_CmpEQ64,
22932 binop( Iop_And64,
22933 mkexpr( vA_hi ),
22934 mask ),
22935 binop( Iop_And64,
22936 mkexpr( vB_hi ),
22937 mask ) );
22939 /* exp A or exp B is NaN */
22940 bit7 = mkOR1( is_NaN( Ity_I64, vA_hi ),
22941 is_NaN( Ity_I64, vB_hi ) );
22943 assign( eq_lt_gt, binop( Iop_Or32,
22944 binop( Iop_Shl32,
22945 unop( Iop_1Uto32, bit4 ),
22946 mkU8( 3) ),
22947 binop( Iop_Or32,
22948 binop( Iop_Shl32,
22949 unop( Iop_1Uto32, bit5 ),
22950 mkU8( 2) ),
22951 binop( Iop_Shl32,
22952 unop( Iop_1Uto32, bit6 ),
22953 mkU8( 1 ) ) ) ) );
22954 assign(CC, binop( Iop_Or32,
22955 binop( Iop_And32,
22956 mkexpr( eq_lt_gt ) ,
22957 unop( Iop_Not32, unop( Iop_1Sto32, bit7 ) ) ),
22958 unop( Iop_1Uto32, bit7 ) ) );
22960 putGST_field( PPC_GST_CR, mkexpr( CC ), BF );
22961 putFPCC( mkexpr( CC ) );
22962 return True;
22964 break;
22966 case 0x14A: // xxextractuw (VSX Vector Extract Unsigned Word)
22968 UInt uim = IFIELD( theInstr, 16, 4 );
22970 DIP("xxextractuw v%u,v%u,%u\n", (UInt)XT, (UInt)XB, uim);
22972 putVSReg( XT,
22973 binop( Iop_ShlV128,
22974 binop( Iop_AndV128,
22975 binop( Iop_ShrV128,
22976 mkexpr( vB ),
22977 mkU8( ( 12 - uim ) * 8 ) ),
22978 binop(Iop_64HLtoV128,
22979 mkU64( 0 ),
22980 mkU64( 0xFFFFFFFF ) ) ),
22981 mkU8( ( 32*2 ) ) ) );
22982 break;
22984 case 0x16A: // xxinsertw (VSX Vector insert Word)
22986 UInt uim = IFIELD( theInstr, 16, 4 );
22987 IRTemp vT = newTemp( Ity_V128 );
22988 IRTemp tmp = newTemp( Ity_V128 );
22990 DIP("xxinsertw v%u,v%u,%u\n", (UInt)XT, (UInt)XB, uim);
22992 assign( vT, getVSReg( XT ) );
22993 assign( tmp, binop( Iop_AndV128,
22994 mkexpr( vT ),
22995 unop( Iop_NotV128,
22996 binop( Iop_ShlV128,
22997 binop( Iop_64HLtoV128,
22998 mkU64( 0x0 ),
22999 mkU64( 0xFFFFFFFF) ),
23000 mkU8( ( 12 - uim ) * 8 ) ) ) ) );
23002 putVSReg( XT,
23003 binop( Iop_OrV128,
23004 binop( Iop_ShlV128,
23005 binop( Iop_AndV128,
23006 binop( Iop_ShrV128,
23007 mkexpr( vB ),
23008 mkU8( 32 * 2 ) ),
23009 binop( Iop_64HLtoV128,
23010 mkU64( 0 ),
23011 mkU64( 0xFFFFFFFF ) ) ),
23012 mkU8( ( 12 - uim ) * 8 ) ),
23013 mkexpr( tmp ) ) );
23014 break;
23017 case 0x2B2: // xsabsdp (VSX scalar absolute value double-precision
23019 /* Move abs val of dw 0 of VSX[XB] to dw 0 of VSX[XT]. */
23020 IRTemp absVal = newTemp(Ity_V128);
23021 if (host_endness == VexEndnessLE) {
23022 IRTemp hi64 = newTemp(Ity_I64);
23023 IRTemp lo64 = newTemp(Ity_I64);
23024 assign( hi64, unop( Iop_V128HIto64, mkexpr(vB) ) );
23025 assign( lo64, unop( Iop_V128to64, mkexpr(vB) ) );
23026 assign( absVal, binop( Iop_64HLtoV128,
23027 binop( Iop_And64, mkexpr(hi64),
23028 mkU64(VG_PPC_SIGN_MASK) ),
23029 mkexpr(lo64) ) );
23030 } else {
23031 assign(absVal, binop(Iop_ShrV128,
23032 binop(Iop_ShlV128, mkexpr(vB),
23033 mkU8(1)), mkU8(1)));
23035 DIP("xsabsdp v%u,v%u\n", XT, XB);
23036 putVSReg(XT, mkexpr(absVal));
23037 break;
23040 case 0x2b6: // xsxexpdp (VSX Scalar Extract Exponent Double-Precision)
 23042 // xsxsigdp (VSX Scalar Extract Significand Double-Precision)
23042 // xsvhpdp (VSX Scalar Convert Half-Precision format
23043 // to Double-Precision format)
23044 // xscvdphp (VSX Scalar round & convert Double-precision
23045 // format to Half-precision format)
23047 IRTemp rT = newTemp( Ity_I64 );
23048 UInt inst_select = IFIELD( theInstr, 16, 5);
23050 if (inst_select == 0) {
23051 DIP("xsxexpd %u,v%u\n", (UInt)XT, (UInt)XB);
23053 assign( rT, binop( Iop_Shr64,
23054 binop( Iop_And64,
23055 unop( Iop_V128HIto64, mkexpr( vB ) ),
23056 mkU64( 0x7FF0000000000000 ) ),
23057 mkU8 ( 52 ) ) );
23058 } else if (inst_select == 1) {
23059 IRExpr *normal;
23060 IRTemp tmp = newTemp(Ity_I64);
23062 DIP("xsxsigdp v%u,v%u\n", (UInt)XT, (UInt)XB);
23064 assign( tmp, unop( Iop_V128HIto64, mkexpr( vB ) ) );
23066 /* Value is normal if it isn't infinite, zero or denormalized */
23067 normal = mkNOT1( mkOR1(
23068 mkOR1( is_NaN( Ity_I64, tmp ),
23069 is_Inf( Ity_I64, tmp ) ),
23070 mkOR1( is_Zero( Ity_I64, tmp ),
23071 is_Denorm( Ity_I64, tmp ) ) ) );
23073 assign( rT, binop( Iop_Or64,
23074 binop( Iop_And64,
23075 mkexpr( tmp ),
23076 mkU64( 0xFFFFFFFFFFFFF ) ),
23077 binop( Iop_Shl64,
23078 unop( Iop_1Uto64, normal),
23079 mkU8( 52 ) ) ) );
23080 putIReg( XT, mkexpr( rT ) );
23082 } else if (inst_select == 16) {
23083 IRTemp result = newTemp( Ity_V128 );
23084 IRTemp value = newTemp( Ity_I64 );
 23086 /* Note: PPC only converts the 16-bit value in the upper 64-bits
23086 * of the source V128 to a 64-bit value stored in the upper
23087 * 64-bits of the V128 result. The contents of the lower 64-bits
23088 * is undefined.
23091 DIP("xscvhpdp v%u, v%u\n", (UInt)XT, (UInt)XB);
23092 assign( result, unop( Iop_F16toF64x2, mkexpr( vB ) ) );
23094 putVSReg( XT, mkexpr( result ) );
23096 assign( value, unop( Iop_V128HIto64, mkexpr( result ) ) );
23097 generate_store_FPRF( Ity_I64, value, vbi );
23098 return True;
23100 } else if (inst_select == 17) { // xscvdphp
23101 IRTemp value = newTemp( Ity_I32 );
23102 IRTemp result = newTemp( Ity_V128 );
 23104 /* Note: PPC only converts the 64-bit value in the upper 64-bits of
23104 * the V128 and stores the 16-bit result in the upper word of the
23105 * V128 result. The contents of the lower 64-bits is undefined.
23107 DIP("xscvdphp v%u, v%u\n", (UInt)XT, (UInt)XB);
23108 assign( result, unop( Iop_F64toF16x2_DEP, mkexpr( vB ) ) );
23109 assign( value, unop( Iop_64to32, unop( Iop_V128HIto64,
23110 mkexpr( result ) ) ) );
23111 putVSReg( XT, mkexpr( result ) );
23112 generate_store_FPRF( Ity_I16, value, vbi );
23113 return True;
23115 } else {
23116 vex_printf( "dis_vxv_scalar_extract_exp_sig invalid inst_select (ppc)(opc2)\n" );
23117 vex_printf("inst_select = %u\n", inst_select);
23118 return False;
23121 break;
23123 case 0x254: // xststdcsp (VSX Scalar Test Data Class Single-Precision)
23124 case 0x2D4: // xststdcdp (VSX Scalar Test Data Class Double-Precision)
23126 /* These instructions only differ in that the single precision
23127 instruction, xststdcsp, has the additional constraint on the
 23129 denormal test that the exponent be greater than zero and
 23130 less than 0x381. */
23130 IRTemp vB_hi = newTemp( Ity_I64 );
23131 UInt BF = IFIELD( theInstr, 23, 3 );
23132 UInt DCMX_mask = IFIELD( theInstr, 16, 7 );
23133 IRTemp NaN = newTemp( Ity_I64 );
23134 IRTemp inf = newTemp( Ity_I64 );
23135 IRTemp zero = newTemp( Ity_I64 );
23136 IRTemp dnorm = newTemp( Ity_I64 );
23137 IRTemp pos = newTemp( Ity_I64 );
23138 IRTemp not_sp = newTemp( Ity_I64 );
23139 IRTemp DCM = newTemp( Ity_I64 );
23140 IRTemp CC = newTemp( Ity_I64 );
23141 IRTemp exponent = newTemp( Ity_I64 );
23142 IRTemp tmp = newTemp( Ity_I64 );
23144 assign( vB_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
23146 assign( pos, unop( Iop_1Uto64,
23147 binop( Iop_CmpEQ64,
23148 binop( Iop_Shr64,
23149 mkexpr( vB_hi ),
23150 mkU8( 63 ) ),
23151 mkU64( 0 ) ) ) );
23153 assign( NaN, unop( Iop_1Uto64, is_NaN( Ity_I64, vB_hi ) ) );
23154 assign( inf, unop( Iop_1Uto64, is_Inf( Ity_I64, vB_hi ) ) );
23155 assign( zero, unop( Iop_1Uto64, is_Zero( Ity_I64, vB_hi ) ) );
23157 if (opc2 == 0x254) {
23158 DIP("xststdcsp %u,v%u,%u\n", BF, (UInt)XB, DCMX_mask);
23160 /* The least significant bit of the CC is set to 1 if the double
23161 precision value is not representable as a single precision
23162 value. The spec says the bit is set if:
23163 src != convert_SPtoDP(convert_DPtoSP(src))
23165 assign( tmp,
23166 unop( Iop_ReinterpF64asI64,
23167 unop( Iop_F32toF64,
23168 unop( Iop_TruncF64asF32,
23169 unop( Iop_ReinterpI64asF64,
23170 mkexpr( vB_hi ) ) ) ) ) );
23171 assign( not_sp, unop( Iop_1Uto64,
23172 mkNOT1( binop( Iop_CmpEQ64,
23173 mkexpr( vB_hi ),
23174 mkexpr( tmp ) ) ) ) );
23175 assign( exponent,
23176 binop( Iop_Shr64,
23177 binop( Iop_And64,
23178 mkexpr( vB_hi ),
23179 mkU64( 0x7ff0000000000000 ) ),
23180 mkU8( 52 ) ) );
23181 assign( dnorm, unop( Iop_1Uto64,
23182 mkOR1( is_Denorm( Ity_I64, vB_hi ),
23183 mkAND1( binop( Iop_CmpLT64U,
23184 mkexpr( exponent ),
23185 mkU64( 0x381 ) ),
23186 binop( Iop_CmpNE64,
23187 mkexpr( exponent ),
23188 mkU64( 0x0 ) ) ) ) ) );
23190 } else {
23191 DIP("xststdcdp %u,v%u,%u\n", BF, (UInt)XB, DCMX_mask);
23192 assign( not_sp, mkU64( 0 ) );
23193 assign( dnorm, unop( Iop_1Uto64, is_Denorm( Ity_I64, vB_hi ) ) );
23196 assign( DCM, create_DCM( Ity_I64, NaN, inf, zero, dnorm, pos ) );
23197 assign( CC,
23198 binop( Iop_Or64,
23199 binop( Iop_And64, /* vB sign bit */
23200 binop( Iop_Shr64,
23201 mkexpr( vB_hi ),
23202 mkU8( 60 ) ),
23203 mkU64( 0x8 ) ),
23204 binop( Iop_Or64,
23205 binop( Iop_Shl64,
23206 unop( Iop_1Uto64,
23207 binop( Iop_CmpNE64,
23208 binop( Iop_And64,
23209 mkexpr( DCM ),
23210 mkU64( DCMX_mask ) ),
23211 mkU64( 0 ) ) ),
23212 mkU8( 1 ) ),
23213 mkexpr( not_sp ) ) ) );
23214 putGST_field( PPC_GST_CR, unop( Iop_64to32, mkexpr( CC ) ), BF );
23215 putFPCC( unop( Iop_64to32, mkexpr( CC ) ) );
23217 return True;
23219 case 0x2C0: // xscpsgndp
23221 /* Scalar copy sign double-precision */
23222 IRTemp vecA_signed = newTemp(Ity_I64);
23223 IRTemp vecB_unsigned = newTemp(Ity_I64);
23224 IRTemp vec_result = newTemp(Ity_V128);
23225 DIP("xscpsgndp v%d,v%d v%d\n", XT, XA, XB);
23226 assign( vecA_signed, binop( Iop_And64,
23227 unop( Iop_V128HIto64,
23228 mkexpr(vA)),
23229 mkU64(~VG_PPC_SIGN_MASK) ) );
23230 assign( vecB_unsigned, binop( Iop_And64,
23231 unop( Iop_V128HIto64,
23232 mkexpr(vB) ),
23233 mkU64(VG_PPC_SIGN_MASK) ) );
23234 assign( vec_result, binop( Iop_64HLtoV128,
23235 binop( Iop_Or64,
23236 mkexpr(vecA_signed),
23237 mkexpr(vecB_unsigned) ),
23238 mkU64(0x0ULL)));
23239 putVSReg(XT, mkexpr(vec_result));
23240 break;
23242 case 0x2D2: // xsnabsdp
23244 /* Scalar negative absolute value double-precision */
23245 IRTemp BHi_signed = newTemp(Ity_I64);
23246 DIP("xsnabsdp v%d,v%d\n", XT, XB);
23247 assign( BHi_signed, binop( Iop_Or64,
23248 unop( Iop_V128HIto64,
23249 mkexpr(vB) ),
23250 mkU64(~VG_PPC_SIGN_MASK) ) );
23251 putVSReg(XT, binop( Iop_64HLtoV128,
23252 mkexpr(BHi_signed), mkU64(0x0ULL) ) );
23253 break;
23255 case 0x2F2: // xsnegdp
23257 /* Scalar negate double-precision */
23258 IRTemp BHi_signed = newTemp(Ity_I64);
23259 IRTemp BHi_unsigned = newTemp(Ity_I64);
23260 IRTemp BHi_negated = newTemp(Ity_I64);
23261 IRTemp BHi_negated_signbit = newTemp(Ity_I1);
23262 IRTemp vec_result = newTemp(Ity_V128);
23263 DIP("xsnabsdp v%d,v%d\n", XT, XB);
23264 assign( BHi_signed, unop( Iop_V128HIto64, mkexpr(vB) ) );
23265 assign( BHi_unsigned, binop( Iop_And64, mkexpr(BHi_signed),
23266 mkU64(VG_PPC_SIGN_MASK) ) );
23267 assign( BHi_negated_signbit,
23268 unop( Iop_Not1,
23269 unop( Iop_32to1,
23270 binop( Iop_Shr32,
23271 unop( Iop_64HIto32,
23272 binop( Iop_And64,
23273 mkexpr(BHi_signed),
23274 mkU64(~VG_PPC_SIGN_MASK) )
23276 mkU8(31) ) ) ) );
23277 assign( BHi_negated,
23278 binop( Iop_Or64,
23279 binop( Iop_32HLto64,
23280 binop( Iop_Shl32,
23281 unop( Iop_1Uto32,
23282 mkexpr(BHi_negated_signbit) ),
23283 mkU8(31) ),
23284 mkU32(0) ),
23285 mkexpr(BHi_unsigned) ) );
23286 assign( vec_result, binop( Iop_64HLtoV128, mkexpr(BHi_negated),
23287 mkU64(0x0ULL)));
23288 putVSReg( XT, mkexpr(vec_result));
23289 break;
23291 case 0x280: // xsmaxdp (VSX Scalar Maximum Double-Precision)
23292 case 0x2A0: // xsmindp (VSX Scalar Minimum Double-Precision)
23294 IRTemp frA = newTemp(Ity_I64);
23295 IRTemp frB = newTemp(Ity_I64);
23296 Bool isMin = opc2 == 0x2A0 ? True : False;
23297 DIP("%s v%d,v%d v%d\n", isMin ? "xsmaxdp" : "xsmindp", XT, XA, XB);
23299 assign(frA, unop(Iop_V128HIto64, mkexpr( vA )));
23300 assign(frB, unop(Iop_V128HIto64, mkexpr( vB )));
23301 putVSReg( XT, binop( Iop_64HLtoV128, get_max_min_fp(frA, frB, isMin), mkU64( 0 ) ) );
23303 break;
23305 case 0x0F2: // xsrdpim (VSX Scalar Round to Double-Precision Integer using round toward -Infinity)
23306 case 0x0D2: // xsrdpip (VSX Scalar Round to Double-Precision Integer using round toward +Infinity)
23307 case 0x0D6: // xsrdpic (VSX Scalar Round to Double-Precision Integer using Current rounding mode)
23308 case 0x0B2: // xsrdpiz (VSX Scalar Round to Double-Precision Integer using round toward Zero)
23309 case 0x092: // xsrdpi (VSX Scalar Round to Double-Precision Integer using round toward Nearest Away)
23311 IRTemp frB_I64 = newTemp(Ity_I64);
23312 IRExpr * frD_fp_round = NULL;
23314 assign(frB_I64, unop(Iop_V128HIto64, mkexpr( vB )));
23315 frD_fp_round = _do_vsx_fp_roundToInt(frB_I64, opc2);
23317 DIP("xsrdpi%s v%d,v%d\n", _get_vsx_rdpi_suffix(opc2), XT, XB);
23318 putVSReg( XT,
23319 binop( Iop_64HLtoV128,
23320 unop( Iop_ReinterpF64asI64, frD_fp_round),
23321 mkU64( 0 ) ) );
23322 break;
23324 case 0x034: // xsresp (VSX Scalar Reciprocal Estimate single-Precision)
23325 case 0x014: /* xsrsqrtesp (VSX Scalar Reciprocal Square Root Estimate
23326 * single-Precision)
23329 IRTemp frB = newTemp(Ity_F64);
23330 IRTemp sqrt = newTemp(Ity_F64);
23331 IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
23332 IRExpr* rm = get_IR_roundingmode();
23333 Bool redp = opc2 == 0x034;
23334 DIP("%s v%d,v%d\n", redp ? "xsresp" : "xsrsqrtesp", XT,
23335 XB);
23337 assign( frB,
23338 unop( Iop_ReinterpI64asF64,
23339 unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
23341 if (!redp)
23342 assign( sqrt,
23343 binop( Iop_SqrtF64,
23345 mkexpr(frB) ) );
23346 putVSReg( XT,
23347 binop( Iop_64HLtoV128,
23348 unop( Iop_ReinterpF64asI64,
23349 binop( Iop_RoundF64toF32, rm,
23350 triop( Iop_DivF64,
23352 ieee_one,
23353 redp ? mkexpr( frB ) :
23354 mkexpr( sqrt ) ) ) ),
23355 mkU64( 0 ) ) );
23356 break;
23359 case 0x0B4: // xsredp (VSX Scalar Reciprocal Estimate Double-Precision)
23360 case 0x094: // xsrsqrtedp (VSX Scalar Reciprocal Square Root Estimate Double-Precision)
23363 IRTemp frB = newTemp(Ity_F64);
23364 IRTemp sqrt = newTemp(Ity_F64);
23365 IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
23366 IRExpr* rm = get_IR_roundingmode();
23367 Bool redp = opc2 == 0x0B4;
23368 DIP("%s v%d,v%d\n", redp ? "xsredp" : "xsrsqrtedp", XT, XB);
23369 assign( frB,
23370 unop( Iop_ReinterpI64asF64,
23371 unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
23373 if (!redp)
23374 assign( sqrt,
23375 binop( Iop_SqrtF64,
23377 mkexpr(frB) ) );
23378 putVSReg( XT,
23379 binop( Iop_64HLtoV128,
23380 unop( Iop_ReinterpF64asI64,
23381 triop( Iop_DivF64,
23383 ieee_one,
23384 redp ? mkexpr( frB ) : mkexpr( sqrt ) ) ),
23385 mkU64( 0 ) ) );
23386 break;
23389 case 0x232: // xsrsp (VSX Scalar Round to Single-Precision)
23391 IRTemp frB = newTemp(Ity_F64);
23392 IRExpr* rm = get_IR_roundingmode();
23393 DIP("xsrsp v%d, v%d\n", XT, XB);
23394 assign( frB,
23395 unop( Iop_ReinterpI64asF64,
23396 unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
23398 putVSReg( XT, binop( Iop_64HLtoV128,
23399 unop( Iop_ReinterpF64asI64,
23400 binop( Iop_RoundF64toF32,
23402 mkexpr( frB ) ) ),
23403 mkU64( 0 ) ) );
23404 break;
23407 case 0x354: // xvtstdcsp (VSX Test Data Class Single-Precision)
23409 UInt DX_mask = IFIELD( theInstr, 16, 5 );
23410 UInt DC_mask = IFIELD( theInstr, 6, 1 );
23411 UInt DM_mask = IFIELD( theInstr, 2, 1 );
23412 UInt DCMX_mask = (DC_mask << 6) | (DM_mask << 5) | DX_mask;
23414 IRTemp match_value[4];
23415 IRTemp value[4];
23416 IRTemp NaN[4];
23417 IRTemp inf[4];
23418 IRTemp pos[4];
23419 IRTemp DCM[4];
23420 IRTemp zero[4];
23421 IRTemp dnorm[4];
23422 Int i;
23424 DIP("xvtstdcsp v%u,v%u,%u\n", (UInt)XT, (UInt)XB, DCMX_mask);
23426 for (i = 0; i < 4; i++) {
23427 NaN[i] = newTemp(Ity_I32);
23428 inf[i] = newTemp(Ity_I32);
23429 pos[i] = newTemp(Ity_I32);
23430 DCM[i] = newTemp(Ity_I32);
23431 zero[i] = newTemp(Ity_I32);
23432 dnorm[i] = newTemp(Ity_I32);
23434 value[i] = newTemp(Ity_I32);
23435 match_value[i] = newTemp(Ity_I32);
23437 assign( value[i],
23438 unop( Iop_64to32,
23439 unop( Iop_V128to64,
23440 binop( Iop_AndV128,
23441 binop( Iop_ShrV128,
23442 mkexpr( vB ),
23443 mkU8( (3-i)*32 ) ),
23444 binop( Iop_64HLtoV128,
23445 mkU64( 0x0 ),
23446 mkU64( 0xFFFFFFFF ) ) ) ) ) );
23448 assign( pos[i], unop( Iop_1Uto32,
23449 binop( Iop_CmpEQ32,
23450 binop( Iop_Shr32,
23451 mkexpr( value[i] ),
23452 mkU8( 31 ) ),
23453 mkU32( 0 ) ) ) );
23455 assign( NaN[i], unop( Iop_1Uto32, is_NaN( Ity_I32, value[i] ) ));
23456 assign( inf[i], unop( Iop_1Uto32, is_Inf( Ity_I32, value[i] ) ) );
23457 assign( zero[i], unop( Iop_1Uto32, is_Zero( Ity_I32, value[i] ) ) );
23459 assign( dnorm[i], unop( Iop_1Uto32, is_Denorm( Ity_I32,
23460 value[i] ) ) );
23461 assign( DCM[i], create_DCM( Ity_I32, NaN[i], inf[i], zero[i],
23462 dnorm[i], pos[i] ) );
23464 assign( match_value[i],
23465 unop( Iop_1Sto32,
23466 binop( Iop_CmpNE32,
23467 binop( Iop_And32,
23468 mkU32( DCMX_mask ),
23469 mkexpr( DCM[i] ) ),
23470 mkU32( 0 ) ) ) );
23473 putVSReg( XT, binop( Iop_64HLtoV128,
23474 binop( Iop_32HLto64,
23475 mkexpr( match_value[0] ),
23476 mkexpr( match_value[1] ) ),
23477 binop( Iop_32HLto64,
23478 mkexpr( match_value[2] ),
23479 mkexpr( match_value[3] ) ) ) );
23481 break;
23483 case 0x360: // xviexpsp (VSX Vector Insert Exponent Single-Precision)
23485 Int i;
23486 IRTemp new_XT[5];
23487 IRTemp A_value[4];
23488 IRTemp B_value[4];
23489 IRExpr *sign[4], *expr[4], *fract[4];
23491 DIP("xviexpsp v%d,v%d\n", XT, XB);
23492 new_XT[0] = newTemp(Ity_V128);
23493 assign( new_XT[0], binop( Iop_64HLtoV128,
23494 mkU64( 0x0 ),
23495 mkU64( 0x0 ) ) );
23497 for (i = 0; i < 4; i++) {
23498 A_value[i] = newTemp(Ity_I32);
23499 B_value[i] = newTemp(Ity_I32);
23501 assign( A_value[i],
23502 unop( Iop_64to32,
23503 unop( Iop_V128to64,
23504 binop( Iop_AndV128,
23505 binop( Iop_ShrV128,
23506 mkexpr( vA ),
23507 mkU8( (3-i)*32 ) ),
23508 binop( Iop_64HLtoV128,
23509 mkU64( 0x0 ),
23510 mkU64( 0xFFFFFFFF ) ) ) ) ) );
23511 assign( B_value[i],
23512 unop( Iop_64to32,
23513 unop( Iop_V128to64,
23514 binop( Iop_AndV128,
23515 binop( Iop_ShrV128,
23516 mkexpr( vB ),
23517 mkU8( (3-i)*32 ) ),
23518 binop( Iop_64HLtoV128,
23519 mkU64( 0x0 ),
23520 mkU64( 0xFFFFFFFF ) ) ) ) ) );
23522 sign[i] = binop( Iop_And32, mkexpr( A_value[i] ),
23523 mkU32( 0x80000000 ) );
23524 expr[i] = binop( Iop_Shl32,
23525 binop( Iop_And32, mkexpr( B_value[i] ),
23526 mkU32( 0xFF ) ),
23527 mkU8( 23 ) );
23528 fract[i] = binop( Iop_And32, mkexpr( A_value[i] ),
23529 mkU32( 0x007FFFFF ) );
23531 new_XT[i+1] = newTemp(Ity_V128);
23532 assign( new_XT[i+1],
23533 binop( Iop_OrV128,
23534 binop( Iop_ShlV128,
23535 binop( Iop_64HLtoV128,
23536 mkU64( 0 ),
23537 binop( Iop_32HLto64,
23538 mkU32( 0 ),
23539 binop( Iop_Or32,
23540 binop( Iop_Or32,
23541 sign[i],
23542 expr[i] ),
23543 fract[i] ) ) ),
23544 mkU8( (3-i)*32 ) ),
23545 mkexpr( new_XT[i] ) ) );
23547 putVSReg( XT, mkexpr( new_XT[4] ) );
23549 break;
23551 case 0x396: // xsiexpdp (VSX Scalar Insert Exponent Double-Precision)
23553 IRExpr *sign, *expr, *fract;
23554 UChar rA_addr = ifieldRegA(theInstr);
23555 UChar rB_addr = ifieldRegB(theInstr);
23556 IRTemp rA = newTemp( Ity_I64 );
23557 IRTemp rB = newTemp( Ity_I64 );
23559 DIP("xsiexpdp v%u,%u,%u\n", (UInt)XT, (UInt)rA_addr, (UInt)rB_addr);
23560 assign( rA, getIReg(rA_addr));
23561 assign( rB, getIReg(rB_addr));
23563 sign = binop( Iop_And64, mkexpr( rA ), mkU64( 0x8000000000000000 ) );
23564 expr = binop( Iop_Shl64,
23565 binop( Iop_And64, mkexpr( rB ), mkU64( 0x7FF ) ),
23566 mkU8( 52 ) );
23567 fract = binop( Iop_And64, mkexpr( rA ), mkU64( 0x000FFFFFFFFFFFFF ) );
23569 putVSReg( XT, binop( Iop_64HLtoV128,
23570 binop( Iop_Or64,
23571 binop( Iop_Or64, sign, expr ),
23572 fract ),
23573 mkU64( 0 ) ) );
23575 break;
23577 case 0x3B6: // xvxexpdp (VSX Vector Extract Exponent Double-Precision)
23578 // xvxsigdp (VSX Vector Extract Significand Double-Precision)
23579 // xxbrh
23580 // xvxexpsp (VSX Vector Extract Exponent Single-Precision)
23581 // xvxsigsp (VSX Vector Extract Significand Single-Precision)
23582 // xxbrw
23583 // xxbrd
23584 // xxbrq
23585 // xvcvbf16spn (VSX Convert 16-bit bfloat to 32-bit float)
23586 // xvcvspbf16 (VSX Convert 32-bit float to 16-bit bfloat)
23587 // xvcvhpsp (VSX Vector Convert Half-Precision format to Single-Precision format)
23588 // xvcvsphp (VSX Vector round and convert Single-Precision format to Half-Precision format)
23590 UInt inst_select = IFIELD( theInstr, 16, 5);
23592 if (inst_select == 0) {
23593 DIP("xvxexpdp v%d,v%d\n", XT, XB);
23595 putVSReg( XT, binop( Iop_ShrV128,
23596 binop( Iop_AndV128,
23597 mkexpr( vB ),
23598 binop( Iop_64HLtoV128,
23599 mkU64( 0x7FF0000000000000 ),
23600 mkU64( 0x7FF0000000000000 ) ) ),
23601 mkU8( 52 ) ) );
23603 } else if (inst_select == 1) {
23604 Int i;
23605 IRExpr *normal[2];
23606 IRTemp value[2];
23607 IRTemp new_XT[3];
23609 DIP("xvxsigdp v%d,v%d\n", XT, XB);
23610 new_XT[0] = newTemp(Ity_V128);
23611 assign( new_XT[0], binop( Iop_64HLtoV128,
23612 mkU64( 0x0 ),
23613 mkU64( 0x0 ) ) );
23615 for (i = 0; i < 2; i++) {
23616 value[i] = newTemp(Ity_I64);
23617 assign( value[i],
23618 unop( Iop_V128to64,
23619 binop( Iop_AndV128,
23620 binop( Iop_ShrV128,
23621 mkexpr( vB ),
23622 mkU8( (1-i)*64 ) ),
23623 binop( Iop_64HLtoV128,
23624 mkU64( 0x0 ),
23625 mkU64( 0xFFFFFFFFFFFFFFFF ) ) ) ) );
23627 /* Value is normal if it isn't infinite, zero or denormalized */
23628 normal[i] = mkNOT1( mkOR1(
23629 mkOR1( is_NaN( Ity_I64, value[i] ),
23630 is_Inf( Ity_I64, value[i] ) ),
23631 mkOR1( is_Zero( Ity_I64, value[i] ),
23632 is_Denorm( Ity_I64,
23633 value[i] ) ) ) );
23634 new_XT[i+1] = newTemp(Ity_V128);
23636 assign( new_XT[i+1],
23637 binop( Iop_OrV128,
23638 binop( Iop_ShlV128,
23639 binop( Iop_64HLtoV128,
23640 mkU64( 0x0 ),
23641 binop( Iop_Or64,
23642 binop( Iop_And64,
23643 mkexpr( value[i] ),
23644 mkU64( 0xFFFFFFFFFFFFF ) ),
23645 binop( Iop_Shl64,
23646 unop( Iop_1Uto64,
23647 normal[i]),
23648 mkU8( 52 ) ) ) ),
23649 mkU8( (1-i)*64 ) ),
23650 mkexpr( new_XT[i] ) ) );
23652 putVSReg( XT, mkexpr( new_XT[2] ) );
23654 } else if (inst_select == 7) {
23655 IRTemp sub_element0 = newTemp( Ity_V128 );
23656 IRTemp sub_element1 = newTemp( Ity_V128 );
23658 DIP("xxbrh v%u, v%u\n", (UInt)XT, (UInt)XB);
23660 assign( sub_element0,
23661 binop( Iop_ShrV128,
23662 binop( Iop_AndV128,
23663 binop(Iop_64HLtoV128,
23664 mkU64( 0xFF00FF00FF00FF00 ),
23665 mkU64( 0xFF00FF00FF00FF00 ) ),
23666 mkexpr( vB ) ),
23667 mkU8( 8 ) ) );
23668 assign( sub_element1,
23669 binop( Iop_ShlV128,
23670 binop( Iop_AndV128,
23671 binop(Iop_64HLtoV128,
23672 mkU64( 0x00FF00FF00FF00FF ),
23673 mkU64( 0x00FF00FF00FF00FF ) ),
23674 mkexpr( vB ) ),
23675 mkU8( 8 ) ) );
23677 putVSReg(XT, binop( Iop_OrV128,
23678 mkexpr( sub_element1 ),
23679 mkexpr( sub_element0 ) ) );
23681 } else if (inst_select == 8) {
23682 DIP("xvxexpsp v%d,v%d\n", XT, XB);
23684 putVSReg( XT, binop( Iop_ShrV128,
23685 binop( Iop_AndV128,
23686 mkexpr( vB ),
23687 binop( Iop_64HLtoV128,
23688 mkU64( 0x7F8000007F800000 ),
23689 mkU64( 0x7F8000007F800000 ) ) ),
23690 mkU8( 23 ) ) );
23691 } else if (inst_select == 9) {
23692 Int i;
23693 IRExpr *normal[4];
23694 IRTemp value[4];
23695 IRTemp new_value[4];
23696 IRTemp new_XT[5];
23698 DIP("xvxsigsp v%d,v%d\n", XT, XB);
23699 new_XT[0] = newTemp(Ity_V128);
23700 assign( new_XT[0], binop( Iop_64HLtoV128,
23701 mkU64( 0x0 ),
23702 mkU64( 0x0 ) ) );
23704 for (i = 0; i < 4; i++) {
23705 value[i] = newTemp(Ity_I32);
23706 assign( value[i],
23707 unop( Iop_64to32,
23708 unop( Iop_V128to64,
23709 binop( Iop_AndV128,
23710 binop( Iop_ShrV128,
23711 mkexpr( vB ),
23712 mkU8( (3-i)*32 ) ),
23713 binop( Iop_64HLtoV128,
23714 mkU64( 0x0 ),
23715 mkU64( 0xFFFFFFFF ) ) ) ) ) );
23717 new_XT[i+1] = newTemp(Ity_V128);
23719 /* Value is normal if it isn't infinite, zero or denormalized */
23720 normal[i] = mkNOT1( mkOR1(
23721 mkOR1( is_NaN( Ity_I32, value[i] ),
23722 is_Inf( Ity_I32, value[i] ) ),
23723 mkOR1( is_Zero( Ity_I32, value[i] ),
23724 is_Denorm( Ity_I32,
23725 value[i] ) ) ) );
23726 new_value[i] = newTemp(Ity_I32);
23727 assign( new_value[i],
23728 binop( Iop_Or32,
23729 binop( Iop_And32,
23730 mkexpr( value[i] ),
23731 mkU32( 0x7FFFFF ) ),
23732 binop( Iop_Shl32,
23733 unop( Iop_1Uto32,
23734 normal[i]),
23735 mkU8( 23 ) ) ) );
23737 assign( new_XT[i+1],
23738 binop( Iop_OrV128,
23739 binop( Iop_ShlV128,
23740 binop( Iop_64HLtoV128,
23741 mkU64( 0x0 ),
23742 binop( Iop_32HLto64,
23743 mkU32( 0x0 ),
23744 mkexpr( new_value[i] ) ) ),
23745 mkU8( (3-i)*32 ) ),
23746 mkexpr( new_XT[i] ) ) );
23748 putVSReg( XT, mkexpr( new_XT[4] ) );
23750 } else if (inst_select == 15) {
23751 IRTemp sub_element0 = newTemp( Ity_V128 );
23752 IRTemp sub_element1 = newTemp( Ity_V128 );
23753 IRTemp sub_element2 = newTemp( Ity_V128 );
23754 IRTemp sub_element3 = newTemp( Ity_V128 );
23756 DIP("xxbrw v%u, v%u\n", (UInt)XT, (UInt)XB);
23758 assign( sub_element0,
23759 binop( Iop_ShrV128,
23760 binop( Iop_AndV128,
23761 binop(Iop_64HLtoV128,
23762 mkU64( 0xFF000000FF000000 ),
23763 mkU64( 0xFF000000FF000000 ) ),
23764 mkexpr( vB ) ),
23765 mkU8( 24 ) ) );
23766 assign( sub_element1,
23767 binop( Iop_ShrV128,
23768 binop( Iop_AndV128,
23769 binop(Iop_64HLtoV128,
23770 mkU64( 0x00FF000000FF0000 ),
23771 mkU64( 0x00FF000000FF0000 ) ),
23772 mkexpr( vB ) ),
23773 mkU8( 8 ) ) );
23774 assign( sub_element2,
23775 binop( Iop_ShlV128,
23776 binop( Iop_AndV128,
23777 binop(Iop_64HLtoV128,
23778 mkU64( 0x0000FF000000FF00 ),
23779 mkU64( 0x0000FF000000FF00 ) ),
23780 mkexpr( vB ) ),
23781 mkU8( 8 ) ) );
23782 assign( sub_element3,
23783 binop( Iop_ShlV128,
23784 binop( Iop_AndV128,
23785 binop(Iop_64HLtoV128,
23786 mkU64( 0x00000000FF000000FF ),
23787 mkU64( 0x00000000FF000000FF ) ),
23788 mkexpr( vB ) ),
23789 mkU8( 24 ) ) );
23791 putVSReg( XT,
23792 binop( Iop_OrV128,
23793 binop( Iop_OrV128,
23794 mkexpr( sub_element3 ),
23795 mkexpr( sub_element2 ) ),
23796 binop( Iop_OrV128,
23797 mkexpr( sub_element1 ),
23798 mkexpr( sub_element0 ) ) ) );
23800 } else if ((inst_select == 16) && !prefix) {
23801 IRTemp result = newTemp(Ity_V128);
23802 UChar xT_addr = ifieldRegXT ( theInstr );
23803 UChar xB_addr = ifieldRegXB ( theInstr );
23804 /* Convert 16-bit bfloat to 32-bit float, not a prefix inst */
23805 DIP("xvcvbf16spn v%u,v%u\n", xT_addr, xB_addr);
23806 assign( result, vector_convert_bf16tofloat( vbi, mkexpr( vB ) ) );
23807 putVSReg( XT, mkexpr( result) );
23809 } else if ((inst_select == 17) && !prefix) {
23810 IRTemp result = newTemp(Ity_V128);
23811 UChar xT_addr = ifieldRegXT ( theInstr );
23812 UChar xB_addr = ifieldRegXB ( theInstr );
23813 /* Convert 32-bit float to 16-bit bfloat, not a prefix inst */
23814 DIP("xvcvspbf16 v%u,v%u\n", xT_addr, xB_addr);
23815 assign( result, vector_convert_floattobf16( vbi, mkexpr( vB ) ) );
23816 putVSReg( XT, mkexpr( result) );
23818 } else if (inst_select == 23) {
23819 DIP("xxbrd v%u, v%u\n", (UInt)XT, (UInt)XB);
23821 int i;
23822 int shift = 56;
23823 IRTemp sub_element[16];
23824 IRTemp new_xT[17];
23826 new_xT[0] = newTemp( Ity_V128 );
23827 assign( new_xT[0], binop( Iop_64HLtoV128,
23828 mkU64( 0 ),
23829 mkU64( 0 ) ) );
23831 for ( i = 0; i < 4; i++ ) {
23832 new_xT[i+1] = newTemp( Ity_V128 );
23833 sub_element[i] = newTemp( Ity_V128 );
23834 sub_element[i+4] = newTemp( Ity_V128 );
23836 assign( sub_element[i],
23837 binop( Iop_ShrV128,
23838 binop( Iop_AndV128,
23839 binop( Iop_64HLtoV128,
23840 mkU64( (0xFFULL << (7 - i) * 8) ),
23841 mkU64( (0xFFULL << (7 - i) * 8) ) ),
23842 mkexpr( vB ) ),
23843 mkU8( shift ) ) );
23845 assign( sub_element[i+4],
23846 binop( Iop_ShlV128,
23847 binop( Iop_AndV128,
23848 binop( Iop_64HLtoV128,
23849 mkU64( (0xFFULL << i*8) ),
23850 mkU64( (0xFFULL << i*8) ) ),
23851 mkexpr( vB ) ),
23852 mkU8( shift ) ) );
23853 shift = shift - 16;
23855 assign( new_xT[i+1],
23856 binop( Iop_OrV128,
23857 mkexpr( new_xT[i] ),
23858 binop( Iop_OrV128,
23859 mkexpr ( sub_element[i] ),
23860 mkexpr ( sub_element[i+4] ) ) ) );
23863 putVSReg( XT, mkexpr( new_xT[4] ) );
23865 } else if (inst_select == 24) {
23866 // xvcvhpsp, (VSX Vector Convert half-precision format to
23867 // Single-precision format)
23868 /* only supported on ISA 3.0 and newer */
23869 IRTemp result = newTemp( Ity_V128 );
23870 IRTemp src = newTemp( Ity_I64 );
23872 if (!allow_isa_3_0) return False;
23874 DIP("xvcvhpsp v%d,v%d\n", XT,XB);
23875 /* The instruction does not set the C or FPCC fields. The
23876 * instruction takes four 16-bit values stored in a 128-bit value
23877 * as follows: x V | x V | x V | x V where V is a 16-bit
23878 * value and x is an unused 16-bit value. To use Iop_F16toF32x4
23879 * the four 16-bit values will be gathered into a single 64 bit
23880 * value. The backend will scatter the four 16-bit values back
23881 * into a 128-bit operand before issuing the instruction.
23883 /* Gather 16-bit float values from V128 source into new 64-bit
23884 * source value for the Iop.
23886 assign( src,
23887 unop( Iop_V128to64,
23888 binop( Iop_Perm8x16,
23889 mkexpr( vB ),
23890 binop ( Iop_64HLtoV128,
23891 mkU64( 0 ),
23892 mkU64( 0x020306070A0B0E0F) ) ) ) );
23894 assign( result, unop( Iop_F16toF32x4, mkexpr( src ) ) );
23896 putVSReg( XT, mkexpr( result ) );
23898 } else if (inst_select == 25) {
23899 // xvcvsphp, (VSX Vector round and Convert single-precision
23900 // format to half-precision format)
23901 /* only supported on ISA 3.0 and newer */
23902 IRTemp result = newTemp( Ity_V128 );
23903 IRTemp tmp64 = newTemp( Ity_I64 );
23905 if (!allow_isa_3_0) return False;
23906 DIP("xvcvsphp v%d,v%d\n", XT,XB);
23908 /* Iop_F32toF16x4 is V128 -> I64, scatter the 16-bit floats in the
23909 * I64 result to the V128 register to store.
23911 assign( tmp64, unop( Iop_F32toF16x4_DEP, mkexpr( vB ) ) );
23913 /* Scatter 16-bit float values from returned 64-bit value
23914 * of V128 result.
23916 if (host_endness == VexEndnessLE)
23917 /* Note location 0 may have a valid number in it. Location
23918 * 15 should always be zero. Use 0xF to put zeros in the
23919 * desired bytes.
23921 assign( result,
23922 binop( Iop_Perm8x16,
23923 binop( Iop_64HLtoV128,
23924 mkexpr( tmp64 ),
23925 mkU64( 0 ) ),
23926 binop ( Iop_64HLtoV128,
23927 mkU64( 0x0F0F00010F0F0203 ),
23928 mkU64( 0x0F0F04050F0F0607 ) ) ) );
23929 else
23930 assign( result,
23931 binop( Iop_Perm8x16,
23932 binop( Iop_64HLtoV128,
23933 mkexpr( tmp64 ),
23934 mkU64( 0 ) ),
23935 binop ( Iop_64HLtoV128,
23936 mkU64( 0x0F0F06070F0F0405 ),
23937 mkU64( 0x0F0F02030F0F0001 ) ) ) );
23938 putVSReg( XT, mkexpr( result ) );
23940 } else if ( inst_select == 31 ) {
23941 int i;
23942 int shift_left = 8;
23943 int shift_right = 120;
23944 IRTemp sub_element[16];
23945 IRTemp new_xT[9];
23947 DIP("xxbrq v%u, v%u\n", (UInt) XT, (UInt) XB);
23949 new_xT[0] = newTemp( Ity_V128 );
23950 assign( new_xT[0], binop( Iop_64HLtoV128,
23951 mkU64( 0 ),
23952 mkU64( 0 ) ) );
23954 for ( i = 0; i < 8; i++ ) {
23955 new_xT[i+1] = newTemp( Ity_V128 );
23956 sub_element[i] = newTemp( Ity_V128 );
23957 sub_element[i+8] = newTemp( Ity_V128 );
23959 assign( sub_element[i],
23960 binop( Iop_ShrV128,
23961 binop( Iop_AndV128,
23962 binop( Iop_64HLtoV128,
23963 mkU64( ( 0xFFULL << (7 - i) * 8 ) ),
23964 mkU64( 0x0ULL ) ),
23965 mkexpr( vB ) ),
23966 mkU8( shift_right ) ) );
23967 shift_right = shift_right - 16;
23969 assign( sub_element[i+8],
23970 binop( Iop_ShlV128,
23971 binop( Iop_AndV128,
23972 binop( Iop_64HLtoV128,
23973 mkU64( 0x0ULL ),
23974 mkU64( ( 0xFFULL << (7 - i) * 8 ) ) ),
23975 mkexpr( vB ) ),
23976 mkU8( shift_left ) ) );
23977 shift_left = shift_left + 16;
23979 assign( new_xT[i+1],
23980 binop( Iop_OrV128,
23981 mkexpr( new_xT[i] ),
23982 binop( Iop_OrV128,
23983 mkexpr ( sub_element[i] ),
23984 mkexpr ( sub_element[i+8] ) ) ) );
23987 putVSReg( XT, mkexpr( new_xT[8] ) );
23989 } else {
23990 vex_printf("dis_vxs_misc(ppc) Invalid instruction selection\n");
23991 return False;
23993 break;
23996 case 0x3D4: // xvtstdcdp (VSX Test Data Class Double-Precision)
23998 UInt DX_mask = IFIELD( theInstr, 16, 5 );
23999 UInt DC_mask = IFIELD( theInstr, 6, 1 );
24000 UInt DM_mask = IFIELD( theInstr, 2, 1 );
24001 UInt DCMX_mask = (DC_mask << 6) | (DM_mask << 5) | DX_mask;
24003 IRTemp NaN[2], inf[2], zero[2], dnorm[2], pos[2], DCM[2];
24004 IRTemp match_value[2];
24005 IRTemp value[2];
24006 Int i;
24008 DIP("xvtstdcdp v%u,v%u,%u\n", (UInt)XT, (UInt)XB, DCMX_mask);
24010 for (i = 0; i < 2; i++) {
24011 NaN[i] = newTemp(Ity_I64);
24012 inf[i] = newTemp(Ity_I64);
24013 pos[i] = newTemp(Ity_I64);
24014 DCM[i] = newTemp(Ity_I64);
24015 zero[i] = newTemp(Ity_I64);
24016 dnorm[i] = newTemp(Ity_I64);
24018 value[i] = newTemp(Ity_I64);
24019 match_value[i] = newTemp(Ity_I64);
24021 assign( value[i],
24022 unop( Iop_V128to64,
24023 binop( Iop_AndV128,
24024 binop( Iop_ShrV128,
24025 mkexpr( vB ),
24026 mkU8( (1-i)*64 ) ),
24027 binop( Iop_64HLtoV128,
24028 mkU64( 0x0 ),
24029 mkU64( 0xFFFFFFFFFFFFFFFF ) ) ) ) );
24031 assign( pos[i], unop( Iop_1Uto64,
24032 binop( Iop_CmpEQ64,
24033 binop( Iop_Shr64,
24034 mkexpr( value[i] ),
24035 mkU8( 63 ) ),
24036 mkU64( 0 ) ) ) );
24038 assign( NaN[i], unop( Iop_1Uto64, is_NaN( Ity_I64, value[i] ) ) );
24039 assign( inf[i], unop( Iop_1Uto64, is_Inf( Ity_I64, value[i] ) ) );
24040 assign( zero[i], unop( Iop_1Uto64, is_Zero( Ity_I64, value[i] ) ) );
24041 assign( dnorm[i], unop( Iop_1Uto64, is_Denorm( Ity_I64,
24042 value[i] ) ) );
24044 assign( DCM[i], create_DCM( Ity_I64, NaN[i], inf[i], zero[i],
24045 dnorm[i], pos[i] ) );
24047 assign( match_value[i],
24048 unop( Iop_1Sto64,
24049 binop( Iop_CmpNE64,
24050 binop( Iop_And64,
24051 mkU64( DCMX_mask ),
24052 mkexpr( DCM[i] ) ),
24053 mkU64( 0 ) ) ) );
24055 putVSReg( XT, binop( Iop_64HLtoV128,
24056 mkexpr( match_value[0] ),
24057 mkexpr( match_value[1] ) ) );
24059 break;
24061 case 0x3E0: // xviexpdp (VSX Vector Insert Exponent Double-Precision)
24063 Int i;
24064 IRTemp new_XT[3];
24065 IRTemp A_value[2];
24066 IRTemp B_value[2];
24067 IRExpr *sign[2], *expr[2], *fract[2];
24069 DIP("xviexpdp v%d,v%d\n", XT, XB);
24070 new_XT[0] = newTemp(Ity_V128);
24071 assign( new_XT[0], binop( Iop_64HLtoV128,
24072 mkU64( 0x0 ),
24073 mkU64( 0x0 ) ) );
24075 for (i = 0; i < 2; i++) {
24076 A_value[i] = newTemp(Ity_I64);
24077 B_value[i] = newTemp(Ity_I64);
24079 assign( A_value[i],
24080 unop( Iop_V128to64,
24081 binop( Iop_AndV128,
24082 binop( Iop_ShrV128,
24083 mkexpr( vA ),
24084 mkU8( (1-i)*64 ) ),
24085 binop( Iop_64HLtoV128,
24086 mkU64( 0x0 ),
24087 mkU64( 0xFFFFFFFFFFFFFFFF ) ) ) ) );
24088 assign( B_value[i],
24089 unop( Iop_V128to64,
24090 binop( Iop_AndV128,
24091 binop( Iop_ShrV128,
24092 mkexpr( vB ),
24093 mkU8( (1-i)*64 ) ),
24094 binop( Iop_64HLtoV128,
24095 mkU64( 0x0 ),
24096 mkU64( 0xFFFFFFFFFFFFFFFF ) ) ) ) );
24098 sign[i] = binop( Iop_And64, mkexpr( A_value[i] ),
24099 mkU64( 0x8000000000000000 ) );
24100 expr[i] = binop( Iop_Shl64,
24101 binop( Iop_And64, mkexpr( B_value[i] ),
24102 mkU64( 0x7FF ) ),
24103 mkU8( 52 ) );
24104 fract[i] = binop( Iop_And64, mkexpr( A_value[i] ),
24105 mkU64( 0x000FFFFFFFFFFFFF ) );
24107 new_XT[i+1] = newTemp(Ity_V128);
24108 assign( new_XT[i+1],
24109 binop( Iop_OrV128,
24110 binop( Iop_ShlV128,
24111 binop( Iop_64HLtoV128,
24112 mkU64( 0 ),
24113 binop( Iop_Or64,
24114 binop( Iop_Or64,
24115 sign[i],
24116 expr[i] ),
24117 fract[i] ) ),
24118 mkU8( (1-i)*64 ) ),
24119 mkexpr( new_XT[i] ) ) );
24121 putVSReg( XT, mkexpr( new_XT[2] ) );
24123 break;
24125 default:
24126 vex_printf( "dis_vxs_misc(ppc)(opc2)\n" );
24127 return False;
24129 return True;
24133 * VSX vector miscellaneous instructions
24136 static Bool
24137 dis_vx_misc ( UInt prefix, UInt theInstr, UInt opc2 )
24139 /* XX3-Form */
24140 UChar XT = ifieldRegXT ( theInstr );
24141 UChar XA = ifieldRegXA ( theInstr );
24142 UChar XB = ifieldRegXB ( theInstr );
24143 IRTemp vA = newTemp( Ity_V128 );
24144 IRTemp vB = newTemp( Ity_V128 );
24145 IRTemp src1 = newTemp(Ity_I64);
24146 IRTemp src2 = newTemp(Ity_I64);
24147 IRTemp result_mask = newTemp(Ity_I64);
24148 IRTemp cmp_mask = newTemp(Ity_I64);
24149 IRTemp nan_mask = newTemp(Ity_I64);
24150 IRTemp snan_mask = newTemp(Ity_I64);
24151 IRTemp word_result = newTemp(Ity_I64);
24152 IRTemp check_result = newTemp(Ity_I64);
24153 IRTemp xT = newTemp( Ity_V128 );
24154 IRTemp nan_cmp_value = newTemp(Ity_I64);
24155 UInt trap_enabled = 0; /* 0 - trap enabled is False */
24157 /* There is no prefixed version of these instructions. */
24158 PREFIX_CHECK
24160 assign( vA, getVSReg( XA ) );
24161 assign( vB, getVSReg( XB ) );
24162 assign( xT, getVSReg( XT ) );
24164 assign(src1, unop( Iop_V128HIto64, mkexpr( vA ) ) );
24165 assign(src2, unop( Iop_V128HIto64, mkexpr( vB ) ) );
24167 assign( nan_mask,
24168 binop( Iop_Or64,
24169 unop( Iop_1Sto64, is_NaN( Ity_I64, src1 ) ),
24170 unop( Iop_1Sto64, is_NaN( Ity_I64, src2 ) ) ) );
24172 if ( trap_enabled == 0 )
24173 /* Traps on invalid operation are assumed not enabled, assign
24174 result of comparison to xT.
24176 assign( snan_mask, mkU64( 0 ) );
24178 else
24179 assign( snan_mask,
24180 binop( Iop_Or64,
24181 unop( Iop_1Sto64, is_sNaN( Ity_I64, src1 ) ),
24182 unop( Iop_1Sto64, is_sNaN( Ity_I64, src2 ) ) ) );
24184 assign (result_mask, binop( Iop_Or64,
24185 mkexpr( snan_mask ),
24186 mkexpr( nan_mask ) ) );
24188 switch (opc2) {
24189 case 0xC: //xscmpeqdp
24191 DIP("xscmpeqdp v%d,v%d,v%d\n", XT, XA, XB);
24192 /* extract double-precision floating point source values from
24193 double word 0 */
24195 /* result of Iop_CmpF64 is 0x40 if operands are equal,
24196 mask is all 1's if equal. */
24198 assign( cmp_mask,
24199 unop( Iop_1Sto64,
24200 unop(Iop_32to1,
24201 binop(Iop_Shr32,
24202 binop( Iop_CmpF64,
24203 unop( Iop_ReinterpI64asF64,
24204 mkexpr( src1 ) ),
24205 unop( Iop_ReinterpI64asF64,
24206 mkexpr( src2 ) ) ),
24207 mkU8( 6 ) ) ) ) );
24209 assign( word_result,
24210 binop( Iop_Or64,
24211 binop( Iop_And64, mkexpr( cmp_mask ),
24212 mkU64( 0xFFFFFFFFFFFFFFFF ) ),
24213 binop( Iop_And64,
24214 unop( Iop_Not64, mkexpr( cmp_mask ) ),
24215 mkU64( 0x0 ) ) ) );
24216 assign( nan_cmp_value, mkU64( 0 ) );
24217 break;
24220 case 0x2C: //xscmpgtdp
24222 DIP("xscmpgtdp v%d,v%d,v%d\n", XT, XA, XB);
24223 /* Test for src1 > src2 */
24225 /* Result of Iop_CmpF64 is 0x1 if op1 < op2, set mask to all 1's. */
24226 assign( cmp_mask,
24227 unop( Iop_1Sto64,
24228 unop(Iop_32to1,
24229 binop(Iop_CmpF64,
24230 unop( Iop_ReinterpI64asF64,
24231 mkexpr( src2 ) ),
24232 unop( Iop_ReinterpI64asF64,
24233 mkexpr( src1 ) ) ) ) ) );
24234 assign( word_result,
24235 binop( Iop_Or64,
24236 binop( Iop_And64, mkexpr( cmp_mask ),
24237 mkU64( 0xFFFFFFFFFFFFFFFF ) ),
24238 binop( Iop_And64,
24239 unop( Iop_Not64, mkexpr( cmp_mask ) ),
24240 mkU64( 0x0 ) ) ) );
24241 assign( nan_cmp_value, mkU64( 0 ) );
24242 break;
24245 case 0x4C: //xscmpgedp
24247 DIP("xscmpgedp v%u,v%u,v%u\n", XT, XA, XB);
24248 /* compare src 1 >= src 2 */
24249 /* result of Iop_CmpF64 is 0x40 if operands are equal,
24250 mask is all 1's if equal. */
24251 assign( cmp_mask,
24252 unop( Iop_1Sto64,
24253 unop(Iop_32to1,
24254 binop( Iop_Or32,
24255 binop( Iop_Shr32,
24256 binop(Iop_CmpF64, /* EQ test */
24257 unop( Iop_ReinterpI64asF64,
24258 mkexpr( src1 ) ),
24259 unop( Iop_ReinterpI64asF64,
24260 mkexpr( src2 ) ) ),
24261 mkU8( 6 ) ),
24262 binop(Iop_CmpF64, /* src2 < src 1 test */
24263 unop( Iop_ReinterpI64asF64,
24264 mkexpr( src2 ) ),
24265 unop( Iop_ReinterpI64asF64,
24266 mkexpr( src1 ) ) ) ) ) ) );
24267 assign( word_result,
24268 binop( Iop_Or64,
24269 binop( Iop_And64, mkexpr( cmp_mask ),
24270 mkU64( 0xFFFFFFFFFFFFFFFF ) ),
24271 binop( Iop_And64,
24272 unop( Iop_Not64, mkexpr( cmp_mask ) ),
24273 mkU64( 0x0 ) ) ) );
24274 assign( nan_cmp_value, mkU64( 0 ) );
24275 break;
24278 case 0x200: //xsmaxcdp
24280 DIP("xsmaxcdp v%d,v%d,v%d\n", XT, XA, XB);
24281 /* extract double-precision floating point source values from
24282 double word 0 */
24284 /* result of Iop_CmpF64 is 0x1 if arg1 LT then arg2, */
24285 assign( cmp_mask,
24286 unop( Iop_1Sto64,
24287 unop( Iop_32to1,
24288 binop(Iop_CmpF64,
24289 unop( Iop_ReinterpI64asF64,
24290 mkexpr( src2 ) ),
24291 unop( Iop_ReinterpI64asF64,
24292 mkexpr( src1 ) ) ) ) ) );
24293 assign( word_result,
24294 binop( Iop_Or64,
24295 binop( Iop_And64, mkexpr( cmp_mask ), mkexpr( src1 ) ),
24296 binop( Iop_And64,
24297 unop( Iop_Not64, mkexpr( cmp_mask ) ),
24298 mkexpr( src2 ) ) ) );
24299 assign( nan_cmp_value, mkexpr( src2 ) );
24300 break;
24303 case 0x220: //xsmincdp
24305 DIP("xsmincdp v%d,v%d,v%d\n", XT, XA, XB);
24306 /* extract double-precision floating point source values from
24307 double word 0 */
24309 /* result of Iop_CmpF64 is 0x1 if arg1 less then arg2, */
24310 assign( cmp_mask,
24311 unop( Iop_1Sto64,
24312 unop( Iop_32to1,
24313 binop(Iop_CmpF64,
24314 unop( Iop_ReinterpI64asF64,
24315 mkexpr( src1 ) ),
24316 unop( Iop_ReinterpI64asF64,
24317 mkexpr( src2 ) ) ) ) ) );
24318 assign( word_result,
24319 binop( Iop_Or64,
24320 binop( Iop_And64, mkexpr( cmp_mask ), mkexpr( src1 ) ),
24321 binop( Iop_And64,
24322 unop( Iop_Not64, mkexpr( cmp_mask ) ),
24323 mkexpr( src2 ) ) ) );
24324 assign( nan_cmp_value, mkexpr( src2 ) );
24325 break;
24328 default:
24329 vex_printf( "dis_vx_misc(ppc)(opc2)\n" );
24330 return False;
24333 /* If either argument is NaN, result is src2. If either argument is
24334 SNaN, we are supposed to generate invalid operation exception.
24335 Currently don't support generating exceptions. In case of an
24336 trap enabled invalid operation (SNaN) XT is not changed. The
24337 snan_mask is setup appropriately for trap enabled or not.
24339 assign( check_result,
24340 binop( Iop_Or64,
24341 binop( Iop_And64, mkexpr( snan_mask ),
24342 unop( Iop_V128HIto64, mkexpr( xT ) ) ),
24343 binop( Iop_And64, unop( Iop_Not64,
24344 mkexpr( snan_mask ) ),
24345 binop( Iop_Or64,
24346 binop( Iop_And64, mkexpr( nan_mask ),
24347 mkexpr( nan_cmp_value ) ),
24348 binop( Iop_And64,
24349 unop( Iop_Not64,
24350 mkexpr( nan_mask ) ),
24351 mkU64( 0 ) ) ) ) ) );
24353 /* If SNaN is true, then the result is unchanged if a trap-enabled
24354 Invalid Operation occurs. Result mask already setup for trap-enabled
24355 case.
24357 putVSReg( XT,
24358 binop( Iop_64HLtoV128,
24359 binop( Iop_Or64,
24360 binop( Iop_And64,
24361 unop( Iop_Not64, mkexpr( result_mask ) ),
24362 mkexpr( word_result ) ),
24363 binop( Iop_And64,
24364 mkexpr( result_mask ),
24365 mkexpr( check_result ) ) ),
24366 mkU64( 0 ) ) );
24367 return True;
24371 * VSX Logical Instructions
24373 static Bool
24374 dis_vx_logic ( UInt prefix, UInt theInstr, UInt opc2 )
24376 /* XX3-Form */
24377 UChar opc1 = ifieldOPC( theInstr );
24378 UChar XT = ifieldRegXT ( theInstr );
24379 UChar XA = ifieldRegXA ( theInstr );
24380 UChar XB = ifieldRegXB ( theInstr );
24381 IRTemp vA = newTemp( Ity_V128 );
24382 IRTemp vB = newTemp( Ity_V128 );
24384 /* There is no prefixed version of these instructions. */
24385 PREFIX_CHECK
24387 if (opc1 != 0x3C) {
24388 vex_printf( "dis_vx_logic(ppc)(instr)\n" );
24389 return False;
24392 assign( vA, getVSReg( XA ) );
24393 assign( vB, getVSReg( XB ) );
24395 switch (opc2) {
24396 case 0x268: // xxlxor
24397 DIP("xxlxor v%d,v%d,v%d\n", XT, XA, XB);
24398 putVSReg( XT, binop( Iop_XorV128, mkexpr( vA ), mkexpr( vB ) ) );
24399 break;
24400 case 0x248: // xxlor
24401 DIP("xxlor v%d,v%d,v%d\n", XT, XA, XB);
24402 putVSReg( XT, binop( Iop_OrV128, mkexpr( vA ), mkexpr( vB ) ) );
24403 break;
24404 case 0x288: // xxlnor
24405 DIP("xxlnor v%d,v%d,v%d\n", XT, XA, XB);
24406 putVSReg( XT, unop( Iop_NotV128, binop( Iop_OrV128, mkexpr( vA ),
24407 mkexpr( vB ) ) ) );
24408 break;
24409 case 0x208: // xxland
24410 DIP("xxland v%d,v%d,v%d\n", XT, XA, XB);
24411 putVSReg( XT, binop( Iop_AndV128, mkexpr( vA ), mkexpr( vB ) ) );
24412 break;
24413 case 0x228: //xxlandc
24414 DIP("xxlandc v%d,v%d,v%d\n", XT, XA, XB);
24415 putVSReg( XT, binop( Iop_AndV128, mkexpr( vA ), unop( Iop_NotV128,
24416 mkexpr( vB ) ) ) );
24417 break;
24418 case 0x2A8: // xxlorc (VSX Logical OR with complement)
24419 DIP("xxlorc v%d,v%d,v%d\n", XT, XA, XB);
24420 putVSReg( XT, binop( Iop_OrV128,
24421 mkexpr( vA ),
24422 unop( Iop_NotV128, mkexpr( vB ) ) ) );
24423 break;
24424 case 0x2C8: // xxlnand (VSX Logical NAND)
24425 DIP("xxlnand v%d,v%d,v%d\n", XT, XA, XB);
24426 putVSReg( XT, unop( Iop_NotV128,
24427 binop( Iop_AndV128, mkexpr( vA ),
24428 mkexpr( vB ) ) ) );
24429 break;
24430 case 0x2E8: // xxleqv (VSX Logical Equivalence)
24431 DIP("xxleqv v%d,v%d,v%d\n", XT, XA, XB);
24432 putVSReg( XT, unop( Iop_NotV128,
24433 binop( Iop_XorV128,
24434 mkexpr( vA ), mkexpr( vB ) ) ) );
24435 break;
24436 default:
24437 vex_printf( "dis_vx_logic(ppc)(opc2)\n" );
24438 return False;
24440 return True;
24444 * VSX Load Instructions
24445 * NOTE: VSX supports word-aligned storage access.
24447 static Bool
24448 dis_vx_load ( UInt prefix, UInt theInstr )
24450 /* XX1-Form */
24451 UChar opc1 = ifieldOPC( theInstr );
24452 UChar XT = ifieldRegXT ( theInstr );
24453 UChar rA_addr = ifieldRegA( theInstr );
24454 UChar rB_addr = ifieldRegB( theInstr );
24455 UInt opc2 = ifieldOPClo10( theInstr );
24457 IRType ty = mode64 ? Ity_I64 : Ity_I32;
24458 IRTemp EA = newTemp( ty );
24460 /* There is no prefixed version of these instructions. */
24461 PREFIX_CHECK
24463 if (opc1 != 0x1F) {
24464 vex_printf( "dis_vx_load(ppc)(instr)\n" );
24465 return False;
24468 assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
24470 switch (opc2) {
24471 case 0x00C: // lxsiwzx (Load VSX Scalar as Integer Word and Zero Indexed)
24473 IRExpr * exp;
24474 DIP("lxsiwzx %d,r%u,r%u\n", XT, rA_addr, rB_addr);
24476 if (host_endness == VexEndnessLE)
24477 exp = unop( Iop_64to32, load( Ity_I64, mkexpr( EA ) ) );
24478 else
24479 exp = unop( Iop_64HIto32, load( Ity_I64, mkexpr( EA ) ) );
24481 putVSReg( XT, binop( Iop_64HLtoV128,
24482 unop( Iop_32Uto64, exp),
24483 mkU64(0) ) );
24484 break;
24487 case 0x00D: // lxvrbx
24489 IRExpr * exp;
24490 DIP("lxvrbx v%u,r%u,r%u\n", XT, rA_addr, rB_addr);
24491 exp = load( Ity_I64, mkexpr( EA ) );
24493 if (host_endness == VexEndnessLE)
24494 putVSReg( XT, binop( Iop_64HLtoV128,
24495 mkU64( 0x0 ),
24496 binop( Iop_And64, mkU64( 0xFF ), exp ) ) );
24497 else
24498 putVSReg( XT,
24499 binop( Iop_ShrV128,
24500 binop( Iop_64HLtoV128,
24501 mkU64( 0x0 ),
24502 binop( Iop_And64, mkU64( 0xFF ), exp ) ),
24503 mkU8( 15*8 ) ) ); // data is left most byte
24504 break;
24507 case 0x02D: // lxvrhx
24509 IRExpr * exp;
24511 DIP("lxvrhx v%u,r%u,r%u\n", XT, rA_addr, rB_addr);
24513 exp = load( Ity_I64, mkexpr( EA ) );
24515 if (host_endness == VexEndnessLE)
24516 putVSReg( XT, binop( Iop_64HLtoV128,
24517 mkU64( 0x0 ),
24518 binop( Iop_And64, mkU64( 0xFFFF ), exp ) ) );
24519 else
24520 putVSReg( XT,
24521 binop( Iop_ShrV128,
24522 binop( Iop_64HLtoV128,
24523 mkU64( 0x0 ),
24524 binop( Iop_And64, mkU64( 0xFFFF ), exp ) ),
24525 mkU8( 7*16 ) ) ); // data is left most half-word
24526 break;
24529 case 0x04D: // lxvrwx
24531 IRExpr * exp;
24533 DIP("lxvrwx v%u,r%u,r%u\n", XT, rA_addr, rB_addr);
24535 exp = load( Ity_I64, mkexpr( EA ) );
24537 if (host_endness == VexEndnessLE)
24538 putVSReg( XT, binop( Iop_64HLtoV128,
24539 mkU64( 0x0 ),
24540 binop( Iop_And64, mkU64( 0xFFFFFFFF ), exp ) ) );
24541 else
24542 putVSReg( XT,
24543 binop( Iop_ShrV128,
24544 binop( Iop_64HLtoV128,
24545 mkU64( 0x0 ),
24546 binop( Iop_And64,
24547 mkU64( 0xFFFFFFFF ), exp ) ),
24548 mkU8( 3*32 ) ) ); // data is left most word
24549 break;
24552 case 0x06D: // lxvrdx
24554 IRExpr * exp;
24556 DIP("lxvrdx v%u,r%u,r%u\n", XT, rA_addr, rB_addr);
24558 exp = load( Ity_I64, mkexpr( EA ) );
24560 if (host_endness == VexEndnessLE)
24561 putVSReg( XT, binop( Iop_64HLtoV128,
24562 mkU64( 0x0 ),
24563 binop( Iop_And64,
24564 mkU64( 0xFFFFFFFFFFFFFFFFULL), exp ) ) );
24565 else
24566 putVSReg( XT,
24567 binop( Iop_ShrV128,
24568 binop( Iop_64HLtoV128,
24569 mkU64( 0x0 ),
24570 binop( Iop_And64,
24571 mkU64( 0xFFFFFFFFFFFFFFFFULL), exp ) ),
24572 mkU8( 1*64 ) ) ); // data is left most double word
24573 break;
24576 case 0x08D: // stxvrbx
24578 IRExpr * fetched_exp;
24579 IRExpr * store_exp;
24580 IRTemp vS = newTemp( Ity_V128 );
24582 DIP("stxvrbx v%u,r%u,r%u\n", XT, rA_addr, rB_addr);
24584 fetched_exp = load( Ity_I64, mkexpr( EA ) );
24585 assign( vS, getVSReg( XT ) );
24587 /* Fetch 64 bits, merge byte element 15 into the fetched value and
24588 * store. */
24589 if (host_endness == VexEndnessLE) {
24590 store_exp = binop( Iop_Or64,
24591 binop( Iop_And64,
24592 mkU64( 0x00000000000000FF ),
24593 unop( Iop_V128to64, mkexpr( vS ) ) ),
24594 binop( Iop_And64,
24595 mkU64( 0xFFFFFFFFFFFFFF00 ),
24596 fetched_exp ) );
24597 store( mkexpr( EA ), store_exp );
24598 } else {
24599 store_exp = binop( Iop_Or64,
24600 binop( Iop_And64,
24601 mkU64( 0xFF00000000000000 ),
24602 unop( Iop_V128HIto64, mkexpr( vS ) ) ),
24603 binop( Iop_And64,
24604 mkU64( 0x00FFFFFFFFFFFFFF ),
24605 fetched_exp ) );
24606 store( mkexpr( EA ), store_exp );
24608 break;
24611 case 0x0AD: // stxvrhx
24613 IRExpr * fetched_exp;
24614 IRExpr * store_exp;
24615 IRTemp vS = newTemp( Ity_V128 );
24617 DIP("stxvrhx v%u,r%u,r%u\n", XT, rA_addr, rB_addr);
24619 fetched_exp = load( Ity_I64, mkexpr( EA ) );
24620 assign( vS, getVSReg( XT ) );
24622 /* Fetch 64 bits, merge half-word element 7 into the fetched value and
24623 * store. */
24624 if (host_endness == VexEndnessLE) {
24625 store_exp = binop( Iop_Or64,
24626 binop( Iop_And64,
24627 mkU64( 0x000000000000FFFF ),
24628 unop( Iop_V128to64, mkexpr( vS ) ) ),
24629 binop( Iop_And64,
24630 mkU64( 0xFFFFFFFFFFFF0000 ),
24631 fetched_exp ) );
24632 store( mkexpr( EA ), store_exp );
24633 } else {
24634 store_exp = binop( Iop_Or64,
24635 binop( Iop_And64,
24636 mkU64( 0xFFFF000000000000 ),
24637 unop( Iop_V128HIto64, mkexpr( vS ) ) ),
24638 binop( Iop_And64,
24639 mkU64( 0x0000FFFFFFFFFFFF ),
24640 fetched_exp ) );
24641 store( mkexpr( EA ), store_exp );
24643 break;
24646 case 0x0CD: // stxvrwx
24648 IRExpr * fetched_exp;
24649 IRExpr * store_exp;
24650 IRTemp vS = newTemp( Ity_V128 );
24652 DIP("stxvrwx v%u,r%u,r%u\n", XT, rA_addr, rB_addr);
24654 fetched_exp = load( Ity_I64, mkexpr( EA ) );
24655 assign( vS, getVSReg( XT ) );
24657 /* Fetch 64 bits, merge word element 3 into the fetched value and
24658 * store. */
24659 if (host_endness == VexEndnessLE) {
24660 store_exp = binop( Iop_Or64,
24661 binop( Iop_And64,
24662 mkU64( 0x00000000FFFFFFFF ),
24663 unop( Iop_V128to64, mkexpr( vS ) ) ),
24664 binop( Iop_And64,
24665 mkU64( 0xFFFFFFFF00000000 ),
24666 fetched_exp ) );
24667 store( mkexpr( EA ), store_exp );
24668 } else {
24669 store_exp = binop( Iop_Or64,
24670 binop( Iop_And64,
24671 mkU64( 0xFFFFFFFF00000000 ),
24672 unop( Iop_V128HIto64, mkexpr( vS ) ) ),
24673 binop( Iop_And64,
24674 mkU64( 0x00000000FFFFFFFF ),
24675 fetched_exp ) );
24676 store( mkexpr( EA ), store_exp );
24678 break;
24681 case 0x0ED: // stxvrdx
24683 IRExpr * store_exp;
24684 IRTemp vS = newTemp( Ity_V128 );
24686 DIP("stxvrdx v%u,r%u,r%u\n", XT, rA_addr, rB_addr);
24688 assign( vS, getVSReg( XT ) );
24690 /* Fetch 64 bits, merge double word element 1 into the fetched value and
24691 * store. Well, this is just store vS bits[63:0] at EA. */
24692 if (host_endness == VexEndnessLE) {
24693 store_exp = binop( Iop_And64,
24694 mkU64( 0xFFFFFFFFFFFFFFFF ),
24695 unop( Iop_V128to64, mkexpr( vS ) ) );
24696 store( mkexpr( EA ), store_exp );
24697 } else {
24698 store_exp = binop( Iop_And64,
24699 mkU64( 0xFFFFFFFFFFFFFFFF ),
24700 unop( Iop_V128HIto64, mkexpr( vS ) ) );
24701 store( mkexpr( EA ), store_exp );
24703 break;
24706 case 0x04C: // lxsiwax (Load VSX Scalar as Integer Word Algebraic Indexed)
24708 IRExpr * exp;
24709 DIP("lxsiwax %d,r%u,r%u\n", XT, rA_addr, rB_addr);
24711 if (host_endness == VexEndnessLE)
24712 exp = unop( Iop_64to32, load( Ity_I64, mkexpr( EA ) ) );
24713 else
24714 exp = unop( Iop_64HIto32, load( Ity_I64, mkexpr( EA ) ) );
24716 putVSReg( XT, binop( Iop_64HLtoV128,
24717 unop( Iop_32Sto64, exp),
24718 mkU64(0) ) );
24719 break;
24721 case 0x10C: // lxvx
24723 UInt ea_off = 0;
24724 IRExpr* irx_addr;
24725 IRTemp word[4];
24726 int i;
24728 DIP("lxvx %u,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
24730 if ( host_endness == VexEndnessBE ) {
24731 for ( i = 3; i>= 0; i-- ) {
24732 word[i] = newTemp( Ity_I64 );
24734 irx_addr =
24735 binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
24736 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
24738 assign( word[i], unop( Iop_32Uto64,
24739 load( Ity_I32, irx_addr ) ) );
24740 ea_off += 4;
24743 putVSReg( XT, binop( Iop_64HLtoV128,
24744 binop( Iop_Or64,
24745 mkexpr( word[2] ),
24746 binop( Iop_Shl64,
24747 mkexpr( word[3] ),
24748 mkU8( 32 ) ) ),
24749 binop( Iop_Or64,
24750 mkexpr( word[0] ),
24751 binop( Iop_Shl64,
24752 mkexpr( word[1] ),
24753 mkU8( 32 ) ) ) ) );
24754 } else {
24755 for ( i = 0; i< 4; i++ ) {
24756 word[i] = newTemp( Ity_I64 );
24758 irx_addr =
24759 binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
24760 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
24762 assign( word[i], unop( Iop_32Uto64,
24763 load( Ity_I32, irx_addr ) ) );
24764 ea_off += 4;
24767 putVSReg( XT, binop( Iop_64HLtoV128,
24768 binop( Iop_Or64,
24769 mkexpr( word[2] ),
24770 binop( Iop_Shl64,
24771 mkexpr( word[3] ),
24772 mkU8( 32 ) ) ),
24773 binop( Iop_Or64,
24774 mkexpr( word[0] ),
24775 binop( Iop_Shl64,
24776 mkexpr( word[1] ),
24777 mkU8( 32 ) ) ) ) );
24779 break;
24782 case 0x10D: // lxvl
24784 DIP("lxvl %u,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
24786 IRTemp byte[16];
24787 UInt i;
24788 UInt ea_off = 0;
24789 IRExpr* irx_addr;
24790 IRTemp tmp_low[9];
24791 IRTemp tmp_hi[9];
24792 IRTemp shift = newTemp( Ity_I8 );
24793 IRTemp nb_gt16 = newTemp( Ity_I8 );
24794 IRTemp ld_result = newTemp( Ity_V128 );
24795 IRTemp nb_not_zero = newTemp( Ity_I64 );
24797 IRTemp base_addr = newTemp( ty );
24799 tmp_low[0] = newTemp( Ity_I64 );
24800 tmp_hi[0] = newTemp( Ity_I64 );
24802 assign( base_addr, ea_rAor0( rA_addr ) );
24803 assign( tmp_low[0], mkU64( 0 ) );
24804 assign( tmp_hi[0], mkU64( 0 ) );
24806          /* shift is (16 - nb) * 8 bits, where nb = rB[0:7], used to zero out upper bytes */
24807 assign( nb_not_zero, unop( Iop_1Sto64,
24808 binop( Iop_CmpNE64,
24809 mkU64( 0 ),
24810 binop( Iop_Shr64,
24811 getIReg( rB_addr ),
24812 mkU8( 56 ) ) ) ) );
24814 assign( nb_gt16, unop( Iop_1Sto8,
24815 binop( Iop_CmpLT64U,
24816 binop( Iop_Shr64,
24817 getIReg( rB_addr ),
24818 mkU8( 60 ) ),
24819 mkU64( 1 ) ) ) );
24821 /* Set the shift to 0, by ANDing with nb_gt16. nb_gt16 will be all
24822 * zeros if nb > 16. This will result in quad word load being stored.
24824 assign( shift,
24825 binop( Iop_And8,
24826 unop( Iop_64to8,
24827 binop( Iop_Mul64,
24828 binop( Iop_Sub64,
24829 mkU64 ( 16 ),
24830 binop( Iop_Shr64,
24831 getIReg( rB_addr ),
24832 mkU8( 56 ) ) ),
24833 mkU64( 8 ) ) ),
24834 mkexpr( nb_gt16 ) ) );
24836 /* fetch all 16 bytes, we will remove what we don't want later */
24837 if ( host_endness == VexEndnessBE ) {
24838 for ( i = 0; i < 8; i++ ) {
24839 byte[i] = newTemp( Ity_I64 );
24840 tmp_hi[i+1] = newTemp( Ity_I64 );
24842 irx_addr =
24843 binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
24844 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
24845 ea_off += 1;
24847 assign( byte[i], binop( Iop_Shl64,
24848 unop( Iop_8Uto64,
24849 load( Ity_I8, irx_addr ) ),
24850 mkU8( 8 * ( 7 - i ) ) ) );
24852 assign( tmp_hi[i+1], binop( Iop_Or64,
24853 mkexpr( byte[i] ),
24854 mkexpr( tmp_hi[i] ) ) );
24857 for ( i = 0; i < 8; i++ ) {
24858 byte[i+8] = newTemp( Ity_I64 );
24859 tmp_low[i+1] = newTemp( Ity_I64 );
24861 irx_addr =
24862 binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
24863 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
24864 ea_off += 1;
24866 assign( byte[i+8], binop( Iop_Shl64,
24867 unop( Iop_8Uto64,
24868 load( Ity_I8, irx_addr ) ),
24869 mkU8( 8 * ( 7 - i ) ) ) );
24871 assign( tmp_low[i+1], binop( Iop_Or64,
24872 mkexpr( byte[i+8] ),
24873 mkexpr( tmp_low[i] ) ) );
24875 assign( ld_result, binop( Iop_ShlV128,
24876 binop( Iop_ShrV128,
24877 binop( Iop_64HLtoV128,
24878 mkexpr( tmp_hi[8] ),
24879 mkexpr( tmp_low[8] ) ),
24880 mkexpr( shift ) ),
24881 mkexpr( shift ) ) );
24882 } else {
24883 for ( i = 0; i < 8; i++ ) {
24884 byte[i] = newTemp( Ity_I64 );
24885 tmp_low[i+1] = newTemp( Ity_I64 );
24887 irx_addr =
24888 binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
24889 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
24890 ea_off += 1;
24892 assign( byte[i], binop( Iop_Shl64,
24893 unop( Iop_8Uto64,
24894 load( Ity_I8, irx_addr ) ),
24895 mkU8( 8 * i ) ) );
24897 assign( tmp_low[i+1],
24898 binop( Iop_Or64,
24899 mkexpr( byte[i] ), mkexpr( tmp_low[i] ) ) );
24902 for ( i = 0; i < 8; i++ ) {
24903 byte[i + 8] = newTemp( Ity_I64 );
24904 tmp_hi[i+1] = newTemp( Ity_I64 );
24906 irx_addr =
24907 binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
24908 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
24909 ea_off += 1;
24911 assign( byte[i+8], binop( Iop_Shl64,
24912 unop( Iop_8Uto64,
24913 load( Ity_I8, irx_addr ) ),
24914 mkU8( 8 * i ) ) );
24916 assign( tmp_hi[i+1], binop( Iop_Or64,
24917 mkexpr( byte[i+8] ),
24918 mkexpr( tmp_hi[i] ) ) );
24920 assign( ld_result, binop( Iop_ShrV128,
24921 binop( Iop_ShlV128,
24922 binop( Iop_64HLtoV128,
24923 mkexpr( tmp_hi[8] ),
24924 mkexpr( tmp_low[8] ) ),
24925 mkexpr( shift ) ),
24926 mkexpr( shift ) ) );
24930 /* If nb = 0, mask out the calculated load result so the stored
24931 * value is zero.
24934 putVSReg( XT, binop( Iop_AndV128,
24935 mkexpr( ld_result ),
24936 binop( Iop_64HLtoV128,
24937 mkexpr( nb_not_zero ),
24938 mkexpr( nb_not_zero ) ) ) );
24939 break;
24942 case 0x12D: // lxvll (Load VSX Vector Left-Justified with Length XX1 form)
24944 IRTemp byte[16];
24945 IRTemp tmp_low[9];
24946 IRTemp tmp_hi[9];
24947 IRTemp mask = newTemp(Ity_V128);
24948 IRTemp rB = newTemp( Ity_I64 );
24949 IRTemp nb = newTemp( Ity_I64 );
24950 IRTemp nb_zero = newTemp(Ity_V128);
24951 IRTemp mask_shift = newTemp(Ity_I64);
24952 Int i;
24953 UInt ea_off = 0;
24954 IRExpr* irx_addr;
24955 IRTemp base_addr = newTemp( ty );
24956 IRTemp nb_compare_zero = newTemp( Ity_I64 );
24958 DIP("lxvll %u,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
24960 tmp_low[0] = newTemp(Ity_I64);
24961 tmp_hi[0] = newTemp(Ity_I64);
24963 assign( rB, getIReg(rB_addr));
24964 assign( base_addr, ea_rAor0( rA_addr ) );
24965 assign( tmp_low[0], mkU64( 0 ) );
24966 assign( tmp_hi[0], mkU64( 0 ) );
24968       /* mask_shift is (16 bytes minus nb) times 8 bits per byte */
24969 assign( nb, binop( Iop_Shr64, mkexpr( rB ), mkU8( 56 ) ) );
24971 assign( nb_compare_zero, unop( Iop_1Sto64,
24972 binop( Iop_CmpEQ64,
24973 mkexpr( nb ),
24974 mkU64( 0 ) ) ) );
24976 /* nb_zero is 0xFF..FF if the nb_field = 0 */
24977 assign( nb_zero, binop( Iop_64HLtoV128,
24978 mkexpr( nb_compare_zero ),
24979 mkexpr( nb_compare_zero ) ) );
24981 assign( mask_shift, binop( Iop_Sub64,
24982 mkU64( 16*8 ),
24983 binop( Iop_Mul64,
24984 mkexpr( nb ),
24985 mkU64( 8 ) ) ) );
24987 /* fetch all 16 bytes, we will remove what we don't want later */
24988 for (i = 0; i < 8; i++) {
24989 byte[i] = newTemp(Ity_I64);
24990 tmp_hi[i+1] = newTemp(Ity_I64);
24992 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
24993 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
24994 ea_off += 1;
24996 /* Instruction always loads in Big Endian format */
24997 assign( byte[i], binop( Iop_Shl64,
24998 unop( Iop_8Uto64,
24999 load( Ity_I8, irx_addr ) ),
25000 mkU8( 8 * (7 - i) ) ) );
25001 assign( tmp_hi[i+1],
25002 binop( Iop_Or64,
25003 mkexpr( byte[i] ), mkexpr( tmp_hi[i] ) ) );
25006 for (i = 0; i < 8; i++) {
25007 byte[i + 8] = newTemp(Ity_I64);
25008 tmp_low[i+1] = newTemp(Ity_I64);
25010 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25011 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25012 ea_off += 1;
25014 /* Instruction always loads in Big Endian format */
25015 assign( byte[i+8], binop( Iop_Shl64,
25016 unop( Iop_8Uto64,
25017 load( Ity_I8, irx_addr ) ),
25018 mkU8( 8 * (7 - i) ) ) );
25019 assign( tmp_low[i+1], binop( Iop_Or64,
25020 mkexpr( byte[i+8] ),
25021 mkexpr( tmp_low[i] ) ) );
25024 /* Create mask to clear the right most 16 - nb bytes, set to zero
25025 * if nb= 0.
25027 assign( mask, binop( Iop_AndV128,
25028 binop( Iop_ShlV128,
25029 binop( Iop_ShrV128,
25030 mkV128( 0xFFFF ),
25031 unop( Iop_64to8, mkexpr( mask_shift ) ) ),
25032 unop( Iop_64to8, mkexpr( mask_shift ) ) ),
25033 unop( Iop_NotV128, mkexpr( nb_zero ) ) ) );
25035 putVSReg( XT, binop( Iop_AndV128,
25036 mkexpr( mask ),
25037 binop( Iop_64HLtoV128,
25038 mkexpr( tmp_hi[8] ),
25039 mkexpr( tmp_low[8] ) ) ) );
25040 break;
25043 case 0x16C: // lxvwsx
25045 IRTemp data = newTemp( Ity_I64 );
25047 DIP("lxvwsx %u,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
25049 /* The load is a 64-bit fetch that is Endian aware, just want
25050 * the lower 32 bits. */
25051 if ( host_endness == VexEndnessBE ) {
25052 UInt ea_off = 4;
25053 IRExpr* irx_addr;
25055 irx_addr =
25056 binop( mkSzOp( ty, Iop_Sub8 ), mkexpr( EA ),
25057 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25059 assign( data, binop( Iop_And64,
25060 load( Ity_I64, irx_addr ),
25061 mkU64( 0xFFFFFFFF ) ) );
25063 } else {
25064 assign( data, binop( Iop_And64,
25065 load( Ity_I64, mkexpr( EA ) ),
25066 mkU64( 0xFFFFFFFF ) ) );
25069       /* Take lower 32-bits and splat across the four word positions */
25070 putVSReg( XT,
25071 binop( Iop_64HLtoV128,
25072 binop( Iop_Or64,
25073 mkexpr( data ),
25074 binop( Iop_Shl64,
25075 mkexpr( data ),
25076 mkU8( 32 ) ) ),
25077 binop( Iop_Or64,
25078 mkexpr( data ),
25079 binop( Iop_Shl64,
25080 mkexpr( data ),
25081 mkU8( 32 ) ) ) ) );
25082 break;
25085 case 0x20C: // lxsspx (Load VSX Scalar Single-Precision Indexed)
25087 IRExpr * exp;
25088 DIP("lxsspx %d,r%u,r%u\n", XT, rA_addr, rB_addr);
25089 /* Take 32-bit floating point value in the upper half of the fetched
25090 * 64-bit value, convert to 64-bit floating point value and load into
25091 * top word of V128.
25093 exp = unop( Iop_ReinterpF64asI64,
25094 unop( Iop_F32toF64,
25095 unop( Iop_ReinterpI32asF32,
25096 load( Ity_I32, mkexpr( EA ) ) ) ) );
25098 putVSReg( XT, binop( Iop_64HLtoV128, exp, mkU64( 0 ) ) );
25099 break;
25101 case 0x24C: // lxsdx
25103 IRExpr * exp;
25104 DIP("lxsdx %d,r%u,r%u\n", XT, rA_addr, rB_addr);
25105 exp = load( Ity_I64, mkexpr( EA ) );
25106 // We need to pass an expression of type Ity_V128 with putVSReg, but the load
25107 // we just performed is only a DW. But since the contents of VSR[XT] element 1
25108 // are undefined after this operation, we can just do a splat op.
25109 putVSReg( XT, binop( Iop_64HLtoV128, exp, exp ) );
25110 break;
25113 case 0x30D: // lxsibzx
25115 IRExpr *byte;
25116 IRExpr* irx_addr;
25118 DIP("lxsibzx %u,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
25120 if ( host_endness == VexEndnessBE )
25121 irx_addr = binop( Iop_Sub64, mkexpr( EA ), mkU64( 7 ) );
25123 else
25124 irx_addr = mkexpr( EA );
25125 /* byte load */
25126 byte = load( Ity_I8, irx_addr );
25127 putVSReg( XT, binop( Iop_64HLtoV128,
25128 unop( Iop_8Uto64, byte ),
25129 mkU64( 0 ) ) );
25130 break;
25133 case 0x32D: // lxsihzx
25135 IRExpr *hword;
25136 IRExpr* irx_addr;
25138 DIP("lxsihzx %u,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
25140 if ( host_endness == VexEndnessBE )
25141 irx_addr = binop( Iop_Sub64, mkexpr( EA ), mkU64( 6 ) );
25143 else
25144 irx_addr = mkexpr( EA );
25146 hword = load( Ity_I16, irx_addr );
25147 putVSReg( XT, binop( Iop_64HLtoV128,
25148 unop( Iop_16Uto64,
25149 hword ),
25150 mkU64( 0 ) ) );
25151 break;
25153 case 0x34C: // lxvd2x
25155 IRExpr *t128;
25156 DIP("lxvd2x %d,r%u,r%u\n", XT, rA_addr, rB_addr);
25157 t128 = load( Ity_V128, mkexpr( EA ) );
25159 /* The data in the vec register should be in big endian order.
25160 So if we just did a little endian load then swap around the
25161 high and low double words. */
25162 if (host_endness == VexEndnessLE) {
25163 IRTemp high = newTemp(Ity_I64);
25164 IRTemp low = newTemp(Ity_I64);
25165 assign( high, unop(Iop_V128HIto64, t128) );
25166 assign( low, unop(Iop_V128to64, t128) );
25167 t128 = binop( Iop_64HLtoV128, mkexpr (low), mkexpr (high) );
25170 putVSReg( XT, t128 );
25171 break;
25173 case 0x14C: // lxvdsx
25175 IRTemp data = newTemp(Ity_I64);
25176 DIP("lxvdsx %d,r%u,r%u\n", XT, rA_addr, rB_addr);
25177 assign( data, load( Ity_I64, mkexpr( EA ) ) );
25178 putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( data ), mkexpr( data ) ) );
25179 break;
25181 case 0x30C:
25183 IRExpr *t0;
25185 DIP("lxvw4x %u,r%u,r%u\n", XT, rA_addr, rB_addr);
25187 /* The load will result in the data being in BE order. */
25188 if (host_endness == VexEndnessLE) {
25189 IRExpr *t0_BE;
25190 IRTemp perm_LE = newTemp(Ity_V128);
25192 t0_BE = load( Ity_V128, mkexpr( EA ) );
25194 /* Permute the data to LE format */
25195 assign( perm_LE, binop( Iop_64HLtoV128, mkU64(0x0c0d0e0f08090a0bULL),
25196 mkU64(0x0405060700010203ULL)));
25198 t0 = binop( Iop_Perm8x16, t0_BE, mkexpr(perm_LE) );
25199 } else {
25200 t0 = load( Ity_V128, mkexpr( EA ) );
25203 putVSReg( XT, t0 );
25204 break;
25207 case 0x32C: // lxvh8x
25209 DIP("lxvh8x %u,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
25211 IRTemp h_word[8];
25212 int i;
25213 UInt ea_off = 0;
25214 IRExpr* irx_addr;
25215 IRTemp tmp_low[5];
25216 IRTemp tmp_hi[5];
25218 tmp_low[0] = newTemp( Ity_I64 );
25219 tmp_hi[0] = newTemp( Ity_I64 );
25220 assign( tmp_low[0], mkU64( 0 ) );
25221 assign( tmp_hi[0], mkU64( 0 ) );
25223 for ( i = 0; i < 4; i++ ) {
25224 h_word[i] = newTemp(Ity_I64);
25225 tmp_low[i+1] = newTemp(Ity_I64);
25227 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
25228 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25229 ea_off += 2;
25231 assign( h_word[i], binop( Iop_Shl64,
25232 unop( Iop_16Uto64,
25233 load( Ity_I16, irx_addr ) ),
25234 mkU8( 16 * ( 3 - i ) ) ) );
25236 assign( tmp_low[i+1],
25237 binop( Iop_Or64,
25238 mkexpr( h_word[i] ), mkexpr( tmp_low[i] ) ) );
25241 for ( i = 0; i < 4; i++ ) {
25242 h_word[i+4] = newTemp( Ity_I64 );
25243 tmp_hi[i+1] = newTemp( Ity_I64 );
25245 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
25246 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25247 ea_off += 2;
25249 assign( h_word[i+4], binop( Iop_Shl64,
25250 unop( Iop_16Uto64,
25251 load( Ity_I16, irx_addr ) ),
25252 mkU8( 16 * ( 3 - i ) ) ) );
25254 assign( tmp_hi[i+1], binop( Iop_Or64,
25255 mkexpr( h_word[i+4] ),
25256 mkexpr( tmp_hi[i] ) ) );
25258 putVSReg( XT, binop( Iop_64HLtoV128,
25259 mkexpr( tmp_low[4] ), mkexpr( tmp_hi[4] ) ) );
25260 break;
25263 case 0x36C: // lxvb16x
25265 DIP("lxvb16x %u,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
25267 /* The result of lxvb16x should be the same on big and little
25268 endian systems. We do a host load, then reverse the bytes in
25269 the double words. If the host load was little endian we swap
25270 them around again. */
25272 IRTemp high = newTemp(Ity_I64);
25273 IRTemp high_rev = newTemp(Ity_I64);
25274 IRTemp low = newTemp(Ity_I64);
25275 IRTemp low_rev = newTemp(Ity_I64);
25277 IRExpr *t128 = load( Ity_V128, mkexpr( EA ) );
25279 assign( high, unop(Iop_V128HIto64, t128) );
25280 assign( high_rev, unop(Iop_Reverse8sIn64_x1, mkexpr(high)) );
25281 assign( low, unop(Iop_V128to64, t128) );
25282 assign( low_rev, unop(Iop_Reverse8sIn64_x1, mkexpr(low)) );
25284 if (host_endness == VexEndnessLE)
25285 t128 = binop( Iop_64HLtoV128, mkexpr (low_rev), mkexpr (high_rev) );
25286 else
25287 t128 = binop( Iop_64HLtoV128, mkexpr (high_rev), mkexpr (low_rev) );
25289 putVSReg( XT, t128 );
25290 break;
25293 default:
25294 vex_printf( "dis_vx_load(ppc)(opc2)\n" );
25295 return False;
25297 return True;
25301 * VSX Move Instructions
25303 static Bool
25304 dis_vx_move ( UInt prefix, UInt theInstr )
25306 /* XX1-Form */
25307 UChar opc1 = ifieldOPC( theInstr );
25308 UChar XS = ifieldRegXS( theInstr );
25309 UChar rA_addr = ifieldRegA( theInstr );
25310 UChar rB_addr = ifieldRegB( theInstr );
25311 IRTemp vS = newTemp( Ity_V128 );
25312 UInt opc2 = ifieldOPClo10( theInstr );
25313 IRType ty = Ity_I64;
25315 /* There is no prefixed version of these instructions. */
25316 PREFIX_CHECK
25318 if ( opc1 != 0x1F ) {
25319 vex_printf( "dis_vx_move(ppc)(instr)\n" );
25320 return False;
25323 switch (opc2) {
25324 case 0x133: // mfvsrld RA,XS Move From VSR Lower Doubleword
25325 DIP("mfvsrld %u,r%u\n", (UInt)XS, rA_addr);
25327 assign( vS, getVSReg( XS ) );
25328 putIReg( rA_addr, unop(Iop_V128to64, mkexpr( vS) ) );
25330 break;
25332 case 0x193: // mfvsrdd XT,RA,RB Move to VSR Double Doubleword
25334 IRTemp tmp = newTemp( Ity_I32 );
25336 DIP("mfvsrdd %u,r%u\n", (UInt)XS, rA_addr);
25338 assign( tmp, unop( Iop_64to32, getIReg(rA_addr) ) );
25339 assign( vS, binop( Iop_64HLtoV128,
25340 binop( Iop_32HLto64,
25341 mkexpr( tmp ),
25342 mkexpr( tmp ) ),
25343 binop( Iop_32HLto64,
25344 mkexpr( tmp ),
25345 mkexpr( tmp ) ) ) );
25346 putVSReg( XS, mkexpr( vS ) );
25348 break;
25350 case 0x1B3: // mtvsrws XT,RA Move to VSR word & Splat
25352 IRTemp rA = newTemp( ty );
25353 IRTemp rB = newTemp( ty );
25355 DIP("mfvsrws %u,r%u\n", (UInt)XS, rA_addr);
25357 if ( rA_addr == 0 )
25358 assign( rA, mkU64 ( 0 ) );
25359 else
25360 assign( rA, getIReg(rA_addr) );
25362 assign( rB, getIReg(rB_addr) );
25363 assign( vS, binop( Iop_64HLtoV128, mkexpr( rA ), mkexpr( rB ) ) );
25364 putVSReg( XS, mkexpr( vS ) );
25366 break;
25368 default:
25369 vex_printf( "dis_vx_move(ppc)(opc2)\n" );
25370 return False;
25372 return True;
25376 * VSX Store Instructions
25377 * NOTE: VSX supports word-aligned storage access.
25379 static Bool
25380 dis_vsx_vector_paired_load_store ( UInt prefix, UInt theInstr )
25382 /* X-Form/DS-Form */
25383 UInt opc2 = ifieldOPClo9(theInstr);
25384 UChar rA_addr = ifieldRegA(theInstr);
25385 UChar rB_addr = ifieldRegB(theInstr);
25386 IRType ty = mode64 ? Ity_I64 : Ity_I32;
25387 IRTemp EA = newTemp(ty);
25388 IRTemp EA_16 = newTemp(ty);
25389 UChar XTp = ifieldRegXTp(theInstr);
25391 assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
25393 // address of next 128bits
25394 assign( EA_16, binop( Iop_Add64, mkU64( 16), mkexpr( EA ) ) );
25396 switch (opc2) {
25397 case 0x14D: // lxvpx
25398 DIP( "lxvpx %u,%d(%u)\n", XTp, rA_addr, rB_addr );
25399 if ( host_endness == VexEndnessBE ) {
25400 putVSReg( XTp, load( Ity_V128, mkexpr( EA ) ) );
25401 putVSReg( XTp+1, load( Ity_V128, mkexpr( EA_16 ) ) );
25402 } else {
25403 putVSReg( XTp+1, load( Ity_V128, mkexpr( EA ) ) );
25404 putVSReg( XTp, load( Ity_V128, mkexpr( EA_16 ) ) );
25406 break;
25408 case 0x1CD: { // stxvpx
25409 IRTemp EA_8 = newTemp(ty);
25410 IRTemp EA_24 = newTemp(ty);
25412 DIP( "stxvpx %u,%d(%u)\n", XTp, rA_addr, rB_addr );
25414 assign( EA_8, binop( Iop_Add64, mkU64( 8 ), mkexpr( EA ) ) );
25415 assign( EA_24, binop( Iop_Add64, mkU64( 24 ), mkexpr( EA ) ) );
25417 if ( host_endness == VexEndnessBE ) {
25418 store( mkexpr( EA ), unop( Iop_V128to64, getVSReg( XTp ) ) );
25419 store( mkexpr( EA_8 ), unop( Iop_V128HIto64, getVSReg( XTp ) ) );
25420 store( mkexpr( EA_16 ), unop( Iop_V128to64, getVSReg( XTp+1 ) ) );
25421 store( mkexpr( EA_24 ), unop( Iop_V128HIto64, getVSReg( XTp+1 ) ) );
25423 } else {
25424 store( mkexpr( EA ), unop( Iop_V128to64, getVSReg( XTp+1 ) ) );
25425 store( mkexpr( EA_8 ), unop( Iop_V128HIto64, getVSReg( XTp+1 ) ) );
25426 store( mkexpr( EA_16 ), unop( Iop_V128to64, getVSReg( XTp ) ) );
25427 store( mkexpr( EA_24 ), unop( Iop_V128HIto64, getVSReg( XTp ) ) );
25429 break;
25432 default:
25433 vex_printf("dis_vsx_vector_paired_load_store\n");
25434 return False;
25437 return True;
25440 static Bool
25441 dis_vx_store ( UInt prefix, UInt theInstr )
25443 /* XX1-Form */
25444 UChar opc1 = ifieldOPC( theInstr );
25445 UChar XS = ifieldRegXS( theInstr );
25446 UChar rA_addr = ifieldRegA( theInstr );
25447 UChar rB_addr = ifieldRegB( theInstr );
25448 IRTemp vS = newTemp( Ity_V128 );
25449 UInt opc2 = ifieldOPClo10( theInstr );
25451 IRType ty = mode64 ? Ity_I64 : Ity_I32;
25452 IRTemp EA = newTemp( ty );
25454 /* There is no prefixed version of these instructions. */
25455 PREFIX_CHECK
25457 if (opc1 != 0x1F) {
25458 vex_printf( "dis_vx_store(ppc)(instr)\n" );
25459 return False;
25462 assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
25463 assign( vS, getVSReg( XS ) );
25465 switch (opc2) {
25466 case 0x08C:
25468 /* Need the next to the most significant 32-bit word from
25469 * the 128-bit vector.
25471 IRExpr * high64, * low32;
25472 DIP("stxsiwx %d,r%u,r%u\n", XS, rA_addr, rB_addr);
25473 high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
25474 low32 = unop( Iop_64to32, high64 );
25475 store( mkexpr( EA ), low32 );
25476 break;
25479 case 0x18C: // stxvx Store VSX Vector Indexed
25481 UInt ea_off = 0;
25482 IRExpr* irx_addr;
25483 IRTemp word0 = newTemp( Ity_I64 );
25484 IRTemp word1 = newTemp( Ity_I64 );
25485 IRTemp word2 = newTemp( Ity_I64 );
25486 IRTemp word3 = newTemp( Ity_I64 );
25487 DIP("stxvx %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
25489 assign( word0, binop( Iop_Shr64,
25490 unop( Iop_V128HIto64, mkexpr( vS ) ),
25491 mkU8( 32 ) ) );
25493 assign( word1, binop( Iop_And64,
25494 unop( Iop_V128HIto64, mkexpr( vS ) ),
25495 mkU64( 0xFFFFFFFF ) ) );
25497 assign( word2, binop( Iop_Shr64,
25498 unop( Iop_V128to64, mkexpr( vS ) ),
25499 mkU8( 32 ) ) );
25501 assign( word3, binop( Iop_And64,
25502 unop( Iop_V128to64, mkexpr( vS ) ),
25503 mkU64( 0xFFFFFFFF ) ) );
25505 if (host_endness == VexEndnessBE) {
25506 store( mkexpr( EA ), unop( Iop_64to32, mkexpr( word0 ) ) );
25508 ea_off += 4;
25509 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
25510 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25512 store( irx_addr, unop( Iop_64to32, mkexpr( word1 ) ) );
25514 ea_off += 4;
25515 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
25516 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25518 store( irx_addr, unop( Iop_64to32, mkexpr( word2 ) ) );
25519 ea_off += 4;
25520 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
25521 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25523 store( irx_addr, unop( Iop_64to32, mkexpr( word3 ) ) );
25524 } else {
25525 store( mkexpr( EA ), unop( Iop_64to32, mkexpr( word3 ) ) );
25527 ea_off += 4;
25528 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
25529 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25531 store( irx_addr, unop( Iop_64to32, mkexpr( word2 ) ) );
25533 ea_off += 4;
25534 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
25535 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25537 store( irx_addr, unop( Iop_64to32, mkexpr( word1 ) ) );
25538 ea_off += 4;
25539 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
25540 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25542 store( irx_addr, unop( Iop_64to32, mkexpr( word0 ) ) );
25544 break;
25547 case 0x18D: // stxvl Store VSX Vector Indexed
25549 UInt ea_off = 0;
25550 IRExpr* irx_addr;
25551 IRTemp word0 = newTemp( Ity_I64 );
25552 IRTemp word1 = newTemp( Ity_I64 );
25553 IRTemp word2 = newTemp( Ity_I64 );
25554 IRTemp word3 = newTemp( Ity_I64 );
25555 IRTemp shift = newTemp( Ity_I8 );
25556 IRTemp nb_gt16 = newTemp( Ity_I8 );
25557 IRTemp nb_zero = newTemp( Ity_V128 );
25558 IRTemp nb = newTemp( Ity_I8 );
25559 IRTemp nb_field = newTemp( Ity_I64 );
25560 IRTemp n_bytes = newTemp( Ity_I8 );
25561 IRTemp base_addr = newTemp( ty );
25562 IRTemp current_mem = newTemp( Ity_V128 );
25563 IRTemp store_val = newTemp( Ity_V128 );
25564 IRTemp nb_mask = newTemp( Ity_V128 );
25566 DIP("stxvl %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
25568 assign( nb_field, binop( Iop_Shr64,
25569 getIReg(rB_addr),
25570 mkU8( 56 ) ) );
25572 assign( nb, unop( Iop_64to8, mkexpr( nb_field ) ) );
25574 /* nb_gt16 will be all zeros if nb > 16 */
25575 assign( nb_gt16, unop( Iop_1Sto8,
25576 binop( Iop_CmpLT64U,
25577 binop( Iop_Shr64,
25578 mkexpr( nb_field ),
25579 mkU8( 4 ) ),
25580 mkU64( 1 ) ) ) );
25582 /* nb_zero is 0xFF..FF if the nb_field = 0 */
25583 assign( nb_zero, binop( Iop_64HLtoV128,
25584 unop( Iop_1Sto64,
25585 binop( Iop_CmpEQ64,
25586 mkexpr( nb_field ),
25587 mkU64( 0 ) ) ),
25588 unop( Iop_1Sto64,
25589 binop( Iop_CmpEQ64,
25590 mkexpr( nb_field ),
25591 mkU64( 0 ) ) ) ) );
25593 /* set n_bytes to 0 if nb >= 16. Otherwise, set to nb. */
25594 assign( n_bytes, binop( Iop_And8, mkexpr( nb ), mkexpr( nb_gt16 ) ) );
25595 assign( shift, unop( Iop_64to8,
25596 binop( Iop_Mul64,
25597 binop( Iop_Sub64,
25598 mkU64( 16 ),
25599 unop( Iop_8Uto64,
25600 mkexpr( n_bytes ) ) ),
25601 mkU64( 8 ) ) ) );
25603       /* We only have a 32-bit store function. So, we need to fetch the
25604        * contents of memory, merge it with the store value and do four
25605        * 32-bit stores so we preserve the contents of memory not
25606        * addressed by nb.
25608 assign( base_addr, ea_rAor0( rA_addr ) );
25610 assign( current_mem,
25611 binop( Iop_64HLtoV128,
25612 load( Ity_I64,
25613 binop( mkSzOp( ty, Iop_Add8 ),
25614 mkexpr( base_addr ),
25615 ty == Ity_I64 ? mkU64( 8 ) : mkU32( 8 )
25616 ) ),
25617 load( Ity_I64, mkexpr( base_addr ) ) ) );
25619 /* Set the nb_mask to all zeros if nb = 0 so the current contents
25620 * of memory get written back without modifications.
25622 * The store_val is a combination of the current memory value
25623 * and the bytes you want to store. The nb_mask selects the
25624 * bytes you want stored from Vs.
25626 assign( nb_mask,
25627 binop( Iop_OrV128,
25628 binop( Iop_AndV128,
25629 mkexpr( nb_zero ),
25630 mkV128( 0 ) ),
25631 binop( Iop_AndV128,
25632 binop( Iop_ShrV128,
25633 mkV128( 0xFFFF ),
25634 mkexpr( shift ) ),
25635 unop( Iop_NotV128, mkexpr( nb_zero ) ) ) ) );
25637 assign( store_val,
25638 binop( Iop_OrV128,
25639 binop( Iop_AndV128,
25640 mkexpr( vS ),
25641 mkexpr( nb_mask ) ),
25642 binop( Iop_AndV128,
25643 unop( Iop_NotV128, mkexpr( nb_mask ) ),
25644 mkexpr( current_mem) ) ) );
25646       /* Store the value in 32-bit chunks */
25647 assign( word0, binop( Iop_Shr64,
25648 unop( Iop_V128HIto64, mkexpr( store_val ) ),
25649 mkU8( 32 ) ) );
25651 assign( word1, binop( Iop_And64,
25652 unop( Iop_V128HIto64, mkexpr( store_val ) ),
25653 mkU64( 0xFFFFFFFF ) ) );
25655 assign( word2, binop( Iop_Shr64,
25656 unop( Iop_V128to64, mkexpr( store_val ) ),
25657 mkU8( 32 ) ) );
25659 assign( word3, binop( Iop_And64,
25660 unop( Iop_V128to64, mkexpr( store_val ) ),
25661 mkU64( 0xFFFFFFFF ) ) );
25663 ea_off = 0;
25664 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25665 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25667 store( irx_addr, unop( Iop_64to32, mkexpr( word3 ) ) );
25669 ea_off += 4;
25670 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25671 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25673 store( irx_addr, unop( Iop_64to32, mkexpr( word2 ) ) );
25675 ea_off += 4;
25676 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25677 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25679 store( irx_addr, unop( Iop_64to32, mkexpr( word1 ) ) );
25681 ea_off += 4;
25682 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25683 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25685 store( irx_addr, unop( Iop_64to32, mkexpr( word0 ) ) );
25686 break;
25689 case 0x1AD: // stxvll (Store VSX Vector Left-justified with length XX1-form)
25691 UInt ea_off = 0;
25692 IRExpr* irx_addr;
25693 IRTemp word0[5];
25694 IRTemp word1[5];
25695 IRTemp word2[5];
25696 IRTemp word3[5];
25697 IRTemp shift = newTemp(Ity_I8);
25698 IRTemp nb_gt16 = newTemp(Ity_I8);
25699 IRTemp nb_zero = newTemp(Ity_V128);
25700 IRTemp nb = newTemp(Ity_I8);
25701 IRTemp nb_field = newTemp(Ity_I64);
25702 IRTemp n_bytes = newTemp(Ity_I8);
25703 IRTemp base_addr = newTemp( ty );
25704 IRTemp current_mem = newTemp(Ity_V128);
25705 IRTemp store_val = newTemp(Ity_V128);
25706 IRTemp nb_mask = newTemp(Ity_V128);
25707 IRTemp mask = newTemp( Ity_I64 );
25708 IRTemp byte[16];
25709 IRTemp tmp_low[9];
25710 IRTemp tmp_hi[9];
25711 IRTemp nb_field_compare_zero = newTemp( Ity_I64 );
25712 Int i;
25714 DIP("stxvll %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
25716 assign( nb_field, binop( Iop_Shr64,
25717 getIReg(rB_addr),
25718 mkU8( 56 ) ) );
25719 assign( nb, unop( Iop_64to8, mkexpr( nb_field ) ) );
25720 assign( mask, mkU64( 0xFFFFFFFFFFFFFFFFULL ) );
25722 /* nb_gt16 will be all zeros if nb > 16 */
25723 assign( nb_gt16, unop( Iop_1Sto8,
25724 binop( Iop_CmpLT64U,
25725 binop( Iop_Shr64,
25726 mkexpr( nb_field ),
25727 mkU8( 4 ) ),
25728 mkU64( 1 ) ) ) );
25730 assign( nb_field_compare_zero, unop( Iop_1Sto64,
25731 binop( Iop_CmpEQ64,
25732 mkexpr( nb_field ),
25733 mkU64( 0 ) ) ) );
25735 /* nb_zero is 0xFF..FF if the nb_field = 0 */
25736 assign( nb_zero, binop( Iop_64HLtoV128,
25737 mkexpr( nb_field_compare_zero ),
25738 mkexpr( nb_field_compare_zero ) ) );
25741 /* set n_bytes to 0 if nb >= 16. Otherwise, set to nb. */
25742 assign( n_bytes, binop( Iop_And8, mkexpr( nb ), mkexpr( nb_gt16 ) ) );
25743 assign( shift,
25744 unop( Iop_64to8,
25745 binop( Iop_Mul64,
25746 binop( Iop_Sub64,
25747 mkU64( 16 ),
25748 unop( Iop_8Uto64, mkexpr( n_bytes ) )),
25749 mkU64( 8 ) ) ) );
25751       /* We only have a 32-bit store function. So, we need to fetch the
25752        * contents of memory, merge it with the store value and do four
25753        * 32-bit stores so we preserve the contents of memory not
25754        * addressed by nb.
25756 assign( base_addr, ea_rAor0( rA_addr ) );
25757 /* fetch all 16 bytes and store in Big Endian format */
25758 word0[0] = newTemp(Ity_I64);
25759 assign( word0[0], mkU64( 0 ) );
25761 word1[0] = newTemp(Ity_I64);
25762 assign( word1[0], mkU64( 0 ) );
25764 word2[0] = newTemp(Ity_I64);
25765 assign( word2[0], mkU64( 0 ) );
25767 word3[0] = newTemp(Ity_I64);
25768 assign( word3[0], mkU64( 0 ) );
25770 for (i = 0; i < 4; i++) {
25771 word0[i+1] = newTemp(Ity_I64);
25773 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25774 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25775 ea_off += 1;
25777 /* Instruction always loads in Big Endian format */
25778 assign( word0[i+1],
25779 binop( Iop_Or64,
25780 binop( Iop_Shl64,
25781 unop( Iop_8Uto64,
25782 load( Ity_I8,
25783 irx_addr ) ),
25784 mkU8( (3-i)*8 ) ),
25785 mkexpr( word0[i] ) ) );
25788 for (i = 0; i < 4; i++) {
25789 word1[i+1] = newTemp(Ity_I64);
25791 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25792 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25793 ea_off += 1;
25795 /* Instruction always loads in Big Endian format */
25796 assign( word1[i+1],
25797 binop( Iop_Or64,
25798 binop( Iop_Shl64,
25799 unop( Iop_8Uto64,
25800 load( Ity_I8,
25801 irx_addr ) ),
25802 mkU8( (3-i)*8 ) ),
25803 mkexpr( word1[i] ) ) );
25805 for (i = 0; i < 4; i++) {
25806 word2[i+1] = newTemp(Ity_I64);
25808 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25809 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25810 ea_off += 1;
25812 /* Instruction always loads in Big Endian format */
25813 assign( word2[i+1],
25814 binop( Iop_Or64,
25815 binop( Iop_Shl64,
25816 unop( Iop_8Uto64,
25817 load( Ity_I8,
25818 irx_addr ) ),
25819 mkU8( (3-i)*8 ) ),
25820 mkexpr( word2[i] ) ) );
25822 for (i = 0; i < 4; i++) {
25823 word3[i+1] = newTemp(Ity_I64);
25825 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25826 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25827 ea_off += 1;
25829 /* Instruction always loads in Big Endian format */
25830 assign( word3[i+1],
25831 binop( Iop_Or64,
25832 binop( Iop_Shl64,
25833 unop( Iop_8Uto64,
25834 load( Ity_I8,
25835 irx_addr ) ),
25836 mkU8( (3-i)*8 ) ),
25837 mkexpr( word3[i] ) ) );
25841 assign( current_mem,
25842 binop( Iop_64HLtoV128,
25843 binop( Iop_Or64,
25844 binop( Iop_Shl64,
25845 mkexpr( word0[4] ),
25846 mkU8( 32 ) ),
25847 mkexpr( word1[4] ) ),
25848 binop( Iop_Or64,
25849 binop( Iop_Shl64,
25850 mkexpr( word2[4] ),
25851 mkU8( 32 ) ),
25852 mkexpr( word3[4] ) ) ) );
25854 /* Set the nb_mask to all zeros if nb = 0 so the current contents
25855 * of memory get written back without modifications.
25857 * The store_val is a combination of the current memory value
25858 * and the bytes you want to store. The nb_mask selects the
25859 * bytes you want stored from Vs.
25861 /* The instruction always uses Big Endian order */
25862 assign( nb_mask,
25863 binop( Iop_OrV128,
25864 binop( Iop_AndV128,
25865 binop( Iop_ShlV128,
25866 binop( Iop_ShrV128,
25867 binop( Iop_64HLtoV128,
25868 mkexpr( mask ),
25869 mkexpr( mask ) ),
25870 mkexpr( shift ) ),
25871 mkexpr( shift ) ),
25872 unop( Iop_NotV128, mkexpr( nb_zero ) ) ),
25873 binop( Iop_AndV128,
25874 mkexpr( nb_zero ),
25875 binop( Iop_64HLtoV128,
25876 mkU64( 0x0 ),
25877 mkU64( 0x0 ) ) ) ) );
25879 assign( store_val,
25880 binop( Iop_OrV128,
25881 binop( Iop_AndV128,
25882 mkexpr( vS ),
25883 mkexpr( nb_mask ) ),
25884 binop( Iop_AndV128,
25885 unop( Iop_NotV128, mkexpr( nb_mask ) ),
25886 mkexpr( current_mem) ) ) );
25888 /* store the merged value in Big Endian format */
25889 tmp_low[0] = newTemp(Ity_I64);
25890 tmp_hi[0] = newTemp(Ity_I64);
25891 assign( tmp_low[0], mkU64( 0 ) );
25892 assign( tmp_hi[0], mkU64( 0 ) );
25894 for (i = 0; i < 8; i++) {
25895 byte[i] = newTemp(Ity_I64);
25896 byte[i+8] = newTemp(Ity_I64);
25897 tmp_low[i+1] = newTemp(Ity_I64);
25898 tmp_hi[i+1] = newTemp(Ity_I64);
25900 assign( byte[i], binop( Iop_And64,
25901 binop( Iop_Shr64,
25902 unop( Iop_V128HIto64,
25903 mkexpr( store_val ) ),
25904 mkU8( (7-i)*8 ) ),
25905 mkU64( 0xFF ) ) );
25906 assign( byte[i+8], binop( Iop_And64,
25907 binop( Iop_Shr64,
25908 unop( Iop_V128to64,
25909 mkexpr( store_val ) ),
25910 mkU8( (7-i)*8 ) ),
25911 mkU64( 0xFF ) ) );
25913 assign( tmp_low[i+1],
25914 binop( Iop_Or64,
25915 mkexpr( tmp_low[i] ),
25916 binop( Iop_Shl64, mkexpr( byte[i] ), mkU8( i*8 ) ) ) );
25917 assign( tmp_hi[i+1],
25918 binop( Iop_Or64,
25919 mkexpr( tmp_hi[i] ),
25920 binop( Iop_Shl64, mkexpr( byte[i+8] ),
25921 mkU8( i*8 ) ) ) );
25924 /* Store the value in 32-bit chunks */
25925 ea_off = 0;
25926 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25927 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25929 store( irx_addr, unop( Iop_64to32, mkexpr( tmp_low[8] ) ) );
25931 ea_off += 4;
25932 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25933 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25935 store( irx_addr, unop( Iop_64HIto32, mkexpr( tmp_low[8] ) ) );
25937 ea_off += 4;
25938 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25939 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25941 store( irx_addr, unop( Iop_64to32, mkexpr( tmp_hi[8] ) ) );
25943 ea_off += 4;
25944 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( base_addr ),
25945 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
25947 store( irx_addr, unop( Iop_64HIto32, mkexpr( tmp_hi[8] ) ) );
25949 break;
25952 case 0x28C:
25954 IRTemp high64 = newTemp(Ity_F64);
25955 IRTemp val32 = newTemp(Ity_I32);
25956 DIP("stxsspx %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
25957 assign(high64, unop( Iop_ReinterpI64asF64,
25958 unop( Iop_V128HIto64, mkexpr( vS ) ) ) );
25959 assign(val32, unop( Iop_ReinterpF32asI32,
25960 unop( Iop_TruncF64asF32,
25961 mkexpr(high64) ) ) );
25962 store( mkexpr( EA ), mkexpr( val32 ) );
25963 break;
25965 case 0x2CC:
25967 IRExpr * high64;
25968 DIP("stxsdx %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
25969 high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
25970 store( mkexpr( EA ), high64 );
25971 break;
25974 case 0x38D: // stxsibx
25976 IRTemp byte_to_store = newTemp( Ity_I8 );
25978 DIP("stxsibx %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
25980 assign( byte_to_store, unop( Iop_64to8,
25981 unop( Iop_V128HIto64,
25982 mkexpr( vS ) ) ) );
25984 store( mkexpr( EA ), mkexpr( byte_to_store ) );
25985 break;
25988 case 0x3AD: // stxsihx
25990 IRTemp hword_to_store = newTemp( Ity_I16 );
25992 DIP("stxsihx %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
25994 assign( hword_to_store, unop( Iop_64to16,
25995 unop( Iop_V128HIto64,
25996 mkexpr( vS ) ) ) );
25998 store( mkexpr( EA ), mkexpr( hword_to_store ) );
25999 break;
26002 case 0x3CC:
26004 IRExpr * high64, *low64;
26005 DIP("stxvd2x %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
26006 high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
26007 low64 = unop( Iop_V128to64, mkexpr( vS ) );
26008 store( mkexpr( EA ), high64 );
26009 store( binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26010 ty == Ity_I64 ? mkU64( 8 ) : mkU32( 8 ) ), low64 );
26011 break;
26013 case 0x38C:
26015 UInt ea_off = 0;
26016 IRExpr* irx_addr;
26017 IRTemp hi64 = newTemp( Ity_I64 );
26018 IRTemp lo64 = newTemp( Ity_I64 );
26020 DIP("stxvw4x %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
26022 // This instruction supports word-aligned stores, so EA may not be
26023 // quad-word aligned. Therefore, do 4 individual word-size stores.
26024 assign( hi64, unop( Iop_V128HIto64, mkexpr( vS ) ) );
26025 assign( lo64, unop( Iop_V128to64, mkexpr( vS ) ) );
26026 store( mkexpr( EA ), unop( Iop_64HIto32, mkexpr( hi64 ) ) );
26027 ea_off += 4;
26028 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26029 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26030 store( irx_addr, unop( Iop_64to32, mkexpr( hi64 ) ) );
26031 ea_off += 4;
26032 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26033 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26034 store( irx_addr, unop( Iop_64HIto32, mkexpr( lo64 ) ) );
26035 ea_off += 4;
26036 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26037 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26038 store( irx_addr, unop( Iop_64to32, mkexpr( lo64 ) ) );
26040 break;
26042 case 0x3AC: // stxvh8x Store VSX Vector Halfword*8 Indexed
26044 UInt ea_off = 0;
26045 IRExpr* irx_addr;
26046 IRTemp half_word0 = newTemp( Ity_I64 );
26047 IRTemp half_word1 = newTemp( Ity_I64 );
26048 IRTemp half_word2 = newTemp( Ity_I64 );
26049 IRTemp half_word3 = newTemp( Ity_I64 );
26050 IRTemp half_word4 = newTemp( Ity_I64 );
26051 IRTemp half_word5 = newTemp( Ity_I64 );
26052 IRTemp half_word6 = newTemp( Ity_I64 );
26053 IRTemp half_word7 = newTemp( Ity_I64 );
26055 DIP("stxvb8x %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
26057 assign( half_word0, binop( Iop_Shr64,
26058 unop( Iop_V128HIto64, mkexpr( vS ) ),
26059 mkU8( 48 ) ) );
26061 assign( half_word1, binop( Iop_And64,
26062 binop( Iop_Shr64,
26063 unop( Iop_V128HIto64, mkexpr( vS ) ),
26064 mkU8( 32 ) ),
26065 mkU64( 0xFFFF ) ) );
26067 assign( half_word2, binop( Iop_And64,
26068 binop( Iop_Shr64,
26069 unop( Iop_V128HIto64, mkexpr( vS ) ),
26070 mkU8( 16 ) ),
26071 mkU64( 0xFFFF ) ) );
26073 assign( half_word3, binop( Iop_And64,
26074 unop( Iop_V128HIto64, mkexpr( vS ) ),
26075 mkU64( 0xFFFF ) ) );
26077 assign( half_word4, binop( Iop_Shr64,
26078 unop( Iop_V128to64, mkexpr( vS ) ),
26079 mkU8( 48 ) ) );
26081 assign( half_word5, binop( Iop_And64,
26082 binop( Iop_Shr64,
26083 unop( Iop_V128to64, mkexpr( vS ) ),
26084 mkU8( 32 ) ),
26085 mkU64( 0xFFFF ) ) );
26087 assign( half_word6, binop( Iop_And64,
26088 binop( Iop_Shr64,
26089 unop( Iop_V128to64, mkexpr( vS ) ),
26090 mkU8( 16 ) ),
26091 mkU64( 0xFFFF ) ) );
26093 assign( half_word7, binop( Iop_And64,
26094 unop( Iop_V128to64, mkexpr( vS ) ),
26095 mkU64( 0xFFFF ) ) );
26097 /* Do the 32-bit stores. The store() does an Endian aware store. */
26098 if ( host_endness == VexEndnessBE ) {
26099 store( mkexpr( EA ), unop( Iop_64to32,
26100 binop( Iop_Or64,
26101 mkexpr( half_word1 ),
26102 binop( Iop_Shl64,
26103 mkexpr( half_word0 ),
26104 mkU8( 16 ) ) ) ) );
26106 ea_off += 4;
26107 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26108 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26111 store( irx_addr, unop( Iop_64to32,
26112 binop( Iop_Or64,
26113 mkexpr( half_word3 ),
26114 binop( Iop_Shl64,
26115 mkexpr( half_word2 ),
26116 mkU8( 16 ) ) ) ) );
26118 ea_off += 4;
26119 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26120 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26122 store( irx_addr, unop( Iop_64to32,
26123 binop( Iop_Or64,
26124 mkexpr( half_word5 ),
26125 binop( Iop_Shl64,
26126 mkexpr( half_word4 ),
26127 mkU8( 16 ) ) ) ) );
26128 ea_off += 4;
26129 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26130 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26132 store( irx_addr, unop( Iop_64to32,
26133 binop( Iop_Or64,
26134 mkexpr( half_word7 ),
26135 binop( Iop_Shl64,
26136 mkexpr( half_word6 ),
26137 mkU8( 16 ) ) ) ) );
26139 } else {
26140 store( mkexpr( EA ), unop( Iop_64to32,
26141 binop( Iop_Or64,
26142 mkexpr( half_word0 ),
26143 binop( Iop_Shl64,
26144 mkexpr( half_word1 ),
26145 mkU8( 16 ) ) ) ) );
26147 ea_off += 4;
26148 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26149 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26151 store( irx_addr, unop( Iop_64to32,
26152 binop( Iop_Or64,
26153 mkexpr( half_word2 ),
26154 binop( Iop_Shl64,
26155 mkexpr( half_word3 ),
26156 mkU8( 16 ) ) ) ) );
26157 ea_off += 4;
26158 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26159 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26161 store( irx_addr, unop( Iop_64to32,
26162 binop( Iop_Or64,
26163 mkexpr( half_word4 ),
26164 binop( Iop_Shl64,
26165 mkexpr( half_word5 ),
26166 mkU8( 16 ) ) ) ) );
26167 ea_off += 4;
26168 irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26169 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26171 store( irx_addr, unop( Iop_64to32,
26172 binop( Iop_Or64,
26173 mkexpr( half_word6 ),
26174 binop( Iop_Shl64,
26175 mkexpr( half_word7 ),
26176 mkU8( 16 ) ) ) ) );
26178 break;
26181 case 0x3EC: // stxvb16x Store VSX Vector Byte*16 Indexed
26183 UInt ea_off = 0;
26184 int i;
26185 IRExpr* irx_addr;
26186 IRTemp byte[16];
26188 DIP("stxvb16x %u,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
26190 for ( i = 0; i < 8; i++ ) {
26191 byte[i] = newTemp( Ity_I64 );
26192 byte[i+8] = newTemp( Ity_I64 );
26194 assign( byte[i], binop( Iop_And64,
26195 binop( Iop_Shr64,
26196 unop( Iop_V128HIto64, mkexpr( vS ) ),
26197 mkU8( 56 - i*8 ) ),
26198 mkU64( 0xFF ) ) );
26200 assign( byte[i+8], binop( Iop_And64,
26201 binop( Iop_Shr64,
26202 unop( Iop_V128to64, mkexpr( vS ) ),
26203 mkU8( 56 - i*8) ),
26204 mkU64( 0xFF ) ) );
26207 if ( host_endness == VexEndnessBE ) {
26208 for ( i = 0; i < 16; i = i + 4) {
26209 irx_addr =
26210 binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26211 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26213 store( irx_addr,
26214 unop( Iop_64to32,
26215 binop( Iop_Or64,
26216 binop( Iop_Or64,
26217 mkexpr( byte[i+3] ),
26218 binop( Iop_Shl64,
26219 mkexpr( byte[i+2] ),
26220 mkU8( 8 ) ) ),
26221 binop( Iop_Or64,
26222 binop( Iop_Shl64,
26223 mkexpr( byte[i+1] ),
26224 mkU8( 16 ) ),
26225 binop( Iop_Shl64,
26226 mkexpr( byte[i] ),
26227 mkU8( 24 ) ) ) ) ) );
26228 ea_off += 4;
26231 } else {
26232 for ( i = 0; i < 16; i = i + 4) {
26233 irx_addr =
26234 binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
26235 ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
26237 store( irx_addr,
26238 unop( Iop_64to32,
26239 binop( Iop_Or64,
26240 binop( Iop_Or64,
26241 mkexpr( byte[i] ),
26242 binop( Iop_Shl64,
26243 mkexpr( byte[i+1] ),
26244 mkU8( 8 ) ) ),
26245 binop( Iop_Or64,
26246 binop( Iop_Shl64,
26247 mkexpr( byte[i+2] ),
26248 mkU8( 16 ) ),
26249 binop( Iop_Shl64,
26250 mkexpr( byte[i+3] ),
26251 mkU8( 24 ) ) ) ) ) );
26253 ea_off += 4;
26256 break;
26259 default:
26260 vex_printf( "dis_vx_store(ppc)(opc2)\n" );
26261 return False;
26263 return True;
26266 static Bool
26267 dis_vx_Scalar_Round_to_quad_integer( UInt prefix, UInt theInstr,
26268 const VexAbiInfo* vbi )
26270 /* The ISA 3.0 instructions supported in this function require
26271 * that the underlying hardware platform supports the ISA 3.0
26272 * instruction set.
26274 /* XX1-Form */
26275 UChar opc1 = ifieldOPC( theInstr );
26276 UInt opc2 = IFIELD( theInstr, 1, 8 );
26277 UChar vT_addr = ifieldRegDS( theInstr );
26278 UChar vB_addr = ifieldRegB( theInstr );
26279 IRTemp vB = newTemp( Ity_F128 );
26280 IRTemp vT = newTemp( Ity_F128 );
26281 UChar EX = IFIELD( theInstr, 0, 1 );  /* EX=1 selects the 'with Inexact' form */
26283 /* There is no prefixed version of these instructions. */
26284 PREFIX_CHECK
26286 assign( vB, getF128Reg( vB_addr ) );
26287 if (opc1 != 0x3F) {
26288 vex_printf( "dis_vx_Scalar_Round_to_quad_integer(ppc)(instr)\n" );
26289 return False;
26291 switch (opc2) {
26292 case 0x005: // VSX Scalar Round to Quad-Precision Integer [with Inexact]
26294 UChar R = IFIELD( theInstr, 16, 1 );
26295 UChar RMC = IFIELD( theInstr, 9, 2 );
26297 /* Store the rm specification bits. Will extract them later when
26298 * the instruction is issued.
26300 IRExpr* rm = mkU32( R << 3 | RMC << 1 | EX);
26302 if ( EX == 0 ) { // xsrqpi
26303 DIP("xsrqpi %d,v%d,v%d,%d\n", R, vT_addr, vB_addr, RMC);
26304 assign( vT, binop( Iop_F128toI128S, rm, mkexpr( vB ) ) );
26306 } else { // xsrqpix
26307 DIP("xsrqpix %d,v%d,v%d,%d\n", R, vT_addr, vB_addr, RMC);
26308 assign( vT, binop( Iop_F128toI128S, rm, mkexpr( vB ) ) );
26310 generate_store_FPRF( Ity_F128, vT, vbi );
26311 } /* case 0x005 */
26312 break;
26313 case 0x025: // xsrqpxp VSX Scalar Round Quad-Precision to
26314 // Double-Extended Precision
26316 UChar R = IFIELD( theInstr, 16, 1 );
26317 UChar RMC = IFIELD( theInstr, 9, 2 );
26319 /* Store the rm specification bits. Will extract them later when
26320 * the instruction is issued.
26322 IRExpr* rm = mkU32( R << 3 | RMC << 1 );
26324 DIP("xsrqpxp %d,v%d,v%d,%d\n", R, vT_addr, vB_addr, RMC);
26325 assign( vT, binop( Iop_RndF128, rm, mkexpr( vB ) ) );
26326 generate_store_FPRF( Ity_F128, vT, vbi );
26327 } /* case 0x025 */
26328 break;
26329 default:
26330 vex_printf( "dis_vx_Scalar_Round_to_quad_integer(ppc)(opc2)\n" );
26331 return False;
26332 } /* switch opc2 */
26333 putF128Reg( vT_addr, mkexpr( vT ) );
26334 return True;
26337 static Bool
26338 dis_vx_Floating_Point_Arithmetic_quad_precision( UInt prefix, UInt theInstr,
26339 const VexAbiInfo* vbi )
26341 /* The ISA 3.0 instructions supported in this function require
26342 * that the underlying hardware platform supports the ISA 3.0
26343 * instruction set.
26345 /* XX1-Form */
26346 UChar opc1 = ifieldOPC( theInstr );
26347 UInt opc2 = ifieldOPClo10( theInstr );
26348 UChar vT_addr = ifieldRegDS( theInstr );
26349 UChar vA_addr = ifieldRegA( theInstr );
26350 UChar vB_addr = ifieldRegB( theInstr );
26351 IRTemp vA = newTemp( Ity_F128 );
26352 IRTemp vB = newTemp( Ity_F128 );
26353 IRTemp vT = newTemp( Ity_F128 );
26354 IRExpr* rm = get_IR_roundingmode();
26355 UChar R0 = IFIELD( theInstr, 0, 1 );  /* R0=1 selects round-to-odd variants */
26357 /* There is no prefixed version of these instructions. */
26358 PREFIX_CHECK
26360 assign( vB, getF128Reg( vB_addr ) );
26362 if ( opc1 != 0x3F ) {
26363 vex_printf( "Error, dis_vx_Floating_Point_Arithmetic_quad_precision(ppc)(instr)\n" );
26364 return False;
26366 switch ( opc2 ) {
26367 case 0x004: // xsaddqp (VSX Scalar Add Quad-Precision[using round to Odd])
26369 assign( vA, getF128Reg( vA_addr ) );
26371 if ( R0 == 0 ) {
26372 /* rounding mode specified by RN. Issue inst with R0 = 0 */
26373 DIP("xsaddqp v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26374 assign( vT, triop( Iop_AddF128, rm, mkexpr( vA ), mkexpr( vB ) ) );
26376 } else {
26377 /* rounding mode specified by Round to odd. Issue inst with R0 = 1 */
26378 DIP("xsaddqpo v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26379 assign( vT, triop( Iop_AddF128, set_round_to_Oddmode(),
26380 mkexpr( vA ), mkexpr( vB ) ) );
26382 generate_store_FPRF( Ity_F128, vT, vbi );
26383 break;
26385 case 0x024: // xsmulqp (VSX Scalar Multiply Quad-Precision[using round to Odd])
26387 assign( vA, getF128Reg( vA_addr ) );
26389 if ( R0 == 0 ) {
26390 /* rounding mode specified by RN. Issue inst with R0 = 0 */
26391 DIP("xsmulqp v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26392 assign( vT, triop( Iop_MulF128, rm, mkexpr( vA ), mkexpr( vB ) ) );
26394 } else {
26395 /* rounding mode specified by Round to odd. Issue inst with R0 = 1 */
26396 DIP("xsmulqpo v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26397 assign( vT, triop( Iop_MulF128, set_round_to_Oddmode(), mkexpr( vA ),
26398 mkexpr( vB ) ) );
26400 generate_store_FPRF( Ity_F128, vT, vbi );
26401 break;
26403 case 0x184: // xsmaddqp (VSX Scalar Multiply add Quad-Precision[using round to Odd])
26405 /* instruction computes (vA * vB) + vC */
26406 IRTemp vC = newTemp( Ity_F128 );
26408 assign( vA, getF128Reg( vA_addr ) );
26409 assign( vC, getF128Reg( vT_addr ) );
26411 if ( R0 == 0 ) {
26412 /* rounding mode specified by RN. Issue inst with R0 = 0 */
26413 DIP("xsmaddqp v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26414 assign( vT,
26415 qop( Iop_MAddF128, rm, mkexpr( vA ),
26416 mkexpr( vC ), mkexpr( vB ) ) );
26418 } else {
26419 /* rounding mode specified by Round to odd. Issue inst with R0 = 1 */
26420 DIP("xsmaddqpo v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26421 assign( vT,
26422 qop( Iop_MAddF128, set_round_to_Oddmode(), mkexpr( vA ),
26423 mkexpr( vC ), mkexpr( vB ) ) );
26425 generate_store_FPRF( Ity_F128, vT, vbi );
26426 break;
26428 case 0x1A4: // xsmsubqp (VSX Scalar Multiply Subtract Quad-Precision[using round to Odd])
26430 IRTemp vC = newTemp( Ity_F128 );
26432 assign( vA, getF128Reg( vA_addr ) );
26433 assign( vC, getF128Reg( vT_addr ) );
26435 if ( R0 == 0 ) {
26436 /* rounding mode specified by RN. Issue inst with R0 = 0 */
26437 DIP("xsmsubqp v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26438 assign( vT,
26439 qop( Iop_MSubF128, rm, mkexpr( vA ),
26440 mkexpr( vC ), mkexpr( vB ) ) );
26442 } else {
26443 /* rounding mode specified by Round to odd. Issue inst with R0 = 1 */
26444 DIP("xsmsubqpo v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26445 assign( vT,
26446 qop( Iop_MSubF128, set_round_to_Oddmode(),
26447 mkexpr( vA ), mkexpr( vC ), mkexpr( vB ) ) );
26449 generate_store_FPRF( Ity_F128, vT, vbi );
26450 break;
26452 case 0x1C4: // xsnmaddqp (VSX Scalar Negative Multiply Add Quad-Precision[using round to Odd])
26454 IRTemp vC = newTemp( Ity_F128 );
26456 assign( vA, getF128Reg( vA_addr ) );
26457 assign( vC, getF128Reg( vT_addr ) );
26459 if ( R0 == 0 ) {
26460 /* rounding mode specified by RN. Issue inst with R0 = 0 */
26461 DIP("xsnmaddqp v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26462 assign( vT,
26463 qop( Iop_NegMAddF128, rm, mkexpr( vA ),
26464 mkexpr( vC ), mkexpr( vB ) ) );
26466 } else {
26467 /* rounding mode specified by Round to odd. Issue inst with R0 = 1 */
26468 DIP("xsnmaddqpo v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26469 assign( vT,
26470 qop( Iop_NegMAddF128, set_round_to_Oddmode(),
26471 mkexpr( vA ), mkexpr( vC ), mkexpr( vB ) ) );
26473 generate_store_FPRF( Ity_F128, vT, vbi );
26474 break;
26476 case 0x1E4: // xsnmsubqp (VSX Scalar Negative Multiply Subtract Quad-Precision[using round to Odd])
26478 IRTemp vC = newTemp( Ity_F128 );
26480 assign( vA, getF128Reg( vA_addr ) );
26481 assign( vC, getF128Reg( vT_addr ) );
26483 if ( R0 == 0 ) {
26484 /* rounding mode specified by RN. Issue inst with R0 = 0 */
26485 DIP("xsnmsubqp v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26486 assign( vT,
26487 qop( Iop_NegMSubF128, rm, mkexpr( vA ),
26488 mkexpr( vC ), mkexpr( vB ) ) );
26490 } else {
26491 /* rounding mode specified by Round to odd. Issue inst with R0 = 1 */
26492 DIP("xsnmsubqpo v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26493 assign( vT,
26494 qop( Iop_NegMSubF128, set_round_to_Oddmode(),
26495 mkexpr( vA ), mkexpr( vC ), mkexpr( vB ) ) );
26497 generate_store_FPRF( Ity_F128, vT, vbi );
26498 break;
26500 case 0x204: // xssubqp (VSX Scalar Subtract Quad-Precision[using round to Odd])
26502 assign( vA, getF128Reg( vA_addr ) );
26503 if ( R0 == 0 ) {
26504 /* use rounding mode specified by RN. Issue inst with R0 = 0 */
26505 DIP("xssubqp v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26506 assign( vT, triop( Iop_SubF128, rm, mkexpr( vA ), mkexpr( vB ) ) );
26508 } else {
26509 /* use rounding mode specified by Round to odd. Issue inst with R0 = 1 */
26510 DIP("xssubqpo v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26511 assign( vT, triop( Iop_SubF128, set_round_to_Oddmode(), mkexpr( vA ),
26512 mkexpr( vB ) ) );
26514 generate_store_FPRF( Ity_F128, vT, vbi );
26515 break;
26517 case 0x224: // xsdivqp (VSX Scalar Divide Quad-Precision[using round to Odd])
26519 assign( vA, getF128Reg( vA_addr ) );
26520 if ( R0 == 0 ) {
26521 /* use rounding mode specified by RN. Issue inst with R0 = 0 */
26522 DIP("xsdivqp v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26523 assign( vT, triop( Iop_DivF128, rm, mkexpr( vA ), mkexpr( vB ) ) );
26525 } else {
26526 /* use rounding mode specified by Round to odd. Issue inst with R0 = 1 */
26527 DIP("xsdivqpo v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
26528 assign( vT, triop( Iop_DivF128, set_round_to_Oddmode(), mkexpr( vA ),
26529 mkexpr( vB ) ) );
26531 generate_store_FPRF( Ity_F128, vT, vbi );
26532 break;
26534 case 0x324: // xssqrtqp (VSX Scalar Square root Quad-Precision[using round to Odd])
26536 UInt inst_select = IFIELD( theInstr, 16, 5 );
26538 switch (inst_select) {
26539 case 27:
26541 if ( R0 == 0 ) { // xssqrtqp
26542 /* rounding mode specified by RN. Issue inst with R0 = 0 */
26543 DIP("xssqrtqp v%d,v%d\n", vT_addr, vB_addr);
26544 assign( vT, binop( Iop_SqrtF128, rm, mkexpr( vB ) ) );
26546 } else { // xssqrtqpo
26547 /* rounding mode is Round to odd. Issue inst with R0 = 1 */
26548 DIP("xssqrtqpo v%d,v%d\n", vT_addr, vB_addr);
26549 assign( vT, binop( Iop_SqrtF128, set_round_to_Oddmode(),
26550 mkexpr( vB ) ) );
26552 generate_store_FPRF( Ity_F128, vT, vbi );
26553 break;
26554 } /* end case 27 */
26555 default:
26556 vex_printf("dis_vx_Floating_Point_Arithmetic_quad_precision(0x324 unknown inst_select)\n");
26557 return False;
26558 } /* end switch inst_select */
26559 break;
26560 } /* end case 0x324 */
26562 case 0x344:
26564 UInt inst_select = IFIELD( theInstr, 16, 5);
26566 switch (inst_select) {
26567 case 0: // xscvqpuqz, VSX Scalar Convert with round to zero
26568 // Quad-Precision to Unsigned Quadword X-form
26570 DIP("xscvqpuqz, v%d,v%d\n", vT_addr, vB_addr);
26571 assign( vT, unop( Iop_TruncF128toI128U, mkexpr( vB ) ) );
26572 break;
26574 case 1: // xscvqpuwz VSX Scalar Truncate & Convert Quad-Precision
26575 // format to Unsigned Word format
26577 DIP("xscvqpuwz v%d,v%d\n", vT_addr, vB_addr);
26578 assign( vT, unop( Iop_TruncF128toI32U, mkexpr( vB ) ) );
26579 break;
26581 case 2: // xscvudqp VSX Scalar Convert from Unsigned Doubleword
26582 // format to Quad-Precision format
26584 IRTemp tmp = newTemp( Ity_I64 );
26586 DIP("xscvudqp v%d,v%d\n", vT_addr, vB_addr);
26587 assign( tmp, unop( Iop_ReinterpF64asI64,
26588 unop( Iop_F128HItoF64, mkexpr( vB ) ) ) );
26589 assign( vT, unop( Iop_I64UtoF128, mkexpr( tmp ) ) );
26590 generate_store_FPRF( Ity_F128, vT, vbi );
26591 break;
26593 case 3: // xscvuqqp, VSX Scalar Convert Unsigned Quadword
26594 // to Quad-Precision X-form
26596 DIP("xscvuqqp, v%d,v%d\n", vT_addr, vB_addr);
26597 assign( vT,
26598 binop( Iop_I128UtoF128, rm,
26599 unop ( Iop_ReinterpF128asI128,
26600 getF128Reg( vB_addr ) ) ) );
26601 generate_store_FPRF( Ity_F128, vT, vbi );
26602 break;
26604 case 8: // xscvqpsqz, VSX Scalar Convert with round to zero
26605 // Quad-Precision to Signed Quadword X-form
26607 DIP("xscvqpsqz, v%d,v%d\n", vT_addr, vB_addr);
26608 assign( vT, unop( Iop_TruncF128toI128S, mkexpr( vB ) ) );
26609 break;
26611 case 9: // xscvqpswz VSX Scalar Truncate & Convert Quad-Precision
26612 // format to Signed Word format
26614 DIP("xscvqpswz v%d,v%d\n", vT_addr, vB_addr);
26615 assign( vT, unop( Iop_TruncF128toI32S, mkexpr( vB ) ) );
26616 break;
26618 case 10: // xscvsdqp VSX Scalar Convert from Signed Doubleword format
26619 // to Quad-Precision format
26621 IRTemp tmp = newTemp( Ity_I64 );
26623 DIP("xscvsdqp v%d,v%d\n", vT_addr, vB_addr);
26625 assign( tmp, unop( Iop_ReinterpF64asI64,
26626 unop( Iop_F128HItoF64, mkexpr( vB ) ) ) );
26627 assign( vT, unop( Iop_I64StoF128, mkexpr( tmp ) ) );
26628 generate_store_FPRF( Ity_F128, vT, vbi );
26629 break;
26631 case 11: // xscvsqqp, VSX Scalar Convert Signed Quadword
26632 // to Quad-Precision X-form
26634 DIP("xscvsqqp, v%d,v%d\n", vT_addr, vB_addr);
26635 assign( vT,
26636 binop( Iop_I128StoF128, rm,
26637 unop ( Iop_ReinterpF128asI128,
26638 mkexpr( vB ) ) ) );
26639 generate_store_FPRF( Ity_F128, vT, vbi );
26640 break;
26642 case 17: // xscvqpudz VSX Scalar Truncate & Convert Quad-Precision
26643 // format to Unsigned Doubleword format
26645 DIP("xscvqpudz v%d,v%d\n", vT_addr, vB_addr);
26646 assign( vT, unop( Iop_TruncF128toI64U, mkexpr( vB ) ) );
26647 break;
26649 case 20: // xscvqpdp Scalar round & Convert Quad-Precision
26650 // format to Double-Precision format [using round to Odd]
26652 IRTemp ftmp = newTemp( Ity_F64 );
26653 IRTemp tmp = newTemp( Ity_I64 );
26655 /* This instruction takes a 128-bit floating point value and
26656 * converts it to a 64-bit floating point value. The 64-bit
26657 * result is stored in the upper 64-bit of the 128-bit result
26658 * register. The lower 64-bit are undefined.
26660 if (R0 == 0) { // xscvqpdp
26661 /* rounding mode specified by RN. Issue inst with R0 = 0 */
26662 DIP("xscvqpdp v%d,v%d\n", vT_addr, vB_addr);
26664 assign( ftmp, binop( Iop_F128toF64, rm, mkexpr( vB ) ) );
26666 } else { // xscvqpdpo
26667 /* rounding mode is Round to odd. Issue inst with R0 = 1 */
26668 DIP("xscvqpdpo v%d,v%d\n", vT_addr, vB_addr);
26669 assign( ftmp,
26670 binop( Iop_F128toF64,
26671 set_round_to_Oddmode(), mkexpr( vB ) ) );
26674 /* store 64-bit float in upper 64-bits of 128-bit register,
26675 * lower 64-bits are zero.
26677 if (host_endness == VexEndnessLE)
26678 assign( vT,
26679 binop( Iop_F64HLtoF128,
26680 mkexpr( ftmp ),
26681 unop( Iop_ReinterpI64asF64, mkU64( 0 ) ) ) );
26682 else
26683 assign( vT,
26684 binop( Iop_F64HLtoF128,
26685 unop( Iop_ReinterpI64asF64, mkU64( 0 ) ),
26686 mkexpr( ftmp ) ) );
26688 assign( tmp, unop( Iop_ReinterpF64asI64,
26689 unop( Iop_F128HItoF64, mkexpr( vT ) ) ) );
26691 generate_store_FPRF( Ity_I64, tmp, vbi );
26692 break;
26694 case 22: // xscvdpqp VSX Scalar Convert from Double-Precision
26695 // format to Quad-Precision format
26697 DIP("xscvdpqp v%d,v%d\n", vT_addr, vB_addr);
26698 /* The 64-bit value is in the upper 64 bit of the src */
26699 assign( vT, unop( Iop_F64toF128,
26700 unop( Iop_F128HItoF64, mkexpr( vB ) ) ) );
26702 generate_store_FPRF( Ity_F128, vT, vbi );
26703 break;
26705 case 25: // xscvqpsdz VSX Scalar Truncate & Convert Quad-Precision
26706 // format to Signed Doubleword format
26708 DIP("xscvqpsdz v%d,v%d\n", vT_addr, vB_addr);
26709 assign( vT, unop( Iop_TruncF128toI64S, mkexpr( vB ) ) );
26710 break;
26712 default:
26713 vex_printf( "dis_vx_Floating_Point_Arithmetic_quad_precision invalid inst_select (ppc)(opc2)\n" );
26714 return False;
26715 } /* switch inst_select */
26716 } /* end case 0x344 */
26717 break;
26718 default: /* switch opc2 */
26719 vex_printf( "dis_vx_Floating_Point_Arithmetic_quad_precision(ppc)(opc2)\n" );
26720 return False;
26722 putF128Reg( vT_addr, mkexpr( vT ) );
26723 return True;
26727 /* VSX Scalar Quad-Precision instructions */
26728 static Bool
26729 dis_vx_scalar_quad_precision ( UInt prefix, UInt theInstr )
26731 /* This function emulates the 128-bit floating point instructions
26732 * using existing 128-bit vector instructions (Iops). The 128-bit
26733 * floating point instructions use the same 128-bit vector register
26734 * set.
26736 /* XX1-Form */
26737 UChar opc1 = ifieldOPC( theInstr );
26738 UInt opc2 = ifieldOPClo10( theInstr );
26739 UChar VRT = ifieldRegDS( theInstr );
26740 UChar VRA = ifieldRegA( theInstr );
26741 UChar VRB = ifieldRegB( theInstr );
26742 UChar vT_addr = VRT + 32;
26743 UChar vA_addr = VRA + 32;
26744 UChar vB_addr = VRB + 32;
26745 IRTemp vA = newTemp( Ity_V128 );
26746 IRTemp vB = newTemp( Ity_V128 );
26747 IRTemp vT = newTemp( Ity_V128 );
26749 /* There is no prefixed version of these instructions. */
26750 PREFIX_CHECK
26752 assign( vB, getVSReg( vB_addr ) );
26754 if (opc1 != 0x3F) {
26755 vex_printf( "dis_vx_scalar_quad_precision(ppc)(instr)\n" );
26756 return False;
26759 switch (opc2) {
26761 case 0x044: // xscmpeqqp (VSX Scalar Compare Equal Quad-Precision X-form)
26763 IRTemp vA_hi = newTemp( Ity_I64 );
26764 IRTemp vA_lo = newTemp( Ity_I64 );
26765 IRTemp vB_hi = newTemp( Ity_I64 );
26766 IRTemp vB_lo = newTemp( Ity_I64 );
26767 IRTemp tmp = newTemp( Ity_I64 );
26768 IRTemp src_not_NaN = newTemp( Ity_I64 );
26770 /* NOTE: exceptions are not implemented, will not set VXSNAN, VXVC or
26771 FX registers. */
26772 DIP("xscmpeqqp v%u,v%u,v%u\n", VRT, VRA, VRB);
26774 assign( vA, getVSReg( vA_addr ) );
26776 /* neither vA or vB is NaN */
26777 assign( src_not_NaN,
26778 unop(Iop_Not64,
26779 unop(Iop_1Sto64,
26780 mkOR1( is_NaN( Ity_V128, vA ),
26781 is_NaN( Ity_V128, vB ) ) ) ) );
26783 assign( vA_hi, unop( Iop_V128HIto64, mkexpr( vA ) ) );
26784 assign( vA_lo, unop( Iop_V128to64, mkexpr( vA ) ) );
26785 assign( vB_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
26786 assign( vB_lo, unop( Iop_V128to64, mkexpr( vB ) ) );
26788 assign( tmp,
26789 binop( Iop_And64,
26790 mkexpr( src_not_NaN ),
26791 binop( Iop_And64,
26792 unop( Iop_1Sto64,
26793 binop( Iop_CmpEQ64,
26794 mkexpr( vA_hi ),
26795 mkexpr( vB_hi ) ) ),
26796 unop( Iop_1Sto64,
26797 binop( Iop_CmpEQ64,
26798 mkexpr( vA_lo ),
26799 mkexpr( vB_lo ) ) ) ) ) );
26800 assign( vT, binop( Iop_64HLtoV128, mkexpr( tmp ), mkexpr( tmp ) ) );
26802 break;
26804 case 0x064: // xscpsgnqp (VSX Scalar Copy Sign Quad-Precision)
26806 IRTemp sign_vA = newTemp( Ity_I64 );
26807 IRTemp vB_hi = newTemp( Ity_I64 );
26809 DIP("xscpsgnqp v%u,v%u,v%u\n", VRT, VRA, VRB);
26811 assign( vA, getVSReg(vA_addr) );
26813 assign( sign_vA, binop( Iop_And64,
26814 unop( Iop_V128HIto64,
26815 mkexpr( vA ) ),
26816 mkU64( 0x8000000000000000ULL ) ) );
26817 assign( vB_hi, binop( Iop_Or64,
26818 binop( Iop_And64,
26819 unop( Iop_V128HIto64,
26820 mkexpr( vB ) ),
26821 mkU64( 0x7FFFFFFFFFFFFFFFULL ) ),
26822 mkexpr( sign_vA ) ) );
26823 assign( vT, binop( Iop_64HLtoV128,
26824 mkexpr( vB_hi ),
26825 unop( Iop_V128to64, mkexpr( vB ) ) ) );
26826 break;
26829 case 0x0C4: // xscmpgeqp (VSX Scalar Compare Greater Than or
26830 // Equal Quad-Precision X-form)
26832 IRTemp tmp = newTemp( Ity_I64 );
26833 IRTemp src_not_NaN = newTemp( Ity_I64 );
26835 /* NOTE: exceptions are not implemented, will not set VXSNAN, VXVC or
26836 FX registers. */
26837 DIP("xscmpgeqp v%u,v%u,v%u\n", VRT, VRA, VRB);
26839 assign( vA, getVSReg( vA_addr ) );
26841 /* neither vA or vB is NaN */
26842 assign( src_not_NaN,
26843 unop(Iop_Not64,
26844 unop(Iop_1Sto64,
26845 mkOR1( is_NaN( Ity_V128, vA ),
26846 is_NaN( Ity_V128, vB ) ) ) ) );
26848 /* vA >= vB is Not( vB > vA) */
26849 assign( tmp,
26850 binop( Iop_And64,
26851 mkexpr( src_not_NaN ),
26852 unop( Iop_Not64,
26853 unop( Iop_1Sto64,
26854 Quad_precision_gt( vB, vA ) ) ) ) ) ;
26855 assign( vT, binop( Iop_64HLtoV128, mkexpr( tmp ), mkexpr( tmp ) ) );
26857 break;
26859 case 0x0E4: // xscmpgtqp (VSX Scalar Compare Greater Than
26860 // Quad-Precision X-form)
26862 IRTemp tmp = newTemp( Ity_I64 );
26863 IRTemp src_not_NaN = newTemp( Ity_I64 );
26865 /* NOTE: exceptions are not implemented, will not set VXSNAN, VXVC or
26866 FX registers. */
26867 DIP("xscmpgtqp v%u,v%u,v%u\n", VRT, VRA, VRB);
26869 assign( vA, getVSReg( vA_addr ) );
26871 /* neither vA nor vB is NaN */
26872 assign( src_not_NaN,
26873 unop(Iop_Not64,
26874 unop(Iop_1Sto64,
26875 mkOR1( is_NaN( Ity_V128, vA ),
26876 is_NaN( Ity_V128, vB ) ) ) ) );
26878 assign( tmp,
26879 binop( Iop_And64,
26880 mkexpr( src_not_NaN ),
26881 unop( Iop_1Sto64, Quad_precision_gt( vA, vB ) ) ) );
26883 assign( vT, binop( Iop_64HLtoV128, mkexpr( tmp ), mkexpr( tmp ) ) );
26885 break;
26887 case 0x084: // xscmpoqp (VSX Scalar Compare Ordered Quad-Precision)
26888 case 0x284: // xscmpuqp (VSX Scalar Compare Unordered Quad-Precision)
26890 /* Note, only difference between xscmpoqp and xscmpuqp is the
26891 exception flag settings which are not supported anyway. */
26892 IRExpr *bit4, *bit5, *bit6, *bit7;
26893 IRExpr *bit_zero, *bit_inf, *same_sign;
26894 UInt BF = IFIELD( theInstr, 23, 3 );
26895 IRTemp eq_lt_gt = newTemp( Ity_I32 );
26896 IRTemp CC = newTemp( Ity_I32 );
26898 if (opc2 == 0x084) {
26899 DIP("xscmpoqp %u,v%d,v%u\n", BF, VRA, VRB);
26900 } else {
26901 DIP("xscmpuqp %u,v%d,v%u\n", BF, VRA, VRB);
26904 assign( vA, getVSReg(vA_addr));
26906 /* A and B have the same sign */
26907 same_sign = binop( Iop_CmpEQ64,
26908 binop( Iop_Shr64,
26909 unop( Iop_V128HIto64,
26910 mkexpr( vA ) ),
26911 mkU8( 63 ) ),
26912 binop( Iop_Shr64,
26913 unop( Iop_V128HIto64,
26914 mkexpr( vB ) ),
26915 mkU8( 63 ) ) );
26917 /* A < B */
26918 bit4 = Quad_precision_gt( vB, vA );
26920 /* A > B */
26921 bit5 = Quad_precision_gt( vA, vB );
26923 /* A equal B */
26924 bit6 = mkAND1( binop( Iop_CmpEQ64,
26925 unop( Iop_V128HIto64,
26926 mkexpr( vA ) ),
26927 unop( Iop_V128HIto64,
26928 mkexpr( vB ) ) ),
26929 binop( Iop_CmpEQ64,
26930 unop( Iop_V128to64,
26931 mkexpr( vA ) ),
26932 unop( Iop_V128to64,
26933 mkexpr( vB ) ) ) );
26935 /* test both zero don't care about sign */
26936 bit_zero = mkAND1( is_Zero( Ity_V128, vA ), is_Zero( Ity_V128, vB ) );
26938 /* test both for infinity, don't care about sign */
26939 bit_inf = mkAND1(
26940 mkAND1( is_Inf( Ity_V128, vA ), is_Inf( Ity_V128, vB ) ),
26941 binop( Iop_CmpEQ64,
26942 binop( Iop_And64,
26943 unop( Iop_V128to64,
26944 mkexpr( vA ) ),
26945 mkU64( 0x80000000) ),
26946 binop( Iop_And64,
26947 unop( Iop_V128to64,
26948 mkexpr( vB ) ),
26949 mkU64( 0x80000000) ) ) );
26951 /* exp A or exp B is NaN */
26952 bit7 = mkOR1( is_NaN( Ity_V128, vA ),
26953 is_NaN( Ity_V128, vB ) );
26955 assign( eq_lt_gt,
26956 binop( Iop_Or32,
26957 binop( Iop_Or32,
26958 binop( Iop_Shl32,
26959 unop( Iop_1Uto32, bit4 ),
26960 mkU8( 3 ) ),
26961 binop( Iop_Shl32,
26962 unop( Iop_1Uto32, bit5 ),
26963 mkU8( 2 ) ) ),
26964 binop( Iop_Or32,
26965 binop( Iop_Shl32,
26966 unop( Iop_1Uto32, bit6 ),
26967 mkU8( 1 ) ),
26968 binop( Iop_Or32,
26969 binop( Iop_Shl32,
26970 unop( Iop_1Uto32,
26971 bit_zero ),
26972 mkU8( 1 ) ),
26973 binop( Iop_Shl32,
26974 unop( Iop_1Uto32,
26975 mkAND1( bit_inf, same_sign ) ),
26976 mkU8( 1 ) ) ) ) ) );
26978 assign(CC, binop( Iop_Or32,
26979 binop( Iop_And32,
26980 unop( Iop_Not32,
26981 unop( Iop_1Sto32, bit7 ) ),
26982 mkexpr( eq_lt_gt ) ),
26983 unop( Iop_1Uto32, bit7 ) ) );
26985 /* put result of the comparison into CC and FPCC */
26986 putGST_field( PPC_GST_CR, mkexpr( CC ), BF );
26987 putFPCC( mkexpr( CC ) );
26988 return True;
26990 break;
26992 case 0xA4: // xscmpexpqp (VSX Scalar Compare Exponents Quad-Precision)
26994 IRExpr *bit4, *bit5, *bit6, *bit7;
26995 UInt BF = IFIELD( theInstr, 23, 3 );
26997 IRTemp eq_lt_gt = newTemp( Ity_I32 );
26998 IRTemp CC = newTemp( Ity_I32 );
27000 DIP("xscmpexpqp %u,v%u,v%u\n", BF, VRA, VRB);
27002 assign( vA, getVSReg(vA_addr));
27004 /* A exp < B exp */
27005 bit4 = binop( Iop_CmpLT64U,
27006 binop( Iop_And64,
27007 unop( Iop_V128HIto64,
27008 mkexpr( vA ) ),
27009 mkU64( 0x7FFF000000000000 ) ),
27010 binop( Iop_And64,
27011 unop( Iop_V128HIto64,
27012 mkexpr( vB ) ),
27013 mkU64( 0x7FFF000000000000 ) ) );
27014 /* exp > B exp */
27015 bit5 = binop( Iop_CmpLT64U,
27016 binop( Iop_And64,
27017 unop( Iop_V128HIto64,
27018 mkexpr( vB ) ),
27019 mkU64( 0x7FFF000000000000 ) ),
27020 binop( Iop_And64,
27021 unop( Iop_V128HIto64,
27022 mkexpr( vA ) ),
27023 mkU64( 0x7FFF000000000000 ) ) );
27024 /* test equal */
27025 bit6 = binop( Iop_CmpEQ64,
27026 binop( Iop_And64,
27027 unop( Iop_V128HIto64,
27028 mkexpr( vA ) ),
27029 mkU64( 0x7FFF000000000000 ) ),
27030 binop( Iop_And64,
27031 unop( Iop_V128HIto64,
27032 mkexpr( vB ) ),
27033 mkU64( 0x7FFF000000000000 ) ) );
27035 /* exp A or exp B is NaN */
27036 bit7 = mkOR1( is_NaN( Ity_V128, vA ),
27037 is_NaN( Ity_V128, vB ) );
27039 /* NaN overrules the other comparisons */
27040 assign( eq_lt_gt, binop( Iop_Or32,
27041 binop( Iop_Shl32,
27042 unop( Iop_1Uto32, bit4 ),
27043 mkU8( 3) ),
27044 binop( Iop_Or32,
27045 binop( Iop_Shl32,
27046 unop( Iop_1Uto32, bit5 ),
27047 mkU8( 2) ),
27048 binop( Iop_Shl32,
27049 unop( Iop_1Uto32, bit6 ),
27050 mkU8( 1 ) ) ) ) );
27051 assign(CC, binop( Iop_Or32,
27052 binop( Iop_And32,
27053 unop( Iop_Not32,
27054 unop( Iop_1Sto32, bit7 ) ),
27055 mkexpr( eq_lt_gt ) ),
27056 unop( Iop_1Uto32, bit7 ) ) );
27058 /* put result of the comparison into CC and FPCC */
27059 putGST_field( PPC_GST_CR, mkexpr( CC ), BF );
27060 putFPCC( mkexpr( CC ) );
27061 return True;
27063 break;
27065 case 0x2A4: // xsmaxcqp (VSX Scalar Maximum Type-C Quad Precision)
27066 case 0x2E4: // xsmincqp (VSX Scalar Minimum Type-C Quad Precision)
27068 IRTemp tmp_cmp = newTemp( Ity_I64 );
27069 IRTemp cmp_mask = newTemp( Ity_V128 );
27070 IRTemp result = newTemp( Ity_V128 );
27071 IRTemp src_not_NaN = newTemp( Ity_V128 );
27072 IRTemp tmp_src_not_NaN = newTemp( Ity_I64 );
27074 /* NOTE: exceptions are not implemented, will not set VXSNAN, VXVC or
27075 FX registers. */
27076 assign( vA, getVSReg( vA_addr ) );
27078 if (opc2 == 0x2A4) {
27079 DIP("xsmaxcqp v%u,v%u,v%u\n", VRT, VRA, VRB);
27080 assign( tmp_cmp, unop( Iop_1Sto64, Quad_precision_gt( vA, vB ) ) );
27082 } else {
27083 DIP("xsmincqp v%u,v%u,v%u\n", VRT, VRA, VRB);
27084 assign( tmp_cmp, unop( Iop_1Sto64, Quad_precision_gt( vB, vA ) ) );
27087 /* if either vA or vB is NaN, result is vB */
27088 assign( tmp_src_not_NaN,
27089 unop( Iop_Not64,
27090 unop( Iop_1Sto64,
27091 mkOR1( is_NaN( Ity_V128, vA ),
27092 is_NaN( Ity_V128, vB ) ) ) ) );
27094 assign( src_not_NaN, binop( Iop_64HLtoV128,
27095 mkexpr( tmp_src_not_NaN ),
27096 mkexpr( tmp_src_not_NaN ) ) );
27098 assign( cmp_mask, binop( Iop_64HLtoV128,
27099 mkexpr( tmp_cmp ), mkexpr( tmp_cmp ) ) );
27101 /* comparison is True, then result = vA, otherwise result = vB */
27102 assign( result, binop( Iop_OrV128,
27103 binop( Iop_AndV128,
27104 mkexpr( cmp_mask ),
27105 mkexpr( vA ) ),
27106 binop( Iop_AndV128,
27107 unop( Iop_NotV128, mkexpr( cmp_mask ) ),
27108 mkexpr( vB ) ) ) );
27110 assign( vT,
27111 binop( Iop_OrV128,
27112 binop( Iop_AndV128,
27113 mkexpr( src_not_NaN ),
27114 mkexpr( result ) ),
27115 binop( Iop_AndV128,
27116 unop( Iop_NotV128, mkexpr( src_not_NaN ) ),
27117 mkexpr( vB ) ) ) );
27119 break;
27121 case 0x2C4: // xststdcqp (VSX Scalar Quad-Precision Test Data Class)
27123 UInt BF = IFIELD( theInstr, 23, 3 );
27124 UInt DCMX_mask = IFIELD( theInstr, 16, 7 );
27125 IRTemp CC = newTemp( Ity_I64 );
27126 IRTemp NaN = newTemp( Ity_I64 );
27127 IRTemp inf = newTemp( Ity_I64 );
27128 IRTemp pos = newTemp( Ity_I64 );
27129 IRTemp DCM = newTemp( Ity_I64 );
27130 IRTemp zero = newTemp( Ity_I64 );
27131 IRTemp dnorm = newTemp( Ity_I64 );
27133 DIP("xststdcqp %u,v%u,%u\n", BF, VRB, DCMX_mask);
27135 assign( zero, unop( Iop_1Uto64, is_Zero( Ity_V128, vB ) ) );
27136 assign( pos, unop( Iop_1Uto64,
27137 binop( Iop_CmpEQ64,
27138 binop( Iop_Shr64,
27139 unop( Iop_V128HIto64,
27140 mkexpr( vB ) ),
27141 mkU8( 63 ) ),
27142 mkU64( 0 ) ) ) );
27144 assign( NaN, unop( Iop_1Uto64, is_NaN( Ity_V128, vB ) ) );
27145 assign( inf, unop( Iop_1Uto64, is_Inf( Ity_V128, vB ) ) );
27147 assign( dnorm, unop( Iop_1Uto64, is_Denorm( Ity_V128, vB ) ) );
27148 assign( DCM, create_DCM( Ity_I64, NaN, inf, zero, dnorm, pos ) );
27149 assign( CC, binop( Iop_Or64,
27150 binop( Iop_And64, /* vB sign bit */
27151 binop( Iop_Shr64,
27152 unop( Iop_V128HIto64, mkexpr( vB ) ),
27153 mkU8( 60 ) ),
27154 mkU64( 0x8 ) ),
27155 binop( Iop_Shl64,
27156 unop( Iop_1Uto64,
27157 binop( Iop_CmpNE64,
27158 binop( Iop_And64,
27159 mkexpr( DCM ),
27160 mkU64( DCMX_mask ) ),
27161 mkU64( 0 ) ) ),
27162 mkU8( 1 ) ) ) );
27164 putGST_field( PPC_GST_CR, unop(Iop_64to32, mkexpr( CC ) ), BF );
27165 putFPCC( unop(Iop_64to32, mkexpr( CC ) ) );
27166 return True;
27168 break;
27170 case 0x324: // xsabsqp (VSX Scalar Absolute Quad-Precision)
27171 // xsxexpqp (VSX Scalar Extract Exponent Quad-Precision)
27172 // xsnabsqp (VSX Scalar Negative Absolute Quad-Precision)
27173 // xsnegqp (VSX Scalar Negate Quad-Precision)
27174 // xsxsigqp (VSX Scalar Extract Significand Quad-Precision)
27176 UInt inst_select = IFIELD( theInstr, 16, 5);
27178 switch (inst_select) {
27179 case 0:
27180 DIP("xsabsqp v%u,v%u\n", VRT, VRB);
27181 assign( vT, binop( Iop_AndV128, mkexpr( vB ),
27182 binop( Iop_64HLtoV128,
27183 mkU64( 0x7FFFFFFFFFFFFFFF ),
27184 mkU64( 0xFFFFFFFFFFFFFFFF ) ) ) );
27185 break;
27187 case 2:
27188 DIP("xsxexpqp v%u,v%u\n", VRT, VRB);
27189 assign( vT, binop( Iop_ShrV128,
27190 binop( Iop_AndV128, mkexpr( vB ),
27191 binop( Iop_64HLtoV128,
27192 mkU64( 0x7FFF000000000000 ),
27193 mkU64( 0x0000000000000000 ) ) ),
27194 mkU8( 48 ) ) );
27195 break;
27197 case 8:
27198 DIP("xsnabsqp v%u,v%u\n", VRT, VRB);
27199 assign( vT, binop( Iop_OrV128, mkexpr( vB ),
27200 binop( Iop_64HLtoV128,
27201 mkU64( 0x8000000000000000 ),
27202 mkU64( 0x0000000000000000 ) ) ) );
27203 break;
27205 case 16:
27206 DIP("xsnegqp v%u,v%u\n", VRT, VRB);
27207 assign( vT, binop( Iop_XorV128, mkexpr( vB ),
27208 binop( Iop_64HLtoV128,
27209 mkU64( 0x8000000000000000 ),
27210 mkU64( 0x0000000000000000 ) ) ) );
27211 break;
27213 case 18:
27215 IRTemp expZero = newTemp( Ity_I64 );
27216 IRTemp expInfinity = newTemp( Ity_I64 );
27218 DIP("xsxsigqp v%u,v%u\n", VRT, VRB);
27220 assign( expZero, unop( Iop_1Uto64,
27221 binop( Iop_CmpNE64,
27222 binop( Iop_And64,
27223 unop( Iop_V128HIto64,
27224 mkexpr( vB ) ),
27225 mkU64( 0x7FFF000000000000 ) ),
27226 mkU64( 0x0 ) ) ) );
27228 assign( expInfinity,
27229 unop( Iop_1Uto64,
27230 binop( Iop_CmpNE64,
27231 binop( Iop_And64,
27232 unop( Iop_V128HIto64,
27233 mkexpr( vB ) ),
27234 mkU64( 0x7FFF000000000000 ) ),
27235 mkU64( 0x7FFF000000000000 ) ) ) );
27237 /* Clear upper 16 bits to 0x0000. If the exp was zero or infinity
27238 * set bit 48 (lsb = 0) to 0, otherwise set bit 48 to 1.
27240 assign( vT,
27241 binop( Iop_OrV128,
27242 binop( Iop_ShrV128,
27243 binop( Iop_ShlV128,
27244 mkexpr( vB ),
27245 mkU8( 16 ) ),
27246 mkU8( 16 ) ),
27247 binop( Iop_64HLtoV128,
27248 binop( Iop_Shl64,
27249 binop( Iop_And64,
27250 mkexpr( expZero ),
27251 mkexpr( expInfinity ) ),
27252 mkU8( 48 ) ),
27253 mkU64( 0 ) ) ) );
27255 break;
27257 default:
27258 vex_printf( "dis_vx_scalar_quad_precision invalid inst_select (ppc)(opc2)\n" );
27259 return False;
27262 break;
27263 case 0x364: // xsiexpqp (VSX Scalar Insert Exponent Quad-Precision)
27265 IRTemp exp = newTemp( Ity_I64 );
27267 DIP("xsiexpqp v%d,v%d,v%d\n", VRT, VRA, VRB);
27269 assign( vA, getVSReg( vA_addr ) );
27270 assign( exp, binop( Iop_And64,
27271 unop( Iop_V128HIto64,
27272 mkexpr( vB ) ),
27273 mkU64( 0x7FFFULL ) ) );
27274 assign( vT, binop( Iop_64HLtoV128,
27275 binop( Iop_Or64,
27276 binop( Iop_And64,
27277 unop( Iop_V128HIto64,
27278 mkexpr( vA ) ),
27279 mkU64( 0x8000FFFFFFFFFFFFULL ) ),
27280 binop( Iop_Shl64,
27281 mkexpr( exp ),
27282 mkU8( 48 ) ) ),
27283 unop( Iop_V128to64,
27284 mkexpr( vA ) ) ) );
27286 break;
27288 default:
27289 vex_printf( "dis_vx_scalar_quad_precision(ppc)(opc2)\n" );
27291 return False;
27294 putVSReg( vT_addr, mkexpr( vT ) );
27295 return True;
27299 * VSX permute and other miscellaneous instructions
27301 static Bool
27302 dis_vx_permute_misc( UInt prefix, UInt theInstr, UInt opc2 )
27304 /* XX3-Form */
27305 UChar opc1 = ifieldOPC( theInstr );
27306 UChar XT = ifieldRegXT ( theInstr );
27307 UChar XA = ifieldRegXA ( theInstr );
27308 UChar XB = ifieldRegXB ( theInstr );
27309 IRTemp vT = newTemp( Ity_V128 );
27310 IRTemp vA = newTemp( Ity_V128 );
27311 IRTemp vB = newTemp( Ity_V128 );
27313 /* There is no prefixed version of these instructions. */
27314 PREFIX_CHECK
27316 if (opc1 != 0x3C) {
27317 vex_printf( "dis_vx_permute_misc(ppc)(instr)\n" );
27318 return False;
27321 assign( vA, getVSReg( XA ) );
27322 assign( vB, getVSReg( XB ) );
27324 switch (opc2) {
27325 case 0x8: // xxsldwi (VSX Shift Left Double by Word Immediate)
27327 UChar SHW = ifieldSHW ( theInstr );
27328 IRTemp result = newTemp(Ity_V128);
27329 if ( SHW != 0 ) {
27330 IRTemp hi = newTemp(Ity_V128);
27331 IRTemp lo = newTemp(Ity_V128);
27332 assign( hi, binop(Iop_ShlV128, mkexpr(vA), mkU8(SHW*32)) );
27333 assign( lo, binop(Iop_ShrV128, mkexpr(vB), mkU8(128-SHW*32)) );
27334 assign ( result, binop(Iop_OrV128, mkexpr(hi), mkexpr(lo)) );
27335 } else
27336 assign ( result, mkexpr(vA) );
27337 DIP("xxsldwi v%d,v%d,v%d,%d\n", XT, XA, XB, SHW);
27338 putVSReg( XT, mkexpr(result) );
27339 break;
27341 case 0x28: // xpermdi (VSX Permute Doubleword Immediate)
27343 UChar DM = ifieldDM ( theInstr );
27344 IRTemp hi = newTemp(Ity_I64);
27345 IRTemp lo = newTemp(Ity_I64);
27347 if (DM & 0x2)
27348 assign( hi, unop(Iop_V128to64, mkexpr(vA)) );
27349 else
27350 assign( hi, unop(Iop_V128HIto64, mkexpr(vA)) );
27352 if (DM & 0x1)
27353 assign( lo, unop(Iop_V128to64, mkexpr(vB)) );
27354 else
27355 assign( lo, unop(Iop_V128HIto64, mkexpr(vB)) );
27357 assign( vT, binop(Iop_64HLtoV128, mkexpr(hi), mkexpr(lo)) );
27359 DIP("xxpermdi v%d,v%d,v%d,0x%x\n", XT, XA, XB, DM);
27360 putVSReg( XT, mkexpr( vT ) );
27361 break;
27363 case 0x48: // xxmrghw (VSX Merge High Word)
27364 case 0xc8: // xxmrglw (VSX Merge Low Word)
27366 const HChar type = (opc2 == 0x48) ? 'h' : 'l';
27367 IROp word_op = (opc2 == 0x48) ? Iop_V128HIto64 : Iop_V128to64;
27368 IRTemp a64 = newTemp(Ity_I64);
27369 IRTemp ahi32 = newTemp(Ity_I32);
27370 IRTemp alo32 = newTemp(Ity_I32);
27371 IRTemp b64 = newTemp(Ity_I64);
27372 IRTemp bhi32 = newTemp(Ity_I32);
27373 IRTemp blo32 = newTemp(Ity_I32);
27375 assign( a64, unop(word_op, mkexpr(vA)) );
27376 assign( ahi32, unop(Iop_64HIto32, mkexpr(a64)) );
27377 assign( alo32, unop(Iop_64to32, mkexpr(a64)) );
27379 assign( b64, unop(word_op, mkexpr(vB)) );
27380 assign( bhi32, unop(Iop_64HIto32, mkexpr(b64)) );
27381 assign( blo32, unop(Iop_64to32, mkexpr(b64)) );
27383 assign( vT, binop(Iop_64HLtoV128,
27384 binop(Iop_32HLto64, mkexpr(ahi32), mkexpr(bhi32)),
27385 binop(Iop_32HLto64, mkexpr(alo32), mkexpr(blo32))) );
27387 DIP("xxmrg%cw v%d,v%d,v%d\n", type, XT, XA, XB);
27388 putVSReg( XT, mkexpr( vT ) );
27389 break;
27391 case 0x018: // xxsel (VSX Select)
27393 UChar XC = ifieldRegXC(theInstr);
27394 IRTemp vC = newTemp( Ity_V128 );
27395 assign( vC, getVSReg( XC ) );
27396 DIP("xxsel v%d,v%d,v%d,v%d\n", XT, XA, XB, XC);
27397 /* vD = (vA & ~vC) | (vB & vC) */
27398 putVSReg( XT, binop(Iop_OrV128,
27399 binop(Iop_AndV128, mkexpr(vA), unop(Iop_NotV128, mkexpr(vC))),
27400 binop(Iop_AndV128, mkexpr(vB), mkexpr(vC))) );
27401 break;
27404 case 0x68: // xxperm (VSX Permute )
27405 case 0xE8: // xxpermr (VSX Permute right-index )
27408 /* The xxperm instruction performs the same operation as
27409 the vperm except the xxperm operates on the VSR register
27410 file. while vperm operates on the VR register file.
27411 Lets borrow some code here from vperm. The mapping of
27412 the source registers is also a little different.
27414 IRTemp a_perm = newTemp(Ity_V128);
27415 IRTemp b_perm = newTemp(Ity_V128);
27416 IRTemp mask = newTemp(Ity_V128);
27417 IRTemp perm_val = newTemp(Ity_V128);
27418 IRTemp vB_adj = newTemp( Ity_V128 );
27420 if ( opc2 == 0x68 ) {
27421 DIP("xxperm v%u,v%u,v%u\n", (UInt)XT, (UInt)XA, (UInt)XB);
27423 } else {
27424 /* Same as xperm just the index is 31 - idx */
27425 DIP("xxpermr v%u,v%u,v%u\n", (UInt)XT, (UInt)XA, (UInt)XB);
27428 assign( vT, getVSReg( XT ) );
27430 if ( opc2 == 0x68 ) // xxperm
27431 assign( vB_adj, mkexpr( vB ) );
27433 else // xxpermr
27434 assign( vB_adj,
27435 binop( Iop_Sub16x8,
27436 unop( Iop_Dup8x16, mkU8( 0x1F ) ),
27437 mkexpr( vB ) ) );
27439 /* Limit the Perm8x16 steering values to 0 .. 15 as that is what
27440 IR specifies, and also to hide irrelevant bits from
27441 memcheck.
27443 assign( perm_val,
27444 binop( Iop_AndV128, mkexpr( vB_adj ),
27445 unop( Iop_Dup8x16, mkU8( 0xF ) ) ) );
27446 assign( a_perm,
27447 binop( Iop_Perm8x16, mkexpr( vA ), mkexpr( perm_val ) ) );
27448 assign( b_perm,
27449 binop( Iop_Perm8x16, mkexpr( vT ), mkexpr( perm_val ) ) );
27450 assign( mask, binop( Iop_SarN8x16,
27451 binop( Iop_ShlN8x16, mkexpr( vB_adj ),
27452 mkU8( 3 ) ),
27453 mkU8( 7 ) ) );
27454 // dst = (a & ~mask) | (b & mask)
27455 putVSReg( XT, binop( Iop_OrV128,
27456 binop( Iop_AndV128, mkexpr( a_perm ),
27457 unop( Iop_NotV128, mkexpr( mask ) ) ),
27458 binop( Iop_AndV128, mkexpr( b_perm ),
27459 mkexpr( mask ) ) ) );
27460 break;
27463 case 0x148: // xxspltw (VSX Splat Word)
27465 UChar UIM = ifieldRegA(theInstr) & 3;
27466 UChar sh_uim = (3 - (UIM)) * 32;
27467 DIP("xxspltw v%d,v%d,%d\n", XT, XB, UIM);
27468 putVSReg( XT,
27469 unop( Iop_Dup32x4,
27470 unop( Iop_V128to32,
27471 binop( Iop_ShrV128, mkexpr( vB ), mkU8( sh_uim ) ) ) ) );
27472 break;
27475 default:
27476 vex_printf( "dis_vx_permute_misc(ppc)(opc2)\n" );
27477 return False;
27479 return True;
27483 AltiVec Load Instructions
27485 static Bool dis_av_load ( const VexAbiInfo* vbi, UInt prefix, UInt theInstr )
27487 /* X-Form */
27488 UChar opc1 = ifieldOPC(theInstr);
27489 UChar vD_addr = ifieldRegDS(theInstr);
27490 UChar rA_addr = ifieldRegA(theInstr);
27491 UChar rB_addr = ifieldRegB(theInstr);
27492 UInt opc2 = ifieldOPClo10(theInstr);
27493 UChar b0 = ifieldBIT0(theInstr);
27495 IRType ty = mode64 ? Ity_I64 : Ity_I32;
27496 IRTemp EA = newTemp(ty);
27497 IRTemp EA_align16 = newTemp(ty);
27499 /* There is no prefixed version of these instructions. */
27500 PREFIX_CHECK
27502 if (opc1 != 0x1F || b0 != 0) {
27503 vex_printf("dis_av_load(ppc)(instr)\n");
27504 return False;
27507 assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
27508 assign( EA_align16, addr_align( mkexpr(EA), 16 ) );
27510 switch (opc2) {
27512 case 0x006: { // lvsl (Load Vector for Shift Left, AV p123)
27513 IRDirty* d;
27514 UInt vD_off = vectorGuestRegOffset(vD_addr);
27515 IRExpr** args_be = mkIRExprVec_5(
27516 IRExpr_GSPTR(),
27517 mkU32(vD_off),
27518 binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
27519 mkU32(0xF)),
27520 mkU32(0)/*left*/,
27521 mkU32(1)/*Big Endian*/);
27522 IRExpr** args_le = mkIRExprVec_5(
27523 IRExpr_GSPTR(),
27524 mkU32(vD_off),
27525 binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
27526 mkU32(0xF)),
27527 mkU32(0)/*left*/,
27528 mkU32(0)/*Little Endian*/);
27529 if (!mode64) {
27530 d = unsafeIRDirty_0_N (
27531 0/*regparms*/,
27532 "ppc32g_dirtyhelper_LVS",
27533 fnptr_to_fnentry(vbi, &ppc32g_dirtyhelper_LVS),
27534 args_be );
27535 } else {
27536 if (host_endness == VexEndnessBE)
27537 d = unsafeIRDirty_0_N (
27538 0/*regparms*/,
27539 "ppc64g_dirtyhelper_LVS",
27540 fnptr_to_fnentry(vbi, &ppc64g_dirtyhelper_LVS),
27541 args_be );
27542 else
27543 d = unsafeIRDirty_0_N (
27544 0/*regparms*/,
27545 "ppc64g_dirtyhelper_LVS",
27546 fnptr_to_fnentry( vbi, &ppc64g_dirtyhelper_LVS ),
27547 args_le );
27549 DIP("lvsl v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
27550 /* declare guest state effects */
27551 d->nFxState = 1;
27552 vex_bzero(&d->fxState, sizeof(d->fxState));
27553 d->fxState[0].fx = Ifx_Write;
27554 d->fxState[0].offset = vD_off;
27555 d->fxState[0].size = sizeof(U128);
27557 /* execute the dirty call, side-effecting guest state */
27558 stmt( IRStmt_Dirty(d) );
27559 break;
27561 case 0x026: { // lvsr (Load Vector for Shift Right, AV p125)
27562 IRDirty* d;
27563 UInt vD_off = vectorGuestRegOffset(vD_addr);
27564 IRExpr** args_be = mkIRExprVec_5(
27565 IRExpr_GSPTR(),
27566 mkU32(vD_off),
27567 binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
27568 mkU32(0xF)),
27569 mkU32(1)/*right*/,
27570 mkU32(1)/*Big Endian*/);
27571 IRExpr** args_le = mkIRExprVec_5(
27572 IRExpr_GSPTR(),
27573 mkU32(vD_off),
27574 binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
27575 mkU32(0xF)),
27576 mkU32(1)/*right*/,
27577 mkU32(0)/*Little Endian*/);
27579 if (!mode64) {
27580 d = unsafeIRDirty_0_N (
27581 0/*regparms*/,
27582 "ppc32g_dirtyhelper_LVS",
27583 fnptr_to_fnentry(vbi, &ppc32g_dirtyhelper_LVS),
27584 args_be );
27585 } else {
27586 if (host_endness == VexEndnessBE)
27587 d = unsafeIRDirty_0_N (
27588 0/*regparms*/,
27589 "ppc64g_dirtyhelper_LVS",
27590 fnptr_to_fnentry(vbi, &ppc64g_dirtyhelper_LVS),
27591 args_be );
27592 else
27593 d = unsafeIRDirty_0_N (
27594 0/*regparms*/,
27595 "ppc64g_dirtyhelper_LVS",
27596 fnptr_to_fnentry( vbi, &ppc64g_dirtyhelper_LVS ),
27597 args_le );
27599 DIP("lvsr v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
27600 /* declare guest state effects */
27601 d->nFxState = 1;
27602 vex_bzero(&d->fxState, sizeof(d->fxState));
27603 d->fxState[0].fx = Ifx_Write;
27604 d->fxState[0].offset = vD_off;
27605 d->fxState[0].size = sizeof(U128);
27607 /* execute the dirty call, side-effecting guest state */
27608 stmt( IRStmt_Dirty(d) );
27609 break;
27611 case 0x007: // lvebx (Load Vector Element Byte Indexed, AV p119)
27612 DIP("lvebx v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
27613 /* loads addressed byte into vector[EA[0:3]
27614 since all other destination bytes are undefined,
27615 can simply load entire vector from 16-aligned EA */
27616 putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
27617 break;
27619 case 0x027: // lvehx (Load Vector Element Half Word Indexed, AV p121)
27620 DIP("lvehx v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
27621 /* see note for lvebx */
27622 putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
27623 break;
27625 case 0x047: // lvewx (Load Vector Element Word Indexed, AV p122)
27626 DIP("lvewx v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
27627 /* see note for lvebx */
27628 putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
27629 break;
27631 case 0x067: // lvx (Load Vector Indexed, AV p127)
27632 DIP("lvx v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
27633 putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
27634 break;
27636 case 0x167: // lvxl (Load Vector Indexed LRU, AV p128)
27637 DIP("lvxl v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
27638 putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
27639 break;
27641 default:
27642 vex_printf("dis_av_load(ppc)(opc2)\n");
27643 return False;
27645 return True;
27649 AltiVec Store Instructions
27651 static Bool dis_av_store ( UInt prefix, UInt theInstr )
27653 /* X-Form */
27654 UChar opc1 = ifieldOPC(theInstr);
27655 UChar vS_addr = ifieldRegDS(theInstr);
27656 UChar rA_addr = ifieldRegA(theInstr);
27657 UChar rB_addr = ifieldRegB(theInstr);
27658 UInt opc2 = ifieldOPClo10(theInstr);
27659 UChar b0 = ifieldBIT0(theInstr);
27661 IRType ty = mode64 ? Ity_I64 : Ity_I32;
27662 IRTemp EA = newTemp(ty);
27663 IRTemp addr_aligned = newTemp(ty);
27664 IRTemp vS = newTemp(Ity_V128);
27665 IRTemp eb = newTemp(Ity_I8);
27666 IRTemp idx = newTemp(Ity_I8);
27668 /* There is no prefixed version of these instructions. */
27669 PREFIX_CHECK
27671 if (opc1 != 0x1F || b0 != 0) {
27672 vex_printf("dis_av_store(ppc)(instr)\n");
27673 return False;
27676 assign( vS, getVReg(vS_addr));
27677 assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
27679 switch (opc2) {
27680 case 0x087: { // stvebx (Store Vector Byte Indexed, AV p131)
27681 DIP("stvebx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
27682 assign( eb, binop(Iop_And8, mkU8(0xF),
27683 unop(Iop_32to8,
27684 mkNarrowTo32(ty, mkexpr(EA)) )) );
27685 if (host_endness == VexEndnessLE) {
27686 assign( idx, binop(Iop_Shl8, mkexpr(eb), mkU8(3)) );
27687 } else {
27688 assign( idx, binop(Iop_Shl8,
27689 binop(Iop_Sub8, mkU8(15), mkexpr(eb)),
27690 mkU8(3)) );
27692 store( mkexpr(EA),
27693 unop( Iop_32to8, unop(Iop_V128to32,
27694 binop(Iop_ShrV128, mkexpr(vS), mkexpr(idx)))) );
27695 break;
27697 case 0x0A7: { // stvehx (Store Vector Half Word Indexed, AV p132)
27698 DIP("stvehx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
27699 assign( addr_aligned, addr_align(mkexpr(EA), 2) );
27700 assign( eb, binop(Iop_And8, mkU8(0xF),
27701 mkNarrowTo8(ty, mkexpr(addr_aligned) )) );
27702 if (host_endness == VexEndnessLE) {
27703 assign( idx, binop(Iop_Shl8, mkexpr(eb), mkU8(3)) );
27704 } else {
27705 assign( idx, binop(Iop_Shl8,
27706 binop(Iop_Sub8, mkU8(14), mkexpr(eb)),
27707 mkU8(3)) );
27709 store( mkexpr(addr_aligned),
27710 unop( Iop_32to16, unop(Iop_V128to32,
27711 binop(Iop_ShrV128, mkexpr(vS), mkexpr(idx)))) );
27712 break;
27714 case 0x0C7: { // stvewx (Store Vector Word Indexed, AV p133)
27715 DIP("stvewx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
27716 assign( addr_aligned, addr_align(mkexpr(EA), 4) );
27717 assign( eb, binop(Iop_And8, mkU8(0xF),
27718 mkNarrowTo8(ty, mkexpr(addr_aligned) )) );
27719 if (host_endness == VexEndnessLE) {
27720 assign( idx, binop(Iop_Shl8, mkexpr(eb), mkU8(3)) );
27721 } else {
27722 assign( idx, binop(Iop_Shl8,
27723 binop(Iop_Sub8, mkU8(12), mkexpr(eb)),
27724 mkU8(3)) );
27726 store( mkexpr( addr_aligned),
27727 unop( Iop_V128to32,
27728 binop(Iop_ShrV128, mkexpr(vS), mkexpr(idx))) );
27729 break;
27732 case 0x0E7: // stvx (Store Vector Indexed, AV p134)
27733 DIP("stvx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
27734 store( addr_align( mkexpr(EA), 16 ), mkexpr(vS) );
27735 break;
27737 case 0x1E7: // stvxl (Store Vector Indexed LRU, AV p135)
27738 DIP("stvxl v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
27739 store( addr_align( mkexpr(EA), 16 ), mkexpr(vS) );
27740 break;
27742 default:
27743 vex_printf("dis_av_store(ppc)(opc2)\n");
27744 return False;
27746 return True;
27750 AltiVec Arithmetic Instructions
27752 static Bool dis_av_arith ( UInt prefix, UInt theInstr )
27754 /* VX-Form */
27755 UChar opc1 = ifieldOPC(theInstr);
27756 UChar vD_addr = ifieldRegDS(theInstr);
27757 UChar vA_addr = ifieldRegA(theInstr);
27758 UChar vB_addr = ifieldRegB(theInstr);
27759 UInt opc2 = IFIELD( theInstr, 0, 11 );
27761 IRTemp vA = newTemp(Ity_V128);
27762 IRTemp vB = newTemp(Ity_V128);
27763 IRTemp z3 = newTemp(Ity_I64);
27764 IRTemp z2 = newTemp(Ity_I64);
27765 IRTemp z1 = newTemp(Ity_I64);
27766 IRTemp z0 = newTemp(Ity_I64);
27767 IRTemp aEvn, aOdd;
27768 IRTemp a15, a14, a13, a12, a11, a10, a9, a8;
27769 IRTemp a7, a6, a5, a4, a3, a2, a1, a0;
27770 IRTemp b3, b2, b1, b0;
27772 /* There is no prefixed version of these instructions. */
27773 PREFIX_CHECK
27775 aEvn = aOdd = IRTemp_INVALID;
27776 a15 = a14 = a13 = a12 = a11 = a10 = a9 = a8 = IRTemp_INVALID;
27777 a7 = a6 = a5 = a4 = a3 = a2 = a1 = a0 = IRTemp_INVALID;
27778 b3 = b2 = b1 = b0 = IRTemp_INVALID;
27780 assign( vA, getVReg( vA_addr ) );
27781 assign( vB, getVReg( vB_addr ) );
27783 if (opc1 != 0x4) {
27784 vex_printf("dis_av_arith(ppc)(opc1 != 0x4)\n");
27785 return False;
27788 switch (opc2) {
27789 /* Add */
27790 case 0x180: { // vaddcuw (Add Carryout Unsigned Word, AV p136)
27791 DIP("vaddcuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27792 /* unsigned_ov(x+y) = (y >u not(x)) */
27793 putVReg( vD_addr, binop( Iop_ShrN32x4,
27794 binop( Iop_CmpGT32Ux4, mkexpr( vB ),
27795 unop( Iop_NotV128, mkexpr( vA ) ) ),
27796 mkU8( 31 ) ) );
27797 break;
27799 case 0x000: // vaddubm (Add Unsigned Byte Modulo, AV p141)
27800 DIP("vaddubm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27801 putVReg( vD_addr, binop(Iop_Add8x16, mkexpr(vA), mkexpr(vB)) );
27802 break;
27804 case 0x040: // vadduhm (Add Unsigned Half Word Modulo, AV p143)
27805 DIP("vadduhm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27806 putVReg( vD_addr, binop(Iop_Add16x8, mkexpr(vA), mkexpr(vB)) );
27807 break;
27809 case 0x080: // vadduwm (Add Unsigned Word Modulo, AV p145)
27810 DIP("vadduwm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27811 putVReg( vD_addr, binop(Iop_Add32x4, mkexpr(vA), mkexpr(vB)) );
27812 break;
27814 case 0x0C0: // vaddudm (Add Unsigned Double Word Modulo)
27815 DIP("vaddudm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27816 putVReg( vD_addr, binop(Iop_Add64x2, mkexpr(vA), mkexpr(vB)) );
27817 break;
27819 case 0x200: // vaddubs (Add Unsigned Byte Saturate, AV p142)
27820 DIP("vaddubs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27821 putVReg( vD_addr, binop(Iop_QAdd8Ux16, mkexpr(vA), mkexpr(vB)) );
27822 // TODO: set VSCR[SAT], perhaps via new primop: Iop_SatOfQAdd8Ux16
27823 break;
27825 case 0x240: // vadduhs (Add Unsigned Half Word Saturate, AV p144)
27826 DIP("vadduhs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27827 putVReg( vD_addr, binop(Iop_QAdd16Ux8, mkexpr(vA), mkexpr(vB)) );
27828 // TODO: set VSCR[SAT]
27829 break;
27831 case 0x280: // vadduws (Add Unsigned Word Saturate, AV p146)
27832 DIP("vadduws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27833 putVReg( vD_addr, binop(Iop_QAdd32Ux4, mkexpr(vA), mkexpr(vB)) );
27834 // TODO: set VSCR[SAT]
27835 break;
27837 case 0x0C8: // vmuloud (Vector multiply Odd Unsigned Doubleword VX-form)
27838 case 0x1C8: // vmulosd (Vector multiply Odd Signed Doubleword VX-form)
27839 case 0x2C8: // vmuleud (Vector multiply Even Unsigned Doubleword VX-form)
27840 case 0x3C8: // vmulesd (Vector multiply Even Signed Doubleword VX-form)
27842 IRTemp hi = newTemp(Ity_I64);
27843 IRTemp lo = newTemp(Ity_I64);
27844 IRTemp tmp128 = newTemp(Ity_I128);
27846 if ( opc2 == 0x0C8) {
27847 DIP("vmuloud v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
27848 /* multiply lower D-words together, upper D-words not used. */
27849 assign( tmp128, binop( Iop_MullU64,
27850 unop( Iop_V128to64, mkexpr( vA ) ),
27851 unop( Iop_V128to64, mkexpr( vB ) ) ) );
27853 } else if ( opc2 == 0x1C8) {
27854 DIP("vmulosd v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
27855 /* multiply lower D-words together, upper D-words not used. */
27856 assign( tmp128, binop( Iop_MullS64,
27857 unop( Iop_V128to64, mkexpr( vA ) ),
27858 unop( Iop_V128to64, mkexpr( vB ) ) ) );
27860 } else if ( opc2 == 0x2C8) {
27861 DIP("vmuleud v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
27862 /* multiply upper D-words together, lower D-words not used. */
27863 assign( tmp128, binop( Iop_MullU64,
27864 unop( Iop_V128HIto64, mkexpr( vA ) ),
27865 unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
27867 } else {
27868 DIP("vmulesd v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
27869 /* multiply upper D-words together, lower D-words not used. */
27870 assign( tmp128, binop( Iop_MullS64,
27871 unop( Iop_V128HIto64, mkexpr( vA ) ),
27872 unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
27875 /* Need to convert from I128 to V128. Don't have a direct
27876 conversion. */
27877 assign( hi, unop( Iop_128HIto64, mkexpr( tmp128 ) ) );
27878 assign( lo, unop( Iop_128to64, mkexpr( tmp128 ) ) );
27880 putVReg( vD_addr,
27881 binop( Iop_64HLtoV128, mkexpr( hi ), mkexpr( lo ) ) );
27883 break;
27885 case 0x300: // vaddsbs (Add Signed Byte Saturate, AV p138)
27886 DIP("vaddsbs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27887 putVReg( vD_addr, binop(Iop_QAdd8Sx16, mkexpr(vA), mkexpr(vB)) );
27888 // TODO: set VSCR[SAT]
27889 break;
27891 case 0x340: // vaddshs (Add Signed Half Word Saturate, AV p139)
27892 DIP("vaddshs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27893 putVReg( vD_addr, binop(Iop_QAdd16Sx8, mkexpr(vA), mkexpr(vB)) );
27894 // TODO: set VSCR[SAT]
27895 break;
27897 case 0x380: // vaddsws (Add Signed Word Saturate, AV p140)
27898 DIP("vaddsws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27899 putVReg( vD_addr, binop(Iop_QAdd32Sx4, mkexpr(vA), mkexpr(vB)) );
27900 // TODO: set VSCR[SAT]
27901 break;
27903 case 0x08B: // vdivuw Vector Divide Unsigned Word
27904 case 0x18B: // vdivsw Vector Divide Signed Word
27905 case 0x289: // vmulhuw Vector Multiply High Unsigned Word
27906 case 0x389: // vmulhsw Vector Multiply High Signed Word
27907 case 0x28B: // vdiveuw Vector divide Extended Unsigned Word
27908 case 0x38B: // vdivesw Vector divide Extended Signed Word
27909 case 0x68B: // vmoduw Vector Modulo Unsigned Word
27910 case 0x78B: // vmodsw Vector Modulo Signed Word
27912 #define MAX_ELE 4
27913 IROp expand_op = Iop_32Uto64;
27914 IROp extract_res = Iop_64to32;
27915 IROp operation = Iop_DivU64;
27916 IRTemp srcA_tmp[MAX_ELE];
27917 IRTemp srcB_tmp[MAX_ELE];
27918 IRTemp res_tmp[MAX_ELE];
27919 IRTemp res_tmp2[MAX_ELE];
27920 IRTemp res_tmp3[MAX_ELE];
27921 UInt shift_by = 32;
27922 UInt i;
27923 IRType size_op = Ity_I64, size_res = Ity_I32;
27925 if (opc2 == 0x08B) {
27926 DIP("vdivuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27927 expand_op= Iop_32Uto64;
27928 operation = Iop_DivU64;
27929 extract_res = Iop_64to32;
27931 } else if (opc2 == 0x68B) {
27932 DIP("vmoduw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27933 expand_op= Iop_32Uto64;
27934 operation = Iop_DivU64;
27935 extract_res = Iop_64to32;
27937 } else if (opc2 == 0x18B) {
27938 DIP("vdivsw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27939 expand_op= Iop_32Sto64;
27940 operation = Iop_DivS64;
27941 extract_res = Iop_64to32;
27943 } else if (opc2 == 0x78B) {
27944 DIP("vmodsw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27945 expand_op= Iop_32Sto64;
27946 operation = Iop_DivS64;
27947 extract_res = Iop_64to32;
27949 } else if (opc2 == 0x289) {
27950 DIP("vmulhuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27951 expand_op = Iop_32Uto64;
27952 operation = Iop_Mul64;
27953 extract_res = Iop_64HIto32;
27955 } else if (opc2 == 0x389) {
27956 DIP("vmulhsw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27957 expand_op= Iop_32Sto64;
27958 operation = Iop_Mul64;
27959 extract_res = Iop_64HIto32;
27961 } else if (opc2 == 0x28B) {
27962 DIP("vdiveuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27963 expand_op= Iop_32Uto64;
27964 operation = Iop_DivU64;
27965 extract_res = Iop_64to32;
27967 } else if (opc2 == 0x38B) {
27968 DIP("vdivesw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
27969 expand_op= Iop_32Sto64;
27970 operation = Iop_DivS64;
27971 extract_res = Iop_64to32;
27974 for (i=0; i<MAX_ELE; i++) {
27975 srcA_tmp[i] = newTemp( size_op );
27976 srcB_tmp[i] = newTemp( size_op );
27977 res_tmp[i] = newTemp( size_res );
27979 if (( opc2 == 0x28B ) || ( opc2 == 0x38B )) {
27980 // Operand A is left shifted 32 bits
27981 assign( srcA_tmp[i],
27982 binop( Iop_Shl64,
27983 unop( expand_op,
27984 unop( Iop_64to32,
27985 unop( Iop_V128to64,
27986 binop( Iop_ShrV128,
27987 mkexpr( vA ),
27988 mkU8( i*shift_by ) )))),
27989 mkU8( 32 ) ) );
27990 } else {
27991 assign( srcA_tmp[i],
27992 unop( expand_op,
27993 unop( Iop_64to32,
27994 unop( Iop_V128to64,
27995 binop( Iop_ShrV128,
27996 mkexpr( vA ),
27997 mkU8( i*shift_by ) ) ) ) ) );
28000 assign( srcB_tmp[i],
28001 unop( expand_op,
28002 unop( Iop_64to32,
28003 unop( Iop_V128to64,
28004 binop( Iop_ShrV128,
28005 mkexpr( vB ),
28006 mkU8( i*shift_by ) ) ) ) ) );
28008 if ( opc2 == 0x38B ) { // vdivesw
28009 /* Take absolute value of signed operands to determine if the result fits in 31 bits.
28010 Set result to zeros if it doesn't fit to match the HW functionality. */
28011 res_tmp2[i] = newTemp( Ity_I64 );
28012 res_tmp3[i] = newTemp( Ity_I64 );
28014 /* Calculate actual result */
28015 assign( res_tmp2[i],
28016 binop( operation,
28017 mkexpr( srcA_tmp[i] ),
28018 mkexpr( srcB_tmp[i] ) ) );
28020 /* Calculate result for ABS(srcA) and ABS(srcB) */
28021 assign( res_tmp3[i], binop( operation, absI64( srcA_tmp[i] ), absI64( srcB_tmp[i] ) ) );
28023 assign( res_tmp[i],
28024 unop( extract_res,
28025 binop( Iop_And64,
28026 unop( Iop_1Sto64,
28027 binop( Iop_CmpEQ64,
28028 binop( Iop_Shr64, mkexpr( res_tmp3[i] ), mkU8( 31 )),
28029 mkU64( 0x0 ) ) ),
28030 mkexpr( res_tmp2[i] ) ) ) );
28032 } else if ( opc2 == 0x28B ) { // vdiveuw
28033 /* Check if result fits in 32-bits, set result to zeros if it doesn't fit to
28034 match the HW functionality. */
28035 res_tmp2[i] = newTemp( Ity_I64 );
28036 assign( res_tmp2[i],
28037 binop( operation,
28038 mkexpr( srcA_tmp[i] ),
28039 mkexpr( srcB_tmp[i] ) ) );
28040 assign( res_tmp[i],
28041 unop( extract_res,
28042 binop( Iop_And64,
28043 unop( Iop_1Sto64,
28044 binop( Iop_CmpEQ64,
28045 binop( Iop_Shr64, mkexpr( res_tmp2[i] ), mkU8( 32 )),
28046 mkU64( 0x0 ) ) ),
28047 mkexpr( res_tmp2[i] ) ) ) );
28048 } else {
28049 assign( res_tmp[i],
28050 unop( extract_res,
28051 binop( operation,
28052 mkexpr( srcA_tmp[i] ),
28053 mkexpr( srcB_tmp[i] ) ) ) );
28057 if (!(( opc2 == 0x68B ) || ( opc2 == 0x78B ))) {
28058 /* Doing a multiply or divide instruction */
28059 putVReg( vD_addr,
28060 Abs_Zero_Vector( Ity_I32,
28061 binop( Iop_64HLtoV128,
28062 binop( Iop_32HLto64,
28063 mkexpr( res_tmp[ 3 ] ),
28064 mkexpr( res_tmp[ 2 ] ) ),
28065 binop( Iop_32HLto64,
28066 mkexpr( res_tmp[ 1 ] ),
28067 mkexpr( res_tmp[ 0 ] ) ) ) ) );
28068 } else {
28069 /* Doing a modulo instruction, vmodsw/vmoduw
28070 res_tmp[] contains the quotients of VRA/VRB.
28071 Calculate modulo as VRA - VRB * res_tmp. */
28072 IRTemp res_Tmp = newTemp( Ity_V128 );
28074 assign( res_Tmp,
28075 Abs_Zero_Vector( Ity_I32,
28076 binop( Iop_64HLtoV128,
28077 binop( Iop_32HLto64,
28078 mkexpr( res_tmp[ 3 ] ),
28079 mkexpr( res_tmp[ 2 ] ) ),
28080 binop( Iop_32HLto64,
28081 mkexpr( res_tmp[ 1 ] ),
28082 mkexpr( res_tmp[ 0 ] ) ) ) ) );
28084 putVReg( vD_addr, binop( Iop_Sub32x4,
28085 mkexpr( vA ),
28086 binop( Iop_Mul32x4,
28087 mkexpr( res_Tmp ),
28088 mkexpr( vB ) ) ) );
28090 #undef MAX_ELE
28092 break;
28093 case 0x1C9: // vmulld Vector Multiply Low Signed Doubleword
28094 case 0x2C9: // vmulhud Vector Multiply High Unsigned Doubleword
28095 case 0x3C9: // vmulhsd Vector Multiply High Signed Doubleword
28096 case 0x0CB: // vdivud Vector Divide Unsigned Doubleword
28097 case 0x1CB: // vdivsd Vector Divide Signed Doubleword
28098 case 0x6CB: // vmodud Vector Modulo Unsigned Doubleword
28099 case 0x7CB: // vmodsd Vector Modulo Signed Doubleword
28101 #define MAX_ELE 2
28102 IROp extract_res = Iop_64to32;
28103 IROp operation = Iop_MullS64;
28104 IRTemp srcA_tmp[MAX_ELE];
28105 IRTemp srcB_tmp[MAX_ELE];
28106 IRTemp res_tmp[MAX_ELE];
28107 UInt shift_by = 64;
28108 UInt i;
28109 IRType size_op = Ity_I64, size_res = Ity_I64;
28111 if (opc2 == 0x1C9) {
28112 DIP("vmulld v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28113 operation = Iop_MullS64;
28114 extract_res = Iop_128to64;
28116 } else if (opc2 == 0x2C9) {
28117 DIP("vmulhud v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28118 operation = Iop_MullU64;
28119 extract_res = Iop_128HIto64;
28121 } else if (opc2 == 0x3C9) {
28122 DIP("vmulhsd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28123 operation = Iop_MullS64;
28124 extract_res = Iop_128HIto64;
28126 } else if (opc2 == 0x0CB) {
28127 DIP("vdivud v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28128 operation = Iop_DivU64;
28130 } else if (opc2 == 0x1CB) {
28131 DIP("vdivsd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28132 operation = Iop_DivS64;
28134 } else if (opc2 == 0x6CB) {
28135 DIP("vmodud v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28136 operation = Iop_DivU64;
28138 } else if (opc2 == 0x7CB) {
28139 DIP("vmodsd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28140 operation = Iop_DivS64;
28143 for (i=0; i<MAX_ELE; i++) {
28144 srcA_tmp[i] = newTemp( size_op );
28145 srcB_tmp[i] = newTemp( size_op );
28146 res_tmp[i] = newTemp( size_res );
28148 assign( srcA_tmp[i],
28149 unop( Iop_V128to64,
28150 binop( Iop_ShrV128,
28151 mkexpr( vA ),
28152 mkU8( i*shift_by ) ) ) );
28154 assign( srcB_tmp[i],
28155 unop( Iop_V128to64,
28156 binop( Iop_ShrV128,
28157 mkexpr( vB ),
28158 mkU8( i*shift_by ) ) ) );
28160 if ((opc2 == 0x1C9) || (opc2 == 0x2C9) || (opc2 == 0x3C9)) {
28161 /* multiply result is I128 */
28162 assign( res_tmp[i],
28163 unop( extract_res,
28164 binop( operation,
28165 mkexpr( srcA_tmp[i] ),
28166 mkexpr( srcB_tmp[i] ) ) ) );
28167 } else {
28168 /* divide result is I64 */
28169 assign( res_tmp[i],
28170 binop( operation,
28171 mkexpr( srcA_tmp[i] ),
28172 mkexpr( srcB_tmp[i] ) ) );
28176 if ((opc2 == 0x6CB) || (opc2 == 0x7CB)) {
28177 /* Doing a modulo instruction,
28178 res_tmp[] contains the quotients of VRA/VRB.
28179 Calculate modulo as VRA - VRB * res_tmp. */
28180 IRTemp res_Tmp = newTemp( Ity_V128 );
28182 assign( res_Tmp, binop( Iop_64HLtoV128,
28183 binop( Iop_Mul64,
28184 mkexpr( res_tmp[ 1 ] ),
28185 mkexpr( srcB_tmp[1] ) ),
28186 binop( Iop_Mul64,
28187 mkexpr( res_tmp[0] ),
28188 mkexpr( srcB_tmp[0] ) ) ) );
28190 putVReg( vD_addr, binop( Iop_Sub64x2,
28191 mkexpr( vA ),
28192 mkexpr( res_Tmp ) ) );
28194 } else {
28195 putVReg( vD_addr, binop( Iop_64HLtoV128,
28196 mkexpr( res_tmp[ 1 ] ),
28197 mkexpr( res_tmp[ 0 ] ) ) );
28200 #undef MAX_ELE
28202 break;
28204 case 0x2CB: // vdiveud Vector Divide Extended Unsigned Doubleword
28205 case 0x3CB: { // vdivesd Vector Divide Extended Signed Doubleword
28206 /* Do vector inst as two scalar operations */
28207 IRTemp divisor_hi = newTemp(Ity_I64);
28208 IRTemp divisor_lo = newTemp(Ity_I64);
28209 IRTemp dividend_hi = newTemp(Ity_I64);
28210 IRTemp dividend_lo = newTemp(Ity_I64);
28211 IRTemp result_hi = newTemp(Ity_I64);
28212 IRTemp result_lo = newTemp(Ity_I64);
28214 assign( dividend_hi, unop( Iop_V128HIto64, mkexpr( vA ) ) );
28215 assign( dividend_lo, unop( Iop_V128to64, mkexpr( vA ) ) );
28216 assign( divisor_hi, unop( Iop_V128HIto64, mkexpr( vB ) ) );
28217 assign( divisor_lo, unop( Iop_V128to64, mkexpr( vB ) ) );
28219 if (opc2 == 0x2CB) {
28220 DIP("vdiveud v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28221 assign( result_hi,
28222 binop( Iop_DivU64E, mkexpr( dividend_hi ),
28223 mkexpr( divisor_hi ) ) );
28224 assign( result_lo,
28225 binop( Iop_DivU64E, mkexpr( dividend_lo ),
28226 mkexpr( divisor_lo ) ) );
28227 putVReg( vD_addr, binop( Iop_64HLtoV128, mkexpr( result_hi ),
28228 mkexpr( result_lo ) ) );
28230 } else {
28231 DIP("vdivesd v%d,v%d,v%d", vD_addr, vA_addr, vB_addr);
28232 assign( result_hi,
28233 binop( Iop_DivS64E, mkexpr( dividend_hi ),
28234 mkexpr( divisor_hi ) ) );
28235 assign( result_lo,
28236 binop( Iop_DivS64E, mkexpr( dividend_lo ),
28237 mkexpr( divisor_lo ) ) );
28238 putVReg( vD_addr, binop( Iop_64HLtoV128, mkexpr( result_hi ),
28239 mkexpr( result_lo ) ) );
28241 break;
28244 /* Subtract */
28245 case 0x580: { // vsubcuw (Subtract Carryout Unsigned Word, AV p260)
28246 DIP("vsubcuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28247 /* unsigned_ov(x-y) = (y >u x) */
28248 putVReg( vD_addr, binop(Iop_ShrN32x4,
28249 unop(Iop_NotV128,
28250 binop(Iop_CmpGT32Ux4, mkexpr(vB),
28251 mkexpr(vA))),
28252 mkU8(31)) );
28253 break;
28255 case 0x400: // vsububm (Subtract Unsigned Byte Modulo, AV p265)
28256 DIP("vsububm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28257 putVReg( vD_addr, binop(Iop_Sub8x16, mkexpr(vA), mkexpr(vB)) );
28258 break;
28260 case 0x440: // vsubuhm (Subtract Unsigned Half Word Modulo, AV p267)
28261 DIP("vsubuhm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28262 putVReg( vD_addr, binop(Iop_Sub16x8, mkexpr(vA), mkexpr(vB)) );
28263 break;
28265 case 0x480: // vsubuwm (Subtract Unsigned Word Modulo, AV p269)
28266 DIP("vsubuwm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28267 putVReg( vD_addr, binop(Iop_Sub32x4, mkexpr(vA), mkexpr(vB)) );
28268 break;
28270 case 0x4C0: // vsubudm (Subtract Unsigned Double Word Modulo)
28271 DIP("vsubudm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28272 putVReg( vD_addr, binop(Iop_Sub64x2, mkexpr(vA), mkexpr(vB)) );
28273 break;
28275 case 0x600: // vsububs (Subtract Unsigned Byte Saturate, AV p266)
28276 DIP("vsububs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28277 putVReg( vD_addr, binop(Iop_QSub8Ux16, mkexpr(vA), mkexpr(vB)) );
28278 // TODO: set VSCR[SAT]
28279 break;
28281 case 0x640: // vsubuhs (Subtract Unsigned HWord Saturate, AV p268)
28282 DIP("vsubuhs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28283 putVReg( vD_addr, binop(Iop_QSub16Ux8, mkexpr(vA), mkexpr(vB)) );
28284 // TODO: set VSCR[SAT]
28285 break;
28287 case 0x680: // vsubuws (Subtract Unsigned Word Saturate, AV p270)
28288 DIP("vsubuws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28289 putVReg( vD_addr, binop(Iop_QSub32Ux4, mkexpr(vA), mkexpr(vB)) );
28290 // TODO: set VSCR[SAT]
28291 break;
28293 case 0x700: // vsubsbs (Subtract Signed Byte Saturate, AV p262)
28294 DIP("vsubsbs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28295 putVReg( vD_addr, binop(Iop_QSub8Sx16, mkexpr(vA), mkexpr(vB)) );
28296 // TODO: set VSCR[SAT]
28297 break;
28299 case 0x740: // vsubshs (Subtract Signed Half Word Saturate, AV p263)
28300 DIP("vsubshs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28301 putVReg( vD_addr, binop(Iop_QSub16Sx8, mkexpr(vA), mkexpr(vB)) );
28302 // TODO: set VSCR[SAT]
28303 break;
28305 case 0x780: // vsubsws (Subtract Signed Word Saturate, AV p264)
28306 DIP("vsubsws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28307 putVReg( vD_addr, binop(Iop_QSub32Sx4, mkexpr(vA), mkexpr(vB)) );
28308 // TODO: set VSCR[SAT]
28309 break;
28312 /* Maximum */
28313 case 0x002: // vmaxub (Maximum Unsigned Byte, AV p182)
28314 DIP("vmaxub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28315 putVReg( vD_addr, binop(Iop_Max8Ux16, mkexpr(vA), mkexpr(vB)) );
28316 break;
28318 case 0x042: // vmaxuh (Maximum Unsigned Half Word, AV p183)
28319 DIP("vmaxuh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28320 putVReg( vD_addr, binop(Iop_Max16Ux8, mkexpr(vA), mkexpr(vB)) );
28321 break;
28323 case 0x082: // vmaxuw (Maximum Unsigned Word, AV p184)
28324 DIP("vmaxuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28325 putVReg( vD_addr, binop(Iop_Max32Ux4, mkexpr(vA), mkexpr(vB)) );
28326 break;
28328 case 0x0C2: // vmaxud (Maximum Unsigned Double word)
28329 DIP("vmaxud v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28330 putVReg( vD_addr, binop(Iop_Max64Ux2, mkexpr(vA), mkexpr(vB)) );
28331 break;
28333 case 0x102: // vmaxsb (Maximum Signed Byte, AV p179)
28334 DIP("vmaxsb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28335 putVReg( vD_addr, binop(Iop_Max8Sx16, mkexpr(vA), mkexpr(vB)) );
28336 break;
28338 case 0x142: // vmaxsh (Maximum Signed Half Word, AV p180)
28339 DIP("vmaxsh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28340 putVReg( vD_addr, binop(Iop_Max16Sx8, mkexpr(vA), mkexpr(vB)) );
28341 break;
28343 case 0x182: // vmaxsw (Maximum Signed Word, AV p181)
28344 DIP("vmaxsw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28345 putVReg( vD_addr, binop(Iop_Max32Sx4, mkexpr(vA), mkexpr(vB)) );
28346 break;
28348 case 0x1C2: // vmaxsd (Maximum Signed Double word)
28349 DIP("vmaxsd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28350 putVReg( vD_addr, binop(Iop_Max64Sx2, mkexpr(vA), mkexpr(vB)) );
28351 break;
28353 /* Minimum */
28354 case 0x202: // vminub (Minimum Unsigned Byte, AV p191)
28355 DIP("vminub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28356 putVReg( vD_addr, binop(Iop_Min8Ux16, mkexpr(vA), mkexpr(vB)) );
28357 break;
28359 case 0x242: // vminuh (Minimum Unsigned Half Word, AV p192)
28360 DIP("vminuh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28361 putVReg( vD_addr, binop(Iop_Min16Ux8, mkexpr(vA), mkexpr(vB)) );
28362 break;
28364 case 0x282: // vminuw (Minimum Unsigned Word, AV p193)
28365 DIP("vminuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28366 putVReg( vD_addr, binop(Iop_Min32Ux4, mkexpr(vA), mkexpr(vB)) );
28367 break;
28369 case 0x2C2: // vminud (Minimum Unsigned Double Word)
28370 DIP("vminud v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28371 putVReg( vD_addr, binop(Iop_Min64Ux2, mkexpr(vA), mkexpr(vB)) );
28372 break;
28374 case 0x302: // vminsb (Minimum Signed Byte, AV p188)
28375 DIP("vminsb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28376 putVReg( vD_addr, binop(Iop_Min8Sx16, mkexpr(vA), mkexpr(vB)) );
28377 break;
28379 case 0x342: // vminsh (Minimum Signed Half Word, AV p189)
28380 DIP("vminsh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28381 putVReg( vD_addr, binop(Iop_Min16Sx8, mkexpr(vA), mkexpr(vB)) );
28382 break;
28384 case 0x382: // vminsw (Minimum Signed Word, AV p190)
28385 DIP("vminsw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28386 putVReg( vD_addr, binop(Iop_Min32Sx4, mkexpr(vA), mkexpr(vB)) );
28387 break;
28389 case 0x3C2: // vminsd (Minimum Signed Double Word)
28390 DIP("vminsd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28391 putVReg( vD_addr, binop(Iop_Min64Sx2, mkexpr(vA), mkexpr(vB)) );
28392 break;
28395 /* Average */
28396 case 0x402: // vavgub (Average Unsigned Byte, AV p152)
28397 DIP("vavgub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28398 putVReg( vD_addr, binop(Iop_Avg8Ux16, mkexpr(vA), mkexpr(vB)) );
28399 break;
28401 case 0x442: // vavguh (Average Unsigned Half Word, AV p153)
28402 DIP("vavguh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28403 putVReg( vD_addr, binop(Iop_Avg16Ux8, mkexpr(vA), mkexpr(vB)) );
28404 break;
28406 case 0x482: // vavguw (Average Unsigned Word, AV p154)
28407 DIP("vavguw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28408 putVReg( vD_addr, binop(Iop_Avg32Ux4, mkexpr(vA), mkexpr(vB)) );
28409 break;
28411 case 0x502: // vavgsb (Average Signed Byte, AV p149)
28412 DIP("vavgsb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28413 putVReg( vD_addr, binop(Iop_Avg8Sx16, mkexpr(vA), mkexpr(vB)) );
28414 break;
28416 case 0x542: // vavgsh (Average Signed Half Word, AV p150)
28417 DIP("vavgsh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28418 putVReg( vD_addr, binop(Iop_Avg16Sx8, mkexpr(vA), mkexpr(vB)) );
28419 break;
28421 case 0x582: // vavgsw (Average Signed Word, AV p151)
28422 DIP("vavgsw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28423 putVReg( vD_addr, binop(Iop_Avg32Sx4, mkexpr(vA), mkexpr(vB)) );
28424 break;
28427 /* Multiply */
28428 case 0x008: // vmuloub (Multiply Odd Unsigned Byte, AV p213)
28429 DIP("vmuloub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28430 putVReg( vD_addr,
28431 binop(Iop_MullEven8Ux16, mkexpr(vA), mkexpr(vB)));
28432 break;
28434 case 0x048: // vmulouh (Multiply Odd Unsigned Half Word, AV p214)
28435 DIP("vmulouh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28436 putVReg( vD_addr,
28437 binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)));
28438 break;
28440 case 0x088: // vmulouw (Multiply Odd Unsigned Word)
28441 DIP("vmulouw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28442 putVReg( vD_addr, binop( Iop_MullEven32Ux4, mkexpr(vA), mkexpr(vB) ) );
28443 break;
28445 case 0x089: // vmuluwm (Multiply Unsigned Word Modulo)
28446 DIP("vmuluwm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28447 putVReg( vD_addr, binop( Iop_Mul32x4, mkexpr(vA), mkexpr(vB) ) );
28448 break;
28450 case 0x108: // vmulosb (Multiply Odd Signed Byte, AV p211)
28451 DIP("vmulosb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28452 putVReg( vD_addr,
28453 binop(Iop_MullEven8Sx16, mkexpr(vA), mkexpr(vB)));
28454 break;
28456 case 0x148: // vmulosh (Multiply Odd Signed Half Word, AV p212)
28457 DIP("vmulosh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28458 putVReg( vD_addr,
28459 binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)));
28460 break;
28462 case 0x188: // vmulosw (Multiply Odd Signed Word)
28463 DIP("vmulosw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28464 putVReg( vD_addr, binop( Iop_MullEven32Sx4, mkexpr(vA), mkexpr(vB) ) );
28465 break;
28467 case 0x208: // vmuleub (Multiply Even Unsigned Byte, AV p209)
28468 DIP("vmuleub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28469 putVReg( vD_addr, MK_Iop_MullOdd8Ux16( mkexpr(vA), mkexpr(vB) ));
28470 break;
28472 case 0x248: // vmuleuh (Multiply Even Unsigned Half Word, AV p210)
28473 DIP("vmuleuh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28474 putVReg( vD_addr, MK_Iop_MullOdd16Ux8( mkexpr(vA), mkexpr(vB) ));
28475 break;
28477 case 0x288: // vmuleuw (Multiply Even Unsigned Word)
28478 DIP("vmuleuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28479 putVReg( vD_addr, MK_Iop_MullOdd32Ux4( mkexpr(vA), mkexpr(vB) ) );
28480 break;
28482 case 0x308: // vmulesb (Multiply Even Signed Byte, AV p207)
28483 DIP("vmulesb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28484 putVReg( vD_addr, MK_Iop_MullOdd8Sx16( mkexpr(vA), mkexpr(vB) ));
28485 break;
28487 case 0x348: // vmulesh (Multiply Even Signed Half Word, AV p208)
28488 DIP("vmulesh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28489 putVReg( vD_addr, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) ));
28490 break;
28492 case 0x388: // vmulesw (Multiply Even Signed Word)
28493 DIP("vmulesw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28494 putVReg( vD_addr, MK_Iop_MullOdd32Sx4( mkexpr(vA), mkexpr(vB) ) );
28495 break;
28497 /* Sum Across Partial */
28498 case 0x608: { // vsum4ubs (Sum Partial (1/4) UB Saturate, AV p275)
28499 IRTemp aEE, aEO, aOE, aOO;
28500 aEE = aEO = aOE = aOO = IRTemp_INVALID;
28501 DIP("vsum4ubs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28503 /* vA: V128_8Ux16 -> 4 x V128_32Ux4, sign-extended */
28504 expand8Ux16( mkexpr(vA), &aEvn, &aOdd ); // (15,13...),(14,12...)
28505 expand16Ux8( mkexpr(aEvn), &aEE, &aEO ); // (15,11...),(13, 9...)
28506 expand16Ux8( mkexpr(aOdd), &aOE, &aOO ); // (14,10...),(12, 8...)
28508 /* break V128 to 4xI32's, zero-extending to I64's */
28509 breakV128to4x64U( mkexpr(aEE), &a15, &a11, &a7, &a3 );
28510 breakV128to4x64U( mkexpr(aOE), &a14, &a10, &a6, &a2 );
28511 breakV128to4x64U( mkexpr(aEO), &a13, &a9, &a5, &a1 );
28512 breakV128to4x64U( mkexpr(aOO), &a12, &a8, &a4, &a0 );
28513 breakV128to4x64U( mkexpr(vB), &b3, &b2, &b1, &b0 );
28515 /* add lanes */
28516 assign( z3, binop(Iop_Add64, mkexpr(b3),
28517 binop(Iop_Add64,
28518 binop(Iop_Add64, mkexpr(a15), mkexpr(a14)),
28519 binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) );
28520 assign( z2, binop(Iop_Add64, mkexpr(b2),
28521 binop(Iop_Add64,
28522 binop(Iop_Add64, mkexpr(a11), mkexpr(a10)),
28523 binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) );
28524 assign( z1, binop(Iop_Add64, mkexpr(b1),
28525 binop(Iop_Add64,
28526 binop(Iop_Add64, mkexpr(a7), mkexpr(a6)),
28527 binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) );
28528 assign( z0, binop(Iop_Add64, mkexpr(b0),
28529 binop(Iop_Add64,
28530 binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
28531 binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
28533 /* saturate-narrow to 32bit, and combine to V128 */
28534 putVReg( vD_addr, mkV128from4x64U( mkexpr(z3), mkexpr(z2),
28535 mkexpr(z1), mkexpr(z0)) );
28536 break;
28538 case 0x708: { // vsum4sbs (Sum Partial (1/4) SB Saturate, AV p273)
28539 IRTemp aEE, aEO, aOE, aOO;
28540 aEE = aEO = aOE = aOO = IRTemp_INVALID;
28541 DIP("vsum4sbs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28543 /* vA: V128_8Sx16 -> 4 x V128_32Sx4, sign-extended */
28544 expand8Sx16( mkexpr(vA), &aEvn, &aOdd ); // (15,13...),(14,12...)
28545 expand16Sx8( mkexpr(aEvn), &aEE, &aEO ); // (15,11...),(13, 9...)
28546 expand16Sx8( mkexpr(aOdd), &aOE, &aOO ); // (14,10...),(12, 8...)
28548 /* break V128 to 4xI32's, sign-extending to I64's */
28549 breakV128to4x64S( mkexpr(aEE), &a15, &a11, &a7, &a3 );
28550 breakV128to4x64S( mkexpr(aOE), &a14, &a10, &a6, &a2 );
28551 breakV128to4x64S( mkexpr(aEO), &a13, &a9, &a5, &a1 );
28552 breakV128to4x64S( mkexpr(aOO), &a12, &a8, &a4, &a0 );
28553 breakV128to4x64S( mkexpr(vB), &b3, &b2, &b1, &b0 );
28555 /* add lanes */
28556 assign( z3, binop(Iop_Add64, mkexpr(b3),
28557 binop(Iop_Add64,
28558 binop(Iop_Add64, mkexpr(a15), mkexpr(a14)),
28559 binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) );
28560 assign( z2, binop(Iop_Add64, mkexpr(b2),
28561 binop(Iop_Add64,
28562 binop(Iop_Add64, mkexpr(a11), mkexpr(a10)),
28563 binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) );
28564 assign( z1, binop(Iop_Add64, mkexpr(b1),
28565 binop(Iop_Add64,
28566 binop(Iop_Add64, mkexpr(a7), mkexpr(a6)),
28567 binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) );
28568 assign( z0, binop(Iop_Add64, mkexpr(b0),
28569 binop(Iop_Add64,
28570 binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
28571 binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
28573 /* saturate-narrow to 32bit, and combine to V128 */
28574 putVReg( vD_addr, mkV128from4x64S( mkexpr(z3), mkexpr(z2),
28575 mkexpr(z1), mkexpr(z0)) );
28576 break;
28578 case 0x648: { // vsum4shs (Sum Partial (1/4) SHW Saturate, AV p274)
28579 DIP("vsum4shs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28581 /* vA: V128_16Sx8 -> 2 x V128_32Sx4, sign-extended */
28582 expand16Sx8( mkexpr(vA), &aEvn, &aOdd ); // (7,5...),(6,4...)
28584 /* break V128 to 4xI32's, sign-extending to I64's */
28585 breakV128to4x64S( mkexpr(aEvn), &a7, &a5, &a3, &a1 );
28586 breakV128to4x64S( mkexpr(aOdd), &a6, &a4, &a2, &a0 );
28587 breakV128to4x64S( mkexpr(vB), &b3, &b2, &b1, &b0 );
28589 /* add lanes */
28590 assign( z3, binop(Iop_Add64, mkexpr(b3),
28591 binop(Iop_Add64, mkexpr(a7), mkexpr(a6))));
28592 assign( z2, binop(Iop_Add64, mkexpr(b2),
28593 binop(Iop_Add64, mkexpr(a5), mkexpr(a4))));
28594 assign( z1, binop(Iop_Add64, mkexpr(b1),
28595 binop(Iop_Add64, mkexpr(a3), mkexpr(a2))));
28596 assign( z0, binop(Iop_Add64, mkexpr(b0),
28597 binop(Iop_Add64, mkexpr(a1), mkexpr(a0))));
28599 /* saturate-narrow to 32bit, and combine to V128 */
28600 putVReg( vD_addr, mkV128from4x64S( mkexpr(z3), mkexpr(z2),
28601 mkexpr(z1), mkexpr(z0)) );
28602 break;
28604 case 0x688: { // vsum2sws (Sum Partial (1/2) SW Saturate, AV p272)
28605 DIP("vsum2sws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28607 /* break V128 to 4xI32's, sign-extending to I64's */
28608 breakV128to4x64S( mkexpr(vA), &a3, &a2, &a1, &a0 );
28609 breakV128to4x64S( mkexpr(vB), &b3, &b2, &b1, &b0 );
28611 /* add lanes */
28612 assign( z2, binop(Iop_Add64, mkexpr(b2),
28613 binop(Iop_Add64, mkexpr(a3), mkexpr(a2))) );
28614 assign( z0, binop(Iop_Add64, mkexpr(b0),
28615 binop(Iop_Add64, mkexpr(a1), mkexpr(a0))) );
28617 /* saturate-narrow to 32bit, and combine to V128 */
28618 putVReg( vD_addr, mkV128from4x64S( mkU64(0), mkexpr(z2),
28619 mkU64(0), mkexpr(z0)) );
28620 break;
28622 case 0x788: { // vsumsws (Sum SW Saturate, AV p271)
28623 DIP("vsumsws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
28625 /* break V128 to 4xI32's, sign-extending to I64's */
28626 breakV128to4x64S( mkexpr(vA), &a3, &a2, &a1, &a0 );
28627 breakV128to4x64S( mkexpr(vB), &b3, &b2, &b1, &b0 );
28629 /* add lanes */
28630 assign( z0, binop(Iop_Add64, mkexpr(b0),
28631 binop(Iop_Add64,
28632 binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
28633 binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
28635 /* saturate-narrow to 32bit, and combine to V128 */
28636 putVReg( vD_addr, mkV128from4x64S( mkU64(0), mkU64(0),
28637 mkU64(0), mkexpr(z0)) );
28638 break;
28640 default:
28641 vex_printf("dis_av_arith(ppc)(opc2=0x%x)\n", opc2);
28642 return False;
28644 return True;
28647 static Bool dis_vx_quadword_arith ( UInt prefix, UInt theInstr )
28649 /* Quad word operations, VX-Form */
28650 UChar vT_addr = ifieldRegDS(theInstr);
28651 UChar vA_addr = ifieldRegA(theInstr);
28652 UChar vB_addr = ifieldRegB(theInstr);
28653 UChar opc1 = ifieldOPC(theInstr);
28654 UInt opc2;
28655 IRTemp vA = newTemp(Ity_V128);
28656 IRTemp vB = newTemp(Ity_V128);
28658 if (opc1 != 0x4) {
28659 vex_printf("ERROR: dis_vx_quadword_arith(ppc)\n");
28660 return False;
28663 assign( vA, getVReg( vA_addr ) );
28664 assign( vB, getVReg( vB_addr ) );
28666 opc2 = IFIELD(theInstr, 0, 6);
28667 switch (opc2) {
28668 case 0x017: // vmsumcud Vector Multiply-Sum & write Carry-out Unsigned
28669 // Doubleword VA-form
28671 UChar vC_addr = ifieldRegC(theInstr);
28672 IRTemp vC = newTemp(Ity_V128);
28674 assign( vC, getVReg( vC_addr ) );
28676 DIP("vmsumcud %d,%d,%d,%d\n", vT_addr, vA_addr, vB_addr, vC_addr);
28677 putVReg( vT_addr, triop( Iop_2xMultU64Add128CarryOut,
28678 mkexpr( vA ), mkexpr( vB ), mkexpr( vC ) ) );
28679 return True;
28682 default:
28683 break; /* fall thru to next case statement */
28684 } /* switch (opc2) */
28686 opc2 = ifieldOPClo11( theInstr );
28687 switch (opc2) {
28688 case 0x005: //vrlq Vector Rotate Left Quadword
28690                IRTemp sh = newTemp(Ity_I8);      /* shift amount is vB[57:63] */
28691 IRTemp shr = newTemp(Ity_I8);
28692 IRTemp vA_shl = newTemp(Ity_V128);
28693 IRTemp vA_shr = newTemp(Ity_V128);
28695 DIP("vrlq v%u,v%u,v%u\n", vT_addr, vA_addr, vB_addr);
28697 assign( sh,
28698 binop( Iop_And8,
28699 mkU8( 0x7F ),
28700 unop( Iop_16to8,
28701 unop( Iop_32to16,
28702 unop( Iop_64to32,
28703 unop( Iop_V128HIto64,
28704 mkexpr( vB ) ) ) ) ) ) );
28706 assign( shr, binop( Iop_Sub8, mkU8( 128 ), mkexpr( sh ) ) );
28707 assign( vA_shl, binop( Iop_ShlV128, mkexpr( vA ), mkexpr( sh ) ) );
28708 assign( vA_shr, binop( Iop_ShrV128, mkexpr( vA ), mkexpr( shr ) ) );
28709 putVReg( vT_addr,
28710 binop( Iop_OrV128, mkexpr( vA_shl ), mkexpr( vA_shr ) ) );
28712 break;
28714 case 0x00B: //vdivuq Vector Divide Unsigned Quadword
28715 DIP("vdivuq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
28716 putVReg( vT_addr, binop( Iop_DivU128, mkexpr( vA ), mkexpr( vB ) ) );
28717 break;
28719 case 0x101: //vcmpuq Vector Compare Unsigned Quadword
28721 IRTemp lt = newTemp(Ity_I32);
28722 IRTemp gt = newTemp(Ity_I32);
28723 IRTemp eq = newTemp(Ity_I32);
28724 IRTemp cc = newTemp(Ity_I32);
28725 UInt BF = IFIELD( theInstr, (31-8), 3 );
28727 DIP("vcmpuq %u,v%u,v%u\n", BF, vA_addr, vB_addr);
28729 assign ( lt, unop( Iop_1Uto32, Quad_precision_uint_gt( vB, vA ) ) );
28730 assign ( gt, unop( Iop_1Uto32, Quad_precision_uint_gt( vA, vB ) ) );
28731 assign ( eq, unop( Iop_1Uto32, Quad_precision_int_eq( vA, vB ) ) );
28733 assign( cc, binop( Iop_Or32,
28734 binop( Iop_Shl32, mkexpr( lt ), mkU8( 3 ) ),
28735 binop( Iop_Or32,
28736 binop( Iop_Shl32,
28737 mkexpr( gt ), mkU8( 2 ) ),
28738 binop( Iop_Shl32,
28739 mkexpr( eq ), mkU8( 1 ) ) ) ) );
28741 putGST_field( PPC_GST_CR, mkexpr( cc ), BF );
28743 break;
28745 case 0x105: //vslq Vector Shift Left Quadword
28746 case 0x205: //vsrq Vector Shift Right Quadword
28748                IRTemp sh = newTemp(Ity_I8);      /* shift amount is vB[57:63] */
28750 assign( sh,
28751 binop( Iop_And8,
28752 mkU8( 0x7F ),
28753 unop( Iop_16to8,
28754 unop( Iop_32to16,
28755 unop( Iop_64to32,
28756 unop( Iop_V128HIto64,
28757 mkexpr( vB ) ) ) ) ) ) );
28759 if (opc2 == 0x105) {
28760 DIP("vslq v%u,v%u,v%u\n", vT_addr, vA_addr, vB_addr);
28761 putVReg( vT_addr,
28762 binop( Iop_ShlV128, mkexpr( vA ), mkexpr( sh ) ) );
28764 } else {
28765 DIP("vsrq v%u,v%u,v%u\n", vT_addr, vA_addr, vB_addr);
28766 putVReg( vT_addr,
28767 binop( Iop_ShrV128, mkexpr( vA ), mkexpr( sh ) ) );
28770 break;
28772 case 0x10B: //vdivsq Vector Divide Signed Quadword
28773 DIP("vdivsq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
28774 putVReg( vT_addr, binop( Iop_DivS128, mkexpr( vA ), mkexpr( vB ) ) );
28775 break;
28777 case 0x141: //vcmpsq Vector Compare Signed Quadword
28779 IRTemp lt = newTemp(Ity_I32);
28780 IRTemp gt = newTemp(Ity_I32);
28781 IRTemp eq = newTemp(Ity_I32);
28782 IRTemp cc = newTemp(Ity_I32);
28783 UInt BF = IFIELD( theInstr, (31-8), 3 );
28785 DIP("vcmpsq %u,v%u,v%u\n", BF, vA_addr, vB_addr);
28787 assign ( lt, unop( Iop_1Uto32, Quad_precision_sint_gt( vB, vA ) ) );
28788 assign ( gt, unop( Iop_1Uto32, Quad_precision_sint_gt( vA, vB ) ) );
28789 assign ( eq, unop( Iop_1Uto32, Quad_precision_int_eq( vA, vB ) ) );
28791 assign( cc, binop( Iop_Or32,
28792 binop( Iop_Shl32, mkexpr( lt ), mkU8( 3 ) ),
28793 binop( Iop_Or32,
28794 binop( Iop_Shl32,
28795 mkexpr( gt ), mkU8( 2 ) ),
28796 binop( Iop_Shl32,
28797 mkexpr( eq ), mkU8( 1 ) ) ) ) );
28799 putGST_field( PPC_GST_CR, mkexpr( cc ), BF );
28801 break;
28803 case 0x045: //vrlqmi Vector Rotate Left Quadword then Mask Insert
28804 case 0x145: //vrlqnm Vector Rotate Left Quadword then AND with Mask
28806 IRTemp sh = newTemp(Ity_I8);
28807 IRTemp shr = newTemp(Ity_I8);
28808 IRTemp vA_shl = newTemp(Ity_V128);
28809 IRTemp vA_shr = newTemp(Ity_V128);
28810 IRTemp mask = newTemp(Ity_V128);
28811 IRTemp mb = newTemp(Ity_I8); /* mask begin */
28812 IRTemp me = newTemp(Ity_I8); /* mask end */
28813 IRTemp tmp = newTemp(Ity_I8); /* mask end tmp */
28815 /* rotate value in bits vB[57:63] */
28816 assign( sh,
28817 binop( Iop_And8,
28818 mkU8( 0x7F ),
28819 unop ( Iop_16to8,
28820 unop ( Iop_32to16,
28821 unop ( Iop_64to32,
28822 unop( Iop_V128HIto64,
28823 mkexpr( vB ) ) ) ) ) ) );
28825 /* mask begin in bits vB[41:47] */
28826 assign( mb,
28827 binop( Iop_And8,
28828 mkU8( 0x7F ),
28829 unop ( Iop_16to8,
28830 unop ( Iop_32to16,
28831 binop( Iop_Shr32,
28832 unop ( Iop_64to32,
28833 unop( Iop_V128HIto64,
28834 mkexpr( vB ) ) ),
28835 mkU8 ( 16 ) ) ) ) ) );
28837 /* mask end in bits vB[49:55] */
28838 assign( tmp,
28839 unop ( Iop_16to8,
28840 unop ( Iop_32to16,
28841 binop( Iop_Shr32,
28842 unop ( Iop_64to32,
28843 unop( Iop_V128HIto64,
28844 mkexpr( vB ) ) ),
28845 mkU8 ( 8 ) ) ) ) );
28847 assign( me,
28848 binop( Iop_Sub8,
28849 mkU8( 127 ),
28850 binop( Iop_And8,
28851 mkU8( 0x7F ),
28852 mkexpr( tmp ) ) ) );
28854 /* Create mask, Start with all 1's, shift right and then left by
28855 (127-me) to clear the lower me bits. Similarly, shift left then
28856 right by mb to clear upper bits. */
28858 assign( mask,
28859 binop( Iop_ShrV128,
28860 binop( Iop_ShlV128,
28861 binop( Iop_ShlV128,
28862 binop( Iop_ShrV128,
28863 binop( Iop_64HLtoV128,
28864 mkU64( 0xFFFFFFFFFFFFFFFF ),
28865 mkU64( 0xFFFFFFFFFFFFFFFF ) ),
28866 mkexpr( me ) ),
28867 mkexpr( me ) ),
28868 mkexpr( mb ) ),
28869 mkexpr( mb ) ) );
28871 assign( shr, binop( Iop_Sub8, mkU8( 128 ), mkexpr( sh ) ) );
28872 assign( vA_shl, binop( Iop_ShlV128, mkexpr( vA ), mkexpr( sh ) ) );
28873 assign( vA_shr, binop( Iop_ShrV128, mkexpr( vA ), mkexpr( shr ) ) );
28875 if (opc2 == 0x045) {
28876 IRTemp vT_initial = newTemp(Ity_V128);
28878 DIP("vrlqmi v%u,v%u,v%u\n", vT_addr, vA_addr, vB_addr);
28880 assign( vT_initial, getVReg( vT_addr ) );
28882 /* Mask rotated value from vA and insert into vT */
28883 putVReg( vT_addr,
28884 binop( Iop_OrV128,
28885 binop( Iop_AndV128,
28886 unop( Iop_NotV128, mkexpr( mask ) ),
28887 mkexpr( vT_initial ) ),
28888 binop( Iop_AndV128,
28889 binop( Iop_OrV128,
28890 mkexpr( vA_shl ),
28891 mkexpr( vA_shr ) ),
28892 mkexpr( mask ) ) ) );
28894 } else {
28895 DIP("vrlqnm v%u,v%u\n", vA_addr, vB_addr);
28897 putVReg( vT_addr,
28898 binop( Iop_AndV128,
28899 binop( Iop_OrV128,
28900 mkexpr( vA_shl ),
28901 mkexpr( vA_shr ) ),
28902 mkexpr( mask ) ) );
28905 break;
28907 case 0x1C7: //vcmpequq Vector Compare Equal Quadword
28908 case 0x5C7: //vcmpequq.
28910 IRTemp eq = newTemp(Ity_I1);
28911 IRTemp cc = newTemp(Ity_I32);
28912 UInt Rc = IFIELD( theInstr, (31-21), 1 );
28913 UInt cc_field = 6;
28915 DIP("vcmpequq%s v%u,v%u,v%u\n",
28916 Rc ? ".":"", vT_addr, vA_addr, vB_addr);
28918 assign ( eq, Quad_precision_int_eq( vA, vB ) );
28920            /* if true cc = 0b0100, if false cc = 0b0010 */
28921 assign( cc, binop( Iop_Or32,
28922 binop( Iop_Shl32,
28923 unop( Iop_1Uto32, mkexpr( eq ) ),
28924 mkU8( 3 ) ),
28925 binop( Iop_Shl32,
28926 unop( Iop_1Uto32,
28927 unop( Iop_Not1, mkexpr( eq ) ) ),
28928 mkU8( 1 ) ) ) );
28930 if (Rc) putGST_field( PPC_GST_CR, mkexpr( cc ), cc_field );
28932 putVReg( vT_addr, binop( Iop_64HLtoV128,
28933 unop( Iop_1Sto64, mkexpr( eq ) ),
28934 unop( Iop_1Sto64, mkexpr( eq ) ) ) );
28936 break;
28938 case 0x287: //vcmpgtuq Vector Compare Greater Than Unsigned Quadword
28939 case 0x687: //vcmpgtuq.
28940 case 0x387: //vcmpgtsq Vector Compare Greater Than Signed Quadword
28941 case 0x787: //vcmpgtsq.
28943 IRTemp gt = newTemp(Ity_I1);
28944 IRTemp cc = newTemp(Ity_I32);
28945 UInt Rc = IFIELD( theInstr, (31-21), 1 );
28946 UInt cc_field = 6;
28948 if ((opc2 == 0x287) || (opc2 == 0x687)) {
28949 DIP("vcmpgtuq%s v%u,v%u,v%u\n",
28950 Rc ? ".":"", vT_addr, vA_addr, vB_addr);
28952 assign ( gt, Quad_precision_uint_gt( vA, vB ) );
28954 } else {
28955 DIP("vcmpgtsq%s v%u,v%u,v%u\n",
28956 Rc ? ".":"", vT_addr, vA_addr, vB_addr);
28958 assign ( gt, Quad_precision_sint_gt( vA, vB ) );
28961            /* if true cc = 0b0100, if false cc = 0b0010 */
28962 assign( cc, binop( Iop_Or32,
28963 binop( Iop_Shl32,
28964 unop( Iop_1Uto32, mkexpr( gt ) ),
28965 mkU8( 3 ) ),
28966 binop( Iop_Shl32,
28967 unop( Iop_1Uto32,
28968 unop( Iop_Not1, mkexpr( gt ) ) ),
28969 mkU8( 1 ) ) ) );
28970 if (Rc) putGST_field( PPC_GST_CR, mkexpr( cc ), cc_field );
28972 putVReg( vT_addr, binop( Iop_64HLtoV128,
28973 unop( Iop_1Sto64, mkexpr( gt ) ),
28974 unop( Iop_1Sto64, mkexpr( gt ) ) ) );
28976 break;
28978 case 0x20B: //vdiveuq Vector Divide Extended Unsigned Quadword VX form
28979 DIP("vdiveuq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
28980 putVReg( vT_addr, binop( Iop_DivU128E, mkexpr( vA ), mkexpr( vB ) ) );
28981 break;
28983 case 0x305: //vsraq Vector Shift Right Algebraic Quadword
28985              IRTemp sh = newTemp(Ity_I8);  /* shift amount is vB[57:63] */
28986 IRTemp shr = newTemp(Ity_I8);
28987 IRTemp tmp = newTemp(Ity_I64);
28988 IRTemp vA_sign = newTemp(Ity_V128); /* sign bit of vA replicated */
28990 DIP("vsraq v%u,v%u,v%u\n", vT_addr, vA_addr, vB_addr);
28992 assign( sh,
28993 binop( Iop_And8,
28994 mkU8( 0x7F ),
28995 unop( Iop_16to8,
28996 unop( Iop_32to16,
28997 unop( Iop_64to32,
28998 unop( Iop_V128HIto64,
28999 mkexpr( vB ) ) ) ) ) ) );
29000 assign( shr, binop( Iop_Sub8, mkU8( 128 ), mkexpr( sh ) ) );
29002 /* Replicate the sign bit in all bit positions if sh is not zero. Clear the lower bits
29003 from [sh:127] by shifting right, then left by (127-sh).
29005 assign( tmp,
29006 binop( Iop_And64,
29007 unop( Iop_1Sto64,
29008 binop( Iop_CmpNE8, mkexpr( sh ), mkU8( 0 ) ) ),
29009 unop( Iop_1Sto64,
29010 unop( Iop_64to1,
29011 binop( Iop_Shr64,
29012 unop( Iop_V128HIto64,
29013 mkexpr( vA ) ),
29014 mkU8( 63 ) ) ) ) ) );
29015 assign( vA_sign,
29016 binop( Iop_ShlV128,
29017 binop( Iop_ShrV128,
29018 binop( Iop_64HLtoV128,
29019 mkexpr( tmp ),
29020 mkexpr( tmp ) ),
29021 mkexpr( shr ) ),
29022 mkexpr( shr ) ) );
29024 putVReg( vT_addr,
29025 binop( Iop_OrV128,
29026 binop( Iop_ShrV128, mkexpr( vA ), mkexpr( sh ) ),
29027 mkexpr( vA_sign ) ) );
29029 break;
29031 case 0x30B: //vdivesq Vector Divide Extended Signed Quadword VX form
29032 DIP("vdivesq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
29033 putVReg( vT_addr, binop( Iop_DivS128E, mkexpr( vA ), mkexpr( vB ) ) );
29034 break;
29036 case 0x60B: //vmoduq Vector Modulo Unsigned Quadword
29037 DIP("vmoduq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
29038 putVReg( vT_addr, binop( Iop_ModU128, mkexpr( vA ), mkexpr( vB ) ) );
29039 break;
29041 case 0x70B: //vmodsq Vector Modulo Signed Quadword
29042 DIP("vmodsq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
29043 putVReg( vT_addr, binop( Iop_ModS128, mkexpr( vA ), mkexpr( vB ) ) );
29044 break;
29046 default:
29047 vex_printf("dis_av_arith(ppc)(opc2 bits[21:31]=0x%x)\n", opc2);
29048 return False;
29049 } /* switch (opc2) */
29051 return True;
29056 AltiVec Logic Instructions
29058 static Bool dis_av_logic ( UInt prefix, UInt theInstr )
29060 /* VX-Form */
29061 UChar opc1 = ifieldOPC(theInstr);
29062 UChar vT_addr = ifieldRegDS(theInstr);
29063 UChar vA_addr = ifieldRegA(theInstr);
29064 UChar vB_addr = ifieldRegB(theInstr);
29065 UInt opc2 = IFIELD( theInstr, 0, 11 );
29067 IRTemp vA = newTemp(Ity_V128);
29068 IRTemp vB = newTemp(Ity_V128);
29069 assign( vA, getVReg(vA_addr));
29070 assign( vB, getVReg(vB_addr));
29072 /* There is no prefixed version of these instructions. */
29073 PREFIX_CHECK
29075 if (opc1 != 0x4) {
29076 vex_printf("dis_av_logic(ppc)(opc1 != 0x4)\n");
29077 return False;
29080 switch (opc2) {
29081 case 0x404: // vand (And, AV p147)
29082 DIP("vand v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
29083 putVReg( vT_addr, binop(Iop_AndV128, mkexpr(vA), mkexpr(vB)) );
29084 break;
29086 case 0x444: // vandc (And, AV p148)
29087 DIP("vandc v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
29088 putVReg( vT_addr, binop(Iop_AndV128, mkexpr(vA),
29089 unop(Iop_NotV128, mkexpr(vB))) );
29090 break;
29092 case 0x484: // vor (Or, AV p217)
29093 DIP("vor v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
29094 putVReg( vT_addr, binop(Iop_OrV128, mkexpr(vA), mkexpr(vB)) );
29095 break;
29097 case 0x4C4: // vxor (Xor, AV p282)
29098 DIP("vxor v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
29099 putVReg( vT_addr, binop(Iop_XorV128, mkexpr(vA), mkexpr(vB)) );
29100 break;
29102 case 0x504: // vnor (Nor, AV p216)
29103 DIP("vnor v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
29104 putVReg( vT_addr,
29105 unop(Iop_NotV128, binop(Iop_OrV128, mkexpr(vA), mkexpr(vB))) );
29106 break;
29108 case 0x544: // vorc (vA Or'd with complement of vb)
29109 DIP("vorc v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
29110 putVReg( vT_addr, binop( Iop_OrV128,
29111 mkexpr( vA ),
29112 unop( Iop_NotV128, mkexpr( vB ) ) ) );
29113 break;
29115 case 0x584: // vnand (Nand)
29116 DIP("vnand v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
29117 putVReg( vT_addr, unop( Iop_NotV128,
29118 binop(Iop_AndV128, mkexpr( vA ),
29119 mkexpr( vB ) ) ) );
29120 break;
29122 case 0x684: // veqv (complemented XOr)
29123 DIP("veqv v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
29124 putVReg( vT_addr, unop( Iop_NotV128,
29125 binop( Iop_XorV128, mkexpr( vA ),
29126 mkexpr( vB ) ) ) );
29127 break;
29129 default:
29130 vex_printf("dis_av_logic(ppc)(opc2=0x%x)\n", opc2);
29131 return False;
29133 return True;
29137 AltiVec Compare Instructions
29139 static Bool dis_av_cmp ( UInt prefix, UInt theInstr )
29141 /* VXR-Form */
29142 UChar opc1 = ifieldOPC(theInstr);
29143 UChar vD_addr = ifieldRegDS(theInstr);
29144 UChar vA_addr = ifieldRegA(theInstr);
29145 UChar vB_addr = ifieldRegB(theInstr);
29146 UChar flag_rC = ifieldBIT10(theInstr);
29147 UInt opc2 = IFIELD( theInstr, 0, 10 );
29149 IRTemp vA = newTemp(Ity_V128);
29150 IRTemp vB = newTemp(Ity_V128);
29151 IRTemp vD = newTemp(Ity_V128);
29153 /* There is no prefixed version of these instructions. */
29154 PREFIX_CHECK
29156 assign( vA, getVReg(vA_addr));
29157 assign( vB, getVReg(vB_addr));
29159 if (opc1 != 0x4) {
29160 vex_printf("dis_av_cmp(ppc)(instr)\n");
29161 return False;
29164 switch (opc2) {
29165 case 0x006: // vcmpequb (Compare Equal-to Unsigned B, AV p160)
29166 DIP("vcmpequb%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29167 vD_addr, vA_addr, vB_addr);
29168 assign( vD, binop(Iop_CmpEQ8x16, mkexpr(vA), mkexpr(vB)) );
29169 break;
29171 case 0x007: // vcmpneb (Compare Not Equal byte)
29172 DIP("vcmpneb%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29173 vD_addr, vA_addr, vB_addr);
29174 assign( vD, unop( Iop_NotV128,
29175 binop( Iop_CmpEQ8x16, mkexpr( vA ), mkexpr( vB ) ) ) );
29176 break;
29178 case 0x046: // vcmpequh (Compare Equal-to Unsigned HW, AV p161)
29179 DIP("vcmpequh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29180 vD_addr, vA_addr, vB_addr);
29181 assign( vD, binop(Iop_CmpEQ16x8, mkexpr(vA), mkexpr(vB)) );
29182 break;
29184 case 0x047: // vcmpneh (Compare Not Equal-to Halfword)
29185 DIP("vcmpneh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29186 vD_addr, vA_addr, vB_addr);
29187 assign( vD, unop( Iop_NotV128,
29188 binop( Iop_CmpEQ16x8, mkexpr( vA ), mkexpr( vB ) ) ) );
29189 break;
29191 case 0x086: // vcmpequw (Compare Equal-to Unsigned W, AV p162)
29192 DIP("vcmpequw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29193 vD_addr, vA_addr, vB_addr);
29194 assign( vD, binop(Iop_CmpEQ32x4, mkexpr(vA), mkexpr(vB)) );
29195 break;
29197 case 0x087: // vcmpnew (Compare Not Equal-to Word)
29198 DIP("vcmpnew%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29199 vD_addr, vA_addr, vB_addr);
29200 assign( vD, unop( Iop_NotV128,
29201 binop( Iop_CmpEQ32x4, mkexpr( vA ), mkexpr( vB ) ) ) );
29202 break;
29204 case 0x107: // vcmpnezb (Compare Not Equal or Zero byte)
29206 IRTemp vAeqvB = newTemp( Ity_V128 );
29207 IRTemp vAeq0 = newTemp( Ity_V128 );
29208 IRTemp vBeq0 = newTemp( Ity_V128 );
29209 IRTemp zero = newTemp( Ity_V128 );
29211 DIP("vcmpnezb%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29212 vD_addr, vA_addr, vB_addr);
29214 assign( zero, binop( Iop_64HLtoV128, mkU64( 0 ), mkU64( 0 ) ) );
29215 assign( vAeq0, binop( Iop_CmpEQ8x16, mkexpr( vA ), mkexpr( zero ) ) );
29216 assign( vBeq0, binop( Iop_CmpEQ8x16, mkexpr( vB ), mkexpr( zero ) ) );
29217 assign( vAeqvB, unop( Iop_NotV128,
29218 binop( Iop_CmpEQ8x16, mkexpr( vA ),
29219 mkexpr( vB ) ) ) );
29221 assign( vD, mkOr3_V128( vAeqvB, vAeq0, vBeq0 ) );
29223 break;
29225 case 0x147: // vcmpnezh (Compare Not Equal or Zero Halfword)
29227 IRTemp vAeqvB = newTemp( Ity_V128 );
29228 IRTemp vAeq0 = newTemp( Ity_V128 );
29229 IRTemp vBeq0 = newTemp( Ity_V128 );
29230 IRTemp zero = newTemp( Ity_V128 );
29232 DIP("vcmpnezh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29233 vD_addr, vA_addr, vB_addr);
29235 assign( zero, binop( Iop_64HLtoV128, mkU64( 0 ), mkU64( 0 ) ) );
29236 assign( vAeq0, binop( Iop_CmpEQ16x8, mkexpr( vA ), mkexpr( zero ) ) );
29237 assign( vBeq0, binop( Iop_CmpEQ16x8, mkexpr( vB ), mkexpr( zero ) ) );
29238 assign( vAeqvB, unop( Iop_NotV128,
29239 binop(Iop_CmpEQ16x8, mkexpr( vA ),
29240 mkexpr( vB ) ) ) );
29242 assign( vD, binop( Iop_OrV128,
29243 binop( Iop_OrV128,
29244 mkexpr( vAeq0 ),
29245 mkexpr( vBeq0 ) ),
29246 mkexpr( vAeqvB ) ) );
29248 break;
29250 case 0x187: // vcmpnezw (Compare Not Equal or Zero Word)
29252 IRTemp vAeqvB = newTemp( Ity_V128 );
29253 IRTemp vAeq0 = newTemp( Ity_V128 );
29254 IRTemp vBeq0 = newTemp( Ity_V128 );
29255 IRTemp zero = newTemp( Ity_V128 );
29257 DIP("vcmpnezw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29258 vD_addr, vA_addr, vB_addr);
29260 assign( zero, binop( Iop_64HLtoV128, mkU64( 0 ), mkU64( 0 ) ) );
29261 assign( vAeq0, binop( Iop_CmpEQ32x4, mkexpr( vA ), mkexpr( zero ) ) );
29262 assign( vBeq0, binop( Iop_CmpEQ32x4, mkexpr( vB ), mkexpr( zero ) ) );
29263 assign( vAeqvB, unop( Iop_NotV128,
29264 binop(Iop_CmpEQ32x4, mkexpr( vA ),
29265 mkexpr( vB ) ) ) );
29267 assign( vD, binop( Iop_OrV128,
29268 binop( Iop_OrV128,
29269 mkexpr( vAeq0 ),
29270 mkexpr( vBeq0 ) ),
29271 mkexpr( vAeqvB ) ) );
29273 break;
29275 case 0x0C7: // vcmpequd (Compare Equal-to Unsigned Doubleword)
29276 DIP("vcmpequd%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29277 vD_addr, vA_addr, vB_addr);
29278 assign( vD, binop(Iop_CmpEQ64x2, mkexpr(vA), mkexpr(vB)) );
29279 break;
29281 case 0x206: // vcmpgtub (Compare Greater-than Unsigned B, AV p168)
29282 DIP("vcmpgtub%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29283 vD_addr, vA_addr, vB_addr);
29284 assign( vD, binop(Iop_CmpGT8Ux16, mkexpr(vA), mkexpr(vB)) );
29285 break;
29287 case 0x246: // vcmpgtuh (Compare Greater-than Unsigned HW, AV p169)
29288 DIP("vcmpgtuh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29289 vD_addr, vA_addr, vB_addr);
29290 assign( vD, binop(Iop_CmpGT16Ux8, mkexpr(vA), mkexpr(vB)) );
29291 break;
29293 case 0x286: // vcmpgtuw (Compare Greater-than Unsigned W, AV p170)
29294 DIP("vcmpgtuw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29295 vD_addr, vA_addr, vB_addr);
29296 assign( vD, binop(Iop_CmpGT32Ux4, mkexpr(vA), mkexpr(vB)) );
29297 break;
29299 case 0x2C7: // vcmpgtud (Compare Greater-than Unsigned double)
29300 DIP("vcmpgtud%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29301 vD_addr, vA_addr, vB_addr);
29302 assign( vD, binop(Iop_CmpGT64Ux2, mkexpr(vA), mkexpr(vB)) );
29303 break;
29305 case 0x306: // vcmpgtsb (Compare Greater-than Signed B, AV p165)
29306 DIP("vcmpgtsb%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29307 vD_addr, vA_addr, vB_addr);
29308 assign( vD, binop(Iop_CmpGT8Sx16, mkexpr(vA), mkexpr(vB)) );
29309 break;
29311 case 0x346: // vcmpgtsh (Compare Greater-than Signed HW, AV p166)
29312 DIP("vcmpgtsh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29313 vD_addr, vA_addr, vB_addr);
29314 assign( vD, binop(Iop_CmpGT16Sx8, mkexpr(vA), mkexpr(vB)) );
29315 break;
29317 case 0x386: // vcmpgtsw (Compare Greater-than Signed W, AV p167)
29318 DIP("vcmpgtsw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29319 vD_addr, vA_addr, vB_addr);
29320 assign( vD, binop(Iop_CmpGT32Sx4, mkexpr(vA), mkexpr(vB)) );
29321 break;
29323 case 0x3C7: // vcmpgtsd (Compare Greater-than Signed double)
29324 DIP("vcmpgtsd%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
29325 vD_addr, vA_addr, vB_addr);
29326 assign( vD, binop(Iop_CmpGT64Sx2, mkexpr(vA), mkexpr(vB)) );
29327 break;
29329 default:
29330 vex_printf("dis_av_cmp(ppc)(opc2)\n");
29331 return False;
29334 putVReg( vD_addr, mkexpr(vD) );
29336 if (flag_rC) {
29337 set_AV_CR6( mkexpr(vD), True );
29339 return True;
29343 AltiVec Multiply-Sum Instructions
29345 static Bool dis_av_multarith ( UInt prefix, UInt theInstr )
29347 /* VA-Form */
29348 UChar opc1 = ifieldOPC(theInstr);
29349 UChar vD_addr = ifieldRegDS(theInstr);
29350 UChar vA_addr = ifieldRegA(theInstr);
29351 UChar vB_addr = ifieldRegB(theInstr);
29352 UChar vC_addr = ifieldRegC(theInstr);
29353 UChar opc2 = toUChar( IFIELD( theInstr, 0, 6 ) );
29355 IRTemp vA = newTemp(Ity_V128);
29356 IRTemp vB = newTemp(Ity_V128);
29357 IRTemp vC = newTemp(Ity_V128);
29358 IRTemp zeros = newTemp(Ity_V128);
29359 IRTemp aLo = newTemp(Ity_V128);
29360 IRTemp bLo = newTemp(Ity_V128);
29361 IRTemp cLo = newTemp(Ity_V128);
29362 IRTemp zLo = newTemp(Ity_V128);
29363 IRTemp aHi = newTemp(Ity_V128);
29364 IRTemp bHi = newTemp(Ity_V128);
29365 IRTemp cHi = newTemp(Ity_V128);
29366 IRTemp zHi = newTemp(Ity_V128);
29367 IRTemp abEvn = newTemp(Ity_V128);
29368 IRTemp abOdd = newTemp(Ity_V128);
29369 IRTemp z3 = newTemp(Ity_I64);
29370 IRTemp z2 = newTemp(Ity_I64);
29371 IRTemp z1 = newTemp(Ity_I64);
29372 IRTemp z0 = newTemp(Ity_I64);
29373 IRTemp ab7, ab6, ab5, ab4, ab3, ab2, ab1, ab0;
29374 IRTemp c3, c2, c1, c0;
29376 /* There is no prefixed version of these instructions. */
29377 PREFIX_CHECK
29379 ab7 = ab6 = ab5 = ab4 = ab3 = ab2 = ab1 = ab0 = IRTemp_INVALID;
29380 c3 = c2 = c1 = c0 = IRTemp_INVALID;
29382 assign( vA, getVReg(vA_addr));
29383 assign( vB, getVReg(vB_addr));
29384 assign( vC, getVReg(vC_addr));
29385 assign( zeros, unop(Iop_Dup32x4, mkU32(0)) );
29387 if (opc1 != 0x4) {
29388 vex_printf("dis_av_multarith(ppc)(instr)\n");
29389 return False;
29392 switch (opc2) {
29393 /* Multiply-Add */
29394 case 0x20: { // vmhaddshs (Mult Hi, Add Signed HW Saturate, AV p185)
29395 IRTemp cSigns = newTemp(Ity_V128);
29396 DIP("vmhaddshs v%d,v%d,v%d,v%d\n",
29397 vD_addr, vA_addr, vB_addr, vC_addr);
29398 assign(cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)));
29399 assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)));
29400 assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)));
29401 assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns),mkexpr(vC)));
29402 assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)));
29403 assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)));
29404 assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns),mkexpr(vC)));
29406 assign( zLo, binop(Iop_Add32x4, mkexpr(cLo),
29407 binop(Iop_SarN32x4,
29408 binop(Iop_MullEven16Sx8,
29409 mkexpr(aLo), mkexpr(bLo)),
29410 mkU8(15))) );
29412 assign( zHi, binop(Iop_Add32x4, mkexpr(cHi),
29413 binop(Iop_SarN32x4,
29414 binop(Iop_MullEven16Sx8,
29415 mkexpr(aHi), mkexpr(bHi)),
29416 mkU8(15))) );
29418 putVReg( vD_addr,
29419 binop(Iop_QNarrowBin32Sto16Sx8, mkexpr(zHi), mkexpr(zLo)) );
29420 break;
29422 case 0x21: { // vmhraddshs (Mult High Round, Add Signed HW Saturate, AV p186)
29423 IRTemp zKonst = newTemp(Ity_V128);
29424 IRTemp cSigns = newTemp(Ity_V128);
29425 DIP("vmhraddshs v%d,v%d,v%d,v%d\n",
29426 vD_addr, vA_addr, vB_addr, vC_addr);
29427 assign(cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)) );
29428 assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)));
29429 assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)));
29430 assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns),mkexpr(vC)));
29431 assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)));
29432 assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)));
29433 assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns),mkexpr(vC)));
29435 /* shifting our const avoids store/load version of Dup */
29436 assign( zKonst, binop(Iop_ShlN32x4, unop(Iop_Dup32x4, mkU32(0x1)),
29437 mkU8(14)) );
29439 assign( zLo, binop(Iop_Add32x4, mkexpr(cLo),
29440 binop(Iop_SarN32x4,
29441 binop(Iop_Add32x4, mkexpr(zKonst),
29442 binop(Iop_MullEven16Sx8,
29443 mkexpr(aLo), mkexpr(bLo))),
29444 mkU8(15))) );
29446 assign( zHi, binop(Iop_Add32x4, mkexpr(cHi),
29447 binop(Iop_SarN32x4,
29448 binop(Iop_Add32x4, mkexpr(zKonst),
29449 binop(Iop_MullEven16Sx8,
29450 mkexpr(aHi), mkexpr(bHi))),
29451 mkU8(15))) );
29453 putVReg( vD_addr,
29454 binop(Iop_QNarrowBin32Sto16Sx8, mkexpr(zHi), mkexpr(zLo)) );
29455 break;
29457 case 0x22: { // vmladduhm (Mult Low, Add Unsigned HW Modulo, AV p194)
29458 DIP("vmladduhm v%d,v%d,v%d,v%d\n",
29459 vD_addr, vA_addr, vB_addr, vC_addr);
29460 assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)));
29461 assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)));
29462 assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vC)));
29463 assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)));
29464 assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)));
29465 assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vC)));
29466 assign(zLo, binop(Iop_Add32x4,
29467 binop(Iop_MullEven16Ux8, mkexpr(aLo), mkexpr(bLo)),
29468 mkexpr(cLo)) );
29469 assign(zHi, binop(Iop_Add32x4,
29470 binop(Iop_MullEven16Ux8, mkexpr(aHi), mkexpr(bHi)),
29471 mkexpr(cHi)));
29472 putVReg( vD_addr,
29473 binop(Iop_NarrowBin32to16x8, mkexpr(zHi), mkexpr(zLo)) );
29474 break;
29477 case 0x23: { // vmsumudm
29478 DIP("vmsumudm v%d,v%d,v%d,v%d\n",
29479 vD_addr, vA_addr, vB_addr, vC_addr);
29480          /* This instruction takes input vectors VA, VB consisting of 2 unsigned
29481 64-bit integer elements and a 128 bit unsigned input U128_C. The
29482 instruction performs the following operation:
29484 VA[0] * VB[0] -> U128_mul_result0;
29485 VA[1] * VB[1] -> U128_mul_result1;
29486 U128_C + U128_mul_result0 + U128_mul_result1 -> U128_partial_sum;
29487 carry out and overflow is discarded.
29490 /* The Iop_MulI128low assumes the upper 64-bits in the two input operands
29491 are zero. */
29492 IRTemp mul_result0 = newTemp( Ity_I128 );
29493 IRTemp mul_result1 = newTemp( Ity_I128 );
29494 IRTemp partial_sum_hi = newTemp( Ity_I64 );
29495 IRTemp partial_sum_low = newTemp( Ity_I64 );
29496 IRTemp result_hi = newTemp( Ity_I64 );
29497 IRTemp result_low = newTemp( Ity_I64 );
29498 IRExpr *ca_sum, *ca_result;
29501 /* Do multiplications */
29502 assign ( mul_result0, binop( Iop_MullU64,
29503 unop( Iop_V128to64, mkexpr( vA ) ),
29504 unop( Iop_V128to64, mkexpr( vB) ) ) );
29506 assign ( mul_result1, binop( Iop_MullU64,
29507 unop( Iop_V128HIto64, mkexpr( vA ) ),
29508 unop( Iop_V128HIto64, mkexpr( vB) ) ) );
29510 /* Add the two 128-bit results using 64-bit unsigned adds, calculate carry
29511 from low 64-bits add into sum of upper 64-bits. Throw away carry out
29512 of the upper 64-bit sum. */
29513 assign ( partial_sum_low, binop( Iop_Add64,
29514 unop( Iop_128to64, mkexpr( mul_result0 ) ),
29515 unop( Iop_128to64, mkexpr( mul_result1 ) )
29516 ) );
29518 /* ca_sum is type U32 */
29519 ca_sum = calculate_XER_CA_64 ( PPCG_FLAG_OP_ADD,
29520 mkexpr(partial_sum_low ),
29521 unop( Iop_128to64, mkexpr( mul_result0 ) ),
29522 unop( Iop_128to64, mkexpr( mul_result1 ) ),
29523 mkU64( 0 ) );
29525 assign ( partial_sum_hi,
29526 binop( Iop_Add64,
29527 binop( Iop_Add64,
29528 unop( Iop_128HIto64, mkexpr( mul_result0 ) ),
29529 unop( Iop_128HIto64, mkexpr( mul_result1 ) ) ),
29530 binop( Iop_32HLto64, mkU32( 0 ), ca_sum ) ) );
29532 /* Now add in the value of C */
29533 assign ( result_low, binop( Iop_Add64,
29534 mkexpr( partial_sum_low ),
29535 unop( Iop_V128to64, mkexpr( vC ) ) ) );
29537 /* ca_result is type U32 */
29538 ca_result = calculate_XER_CA_64( PPCG_FLAG_OP_ADD,
29539 mkexpr( result_low ),
29540 mkexpr( partial_sum_low ),
29541 unop( Iop_V128to64,
29542 mkexpr( vC ) ),
29543 mkU64( 0 ) );
29545 assign ( result_hi,
29546 binop( Iop_Add64,
29547 binop( Iop_Add64,
29548 mkexpr( partial_sum_hi ),
29549 unop( Iop_V128HIto64, mkexpr( vC ) ) ),
29550 binop( Iop_32HLto64, mkU32( 0 ), ca_result ) ) );
29552 putVReg( vD_addr, binop( Iop_64HLtoV128,
29553 mkexpr( result_hi ), mkexpr ( result_low ) ) );
29554 break;
29557 /* Multiply-Sum */
29558 case 0x24: { // vmsumubm (Multiply Sum Unsigned B Modulo, AV p204)
29559 IRTemp abEE, abEO, abOE, abOO;
29560 abEE = abEO = abOE = abOO = IRTemp_INVALID;
29561 DIP("vmsumubm v%d,v%d,v%d,v%d\n",
29562 vD_addr, vA_addr, vB_addr, vC_addr);
29564 /* multiply vA,vB (unsigned, widening) */
29565 assign( abEvn, MK_Iop_MullOdd8Ux16( mkexpr(vA), mkexpr(vB) ));
29566 assign( abOdd, binop(Iop_MullEven8Ux16, mkexpr(vA), mkexpr(vB)) );
29568 /* evn,odd: V128_16Ux8 -> 2 x V128_32Ux4, zero-extended */
29569 expand16Ux8( mkexpr(abEvn), &abEE, &abEO );
29570 expand16Ux8( mkexpr(abOdd), &abOE, &abOO );
29572 putVReg( vD_addr,
29573 binop(Iop_Add32x4, mkexpr(vC),
29574 binop(Iop_Add32x4,
29575 binop(Iop_Add32x4, mkexpr(abEE), mkexpr(abEO)),
29576 binop(Iop_Add32x4, mkexpr(abOE), mkexpr(abOO)))) );
29577 break;
29579 case 0x25: { // vmsummbm (Multiply Sum Mixed-Sign B Modulo, AV p201)
29580 IRTemp aEvn, aOdd, bEvn, bOdd;
29581 IRTemp abEE = newTemp(Ity_V128);
29582 IRTemp abEO = newTemp(Ity_V128);
29583 IRTemp abOE = newTemp(Ity_V128);
29584 IRTemp abOO = newTemp(Ity_V128);
29585 IRTemp prod = newTemp(Ity_V128);
29586 IRTemp sum0 = newTemp(Ity_I32);
29587 IRTemp sum1 = newTemp(Ity_I32);
29588 IRTemp sum2 = newTemp(Ity_I32);
29589 IRTemp sum3 = newTemp(Ity_I32);
29591 aEvn = aOdd = bEvn = bOdd = IRTemp_INVALID;
29592 DIP("vmsummbm v%d,v%d,v%d,v%d\n",
29593 vD_addr, vA_addr, vB_addr, vC_addr);
29595 /* sign-extend vA, zero-extend vB, for mixed-sign multiply
29596 (separating out adjacent lanes to different vectors) */
29597 expand8Sx16( mkexpr(vA), &aEvn, &aOdd );
29598 expand8Ux16( mkexpr(vB), &bEvn, &bOdd );
29600 /* multiply vA, vB, again separating adjacent lanes */
29601 assign( abEE, MK_Iop_MullOdd16Sx8( mkexpr(aEvn), mkexpr(bEvn) ));
29602 assign( abEO, binop(Iop_MullEven16Sx8, mkexpr(aEvn), mkexpr(bEvn)) );
29603 assign( abOE, MK_Iop_MullOdd16Sx8( mkexpr(aOdd), mkexpr(bOdd) ));
29604 assign( abOO, binop(Iop_MullEven16Sx8, mkexpr(aOdd), mkexpr(bOdd)) );
29606 /* add results together, + vC */
29607 /* Unfortunately, we need to chop the results of the adds to 32-bits. The
29608 following lane based calculations don't handle the overflow correctly. Need
29609 to explicitly do the adds and 32-bit chops.
29611 putVReg( vD_addr,
29612 binop(Iop_QAdd32Sx4, mkexpr(vC),
29613 binop(Iop_QAdd32Sx4,
29614 binop(Iop_QAdd32Sx4, mkexpr(abEE), mkexpr(abEO)),
29615 binop(Iop_QAdd32Sx4, mkexpr(abOE), mkexpr(abOO)))) );
29618 assign(prod,
29619 binop(Iop_QAdd32Sx4,
29620 binop(Iop_QAdd32Sx4, mkexpr(abEE), mkexpr(abEO)),
29621 binop(Iop_QAdd32Sx4, mkexpr(abOE), mkexpr(abOO))));
29622 assign( sum0,
29623 unop(Iop_64to32,
29624 binop(Iop_Add64,
29625 unop(Iop_32Sto64,
29626 unop(Iop_64HIto32, unop(Iop_V128HIto64, mkexpr(prod)))),
29627 unop(Iop_32Sto64,
29628 unop(Iop_64HIto32, unop(Iop_V128HIto64, mkexpr(vC)))))));
29629 assign( sum1,
29630 unop(Iop_64to32,
29631 binop(Iop_Add64,
29632 unop(Iop_32Sto64,
29633 unop(Iop_64to32, unop(Iop_V128HIto64, mkexpr(prod)))),
29634 unop(Iop_32Sto64,
29635 unop(Iop_64to32, unop(Iop_V128HIto64, mkexpr(vC)))))));
29636 assign( sum2,
29637 unop(Iop_64to32,
29638 binop(Iop_Add64,
29639 unop(Iop_32Sto64,
29640 unop(Iop_64HIto32, unop(Iop_V128to64, mkexpr(prod)))),
29641 unop(Iop_32Sto64,
29642 unop(Iop_64HIto32, unop(Iop_V128to64, mkexpr(vC)))))));
29643 assign( sum3,
29644 unop(Iop_64to32,
29645 binop(Iop_Add64,
29646 unop(Iop_32Sto64,
29647 unop(Iop_64to32, unop(Iop_V128to64, mkexpr(prod)))),
29648 unop(Iop_32Sto64,
29649 unop(Iop_64to32, unop(Iop_V128to64, mkexpr(vC)))))));
29650 putVReg( vD_addr, binop(Iop_64HLtoV128,
29651 binop(Iop_32HLto64, mkexpr(sum0), mkexpr(sum1)),
29652 binop(Iop_32HLto64, mkexpr(sum2), mkexpr(sum3))));
29654 break;
29656 case 0x26: { // vmsumuhm (Multiply Sum Unsigned HW Modulo, AV p205)
29657 DIP("vmsumuhm v%d,v%d,v%d,v%d\n",
29658 vD_addr, vA_addr, vB_addr, vC_addr);
29659 assign( abEvn, MK_Iop_MullOdd16Ux8( mkexpr(vA), mkexpr(vB) ));
29660 assign( abOdd, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)) );
29661 putVReg( vD_addr,
29662 binop(Iop_Add32x4, mkexpr(vC),
29663 binop(Iop_Add32x4, mkexpr(abEvn), mkexpr(abOdd))) );
29664 break;
29666 case 0x27: { // vmsumuhs (Multiply Sum Unsigned HW Saturate, AV p206)
29667 DIP("vmsumuhs v%d,v%d,v%d,v%d\n",
29668 vD_addr, vA_addr, vB_addr, vC_addr);
29669 /* widening multiply, separating lanes */
29670 assign( abEvn, MK_Iop_MullOdd16Ux8(mkexpr(vA), mkexpr(vB) ));
29671 assign( abOdd, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)) );
29673 /* break V128 to 4xI32's, zero-extending to I64's */
29674 breakV128to4x64U( mkexpr(abEvn), &ab7, &ab5, &ab3, &ab1 );
29675 breakV128to4x64U( mkexpr(abOdd), &ab6, &ab4, &ab2, &ab0 );
29676 breakV128to4x64U( mkexpr(vC), &c3, &c2, &c1, &c0 );
29678 /* add lanes */
29679 assign( z3, binop(Iop_Add64, mkexpr(c3),
29680 binop(Iop_Add64, mkexpr(ab7), mkexpr(ab6))));
29681 assign( z2, binop(Iop_Add64, mkexpr(c2),
29682 binop(Iop_Add64, mkexpr(ab5), mkexpr(ab4))));
29683 assign( z1, binop(Iop_Add64, mkexpr(c1),
29684 binop(Iop_Add64, mkexpr(ab3), mkexpr(ab2))));
29685 assign( z0, binop(Iop_Add64, mkexpr(c0),
29686 binop(Iop_Add64, mkexpr(ab1), mkexpr(ab0))));
29688 /* saturate-narrow to 32bit, and combine to V128 */
29689 putVReg( vD_addr, mkV128from4x64U( mkexpr(z3), mkexpr(z2),
29690 mkexpr(z1), mkexpr(z0)) );
29692 break;
29694 case 0x28: { // vmsumshm (Multiply Sum Signed HW Modulo, AV p202)
29695 DIP("vmsumshm v%d,v%d,v%d,v%d\n",
29696 vD_addr, vA_addr, vB_addr, vC_addr);
29697 assign( abEvn, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) ));
29698 assign( abOdd, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)) );
29699 putVReg( vD_addr,
29700 binop(Iop_Add32x4, mkexpr(vC),
29701 binop(Iop_Add32x4, mkexpr(abOdd), mkexpr(abEvn))) );
29702 break;
29704 case 0x29: { // vmsumshs (Multiply Sum Signed HW Saturate, AV p203)
29705 DIP("vmsumshs v%d,v%d,v%d,v%d\n",
29706 vD_addr, vA_addr, vB_addr, vC_addr);
29707 /* widening multiply, separating lanes */
29708 assign( abEvn, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) ));
29709 assign( abOdd, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)) );
29711 /* break V128 to 4xI32's, sign-extending to I64's */
29712 breakV128to4x64S( mkexpr(abEvn), &ab7, &ab5, &ab3, &ab1 );
29713 breakV128to4x64S( mkexpr(abOdd), &ab6, &ab4, &ab2, &ab0 );
29714 breakV128to4x64S( mkexpr(vC), &c3, &c2, &c1, &c0 );
29716 /* add lanes */
29717 assign( z3, binop(Iop_Add64, mkexpr(c3),
29718 binop(Iop_Add64, mkexpr(ab7), mkexpr(ab6))));
29719 assign( z2, binop(Iop_Add64, mkexpr(c2),
29720 binop(Iop_Add64, mkexpr(ab5), mkexpr(ab4))));
29721 assign( z1, binop(Iop_Add64, mkexpr(c1),
29722 binop(Iop_Add64, mkexpr(ab3), mkexpr(ab2))));
29723 assign( z0, binop(Iop_Add64, mkexpr(c0),
29724 binop(Iop_Add64, mkexpr(ab1), mkexpr(ab0))));
29726 /* saturate-narrow to 32bit, and combine to V128 */
29727 putVReg( vD_addr, mkV128from4x64S( mkexpr(z3), mkexpr(z2),
29728 mkexpr(z1), mkexpr(z0)) );
29729 break;
29731 default:
29732 vex_printf("dis_av_multarith(ppc)(opc2)\n");
29733 return False;
29735 return True;
29739 AltiVec Polynomial Multiply-Sum Instructions
29741 static Bool dis_av_polymultarith ( UInt prefix, UInt theInstr )
29743 /* VA-Form */
29744 UChar opc1 = ifieldOPC(theInstr);
29745 UChar vD_addr = ifieldRegDS(theInstr);
29746 UChar vA_addr = ifieldRegA(theInstr);
29747 UChar vB_addr = ifieldRegB(theInstr);
29748 UChar vC_addr = ifieldRegC(theInstr);
29749 UInt opc2 = IFIELD(theInstr, 0, 11);
29750 IRTemp vA = newTemp(Ity_V128);
29751 IRTemp vB = newTemp(Ity_V128);
29752 IRTemp vC = newTemp(Ity_V128);
29754 /* There is no prefixed version of these instructions. */
29755 PREFIX_CHECK
29757 assign( vA, getVReg(vA_addr));
29758 assign( vB, getVReg(vB_addr));
29759 assign( vC, getVReg(vC_addr));
29761 if (opc1 != 0x4) {
29762 vex_printf("dis_av_polymultarith(ppc)(instr)\n");
29763 return False;
29766 switch (opc2) {
29767 /* Polynomial Multiply-Add */
29768 case 0x408: // vpmsumb Vector Polynomial Multiply-sum Byte
29769 DIP("vpmsumb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
29770 putVReg( vD_addr, binop(Iop_PolynomialMulAdd8x16,
29771 mkexpr(vA), mkexpr(vB)) );
29772 break;
29773 case 0x448: // vpmsumd Vector Polynomial Multiply-sum Double Word
29774 DIP("vpmsumd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
29775 putVReg( vD_addr, binop(Iop_PolynomialMulAdd64x2,
29776 mkexpr(vA), mkexpr(vB)) );
29777 break;
29778 case 0x488: // vpmsumw Vector Polynomial Multiply-sum Word
29779 DIP("vpmsumw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
29780 putVReg( vD_addr, binop(Iop_PolynomialMulAdd32x4,
29781 mkexpr(vA), mkexpr(vB)) );
29782 break;
29783 case 0x4C8: // vpmsumh Vector Polynomial Multiply-sum Half Word
29784 DIP("vpmsumh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
29785 putVReg( vD_addr, binop(Iop_PolynomialMulAdd16x8,
29786 mkexpr(vA), mkexpr(vB)) );
29787 break;
29788 default:
29789 vex_printf("dis_av_polymultarith(ppc)(opc2=0x%x)\n", opc2);
29790 return False;
29792 return True;
29796 AltiVec Shift/Rotate Instructions
29798 static Bool dis_av_shift ( UInt prefix, UInt theInstr )
29800 /* VX-Form */
29801 UChar opc1 = ifieldOPC(theInstr);
29802 UChar vD_addr = ifieldRegDS(theInstr);
29803 UChar vA_addr = ifieldRegA(theInstr);
29804 UChar vB_addr = ifieldRegB(theInstr);
29805 UInt opc2_vx_form = IFIELD( theInstr, 0, 11 );
29806 UInt opc2_vn_form = IFIELD( theInstr, 0, 6 );
29808 IRTemp vA = newTemp(Ity_V128);
29809 IRTemp vB = newTemp(Ity_V128);
29811 /* There is no prefixed version of these instructions. */
29812 PREFIX_CHECK
29814 assign( vA, getVReg(vA_addr));
29815 assign( vB, getVReg(vB_addr));
29817 if (opc1 != 0x4){
29818 vex_printf("dis_av_shift(ppc)(instr)\n");
29819 return False;
29821 if (opc2_vn_form == 0x16) {
29822 UInt SH = IFIELD( theInstr, 6, 3 );
29823 UInt bit21_22 = IFIELD( theInstr, 9, 2 );
29824 IRTemp Middle_128 = newTemp(Ity_V128);
29825 IRTemp tmpLo_64 = newTemp(Ity_I64);
29826 IRTemp tmpHi_64 = newTemp(Ity_I64);
29827 IRTemp result = newTemp(Ity_V128);
29829 assign( Middle_128, binop( Iop_64HLtoV128,
29830 unop( Iop_V128to64, mkexpr( vA ) ),
29831 unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
29833 if (bit21_22 == 0) {
29834 // Vector Shift Left Double by Bit Immediate VN-form
29835 DIP("vsldbi v%u,v%u,v%u,%u\n", vD_addr, vA_addr, vB_addr, SH);
29837 assign( tmpHi_64,
29838 unop( Iop_V128HIto64,
29839 binop( Iop_ShlV128,
29840 mkexpr( vA ),
29841 mkU8( SH ) ) ) );
29842 assign( result,
29843 binop( Iop_64HLtoV128,
29844 mkexpr( tmpHi_64 ),
29845 unop( Iop_V128HIto64,
29846 binop( Iop_ShlV128,
29847 mkexpr( Middle_128 ),
29848 mkU8( SH ) ) ) ) );
29849 } else {
29850 // Vector Shift right Double by Bit Immediate VN-form
29851 DIP("vsrdbi v%u,v%u,v%u,%u\n", vD_addr, vA_addr, vB_addr, SH);
29853 assign( tmpLo_64,
29854 unop( Iop_V128to64,
29855 binop( Iop_ShrV128,
29856 mkexpr( vB ),
29857 mkU8( SH ) ) ) );
29858 assign( result,
29859 binop( Iop_64HLtoV128,
29860 unop( Iop_V128to64,
29861 binop( Iop_ShrV128,
29862 mkexpr( Middle_128 ),
29863 mkU8( SH ) ) ),
29864 mkexpr( tmpLo_64 ) ) );
29866 putVReg( vD_addr, mkexpr( result ) );
29867 return True;
29870 switch (opc2_vx_form) {
29871 /* Rotate */
29872 case 0x004: // vrlb (Rotate Left Integer B, AV p234)
29873 DIP("vrlb v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29874 putVReg( vD_addr, binop(Iop_Rol8x16, mkexpr(vA), mkexpr(vB)) );
29875 break;
29877 case 0x044: // vrlh (Rotate Left Integer HW, AV p235)
29878 DIP("vrlh v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29879 putVReg( vD_addr, binop(Iop_Rol16x8, mkexpr(vA), mkexpr(vB)) );
29880 break;
29882 case 0x084: // vrlw (Rotate Left Integer W, AV p236)
29883 DIP("vrlw v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29884 putVReg( vD_addr, binop(Iop_Rol32x4, mkexpr(vA), mkexpr(vB)) );
29885 break;
29887 case 0x0C4: // vrld (Rotate Left Integer Double Word)
29888 DIP("vrld v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29889 putVReg( vD_addr, binop(Iop_Rol64x2, mkexpr(vA), mkexpr(vB)) );
29890 break;
29893 /* Shift Left */
29894 case 0x104: // vslb (Shift Left Integer B, AV p240)
29895 DIP("vslb v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29896 putVReg( vD_addr, binop(Iop_Shl8x16, mkexpr(vA), mkexpr(vB)) );
29897 break;
29899 case 0x144: // vslh (Shift Left Integer HW, AV p242)
29900 DIP("vslh v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29901 putVReg( vD_addr, binop(Iop_Shl16x8, mkexpr(vA), mkexpr(vB)) );
29902 break;
29904 case 0x184: // vslw (Shift Left Integer W, AV p244)
29905 DIP("vslw v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29906 putVReg( vD_addr, binop(Iop_Shl32x4, mkexpr(vA), mkexpr(vB)) );
29907 break;
29909 case 0x5C4: // vsld (Shift Left Integer Double Word)
29910 DIP("vsld v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29911 putVReg( vD_addr, binop(Iop_Shl64x2, mkexpr(vA), mkexpr(vB)) );
29912 break;
29914 case 0x1C4: { // vsl (Shift Left, AV p239)
29915 IRTemp sh = newTemp(Ity_I8);
29916 DIP("vsl v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29917 assign( sh, binop(Iop_And8, mkU8(0x7),
29918 unop(Iop_32to8,
29919 unop(Iop_V128to32, mkexpr(vB)))) );
29920 putVReg( vD_addr,
29921 binop(Iop_ShlV128, mkexpr(vA), mkexpr(sh)) );
29922 break;
29924 case 0x40C: { // vslo (Shift Left by Octet, AV p243)
29925 IRTemp sh = newTemp(Ity_I8);
29926 DIP("vslo v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29927 assign( sh, binop(Iop_And8, mkU8(0x78),
29928 unop(Iop_32to8,
29929 unop(Iop_V128to32, mkexpr(vB)))) );
29930 putVReg( vD_addr,
29931 binop(Iop_ShlV128, mkexpr(vA), mkexpr(sh)) );
29932 break;
29936 /* Shift Right */
29937 case 0x204: // vsrb (Shift Right B, AV p256)
29938 DIP("vsrb v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29939 putVReg( vD_addr, binop(Iop_Shr8x16, mkexpr(vA), mkexpr(vB)) );
29940 break;
29942 case 0x244: // vsrh (Shift Right HW, AV p257)
29943 DIP("vsrh v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29944 putVReg( vD_addr, binop(Iop_Shr16x8, mkexpr(vA), mkexpr(vB)) );
29945 break;
29947 case 0x284: // vsrw (Shift Right W, AV p259)
29948 DIP("vsrw v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29949 putVReg( vD_addr, binop(Iop_Shr32x4, mkexpr(vA), mkexpr(vB)) );
29950 break;
29952 case 0x2C4: { // vsr (Shift Right, AV p251)
29953 IRTemp sh = newTemp(Ity_I8);
29954 DIP("vsr v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29955 assign( sh, binop(Iop_And8, mkU8(0x7),
29956 unop(Iop_32to8,
29957 unop(Iop_V128to32, mkexpr(vB)))) );
29958 putVReg( vD_addr,
29959 binop(Iop_ShrV128, mkexpr(vA), mkexpr(sh)) );
29960 break;
29962 case 0x304: // vsrab (Shift Right Alg B, AV p253)
29963 DIP("vsrab v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29964 putVReg( vD_addr, binop(Iop_Sar8x16, mkexpr(vA), mkexpr(vB)) );
29965 break;
29967 case 0x344: // vsrah (Shift Right Alg HW, AV p254)
29968 DIP("vsrah v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29969 putVReg( vD_addr, binop(Iop_Sar16x8, mkexpr(vA), mkexpr(vB)) );
29970 break;
29972 case 0x384: // vsraw (Shift Right Alg W, AV p255)
29973 DIP("vsraw v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29974 putVReg( vD_addr, binop(Iop_Sar32x4, mkexpr(vA), mkexpr(vB)) );
29975 break;
29977 case 0x3C4: // vsrad (Shift Right Alg Double Word)
29978 DIP("vsrad v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29979 putVReg( vD_addr, binop(Iop_Sar64x2, mkexpr(vA), mkexpr(vB)) );
29980 break;
29982 case 0x44C: { // vsro (Shift Right by Octet, AV p258)
29983 IRTemp sh = newTemp(Ity_I8);
29984 DIP("vsro v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29985 assign( sh, binop(Iop_And8, mkU8(0x78),
29986 unop(Iop_32to8,
29987 unop(Iop_V128to32, mkexpr(vB)))) );
29988 putVReg( vD_addr,
29989 binop(Iop_ShrV128, mkexpr(vA), mkexpr(sh)) );
29990 break;
29993 case 0x6C4: // vsrd (Shift Right Double Word)
29994 DIP("vsrd v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
29995 putVReg( vD_addr, binop(Iop_Shr64x2, mkexpr(vA), mkexpr(vB)) );
29996 break;
29999 default:
30000 vex_printf("dis_av_shift(ppc)(opc2)\n");
30001 return False;
30003 return True;
30007 AltiVec Permute Instructions
30009 static Bool dis_av_permute ( UInt prefix, UInt theInstr )
30011 /* VA-Form, VX-Form */
30012 UChar opc1 = ifieldOPC(theInstr);
30013 UChar vD_addr = ifieldRegDS(theInstr);
30014 UChar vA_addr = ifieldRegA(theInstr);
30015 UChar UIMM_5 = vA_addr;
30016 UChar vB_addr = ifieldRegB(theInstr);
30017 UChar vC_addr = ifieldRegC(theInstr);
30018 UChar b10 = ifieldBIT10(theInstr);
30019 UChar SHB_uimm4 = toUChar( IFIELD( theInstr, 6, 4 ) );
30020 UInt opc2 = toUChar( IFIELD( theInstr, 0, 6 ) );
30022 UChar SIMM_8 = extend_s_5to8(UIMM_5);
30024 IRTemp vA = newTemp(Ity_V128);
30025 IRTemp vB = newTemp(Ity_V128);
30026 IRTemp vC = newTemp(Ity_V128);
30028 /* There is no prefixed version of these instructions. */
30029 PREFIX_CHECK
30031 assign( vA, getVReg(vA_addr));
30032 assign( vB, getVReg(vB_addr));
30033 assign( vC, getVReg(vC_addr));
30035 if (opc1 != 0x4) {
30036 vex_printf("dis_av_permute(ppc)(instr)\n");
30037 return False;
30040 switch (opc2) {
30041 case 0x2A: // vsel (Conditional Select, AV p238)
30042 DIP("vsel v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
30043 /* vD = (vA & ~vC) | (vB & vC) */
30044 putVReg( vD_addr, binop(Iop_OrV128,
30045 binop(Iop_AndV128, mkexpr(vA), unop(Iop_NotV128, mkexpr(vC))),
30046 binop(Iop_AndV128, mkexpr(vB), mkexpr(vC))) );
30047 return True;
30049 case 0x2B: { // vperm (Permute, AV p218)
30050 /* limited to two args for IR, so have to play games... */
30051 IRTemp a_perm = newTemp(Ity_V128);
30052 IRTemp b_perm = newTemp(Ity_V128);
30053 IRTemp mask = newTemp(Ity_V128);
30054 IRTemp vC_andF = newTemp(Ity_V128);
30055 DIP("vperm v%d,v%d,v%d,v%d\n",
30056 vD_addr, vA_addr, vB_addr, vC_addr);
30057 /* Limit the Perm8x16 steering values to 0 .. 15 as that is what
30058 IR specifies, and also to hide irrelevant bits from
30059 memcheck */
30060 assign( vC_andF,
30061 binop(Iop_AndV128, mkexpr(vC),
30062 unop(Iop_Dup8x16, mkU8(0xF))) );
30063 assign( a_perm,
30064 binop(Iop_Perm8x16, mkexpr(vA), mkexpr(vC_andF)) );
30065 assign( b_perm,
30066 binop(Iop_Perm8x16, mkexpr(vB), mkexpr(vC_andF)) );
30067 // mask[i8] = (vC[i8]_4 == 1) ? 0xFF : 0x0
30068 assign( mask, binop(Iop_SarN8x16,
30069 binop(Iop_ShlN8x16, mkexpr(vC), mkU8(3)),
30070 mkU8(7)) );
30071 // dst = (a & ~mask) | (b & mask)
30072 putVReg( vD_addr, binop(Iop_OrV128,
30073 binop(Iop_AndV128, mkexpr(a_perm),
30074 unop(Iop_NotV128, mkexpr(mask))),
30075 binop(Iop_AndV128, mkexpr(b_perm),
30076 mkexpr(mask))) );
30077 return True;
30079 case 0x2C: // vsldoi (Shift Left Double by Octet Imm, AV p241)
30080 if (b10 != 0) {
30081 vex_printf("dis_av_permute(ppc)(vsldoi)\n");
30082 return False;
30084 DIP("vsldoi v%d,v%d,v%d,%d\n",
30085 vD_addr, vA_addr, vB_addr, SHB_uimm4);
30086 if (SHB_uimm4 == 0)
30087 putVReg( vD_addr, mkexpr(vA) );
30088 else
30089 putVReg( vD_addr,
30090 binop(Iop_OrV128,
30091 binop(Iop_ShlV128, mkexpr(vA), mkU8(SHB_uimm4*8)),
30092 binop(Iop_ShrV128, mkexpr(vB), mkU8((16-SHB_uimm4)*8))) );
30093 return True;
30094 case 0x2D: { // vpermxor (Vector Permute and Exclusive-OR)
30095 IRTemp a_perm = newTemp(Ity_V128);
30096 IRTemp b_perm = newTemp(Ity_V128);
30097 IRTemp vrc_a = newTemp(Ity_V128);
30098 IRTemp vrc_b = newTemp(Ity_V128);
30100 DIP("vpermxor v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
30102 /* IBM index is 0:7, Change index value to index 7:0 */
30103 assign( vrc_b, binop( Iop_AndV128, mkexpr( vC ),
30104 unop( Iop_Dup8x16, mkU8( 0xF ) ) ) );
30105 assign( vrc_a, binop( Iop_ShrV128,
30106 binop( Iop_AndV128, mkexpr( vC ),
30107 unop( Iop_Dup8x16, mkU8( 0xF0 ) ) ),
30108 mkU8 ( 4 ) ) );
30109 assign( a_perm, binop( Iop_Perm8x16, mkexpr( vA ), mkexpr( vrc_a ) ) );
30110 assign( b_perm, binop( Iop_Perm8x16, mkexpr( vB ), mkexpr( vrc_b ) ) );
30111 putVReg( vD_addr, binop( Iop_XorV128,
30112 mkexpr( a_perm ), mkexpr( b_perm) ) );
30113 return True;
30116 case 0x3B: { // vpermr (Vector Permute Right-indexed)
30117 /* limited to two args for IR, so have to play games... */
30118 IRTemp a_perm = newTemp( Ity_V128 );
30119 IRTemp b_perm = newTemp( Ity_V128 );
30120 IRTemp mask = newTemp( Ity_V128 );
30121 IRTemp vC_andF = newTemp( Ity_V128 );
30122 IRTemp vC_adj = newTemp( Ity_V128 );
30124 DIP( "vpermr v%d,v%d,v%d,v%d\n",
30125 vD_addr, vA_addr, vB_addr, vC_addr);
30126 /* Limit the Perm8x16 steering values to 0 .. 15 as that is what
30127 IR specifies, and also to hide irrelevant bits from
30128 memcheck.
30131 assign( vC_adj,
30132 binop( Iop_Sub16x8,
30133 unop( Iop_Dup8x16, mkU8( 0x1F ) ),
30134 mkexpr( vC ) ) );
30135 assign( vC_andF,
30136 binop( Iop_AndV128, mkexpr( vC_adj),
30137 unop( Iop_Dup8x16, mkU8( 0xF ) ) ) );
30138 assign( a_perm,
30139 binop( Iop_Perm8x16, mkexpr( vA ), mkexpr( vC_andF ) ) );
30140 assign( b_perm,
30141 binop( Iop_Perm8x16, mkexpr( vB ), mkexpr( vC_andF ) ) );
30142 // mask[i8] = (vC[i8]_4 == 1) ? 0xFF : 0x0
30143 assign( mask, binop(Iop_SarN8x16,
30144 binop( Iop_ShlN8x16, mkexpr( vC_adj ),
30145 mkU8( 3 ) ), mkU8( 7 ) ) );
30146 // dst = (a & ~mask) | (b & mask)
30147 putVReg( vD_addr, binop( Iop_OrV128,
30148 binop( Iop_AndV128, mkexpr( a_perm ),
30149 unop( Iop_NotV128, mkexpr( mask ) ) ),
30150 binop( Iop_AndV128, mkexpr( b_perm ),
30151 mkexpr( mask ) ) ) );
30152 return True;
30155 default:
30156 break; // Fall through...
30159 opc2 = IFIELD( theInstr, 0, 11 );
30160 switch (opc2) {
30162 /* Merge */
30163 case 0x00C: // vmrghb (Merge High B, AV p195)
30164 DIP("vmrghb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30165 putVReg( vD_addr,
30166 binop(Iop_InterleaveHI8x16, mkexpr(vA), mkexpr(vB)) );
30167 break;
30169 case 0x04C: // vmrghh (Merge High HW, AV p196)
30170 DIP("vmrghh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30171 putVReg( vD_addr,
30172 binop(Iop_InterleaveHI16x8, mkexpr(vA), mkexpr(vB)) );
30173 break;
30175 case 0x08C: // vmrghw (Merge High W, AV p197)
30176 DIP("vmrghw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30177 putVReg( vD_addr,
30178 binop(Iop_InterleaveHI32x4, mkexpr(vA), mkexpr(vB)) );
30179 break;
30181 case 0x10C: // vmrglb (Merge Low B, AV p198)
30182 DIP("vmrglb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30183 putVReg( vD_addr,
30184 binop(Iop_InterleaveLO8x16, mkexpr(vA), mkexpr(vB)) );
30185 break;
30187 case 0x14C: // vmrglh (Merge Low HW, AV p199)
30188 DIP("vmrglh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30189 putVReg( vD_addr,
30190 binop(Iop_InterleaveLO16x8, mkexpr(vA), mkexpr(vB)) );
30191 break;
30193 case 0x18C: // vmrglw (Merge Low W, AV p200)
30194 DIP("vmrglw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30195 putVReg( vD_addr,
30196 binop(Iop_InterleaveLO32x4, mkexpr(vA), mkexpr(vB)) );
30197 break;
30199 /* Extract instructions */
30200 case 0x20D: // vextractub (Vector Extract Unsigned Byte)
30202 UChar uim = IFIELD( theInstr, 16, 4 );
30204 DIP("vextractub v%d,v%d,%d\n", vD_addr, vB_addr, uim);
30206 putVReg( vD_addr, binop( Iop_ShlV128,
30207 binop( Iop_AndV128,
30208 binop( Iop_ShrV128,
30209 mkexpr( vB ),
30210 unop( Iop_32to8,
30211 binop( Iop_Mul32,
30212 mkU32( 8 ),
30213 mkU32( 31 - uim ) ) ) ),
30214 binop( Iop_64HLtoV128,
30215 mkU64( 0x0ULL ),
30216 mkU64( 0xFFULL ) ) ),
30217 mkU8( 64 ) ) );
30219 break;
30221 case 0x24D: // vextractuh (Vector Extract Unsigned Halfword)
30223 UChar uim = IFIELD( theInstr, 16, 4 );
30225 DIP("vextractuh v%d,v%d,%d\n", vD_addr, vB_addr, uim);
30227 putVReg( vD_addr, binop( Iop_ShlV128,
30228 binop( Iop_AndV128,
30229 binop( Iop_ShrV128,
30230 mkexpr( vB ),
30231 unop( Iop_32to8,
30232 binop( Iop_Mul32,
30233 mkU32( 8 ),
30234 mkU32( 30 - uim ) ) ) ),
30235 binop( Iop_64HLtoV128,
30236 mkU64( 0x0ULL ),
30237 mkU64( 0xFFFFULL ) ) ),
30238 mkU8( 64 ) ) );
30240 break;
30242 case 0x28D: // vextractuw (Vector Extract Unsigned Word)
30244 UChar uim = IFIELD( theInstr, 16, 4 );
30246 DIP("vextractuw v%d,v%d,%d\n", vD_addr, vB_addr, uim);
30248 putVReg( vD_addr,
30249 binop( Iop_ShlV128,
30250 binop( Iop_AndV128,
30251 binop( Iop_ShrV128,
30252 mkexpr( vB ),
30253 unop( Iop_32to8,
30254 binop( Iop_Mul32,
30255 mkU32( 8 ),
30256 mkU32( 28 - uim ) ) ) ),
30257 binop( Iop_64HLtoV128,
30258 mkU64( 0x0ULL ),
30259 mkU64( 0xFFFFFFFFULL ) ) ),
30260 mkU8( 64 ) ) );
30262 break;
30264 case 0x2CD: // vextractd (Vector Extract Double Word)
30266 UChar uim = IFIELD( theInstr, 16, 4 );
30268 DIP("vextractd v%d,v%d,%d\n", vD_addr, vB_addr, uim);
30270 putVReg( vD_addr,
30271 binop( Iop_ShlV128,
30272 binop( Iop_AndV128,
30273 binop( Iop_ShrV128,
30274 mkexpr( vB ),
30275 unop( Iop_32to8,
30276 binop( Iop_Mul32,
30277 mkU32( 8 ),
30278 mkU32( 24 - uim ) ) ) ),
30279 binop( Iop_64HLtoV128,
30280 mkU64( 0x0ULL ),
30281 mkU64( 0xFFFFFFFFFFFFFFFFULL ) ) ),
30282 mkU8( 64 ) ) );
30284 break;
30286 /* Insert instructions */
30287 case 0x30D: // vinsertb (Vector insert Unsigned Byte)
30289 UChar uim = IFIELD( theInstr, 16, 4 );
30290 IRTemp shift = newTemp( Ity_I8 );
30291 IRTemp vD = newTemp( Ity_V128 );
30293 DIP("vinsertb v%d,v%d,%d\n", vD_addr, vB_addr, uim);
30295 assign( vD, getVReg( vD_addr ) );
30297 assign( shift, unop( Iop_32to8,
30298 binop( Iop_Mul32,
30299 mkU32( 8 ),
30300 mkU32( 15 - ( uim + 0 ) ) ) ) );
30302 putVReg( vD_addr,
30303 binop( Iop_OrV128,
30304 binop( Iop_ShlV128,
30305 binop( Iop_AndV128,
30306 binop( Iop_ShrV128,
30307 mkexpr( vB ),
30308 mkU8( ( 15 - 7 )*8 ) ),
30309 binop( Iop_64HLtoV128,
30310 mkU64( 0x0ULL ),
30311 mkU64( 0xFFULL ) ) ),
30312 mkexpr( shift ) ),
30313 binop( Iop_AndV128,
30314 unop( Iop_NotV128,
30315 binop( Iop_ShlV128,
30316 binop( Iop_64HLtoV128,
30317 mkU64( 0x0ULL ),
30318 mkU64( 0xFFULL ) ),
30319 mkexpr( shift ) ) ),
30320 mkexpr( vD ) ) ) );
30322 break;
30324 case 0x34D: // vinserth (Vector insert Halfword)
30326 UChar uim = IFIELD( theInstr, 16, 4 );
30327 IRTemp shift = newTemp( Ity_I8 );
30328 IRTemp vD = newTemp( Ity_V128 );
30330 DIP("vinserth v%d,v%d,%d\n", vD_addr, vB_addr, uim);
30332 assign( vD, getVReg( vD_addr ) );
30334 assign( shift, unop( Iop_32to8,
30335 binop( Iop_Mul32,
30336 mkU32( 8 ),
30337 mkU32( 15 - ( uim + 1 ) ) ) ) );
30339 putVReg( vD_addr,
30340 binop( Iop_OrV128,
30341 binop( Iop_ShlV128,
30342 binop( Iop_AndV128,
30343 binop( Iop_ShrV128,
30344 mkexpr( vB ),
30345 mkU8( (7 - 3)*16 ) ),
30346 binop( Iop_64HLtoV128,
30347 mkU64( 0x0ULL ),
30348 mkU64( 0xFFFFULL ) ) ),
30349 mkexpr( shift ) ),
30350 binop( Iop_AndV128,
30351 unop( Iop_NotV128,
30352 binop( Iop_ShlV128,
30353 binop( Iop_64HLtoV128,
30354 mkU64( 0x0ULL ),
30355 mkU64( 0xFFFFULL ) ),
30356 mkexpr( shift ) ) ),
30357 mkexpr( vD ) ) ) );
30359 break;
30361 case 0x38D: // vinsertw (Vector insert Word)
30363 UChar uim = IFIELD( theInstr, 16, 4 );
30364 IRTemp shift = newTemp( Ity_I8 );
30365 IRTemp vD = newTemp( Ity_V128 );
30367 DIP("vinsertw v%d,v%d,%d\n", vD_addr, vB_addr, uim);
30369 assign( vD, getVReg( vD_addr ) );
30371 assign( shift, unop( Iop_32to8,
30372 binop( Iop_Mul32,
30373 mkU32( 8 ),
30374 mkU32( 15 - ( uim + 3 ) ) ) ) );
30376 putVReg( vD_addr,
30377 binop( Iop_OrV128,
30378 binop( Iop_ShlV128,
30379 binop( Iop_AndV128,
30380 binop( Iop_ShrV128,
30381 mkexpr( vB ),
30382 mkU8( (3 - 1) * 32 ) ),
30383 binop( Iop_64HLtoV128,
30384 mkU64( 0x0ULL ),
30385 mkU64( 0xFFFFFFFFULL ) ) ),
30386 mkexpr( shift ) ),
30387 binop( Iop_AndV128,
30388 unop( Iop_NotV128,
30389 binop( Iop_ShlV128,
30390 binop( Iop_64HLtoV128,
30391 mkU64( 0x0ULL ),
30392 mkU64( 0xFFFFFFFFULL ) ),
30393 mkexpr( shift ) ) ),
30394 mkexpr( vD ) ) ) );
30396 break;
30398 case 0x3CD: // vinsertd (Vector insert Doubleword)
30400 UChar uim = IFIELD( theInstr, 16, 4 );
30401 IRTemp shift = newTemp( Ity_I8 );
30402 IRTemp vD = newTemp( Ity_V128 );
30404 DIP("vinsertd v%d,v%d,%d\n", vD_addr, vB_addr, uim);
30406 assign( vD, getVReg( vD_addr ) );
30408 assign( shift, unop( Iop_32to8,
30409 binop( Iop_Mul32,
30410 mkU32( 8 ),
30411 mkU32( 15 - ( uim + 7 ) ) ) ) );
30413 putVReg( vD_addr,
30414 binop( Iop_OrV128,
30415 binop( Iop_ShlV128,
30416 binop( Iop_AndV128,
30417 binop( Iop_ShrV128,
30418 mkexpr( vB ),
30419 mkU8( ( 1 - 0 ) * 64 ) ),
30420 binop( Iop_64HLtoV128,
30421 mkU64( 0x0ULL ),
30422 mkU64( 0xFFFFFFFFFFFFFFFFULL ) ) ),
30423 mkexpr( shift ) ),
30424 binop( Iop_AndV128,
30425 unop( Iop_NotV128,
30426 binop( Iop_ShlV128,
30427 binop( Iop_64HLtoV128,
30428 mkU64( 0x0ULL ),
30429 mkU64( 0xFFFFFFFFFFFFFFFFULL ) ),
30430 mkexpr( shift ) ) ),
30431 mkexpr( vD ) ) ) );
30433 break;
30435 /* Splat */
30436 case 0x20C: { // vspltb (Splat Byte, AV p245)
30437 /* vD = Dup8x16( vB[UIMM_5] ) */
30438 UChar sh_uimm = (15 - (UIMM_5 & 15)) * 8;
30439 DIP("vspltb v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
30440 putVReg( vD_addr, unop(Iop_Dup8x16,
30441 unop(Iop_32to8, unop(Iop_V128to32,
30442 binop(Iop_ShrV128, mkexpr(vB), mkU8(sh_uimm))))) );
30443 break;
30445 case 0x24C: { // vsplth (Splat Half Word, AV p246)
30446 UChar sh_uimm = (7 - (UIMM_5 & 7)) * 16;
30447 DIP("vsplth v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
30448 putVReg( vD_addr, unop(Iop_Dup16x8,
30449 unop(Iop_32to16, unop(Iop_V128to32,
30450 binop(Iop_ShrV128, mkexpr(vB), mkU8(sh_uimm))))) );
30451 break;
30453 case 0x28C: { // vspltw (Splat Word, AV p250)
30454 /* vD = Dup32x4( vB[UIMM_5] ) */
30455 UChar sh_uimm = (3 - (UIMM_5 & 3)) * 32;
30456 DIP("vspltw v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
30457 putVReg( vD_addr, unop(Iop_Dup32x4,
30458 unop(Iop_V128to32,
30459 binop(Iop_ShrV128, mkexpr(vB), mkU8(sh_uimm)))) );
30460 break;
30462 case 0x30C: // vspltisb (Splat Immediate Signed B, AV p247)
30463 DIP("vspltisb v%d,%d\n", vD_addr, (Char)SIMM_8);
30464 putVReg( vD_addr, unop(Iop_Dup8x16, mkU8(SIMM_8)) );
30465 break;
30467 case 0x34C: // vspltish (Splat Immediate Signed HW, AV p248)
30468 DIP("vspltish v%d,%d\n", vD_addr, (Char)SIMM_8);
30469 putVReg( vD_addr,
30470 unop(Iop_Dup16x8, mkU16(extend_s_8to32(SIMM_8))) );
30471 break;
30473 case 0x38C: // vspltisw (Splat Immediate Signed W, AV p249)
30474 DIP("vspltisw v%d,%d\n", vD_addr, (Char)SIMM_8);
30475 putVReg( vD_addr,
30476 unop(Iop_Dup32x4, mkU32(extend_s_8to32(SIMM_8))) );
30477 break;
30479 case 0x68C: // vmrgow (Merge Odd Word)
30480 DIP("vmrgow v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30481 /* VD[0] <- VA[1]
30482 VD[1] <- VB[1]
30483 VD[2] <- VA[3]
30484 VD[3] <- VB[3]
30486 putVReg( vD_addr,
30487 binop(Iop_CatOddLanes32x4, mkexpr(vA), mkexpr(vB) ) );
30488 break;
30490 case 0x78C: // vmrgew (Merge Even Word)
30491 DIP("vmrgew v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30492 /* VD[0] <- VA[0]
30493 VD[1] <- VB[0]
30494 VD[2] <- VA[2]
30495 VD[3] <- VB[2]
30497 putVReg( vD_addr,
30498 binop(Iop_CatEvenLanes32x4, mkexpr(vA), mkexpr(vB) ) );
30499 break;
30501 default:
30502 vex_printf("dis_av_permute(ppc)(opc2)\n");
30503 return False;
30505 return True;
30509 Vector Integer Absolute Difference
30511 static Bool dis_abs_diff ( UInt prefix, UInt theInstr )
30513 /* VX-Form */
30514 UChar opc1 = ifieldOPC( theInstr );
30515 UChar vT_addr = ifieldRegDS( theInstr );
30516 UChar vA_addr = ifieldRegA( theInstr );
30517 UChar vB_addr = ifieldRegB( theInstr );
30518 UInt opc2 = IFIELD( theInstr, 0, 11 );
30520 IRTemp vA = newTemp( Ity_V128 );
30521 IRTemp vB = newTemp( Ity_V128 );
30522 IRTemp vT = newTemp( Ity_V128 );
30524 IRTemp vAminusB = newTemp( Ity_V128 );
30525 IRTemp vBminusA = newTemp( Ity_V128 );
30526 IRTemp vMask = newTemp( Ity_V128 );
30528 /* There is no prefixed version of these instructions. */
30529 PREFIX_CHECK
30531 assign( vA, getVReg( vA_addr ) );
30532 assign( vB, getVReg( vB_addr ) );
30534 if ( opc1 != 0x4 ) {
30535 vex_printf("dis_abs_diff(ppc)(instr)\n");
30536 return False;
30539 switch ( opc2 ) {
30540 case 0x403: // vabsdub Vector absolute difference Unsigned Byte
30542 DIP("vabsdub v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
30544 /* Determine which of the corresponding bytes is larger,
30545 * create mask with 1's in byte positions where vA[i] > vB[i]
30547 assign( vMask, binop( Iop_CmpGT8Ux16, mkexpr( vA ), mkexpr( vB ) ) );
30549 assign( vAminusB,
30550 binop( Iop_AndV128,
30551 binop( Iop_Sub8x16, mkexpr( vA ), mkexpr( vB ) ),
30552 mkexpr( vMask ) ) );
30554 assign( vBminusA,
30555 binop( Iop_AndV128,
30556 binop( Iop_Sub8x16, mkexpr( vB ), mkexpr( vA ) ),
30557 unop ( Iop_NotV128, mkexpr( vMask ) ) ) );
30559 assign( vT, binop( Iop_OrV128,
30560 mkexpr( vAminusB ),
30561 mkexpr( vBminusA ) ) );
30563 break;
30565 case 0x443: // vabsduh Vector absolute difference Unsigned Halfword
30567 DIP("vabsduh v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
30569 /* Determine which of the corresponding halfwords is larger,
30570 * create mask with 1's in halfword positions where vA[i] > vB[i]
30572 assign( vMask, binop( Iop_CmpGT16Ux8, mkexpr( vA ), mkexpr( vB ) ) );
30574 assign( vAminusB,
30575 binop( Iop_AndV128,
30576 binop( Iop_Sub16x8, mkexpr( vA ), mkexpr( vB ) ),
30577 mkexpr( vMask ) ) );
30579 assign( vBminusA,
30580 binop( Iop_AndV128,
30581 binop( Iop_Sub16x8, mkexpr( vB ), mkexpr( vA ) ),
30582 unop ( Iop_NotV128, mkexpr( vMask ) ) ) );
30584 assign( vT, binop( Iop_OrV128,
30585 mkexpr( vAminusB ),
30586 mkexpr( vBminusA ) ) );
30588 break;
30590 case 0x483: // vabsduw Vector absolute difference Unsigned Word
30592 DIP("vabsduw v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
30594 /* Determine which of the corresponding words is larger,
30595 * create mask with 1's in word positions where vA[i] > vB[i]
30597 assign( vMask, binop( Iop_CmpGT32Ux4, mkexpr( vA ), mkexpr( vB ) ) );
30599 assign( vAminusB,
30600 binop( Iop_AndV128,
30601 binop( Iop_Sub32x4, mkexpr( vA ), mkexpr( vB ) ),
30602 mkexpr( vMask ) ) );
30604 assign( vBminusA,
30605 binop( Iop_AndV128,
30606 binop( Iop_Sub32x4, mkexpr( vB ), mkexpr( vA ) ),
30607 unop ( Iop_NotV128, mkexpr( vMask ) ) ) );
30609 assign( vT, binop( Iop_OrV128,
30610 mkexpr( vAminusB ),
30611 mkexpr( vBminusA ) ) );
30613 break;
30615 default:
30616 return False;
30619 putVReg( vT_addr, mkexpr( vT ) );
30621 return True;
30625 AltiVec 128 bit integer multiply by 10 Instructions
30627 static Bool dis_av_mult10 ( UInt prefix, UInt theInstr )
30629 /* VX-Form */
30630 UChar opc1 = ifieldOPC(theInstr);
30631 UChar vT_addr = ifieldRegDS(theInstr);
30632 UChar vA_addr = ifieldRegA(theInstr);
30633 UChar vB_addr = ifieldRegB(theInstr);
30634 UInt opc2 = IFIELD( theInstr, 0, 11 );
30636 IRTemp vA = newTemp(Ity_V128);
30638 /* There is no prefixed version of these instructions. */
30639 PREFIX_CHECK
30641 assign( vA, getVReg(vA_addr));
30643 if (opc1 != 0x4) {
30644 vex_printf("dis_av_mult10(ppc)(instr)\n");
30645 return False;
30647 switch (opc2) {
30648 case 0x001: { // vmul10cuq (Vector Multiply-by-10 and write carry
30649 DIP("vmul10cuq v%d,v%d\n", vT_addr, vA_addr);
30650 putVReg( vT_addr,
30651 unop( Iop_MulI128by10Carry, mkexpr( vA ) ) );
30652 break;
30654 case 0x041: { // vmul10uq (Vector Multiply-by-10 Extended and write carry
30655 // Unsigned Quadword VX form)
30656 IRTemp vB = newTemp(Ity_V128);
30657 assign( vB, getVReg(vB_addr));
30658 DIP("vmul10ecuq v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
30659 putVReg( vT_addr,
30660 binop( Iop_MulI128by10ECarry, mkexpr( vA ), mkexpr( vB ) ) );
30661 break;
30663 case 0x201: { // vmul10uq (Vector Multiply-by-10 Unsigned Quadword VX form)
30664 DIP("vmul10uq v%d,v%d\n", vT_addr, vA_addr);
30665 putVReg( vT_addr,
30666 unop( Iop_MulI128by10, mkexpr( vA ) ) );
30667 break;
30669 case 0x241: { // vmul10uq (Vector Multiply-by-10 Extended
30670 // Unsigned Quadword VX form)
30671 IRTemp vB = newTemp(Ity_V128);
30672 assign( vB, getVReg(vB_addr));
30673 DIP("vmul10euq v%d,v%d,v%d\n", vT_addr, vA_addr, vB_addr);
30674 putVReg( vT_addr,
30675 binop( Iop_MulI128by10E, mkexpr( vA ), mkexpr( vB ) ) );
30676 break;
30678 default:
30679 vex_printf("dis_av_mult10(ppc)(opc2)\n");
30680 return False;
30682 return True;
30686 AltiVec Pack/Unpack Instructions
30688 static Bool dis_av_pack ( UInt prefix, UInt theInstr )
30690 /* VX-Form */
30691 UChar opc1 = ifieldOPC(theInstr);
30692 UChar vD_addr = ifieldRegDS(theInstr);
30693 UChar vA_addr = ifieldRegA(theInstr);
30694 UChar vB_addr = ifieldRegB(theInstr);
30695 UInt opc2 = IFIELD( theInstr, 0, 11 );
30697 IRTemp signs = IRTemp_INVALID;
30698 IRTemp zeros = IRTemp_INVALID;
30699 IRTemp vA = newTemp(Ity_V128);
30700 IRTemp vB = newTemp(Ity_V128);
30702 /* There is no prefixed version of these instructions. */
30703 PREFIX_CHECK
30705 assign( vA, getVReg(vA_addr));
30706 assign( vB, getVReg(vB_addr));
30708 if (opc1 != 0x4) {
30709 vex_printf("dis_av_pack(ppc)(instr)\n");
30710 return False;
30712 switch (opc2) {
30713 /* Packing */
30714 case 0x00E: // vpkuhum (Pack Unsigned HW Unsigned Modulo, AV p224)
30715 DIP("vpkuhum v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30716 putVReg( vD_addr,
30717 binop(Iop_NarrowBin16to8x16, mkexpr(vA), mkexpr(vB)) );
30718 return True;
30720 case 0x04E: // vpkuwum (Pack Unsigned W Unsigned Modulo, AV p226)
30721 DIP("vpkuwum v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30722 putVReg( vD_addr,
30723 binop(Iop_NarrowBin32to16x8, mkexpr(vA), mkexpr(vB)) );
30724 return True;
30726 case 0x08E: // vpkuhus (Pack Unsigned HW Unsigned Saturate, AV p225)
30727 DIP("vpkuhus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30728 putVReg( vD_addr,
30729 binop(Iop_QNarrowBin16Uto8Ux16, mkexpr(vA), mkexpr(vB)) );
30730 // TODO: set VSCR[SAT]
30731 return True;
30733 case 0x0CE: // vpkuwus (Pack Unsigned W Unsigned Saturate, AV p227)
30734 DIP("vpkuwus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30735 putVReg( vD_addr,
30736 binop(Iop_QNarrowBin32Uto16Ux8, mkexpr(vA), mkexpr(vB)) );
30737 // TODO: set VSCR[SAT]
30738 return True;
30740 case 0x10E: { // vpkshus (Pack Signed HW Unsigned Saturate, AV p221)
30741 // This insn does a signed->unsigned saturating conversion.
30742 // Conversion done here, then uses unsigned->unsigned vpk insn:
30743 // => UnsignedSaturatingNarrow( x & ~ (x >>s 15) )
30744 IRTemp vA_tmp = newTemp(Ity_V128);
30745 IRTemp vB_tmp = newTemp(Ity_V128);
30746 DIP("vpkshus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30747 assign( vA_tmp, binop(Iop_AndV128, mkexpr(vA),
30748 unop(Iop_NotV128,
30749 binop(Iop_SarN16x8,
30750 mkexpr(vA), mkU8(15)))) );
30751 assign( vB_tmp, binop(Iop_AndV128, mkexpr(vB),
30752 unop(Iop_NotV128,
30753 binop(Iop_SarN16x8,
30754 mkexpr(vB), mkU8(15)))) );
30755 putVReg( vD_addr, binop(Iop_QNarrowBin16Uto8Ux16,
30756 mkexpr(vA_tmp), mkexpr(vB_tmp)) );
30757 // TODO: set VSCR[SAT]
30758 return True;
30760 case 0x14E: { // vpkswus (Pack Signed W Unsigned Saturate, AV p223)
30761 // This insn does a signed->unsigned saturating conversion.
30762 // Conversion done here, then uses unsigned->unsigned vpk insn:
30763 // => UnsignedSaturatingNarrow( x & ~ (x >>s 31) )
30764 IRTemp vA_tmp = newTemp(Ity_V128);
30765 IRTemp vB_tmp = newTemp(Ity_V128);
30766 DIP("vpkswus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30767 assign( vA_tmp, binop(Iop_AndV128, mkexpr(vA),
30768 unop(Iop_NotV128,
30769 binop(Iop_SarN32x4,
30770 mkexpr(vA), mkU8(31)))) );
30771 assign( vB_tmp, binop(Iop_AndV128, mkexpr(vB),
30772 unop(Iop_NotV128,
30773 binop(Iop_SarN32x4,
30774 mkexpr(vB), mkU8(31)))) );
30775 putVReg( vD_addr, binop(Iop_QNarrowBin32Uto16Ux8,
30776 mkexpr(vA_tmp), mkexpr(vB_tmp)) );
30777 // TODO: set VSCR[SAT]
30778 return True;
30780 case 0x18E: // vpkshss (Pack Signed HW Signed Saturate, AV p220)
30781 DIP("vpkshss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30782 putVReg( vD_addr,
30783 binop(Iop_QNarrowBin16Sto8Sx16, mkexpr(vA), mkexpr(vB)) );
30784 // TODO: set VSCR[SAT]
30785 return True;
30787 case 0x1CE: // vpkswss (Pack Signed W Signed Saturate, AV p222)
30788 DIP("vpkswss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30789 putVReg( vD_addr,
30790 binop(Iop_QNarrowBin32Sto16Sx8, mkexpr(vA), mkexpr(vB)) );
30791 // TODO: set VSCR[SAT]
30792 return True;
30794 case 0x30E: { // vpkpx (Pack Pixel, AV p219)
30795 /* CAB: Worth a new primop? */
30796 /* Using shifts to compact pixel elements, then packing them */
30797 IRTemp a1 = newTemp(Ity_V128);
30798 IRTemp a2 = newTemp(Ity_V128);
30799 IRTemp a3 = newTemp(Ity_V128);
30800 IRTemp a_tmp = newTemp(Ity_V128);
30801 IRTemp b1 = newTemp(Ity_V128);
30802 IRTemp b2 = newTemp(Ity_V128);
30803 IRTemp b3 = newTemp(Ity_V128);
30804 IRTemp b_tmp = newTemp(Ity_V128);
30805 DIP("vpkpx v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30806 assign( a1, binop(Iop_ShlN16x8,
30807 binop(Iop_ShrN32x4, mkexpr(vA), mkU8(19)),
30808 mkU8(10)) );
30809 assign( a2, binop(Iop_ShlN16x8,
30810 binop(Iop_ShrN16x8, mkexpr(vA), mkU8(11)),
30811 mkU8(5)) );
30812 assign( a3, binop(Iop_ShrN16x8,
30813 binop(Iop_ShlN16x8, mkexpr(vA), mkU8(8)),
30814 mkU8(11)) );
30815 assign( a_tmp, binop(Iop_OrV128, mkexpr(a1),
30816 binop(Iop_OrV128, mkexpr(a2), mkexpr(a3))) );
30818 assign( b1, binop(Iop_ShlN16x8,
30819 binop(Iop_ShrN32x4, mkexpr(vB), mkU8(19)),
30820 mkU8(10)) );
30821 assign( b2, binop(Iop_ShlN16x8,
30822 binop(Iop_ShrN16x8, mkexpr(vB), mkU8(11)),
30823 mkU8(5)) );
30824 assign( b3, binop(Iop_ShrN16x8,
30825 binop(Iop_ShlN16x8, mkexpr(vB), mkU8(8)),
30826 mkU8(11)) );
30827 assign( b_tmp, binop(Iop_OrV128, mkexpr(b1),
30828 binop(Iop_OrV128, mkexpr(b2), mkexpr(b3))) );
30830 putVReg( vD_addr, binop(Iop_NarrowBin32to16x8,
30831 mkexpr(a_tmp), mkexpr(b_tmp)) );
30832 return True;
30835 case 0x44E: // vpkudum (Pack Unsigned Double Word Unsigned Modulo)
30836 DIP("vpkudum v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30837 putVReg( vD_addr,
30838 binop(Iop_NarrowBin64to32x4, mkexpr(vA), mkexpr(vB)) );
30839 return True;
30841 case 0x4CE: // vpkudus (Pack Unsigned Double Word Unsigned Saturate)
30842 DIP("vpkudus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30843 putVReg( vD_addr,
30844 binop(Iop_QNarrowBin64Uto32Ux4, mkexpr(vA), mkexpr(vB)) );
30845 // TODO: set VSCR[SAT]
30846 return True;
30848 case 0x54E: { // vpksdus (Pack Signed Double Word Unsigned Saturate)
         // This insn does a doubleword signed->doubleword unsigned saturating conversion
30850 // Conversion done here, then uses unsigned->unsigned vpk insn:
30851 // => UnsignedSaturatingNarrow( x & ~ (x >>s 31) )
30852 // This is similar to the technique used for vpkswus, except done
30853 // with double word integers versus word integers.
30854 IRTemp vA_tmp = newTemp(Ity_V128);
30855 IRTemp vB_tmp = newTemp(Ity_V128);
30856 DIP("vpksdus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30857 assign( vA_tmp, binop(Iop_AndV128, mkexpr(vA),
30858 unop(Iop_NotV128,
30859 binop(Iop_SarN64x2,
30860 mkexpr(vA), mkU8(63)))) );
30861 assign( vB_tmp, binop(Iop_AndV128, mkexpr(vB),
30862 unop(Iop_NotV128,
30863 binop(Iop_SarN64x2,
30864 mkexpr(vB), mkU8(63)))) );
30865 putVReg( vD_addr, binop(Iop_QNarrowBin64Uto32Ux4,
30866 mkexpr(vA_tmp), mkexpr(vB_tmp)) );
30867 // TODO: set VSCR[SAT]
30868 return True;
30871 case 0x5CE: // vpksdss (Pack Signed double word Signed Saturate)
30872 DIP("vpksdss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
30873 putVReg( vD_addr,
30874 binop(Iop_QNarrowBin64Sto32Sx4, mkexpr(vA), mkexpr(vB)) );
30875 // TODO: set VSCR[SAT]
30876 return True;
30877 default:
30878 break; // Fall through...
30882 if (vA_addr != 0) {
30883 vex_printf("dis_av_pack(ppc)(vA_addr)\n");
30884 return False;
30887 signs = newTemp(Ity_V128);
30888 zeros = newTemp(Ity_V128);
30889 assign( zeros, unop(Iop_Dup32x4, mkU32(0)) );
30891 switch (opc2) {
30892 /* Unpacking */
30893 case 0x20E: { // vupkhsb (Unpack High Signed B, AV p277)
30894 DIP("vupkhsb v%d,v%d\n", vD_addr, vB_addr);
30895 assign( signs, binop(Iop_CmpGT8Sx16, mkexpr(zeros), mkexpr(vB)) );
30896 putVReg( vD_addr,
30897 binop(Iop_InterleaveHI8x16, mkexpr(signs), mkexpr(vB)) );
30898 break;
30900 case 0x24E: { // vupkhsh (Unpack High Signed HW, AV p278)
30901 DIP("vupkhsh v%d,v%d\n", vD_addr, vB_addr);
30902 assign( signs, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vB)) );
30903 putVReg( vD_addr,
30904 binop(Iop_InterleaveHI16x8, mkexpr(signs), mkexpr(vB)) );
30905 break;
30907 case 0x28E: { // vupklsb (Unpack Low Signed B, AV p280)
30908 DIP("vupklsb v%d,v%d\n", vD_addr, vB_addr);
30909 assign( signs, binop(Iop_CmpGT8Sx16, mkexpr(zeros), mkexpr(vB)) );
30910 putVReg( vD_addr,
30911 binop(Iop_InterleaveLO8x16, mkexpr(signs), mkexpr(vB)) );
30912 break;
30914 case 0x2CE: { // vupklsh (Unpack Low Signed HW, AV p281)
30915 DIP("vupklsh v%d,v%d\n", vD_addr, vB_addr);
30916 assign( signs, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vB)) );
30917 putVReg( vD_addr,
30918 binop(Iop_InterleaveLO16x8, mkexpr(signs), mkexpr(vB)) );
30919 break;
30921 case 0x34E: { // vupkhpx (Unpack High Pixel16, AV p276)
30922 /* CAB: Worth a new primop? */
30923 /* Using shifts to isolate pixel elements, then expanding them */
30924 IRTemp z0 = newTemp(Ity_V128);
30925 IRTemp z1 = newTemp(Ity_V128);
30926 IRTemp z01 = newTemp(Ity_V128);
30927 IRTemp z2 = newTemp(Ity_V128);
30928 IRTemp z3 = newTemp(Ity_V128);
30929 IRTemp z23 = newTemp(Ity_V128);
30930 DIP("vupkhpx v%d,v%d\n", vD_addr, vB_addr);
30931 assign( z0, binop(Iop_ShlN16x8,
30932 binop(Iop_SarN16x8, mkexpr(vB), mkU8(15)),
30933 mkU8(8)) );
30934 assign( z1, binop(Iop_ShrN16x8,
30935 binop(Iop_ShlN16x8, mkexpr(vB), mkU8(1)),
30936 mkU8(11)) );
30937 assign( z01, binop(Iop_InterleaveHI16x8, mkexpr(zeros),
30938 binop(Iop_OrV128, mkexpr(z0), mkexpr(z1))) );
30939 assign( z2, binop(Iop_ShrN16x8,
30940 binop(Iop_ShlN16x8,
30941 binop(Iop_ShrN16x8, mkexpr(vB), mkU8(5)),
30942 mkU8(11)),
30943 mkU8(3)) );
30944 assign( z3, binop(Iop_ShrN16x8,
30945 binop(Iop_ShlN16x8, mkexpr(vB), mkU8(11)),
30946 mkU8(11)) );
30947 assign( z23, binop(Iop_InterleaveHI16x8, mkexpr(zeros),
30948 binop(Iop_OrV128, mkexpr(z2), mkexpr(z3))) );
30949 putVReg( vD_addr,
30950 binop(Iop_OrV128,
30951 binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)),
30952 mkexpr(z23)) );
30953 break;
30955 case 0x3CE: { // vupklpx (Unpack Low Pixel16, AV p279)
30956 /* identical to vupkhpx, except interleaving LO */
30957 IRTemp z0 = newTemp(Ity_V128);
30958 IRTemp z1 = newTemp(Ity_V128);
30959 IRTemp z01 = newTemp(Ity_V128);
30960 IRTemp z2 = newTemp(Ity_V128);
30961 IRTemp z3 = newTemp(Ity_V128);
30962 IRTemp z23 = newTemp(Ity_V128);
30963 DIP("vupklpx v%d,v%d\n", vD_addr, vB_addr);
30964 assign( z0, binop(Iop_ShlN16x8,
30965 binop(Iop_SarN16x8, mkexpr(vB), mkU8(15)),
30966 mkU8(8)) );
30967 assign( z1, binop(Iop_ShrN16x8,
30968 binop(Iop_ShlN16x8, mkexpr(vB), mkU8(1)),
30969 mkU8(11)) );
30970 assign( z01, binop(Iop_InterleaveLO16x8, mkexpr(zeros),
30971 binop(Iop_OrV128, mkexpr(z0), mkexpr(z1))) );
30972 assign( z2, binop(Iop_ShrN16x8,
30973 binop(Iop_ShlN16x8,
30974 binop(Iop_ShrN16x8, mkexpr(vB), mkU8(5)),
30975 mkU8(11)),
30976 mkU8(3)) );
30977 assign( z3, binop(Iop_ShrN16x8,
30978 binop(Iop_ShlN16x8, mkexpr(vB), mkU8(11)),
30979 mkU8(11)) );
30980 assign( z23, binop(Iop_InterleaveLO16x8, mkexpr(zeros),
30981 binop(Iop_OrV128, mkexpr(z2), mkexpr(z3))) );
30982 putVReg( vD_addr,
30983 binop(Iop_OrV128,
30984 binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)),
30985 mkexpr(z23)) );
30986 break;
30988 case 0x64E: { // vupkhsw (Unpack High Signed Word)
30989 DIP("vupkhsw v%d,v%d\n", vD_addr, vB_addr);
30990 assign( signs, binop(Iop_CmpGT32Sx4, mkexpr(zeros), mkexpr(vB)) );
30991 putVReg( vD_addr,
30992 binop(Iop_InterleaveHI32x4, mkexpr(signs), mkexpr(vB)) );
30993 break;
30995 case 0x6CE: { // vupklsw (Unpack Low Signed Word)
30996 DIP("vupklsw v%d,v%d\n", vD_addr, vB_addr);
30997 assign( signs, binop(Iop_CmpGT32Sx4, mkexpr(zeros), mkexpr(vB)) );
30998 putVReg( vD_addr,
30999 binop(Iop_InterleaveLO32x4, mkexpr(signs), mkexpr(vB)) );
31000 break;
31002 default:
31003 vex_printf("dis_av_pack(ppc)(opc2)\n");
31004 return False;
31006 return True;
31010 AltiVec Cipher Instructions
31012 static Bool dis_av_cipher ( UInt prefix, UInt theInstr )
31014 /* VX-Form */
31015 UChar opc1 = ifieldOPC(theInstr);
31016 UChar vD_addr = ifieldRegDS(theInstr);
31017 UChar vA_addr = ifieldRegA(theInstr);
31018 UChar vB_addr = ifieldRegB(theInstr);
31019 UInt opc2 = IFIELD( theInstr, 0, 11 );
31021 IRTemp vA = newTemp(Ity_V128);
31022 IRTemp vB = newTemp(Ity_V128);
31024 /* There is no prefixed version of these instructions. */
31025 PREFIX_CHECK
31027 assign( vA, getVReg(vA_addr));
31028 assign( vB, getVReg(vB_addr));
31030 if (opc1 != 0x4) {
31031 vex_printf("dis_av_cipher(ppc)(instr)\n");
31032 return False;
31034 switch (opc2) {
31035 case 0x508: // vcipher (Vector Inverser Cipher)
31036 DIP("vcipher v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
31037 putVReg( vD_addr,
31038 binop(Iop_CipherV128, mkexpr(vA), mkexpr(vB)) );
31039 return True;
31041 case 0x509: // vcipherlast (Vector Inverser Cipher Last)
31042 DIP("vcipherlast v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
31043 putVReg( vD_addr,
31044 binop(Iop_CipherLV128, mkexpr(vA), mkexpr(vB)) );
31045 return True;
31047 case 0x548: // vncipher (Vector Inverser Cipher)
31048 DIP("vncipher v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
31049 putVReg( vD_addr,
31050 binop(Iop_NCipherV128, mkexpr(vA), mkexpr(vB)) );
31051 return True;
31053 case 0x549: // vncipherlast (Vector Inverser Cipher Last)
31054 DIP("vncipherlast v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
31055 putVReg( vD_addr,
31056 binop(Iop_NCipherLV128, mkexpr(vA), mkexpr(vB)) );
31057 return True;
31059 case 0x5C8: /* vsbox (Vector SubBytes, this does the cipher
31060 * subBytes transform)
31062 DIP("vsbox v%d,v%d\n", vD_addr, vA_addr);
31063 putVReg( vD_addr,
31064 unop(Iop_CipherSV128, mkexpr(vA) ) );
31065 return True;
31067 default:
31068 vex_printf("dis_av_cipher(ppc)(opc2)\n");
31069 return False;
31071 return True;
31075 AltiVec Secure Hash Instructions
31077 static Bool dis_av_hash ( UInt prefix, UInt theInstr )
31079 /* VX-Form */
31080 UChar opc1 = ifieldOPC(theInstr);
31081 UChar vRT_addr = ifieldRegDS(theInstr);
31082 UChar vRA_addr = ifieldRegA(theInstr);
31083 UChar s_field = IFIELD( theInstr, 11, 5 ); // st and six field
31084 UChar st = IFIELD( theInstr, 15, 1 ); // st
31085 UChar six = IFIELD( theInstr, 11, 4 ); // six field
31086 UInt opc2 = IFIELD( theInstr, 0, 11 );
31088 IRTemp vA = newTemp(Ity_V128);
31089 IRTemp dst = newTemp(Ity_V128);
31091 /* There is no prefixed version of these instructions. */
31092 PREFIX_CHECK
31094 assign( vA, getVReg(vRA_addr));
31096 if (opc1 != 0x4) {
31097 vex_printf("dis_av_hash(ppc)(instr)\n");
31098 return False;
31101 switch (opc2) {
31102 case 0x682: // vshasigmaw
31103 DIP("vshasigmaw v%d,v%d,%u,%u\n", vRT_addr, vRA_addr, st, six);
31104 assign( dst, binop( Iop_SHA256, mkexpr( vA ), mkU8( s_field) ) );
31105 putVReg( vRT_addr, mkexpr(dst));
31106 return True;
31108 case 0x6C2: // vshasigmad,
31109 DIP("vshasigmad v%d,v%d,%u,%u\n", vRT_addr, vRA_addr, st, six);
31110 putVReg( vRT_addr, binop( Iop_SHA512, mkexpr( vA ), mkU8( s_field) ) );
31111 return True;
31113 default:
31114 vex_printf("dis_av_hash(ppc)(opc2)\n");
31115 return False;
31117 return True;
31121 * This function is used by the Vector add/subtract [extended] modulo/carry
31122 * instructions.
31123 * - For the non-extended add instructions, the cin arg is set to zero.
31124 * - For the extended add instructions, cin is the integer value of
31125 * src3.bit[127].
31126 * - For the non-extended subtract instructions, src1 is added to the one's
31127 * complement of src2 + 1. We re-use the cin argument to hold the '1'
31128 * value for this operation.
31129 * - For the extended subtract instructions, cin is the integer value of src3.bit[127].
31131 static IRTemp _get_quad_modulo_or_carry(IRExpr * vecA, IRExpr * vecB,
31132 IRExpr * cin, Bool modulo)
31134 IRTemp _vecA_32 = IRTemp_INVALID;
31135 IRTemp _vecB_32 = IRTemp_INVALID;
31136 IRTemp res_32 = IRTemp_INVALID;
31137 IRTemp res_64 = IRTemp_INVALID;
31138 IRTemp result = IRTemp_INVALID;
31139 IRTemp tmp_result = IRTemp_INVALID;
31140 IRTemp carry = IRTemp_INVALID;
31141 Int i;
31142 IRExpr * _vecA_low64 = unop( Iop_V128to64, vecA );
31143 IRExpr * _vecB_low64 = unop( Iop_V128to64, vecB );
31144 IRExpr * _vecA_high64 = unop( Iop_V128HIto64, vecA );
31145 IRExpr * _vecB_high64 = unop( Iop_V128HIto64, vecB );
31147 carry = newTemp(Ity_I32);
31148 assign( carry, cin );
31150 for (i = 0; i < 4; i++) {
31151 _vecA_32 = newTemp(Ity_I32);
31152 _vecB_32 = newTemp(Ity_I32);
31153 res_32 = newTemp(Ity_I32);
31154 res_64 = newTemp(Ity_I64);
31156 switch (i) {
31157 case 0:
31158 assign(_vecA_32, unop( Iop_64to32, _vecA_low64 ) );
31159 assign(_vecB_32, unop( Iop_64to32, _vecB_low64 ) );
31160 break;
31161 case 1:
31162 assign(_vecA_32, unop( Iop_64HIto32, _vecA_low64 ) );
31163 assign(_vecB_32, unop( Iop_64HIto32, _vecB_low64 ) );
31164 break;
31165 case 2:
31166 assign(_vecA_32, unop( Iop_64to32, _vecA_high64 ) );
31167 assign(_vecB_32, unop( Iop_64to32, _vecB_high64 ) );
31168 break;
31169 case 3:
31170 assign(_vecA_32, unop( Iop_64HIto32, _vecA_high64 ) );
31171 assign(_vecB_32, unop( Iop_64HIto32, _vecB_high64 ) );
31172 break;
31175 assign( res_64, binop( Iop_Add64,
31176 binop ( Iop_Add64,
31177 binop( Iop_32HLto64,
31178 mkU32( 0 ),
31179 mkexpr(_vecA_32) ),
31180 binop( Iop_32HLto64,
31181 mkU32( 0 ),
31182 mkexpr(_vecB_32) ) ),
31183 binop( Iop_32HLto64,
31184 mkU32( 0 ),
31185 mkexpr( carry ) ) ) );
31187 /* Calculate the carry to the next higher 32 bits. */
31188 carry = newTemp(Ity_I32);
31189 assign(carry, unop( Iop_64HIto32, mkexpr( res_64 ) ) );
31191 /* result is the lower 32-bits */
31192 assign(res_32, unop( Iop_64to32, mkexpr( res_64 ) ) );
31194 if (modulo) {
31195 result = newTemp(Ity_V128);
31196 assign(result, binop( Iop_OrV128,
31197 (i == 0) ? binop( Iop_64HLtoV128,
31198 mkU64(0),
31199 mkU64(0) ) : mkexpr(tmp_result),
31200 binop( Iop_ShlV128,
31201 binop( Iop_64HLtoV128,
31202 mkU64(0),
31203 binop( Iop_32HLto64,
31204 mkU32(0),
31205 mkexpr(res_32) ) ),
31206 mkU8(i * 32) ) ) );
31207 tmp_result = newTemp(Ity_V128);
31208 assign(tmp_result, mkexpr(result));
31211 if (modulo)
31212 return result;
31213 else
31214 return carry;
31217 static IRExpr * copy_MSB_bit_fields ( IRExpr *src, UInt size,
31218 const VexAbiInfo* vbi )
31220 IRTemp src_hi = newTemp( Ity_I64 );
31221 IRTemp src_lo = newTemp( Ity_I64 );
31222 IRTemp ones_hi, ones_lo;
31223 ULong extract_mask_hi, extract_mask_lo;
31224 UInt num_bits;
31226 ones_hi = newTemp( Ity_I64 );
31227 ones_lo = newTemp( Ity_I64 );
31229 /* Create 64-bit extract mask, with a 1 in the MSB for each vector element
31230 size. */
31232 switch (size) {
31233 case 8:
31234 extract_mask_hi = 0x8080808080808080ULL;
31235 extract_mask_lo = 0x8080808080808080ULL;
31236 num_bits = 8;
31237 break;
31239 case 16:
31240 extract_mask_hi = 0x8000800080008000ULL;
31241 extract_mask_lo = 0x8000800080008000ULL;
31242 num_bits = 4;
31243 break;
31245 case 32:
31246 extract_mask_hi = 0x8000000080000000ULL;
31247 extract_mask_lo = 0x8000000080000000ULL;
31248 num_bits = 2;
31249 break;
31251 case 64:
31252 extract_mask_hi = 0x8000000000000000ULL;
31253 extract_mask_lo = 0x8000000000000000ULL;
31254 num_bits = 1;
31255 break;
31257 default:
31258 /* unsupported element size */
31259 vassert(0);
31262 assign( src_hi, unop( Iop_V128HIto64, src ) );
31263 assign( src_lo, unop( Iop_V128to64, src ) );
31265 assign( ones_hi, extract_bits_under_mask ( vbi, mkexpr( src_hi ),
31266 mkU64( extract_mask_hi ),
31267 mkU64( 1 ) ) );
31268 assign( ones_lo, extract_bits_under_mask ( vbi, mkexpr( src_lo ),
31269 mkU64( extract_mask_lo ),
31270 mkU64( 1 ) ) );
31272 /* Concatenate the extracted bits from ones_hi and ones_lo and
31273 store in GPR. Make sure the hi and low bits are left aligned per
31274 IBM numbering */
31275 return binop( Iop_Or64,
31276 binop( Iop_Shl64,
31277 mkexpr( ones_hi ),
31278 mkU8( num_bits ) ),
31279 mkexpr( ones_lo ) );
31282 static Bool dis_VSR_byte_mask ( UInt prefix, UInt theInstr,
31283 const VexAbiInfo* vbi )
31285 UChar RT_addr = ifieldRegDS(theInstr);
31286 UChar B_addr = ifieldRegB(theInstr);
31287 IRTemp src = newTemp(Ity_I64);
31289 UInt inst_select = IFIELD( theInstr, 16, 5);
31290 IRTemp vRT = newTemp( Ity_V128 );
31291 UInt size;
31292 ULong extract_mask, shift_by;
31295 /* The various instructions handled by this function use bits[11:15] to
31296 specify the instruction in addition to the opc1 (bits[0:5]) and opc2
31297 (bits21:31]). The exception is the mtvsrbmi which uses bits[11:15]
31298 for part of the immediate value. Assign mtvsrbmi a unique inst_select
31299 so it can be handled similarly to the other instructions. This helps
31300 simplify the code control flow. */
31301 if (IFIELD(theInstr, 1, 5) == 0xA) //mtvsrbmi
31302 inst_select = 0x9999;
31304 switch(inst_select) {
31305 case 0x0: // vexpandbm
31306 DIP("vexpandbm v%u,r%u\n", RT_addr, B_addr);
31308 extract_mask = 0x8080808080808080ULL;
31309 shift_by = 0x0707070707070707ULL;
31311 /* Use extract mask to select the MSB from each byte field. Then
31312 use the arithmetic right shift to replicate the MSB into each
31313 bit of the element field. */
31314 assign( vRT,
31315 binop( Iop_Sar8x16,
31316 binop( Iop_AndV128,
31317 getVReg(B_addr),
31318 binop( Iop_64HLtoV128, mkU64( extract_mask ),
31319 mkU64( extract_mask ) ) ),
31320 binop( Iop_64HLtoV128, mkU64( shift_by ),
31321 mkU64( shift_by ) ) ) );
31322 putVReg( RT_addr, mkexpr( vRT ) );
31323 return True;
31325 case 0x1: // vexpandhm
31326 DIP("vexpandhm v%u,r%u\n", RT_addr, B_addr);
31328 extract_mask = 0x8000800080008000ULL;
31329 shift_by = 0x000F000F000F000FULL;
31331 /* Use extract mask to select the MSB from each byte field. Then
31332 use the arithmetic right shift to replicate the MSB into each
31333 bit of the element field. */
31334 assign( vRT,
31335 binop( Iop_Sar16x8,
31336 binop( Iop_AndV128,
31337 getVReg(B_addr),
31338 binop( Iop_64HLtoV128, mkU64( extract_mask ),
31339 mkU64( extract_mask ) ) ),
31340 binop( Iop_64HLtoV128, mkU64( shift_by ),
31341 mkU64( shift_by ) ) ) );
31342 putVReg( RT_addr, mkexpr( vRT ) );
31343 return True;
31345 case 0x2: // vexpandwm
31346 DIP("vexpandwm v%u,r%u\n", RT_addr, B_addr);
31348 extract_mask = 0x8000000080000000ULL;
31349 shift_by = 0x0000001F0000001FULL;
31351 /* Use extract mask to select the MSB from each byte field. Then
31352 use the arithmetic right shift to replicate the MSB into each
31353 bit of the element field. */
31354 assign( vRT,
31355 binop( Iop_Sar32x4,
31356 binop( Iop_AndV128,
31357 getVReg(B_addr),
31358 binop( Iop_64HLtoV128, mkU64( extract_mask ),
31359 mkU64( extract_mask ) ) ),
31360 binop( Iop_64HLtoV128, mkU64( shift_by ),
31361 mkU64( shift_by ) ) ) );
31362 putVReg( RT_addr, mkexpr( vRT ) );
31363 return True;
31365 case 0x3: // vexpanddm
31366 DIP("vexpanddm v%u,r%u\n", RT_addr, B_addr);
31367 extract_mask = 0x8000000080000000ULL;
31368 shift_by = 0x000003F000003FULL;
31370 /* Use extract mask to select the MSB from each byte field. Then
31371 use the arithmetic right shift to replicate the MSB into each
31372 bit of the element field. */
31373 assign( vRT,
31374 binop( Iop_Sar64x2,
31375 binop( Iop_AndV128,
31376 getVReg(B_addr),
31377 binop( Iop_64HLtoV128, mkU64( extract_mask ),
31378 mkU64( extract_mask ) ) ),
31379 binop( Iop_64HLtoV128, mkU64( shift_by ),
31380 mkU64( shift_by ) ) ) );
31381 putVReg( RT_addr, mkexpr( vRT ) );
31382 return True;
31384 case 0x4: // vexpandqm
31386 IRTemp ones = newTemp( Ity_I64 );
31387 DIP("vexpandqm v%u,r%u\n", RT_addr, B_addr);
31389 assign( src, binop( Iop_Shr64,
31390 unop( Iop_V128HIto64, getVReg( B_addr) ),
31391 mkU8( 63 ) ) );
31392 assign( ones,
31393 unop( Iop_1Sto64,
31394 binop( Iop_CmpEQ64,
31395 mkU64( 1 ),
31396 binop( Iop_And64,
31397 mkU64( 0x1 ),
31398 mkexpr( src ) ) ) ) );
31399 putVReg( RT_addr, binop( Iop_64HLtoV128,
31400 mkexpr( ones ), mkexpr( ones ) ) );
31402 return True;
31404 case 0x8: // vextractbm
31405 DIP("vextractbm v%u,r%u\n", RT_addr, B_addr);
31406 size = 8;
31407 putIReg( RT_addr, copy_MSB_bit_fields( getVReg( B_addr ), size, vbi ) );
31408 return True;
31410 case 0x9: // vextracthm
31411 DIP("vextracthm v%u,r%u\n", RT_addr, B_addr);
31412 size = 16;
31413 putIReg( RT_addr, copy_MSB_bit_fields( getVReg( B_addr ), size, vbi ) );
31414 return True;
31416 case 0xA: // vextractwm
31417 DIP("vextractwm v%u,r%u\n", RT_addr, B_addr );
31418 size = 32;
31419 putIReg( RT_addr, copy_MSB_bit_fields( getVReg( B_addr ), size, vbi ) );
31420 return True;
31422 case 0xB: // vextractdm
31423 DIP("vextractdm v%u,r%u\n", RT_addr, B_addr);
31424 size = 64;
31425 putIReg( RT_addr, copy_MSB_bit_fields( getVReg( B_addr ), size, vbi ) );
31426 return True;
31428 case 0xC: // vextractqm
31429 DIP("vextractqm v%u,r%u\n", RT_addr, B_addr);
31430 putIReg( RT_addr, binop( Iop_Shr64,
31431 unop( Iop_V128HIto64, getVReg( B_addr ) ),
31432 mkU8 (63) ) );
31433 return True;
31435 case 0x10: // mtvsrbm
31437 IRTemp src_upper = newTemp(Ity_I32);
31438 IRTemp src_upper2 = newTemp(Ity_I32);
31439 IRTemp src_upper4 = newTemp(Ity_I32);
31440 IRTemp src_lower = newTemp(Ity_I32);
31441 IRTemp src_lower2 = newTemp(Ity_I32);
31442 IRTemp src_lower4 = newTemp(Ity_I32);
31443 IRTemp tmp128 = newTemp(Ity_V128);
31445 DIP("mtvsrbm v%u,r%u\n", RT_addr, B_addr);
31447 /* Copy the lower 8-bits of the 16 bit mask to lower 8 byte elements
31448 and copy the upper 8-bits of the 16 bit mask to the upper 8 byte
31449 elements. */
31450 assign( src_upper, binop( Iop_Shr32,
31451 binop( Iop_And32, mkU32( 0xFF00 ),
31452 unop ( Iop_64to32,
31453 getIReg( B_addr ) ) ),
31454 mkU8( 0x8 ) ) );
31455 assign( src_lower, binop( Iop_And32, mkU32( 0xFF ),
31456 unop ( Iop_64to32,
31457 getIReg( B_addr ) ) ) );
31459 assign( src_upper2,
31460 binop( Iop_Or32, mkexpr( src_upper ),
31461 binop( Iop_Shl32, mkexpr( src_upper ), mkU8( 8 ) ) ) );
31463 assign( src_upper4,
31464 binop( Iop_Or32, mkexpr( src_upper2 ),
31465 binop( Iop_Shl32, mkexpr( src_upper2 ),
31466 mkU8( 16 ) ) ) );
31468 assign( src_lower2,
31469 binop( Iop_Or32, mkexpr( src_lower ),
31470 binop( Iop_Shl32, mkexpr( src_lower ), mkU8( 8 ) ) ) );
31472 assign( src_lower4,
31473 binop( Iop_Or32, mkexpr( src_lower2 ),
31474 binop( Iop_Shl32, mkexpr( src_lower2 ),
31475 mkU8( 16 ) ) ) );
31477 /* Shift the bits in each element so the bit corresponding to the
31478 element position is in the MSB. */
31479 assign( tmp128, binop( Iop_Shl8x16,
31480 binop( Iop_64HLtoV128,
31481 binop( Iop_32HLto64,
31482 mkexpr( src_upper4 ),
31483 mkexpr( src_upper4 ) ),
31484 binop( Iop_32HLto64,
31485 mkexpr( src_lower4 ),
31486 mkexpr( src_lower4 ) ) ),
31487 binop( Iop_64HLtoV128,
31488 mkU64( 0x0001020304050607ULL ),
31489 mkU64( 0x0001020304050607ULL ) ) ) );
31490 /* Do an arithmetic shift to replicate MSB to all bit positions. */
31491 assign( vRT, binop( Iop_Sar8x16, mkexpr( tmp128 ),
31492 binop( Iop_64HLtoV128,
31493 mkU64( 0x0707070707070707ULL ),
31494 mkU64( 0x0707070707070707ULL ) ) ) );
31495 putVReg( RT_addr, mkexpr( vRT ) );
31496 return True;
31499 case 0x9999: // mtvsrbmi
31501 ULong immediate16, immediate16_hi, immediate16_lo;
31502 ULong immediate64_hi, immediate64_lo;
31503 IRTemp tmp128 = newTemp(Ity_V128);
31505 DIP("mtvsrbmi v%u,r%u\n", RT_addr, B_addr);
31507 /* Replicate the immediate fields b0|b1|b2 to all 16 vector
31508 elements */
31509 immediate16 = (IFIELD(theInstr, 0, 1) ) | //b2 bits[31]
31510 (IFIELD(theInstr, 16, 5) << 1) | //b1 bits[11:15]
31511 (IFIELD(theInstr, 6, 10) << 6 ); //b0 bits[16:25]
31513 immediate16_hi = (immediate16 >> 8) & 0xFF;
31514 immediate16_lo = immediate16 & 0xFF;
31516 immediate64_hi = ((immediate16_hi << 32) | (immediate16_hi << 56) |
31517 (immediate16_hi << 48) | (immediate16_hi << 40) |
31518 (immediate16_hi << 32) | (immediate16_hi << 16) |
31519 (immediate16_hi << 8) | immediate16_hi);
31521 immediate64_lo = ((immediate16_lo << 32) | (immediate16_lo << 56) |
31522 (immediate16_lo << 48) | (immediate16_lo << 40) |
31523 (immediate16_lo << 32) | (immediate16_lo << 16) |
31524 (immediate16_lo << 8) | immediate16_lo);
31526 /* Shift the bits in each element so the bit corresponding to the
31527 element position is in the MSB. */
31528 assign( tmp128, binop( Iop_Shl8x16,
31529 binop( Iop_64HLtoV128,
31530 mkU64( immediate64_hi ),
31531 mkU64( immediate64_lo ) ),
31532 binop( Iop_64HLtoV128,
31533 mkU64( 0x0001020304050607ULL ),
31534 mkU64( 0x0001020304050607ULL ) ) ) );
31535 /* Do an arithmetic shift to replicate MSB to all bit positions. */
31536 assign( vRT, binop( Iop_Sar8x16, mkexpr( tmp128 ),
31537 binop( Iop_64HLtoV128,
31538 mkU64( 0x0707070707070707ULL ),
31539 mkU64( 0x0707070707070707ULL ) ) ) );
31540 putVReg( RT_addr, mkexpr( vRT ) );
31541 return True;
31544 case 0x11: // mtvsrhm
31546 DIP("mtvsrhm v%u,r%u\n", RT_addr, B_addr);
31548 IRTemp src2 = newTemp(Ity_I32);
31549 IRTemp tmp128 = newTemp(Ity_V128);
31551 /* Copy the 16 bit mask to all eight of the 16-bit elements. */
31552 assign( src, binop( Iop_And32, mkU32( 0xFFFF ),
31553 unop ( Iop_64to32,
31554 getIReg( B_addr ) ) ) );
31556 assign( src2,
31557 binop( Iop_Or32, mkexpr( src ),
31558 binop( Iop_Shl32, mkexpr( src ), mkU8( 16 ) ) ) );
31560 /* Shift the bits in each element so the bit corresponding to the
31561 element position is in the MSB. */
31562 assign( tmp128, binop( Iop_Shl16x8,
31563 binop( Iop_64HLtoV128,
31564 binop( Iop_32HLto64,
31565 mkexpr( src2 ),
31566 mkexpr( src2 ) ),
31567 binop( Iop_32HLto64,
31568 mkexpr( src2 ),
31569 mkexpr( src2 ) ) ),
31570 binop( Iop_64HLtoV128,
31571 mkU64( 0x0000000100020003ULL ),
31572 mkU64( 0x0004000500060007ULL ) ) ) );
31573 /* Do an arithmetic shift to replicate MSB to all bit positions. */
31574 assign( vRT, binop( Iop_Sar16x8, mkexpr( tmp128 ),
31575 binop( Iop_64HLtoV128,
31576 mkU64( 0x000F000F000F000FULL ),
31577 mkU64( 0x000F000F000F000FULL ) ) ) );
31578 putVReg( RT_addr, mkexpr( vRT ) );
31579 return True;
31582 case 0x12: // mtvsrwm
31584 IRTemp tmp128 = newTemp(Ity_V128);
31585 IRTemp src32 = newTemp(Ity_I32);
31587 DIP("mtvsrwm v%u,r%u\n", RT_addr, B_addr);
31589 /* Copy the 32 bit mask to all four of the 32-bit elements. */
31590 assign( src32, binop( Iop_Shl32,
31591 unop ( Iop_64to32, getIReg( B_addr ) ),
31592 mkU8( 28 ) ) );
31594 /* Shift the bits in each element so the bit corresponding to the
31595 element position is in the MSB. */
31596 assign( tmp128, binop( Iop_Shl32x4,
31597 binop( Iop_64HLtoV128,
31598 binop( Iop_32HLto64,
31599 mkexpr( src32 ),
31600 mkexpr( src32 ) ),
31601 binop( Iop_32HLto64,
31602 mkexpr( src32 ),
31603 mkexpr( src32 ) ) ),
31604 binop( Iop_64HLtoV128,
31605 mkU64( 0x0000000000000001ULL ),
31606 mkU64( 0x0000000200000003ULL ) ) ) );
31608 /* Do an arithmetic shift to replicate MSB to all bit positions. */
31609 assign( vRT, binop( Iop_Sar32x4, mkexpr( tmp128 ),
31610 binop( Iop_64HLtoV128,
31611 mkU64( 0x0000001F0000001FULL ),
31612 mkU64( 0x0000001F0000001FULL ) ) ) );
31613 putVReg( RT_addr, mkexpr( vRT ) );
31614 return True;
31617 case 0x13: // mtvsrdm
31619 IRTemp tmp128 = newTemp(Ity_V128);
31621 DIP("mtvsrdm v%u,r%u\n", RT_addr, B_addr);
31623 /* Copy the 64 bit mask to both of the 64-bit elements. */
31624 assign( src, binop( Iop_Shl64,
31625 getIReg( B_addr ),
31626 mkU8( 62 ) ) );
31628 /* Shift the bits in each element so the bit corresponding to the
31629 element position is in the MSB. */
31630 assign( tmp128, binop( Iop_Shl64x2,
31631 binop( Iop_64HLtoV128,
31632 mkexpr( src ),
31633 mkexpr( src ) ),
31634 binop( Iop_64HLtoV128,
31635 mkU64( 0x0000000000000000ULL ),
31636 mkU64( 0x0000000000000001ULL ) ) ) );
31638 /* Do an arithmetic shift to replicate MSB to all bit positions. */
31639 assign( vRT, binop( Iop_Sar64x2, mkexpr( tmp128 ),
31640 binop( Iop_64HLtoV128,
31641 mkU64( 0x000000000000003FULL ),
31642 mkU64( 0x000000000000003FULL ) ) ) );
31643 putVReg( RT_addr, mkexpr( vRT ) );
31644 return True;
31647 case 0x14: // mtvsrqm
31649 IRTemp ones = newTemp( Ity_I64 );
31650 DIP("mtvsrqm v%u,r%u\n", RT_addr, B_addr);
31652 assign( src, getIReg( B_addr ) );
31653 assign( ones,
31654 unop( Iop_1Sto64,
31655 binop( Iop_CmpEQ64,
31656 mkU64( 1 ),
31657 binop( Iop_And64,
31658 mkU64( 0x1 ),
31659 mkexpr( src ) ) ) ) );
31660 putVReg( RT_addr, binop( Iop_64HLtoV128,
31661 mkexpr( ones ), mkexpr( ones ) ) );
31662 return True;
31665 case 0x18: // vcntmbb MP=0
31666 case 0x19: // vcntmbb MP=1
31668 UInt MP = IFIELD(theInstr, 16, 1); // bits[15] IBM numbering
31669 IRTemp bit_mask = newTemp(Ity_I64);
31670 IRTemp bit_cnt = newTemp(Ity_I64);
31672 DIP("vcntmbb r%u,v%u,%u\n", RT_addr, B_addr, MP);
31674 size = 8;
31675 assign( bit_mask, copy_MSB_bit_fields( getVReg( B_addr ), size,
31676 vbi ) );
31678 if ( MP == 1) {
31679 assign( bit_cnt, binop( Iop_Shl64,
31680 popcnt64( vbi, mkexpr( bit_mask ) ),
31681 mkU8( 56 ) ) );
31683 } else {
31684 /* Need to complement the bit mask then count the ones. */
31685 assign( bit_cnt,
31686 binop( Iop_Shl64,
31687 popcnt64( vbi,
31688 binop( Iop_And64,
31689 mkU64( 0xFFFF ),
31690 unop( Iop_Not64,
31691 mkexpr( bit_mask ) ) ) ),
31692 mkU8( 56 ) ) );
31694 putIReg( RT_addr, mkexpr( bit_cnt ) );
31695 return True;
31698 case 0x1A: // vcntmbh MP=0
31699 case 0x1B: // vcntmbh MP=1
31701 UInt MP = IFIELD(theInstr, 16, 1); // bits[15] IBM numbering
31702 IRTemp bit_mask = newTemp(Ity_I64);
31703 IRTemp bit_cnt = newTemp(Ity_I64);
31705 DIP("vcntmbh r%u,v%u,%u\n", RT_addr, B_addr, MP);
31707 size = 16;
31708 assign( bit_mask, copy_MSB_bit_fields( getVReg( B_addr ), size,
31709 vbi ) );
31711 /* Result is in IBM bits [0:6] */
31712 if ( MP == 1) {
31713 assign( bit_cnt,
31714 binop( Iop_Shl64,
31715 popcnt64( vbi, mkexpr( bit_mask ) ),
31716 mkU8( 57 ) ) );
31718 } else {
31719 /* Need to complement the bit mask then count the ones. */
31720 assign( bit_cnt,
31721 binop( Iop_Shl64,
31722 popcnt64( vbi,
31723 binop( Iop_And64,
31724 mkU64( 0xFF ),
31725 unop( Iop_Not64,
31726 mkexpr( bit_mask ) ) ) ),
31727 mkU8( 57 ) ) );
31729 putIReg( RT_addr, mkexpr( bit_cnt ) );
31730 return True;
31733 case 0x1C: // vcntmbw MP=0
31734 case 0x1D: // vcntmbw MP=1
31736 UInt MP = IFIELD(theInstr, 16, 1); // bits[15] IBM numbering
31737 IRTemp bit_mask = newTemp(Ity_I64);
31738 IRTemp bit_cnt = newTemp(Ity_I64);
31740 DIP("vcntmbw r%u,v%u,%u\n", RT_addr, B_addr, MP);
31742 size = 32;
31743 assign( bit_mask, copy_MSB_bit_fields( getVReg( B_addr ), size,
31744 vbi) );
31746 if ( MP == 1) {
31747 assign( bit_cnt,
31748 binop( Iop_Shl64,
31749 popcnt64( vbi, mkexpr( bit_mask ) ),
31750 mkU8( 58 ) ) );
31752 } else {
31753 /* Need to complement the bit mask then count the ones. */
31754 assign( bit_cnt,
31755 binop( Iop_Shl64,
31756 popcnt64( vbi,
31757 binop( Iop_And64,
31758 mkU64( 0xF ),
31759 unop( Iop_Not64,
31760 mkexpr( bit_mask ) ) ) ),
31761 mkU8( 58 ) ) );
31763 putIReg( RT_addr, mkexpr( bit_cnt ) );
31764 return True;
31767 case 0x1E: // vcntmbd MP=0
31768 case 0x1F: // vcntmbd MP=1
31770 UInt MP = IFIELD(theInstr, 16, 1); // bits[15] IBM numbering
31771 IRTemp bit_mask = newTemp(Ity_I64);
31772 IRTemp bit_cnt = newTemp(Ity_I64);
31774 DIP("vcntmbd r%u,v%u,%u\n", RT_addr, B_addr, MP);
31776 size = 64;
31777 assign( bit_mask, copy_MSB_bit_fields( getVReg( B_addr ), size,
31778 vbi ) );
31780 /* Result is in IBM bits [0:4] */
31781 if ( MP == 1) {
31782 assign( bit_cnt,
31783 binop( Iop_Shl64,
31784 popcnt64( vbi, mkexpr( bit_mask ) ),
31785 mkU8( 59 ) ) );
31787 } else {
31788 /* Need to complement the bit mask then count the ones. */
31789 assign( bit_cnt,
31790 binop( Iop_Shl64,
31791 popcnt64( vbi,
31792 binop( Iop_And64,
31793 mkU64( 0x3 ),
31794 unop( Iop_Not64,
31795 mkexpr( bit_mask ) ) ) ),
31796 mkU8( 59 ) ) );
31798 putIReg( RT_addr, mkexpr( bit_cnt ) );
31799 return True;
31802 default:
31803 /* Unknown opc2 value for the dis_VSR_byte_mask function. */
31804 return False;
31808 static Bool dis_av_quad ( UInt prefix, UInt theInstr, const VexAbiInfo* vbi )
31810 /* VX-Form: AltiVec quadword (128-bit) integer add/subtract, with the
modulo, carry and extended (carry-in) variants, plus vbpermq. */
31811 UChar opc1 = ifieldOPC(theInstr);
31812 UChar vRT_addr = ifieldRegDS(theInstr);
31813 UChar vRA_addr = ifieldRegA(theInstr);
31814 UChar vRB_addr = ifieldRegB(theInstr);
31815 UChar vRC_addr;
31816 UInt opc2 = IFIELD( theInstr, 0, 11 );
31818 IRTemp vA = newTemp(Ity_V128);
31819 IRTemp vB = newTemp(Ity_V128);
31820 IRTemp vC = IRTemp_INVALID;
31821 IRTemp cin = IRTemp_INVALID;
31823 /* There is no prefixed version of these instructions. */
31824 PREFIX_CHECK
31826 assign( vA, getVReg(vRA_addr));
31827 assign( vB, getVReg(vRB_addr));
31829 if (opc1 != 0x4) {
31830 vex_printf("dis_av_quad(ppc)(instr)\n");
31831 return False;
31834 switch (opc2) {
31835 case 0x140: // vaddcuq
31836 DIP("vaddcuq v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
31837 putVReg( vRT_addr, unop( Iop_32UtoV128,
31838 mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
31839 mkexpr(vB),
31840 mkU32(0), False) ) ) );
31841 return True;
31842 case 0x100: // vadduqm
31843 DIP("vadduqm v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
31844 putVReg( vRT_addr, mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
31845 mkexpr(vB), mkU32(0), True) ) );
31846 return True;
31847 case 0x540: // vsubcuq
31848 DIP("vsubcuq v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
31849 putVReg( vRT_addr,
31850 unop( Iop_32UtoV128,
31851 mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
31852 unop( Iop_NotV128,
31853 mkexpr(vB) ),
31854 mkU32(1), False) ) ) );
31855 return True;
31856 case 0x500: // vsubuqm
31857 DIP("vsubuqm v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
31858 putVReg( vRT_addr,
31859 mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
31860 unop( Iop_NotV128, mkexpr(vB) ),
31861 mkU32(1), True) ) );
31862 return True;
31863 case 0x054C: // vbpermq
31865 /* The original support was done with Iops but it caused the internal
31866 temporary storage to be exhausted if there were three or more vbpermq
31867 instructions in a row. Changed to a clean helper on 3/24/2022. For
31868 PowerPC 32-bit support, passing two 128-bit arguments doesn't work.
31869 Hence, the helper is called twice to calculate the result for the
31870 upper and lower 64-bit vB register indices. */
31871 IRTemp res_hi = newTemp( Ity_I64 );
31872 IRTemp res_0 = newTemp( Ity_I32 );
31873 IRTemp res_1 = newTemp( Ity_I32 );
31874 IRExpr * res_low = mkU64(0);
31875 assign( res_0,
31876 mkIRExprCCall( Ity_I32, 0 /*regparms*/,
31877 "vbpermq_clean_helper",
31878 fnptr_to_fnentry( vbi,
31879 &vbpermq_clean_helper ),
31880 mkIRExprVec_3( unop( Iop_V128HIto64,
31881 mkexpr(vA) ),
31882 unop( Iop_V128to64,
31883 mkexpr(vA) ),
31884 unop( Iop_V128HIto64,
31885 mkexpr(vB) ) ) ) );
31886 assign( res_1,
31887 mkIRExprCCall( Ity_I32, 0 /*regparms*/,
31888 "vbpermq_clean_helper",
31889 fnptr_to_fnentry( vbi,
31890 &vbpermq_clean_helper ),
31891 mkIRExprVec_3( unop( Iop_V128HIto64,
31892 mkexpr(vA) ),
31893 unop( Iop_V128to64,
31894 mkexpr(vA) ),
31895 unop( Iop_V128to64,
31896 mkexpr(vB) ) ) ) );
31897 assign( res_hi, binop( Iop_32HLto64,
31898 mkU32( 0 ),
31899 binop( Iop_Or32,
31900 binop( Iop_Shl32, mkexpr( res_0 ),
31901 mkU8( 8 ) ),
31902 mkexpr( res_1 ) ) ) );
31904 putVReg( vRT_addr, binop( Iop_64HLtoV128, mkexpr( res_hi ), res_low ) );
31905 return True;
31908 default:
31909 break; // fall through
31912 opc2 = IFIELD( theInstr, 0, 6 );
31913 vRC_addr = ifieldRegC(theInstr);
31914 vC = newTemp(Ity_V128);
31915 cin = newTemp(Ity_I32);
31916 switch (opc2) {
31917 case 0x3D: // vaddecuq
31918 assign( vC, getVReg(vRC_addr));
31919 DIP("vaddecuq v%d,v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr,
31920 vRC_addr);
31921 assign(cin, binop( Iop_And32,
31922 unop( Iop_64to32,
31923 unop( Iop_V128to64, mkexpr(vC) ) ),
31924 mkU32(1) ) );
31925 putVReg( vRT_addr,
31926 unop( Iop_32UtoV128,
31927 mkexpr(_get_quad_modulo_or_carry(mkexpr(vA), mkexpr(vB),
31928 mkexpr(cin),
31929 False) ) ) );
31930 return True;
31931 case 0x3C: // vaddeuqm
31932 assign( vC, getVReg(vRC_addr));
31933 DIP("vaddeuqm v%d,v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr,
31934 vRC_addr);
31935 assign(cin, binop( Iop_And32,
31936 unop( Iop_64to32,
31937 unop( Iop_V128to64, mkexpr(vC) ) ),
31938 mkU32(1) ) );
31939 putVReg( vRT_addr,
31940 mkexpr(_get_quad_modulo_or_carry(mkexpr(vA), mkexpr(vB),
31941 mkexpr(cin),
31942 True) ) );
31943 return True;
31944 case 0x3F: // vsubecuq
31945 assign( vC, getVReg(vRC_addr));
31946 DIP("vsubecuq v%d,v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr,
31947 vRC_addr);
31948 assign(cin, binop( Iop_And32,
31949 unop( Iop_64to32,
31950 unop( Iop_V128to64, mkexpr(vC) ) ),
31951 mkU32(1) ) );
31952 putVReg( vRT_addr,
31953 unop( Iop_32UtoV128,
31954 mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
31955 unop( Iop_NotV128,
31956 mkexpr(vB) ),
31957 mkexpr(cin),
31958 False) ) ) );
31959 return True;
31960 case 0x3E: // vsubeuqm
31961 assign( vC, getVReg(vRC_addr));
31962 DIP("vsubeuqm v%d,v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr,
31963 vRC_addr);
31964 assign(cin, binop( Iop_And32,
31965 unop( Iop_64to32,
31966 unop( Iop_V128to64, mkexpr(vC) ) ),
31967 mkU32(1) ) );
31968 putVReg( vRT_addr,
31969 mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
31970 unop( Iop_NotV128, mkexpr(vB) ),
31971 mkexpr(cin),
31972 True) ) );
31973 return True;
31974 default:
31975 vex_printf("dis_av_quad(ppc)(opc2.2)\n");
31976 return False;
31979 return True;
31982 static IRExpr * bcd_sign_code_adjust( UInt ps, IRExpr * tmp)
31984 /* The Iop_BCDAdd and Iop_BCDSub will result in the corresponding PowerPC
31985 * instruction being issued with ps = 0. If ps = 1, the sign code, which
31986 * is in the least significant four bits of the result, needs to be updated
31987 * per the ISA:
31989 * If PS=0, the sign code of the result is set to 0b1100.
31990 * If PS=1, the sign code of the result is set to 0b1111.
31992 * Note, the ps value is NOT being passed down to the instruction issue
31993 * because passing a constant via triop() breaks the vbit-test test. The
31994 * vbit-tester assumes it can set non-zero shadow bits for the triop()
31995 * arguments. Thus they have to be expressions, not constants.
31996 * Use 32-bit compare instructions as 64-bit compares are not supported
31997 * in 32-bit mode.
31999 IRTemp mask = newTemp(Ity_I64);
32000 IRExpr *rtn;
32002 if ( ps == 0 ) {
32003 /* sign code is correct, just return it. */
32004 rtn = tmp;
32006 } else {
32007 /* Check if lower four bits are 0b1100, if so, change to 0b1111 */
32008 /* Make this work in 32-bit mode using only 32-bit compares */
32009 assign( mask, unop( Iop_1Sto64,
32010 binop( Iop_CmpEQ32, mkU32( 0xC ),
32011 binop( Iop_And32, mkU32( 0xF ),
32012 unop( Iop_64to32,
32013 unop( Iop_V128to64, tmp )
32014 ) ) ) ) );
32015 rtn = binop( Iop_64HLtoV128,
32016 unop( Iop_V128HIto64, tmp ),
32017 binop( Iop_Or64,
32018 binop( Iop_And64, mkU64( 0xF ), mkexpr( mask ) ),
32019 unop( Iop_V128to64, tmp ) ) );
32022 return rtn;
32026 AltiVec BCD Arithmetic instructions.
32027 These instructions modify CR6 for various conditions in the result,
32028 including when an overflow occurs. We could easily detect all conditions
32029 except when an overflow occurs. But since we can't be 100% accurate
32030 in our emulation of CR6, it seems best to just not support it at all.
32032 static Bool dis_av_bcd_misc ( UInt prefix, UInt theInstr, const VexAbiInfo* vbi )
32034 UChar opc1 = ifieldOPC(theInstr);
32035 UChar vRT_addr = ifieldRegDS(theInstr);
32036 UChar vRA_addr = ifieldRegA(theInstr);
32037 UChar vRB_addr = ifieldRegB(theInstr);
32038 IRTemp vA = newTemp(Ity_V128);
32039 IRTemp vB = newTemp(Ity_V128);
32040 UInt opc2 = IFIELD( theInstr, 0, 11 );
32041 IRExpr *pos, *neg, *valid, *zero, *sign;
32042 IRTemp eq_lt_gt = newTemp( Ity_I32 );
32044 /* There is no prefixed version of these instructions. */
32045 PREFIX_CHECK
32047 assign( vA, getVReg(vRA_addr));
32048 assign( vB, getVReg(vRB_addr));
32050 if (opc1 != 0x4) {
32051 vex_printf("dis_av_bcd_misc(ppc)(instr)\n");
32052 return False;
32055 switch (opc2) {
32056 case 0x341: // bcdcpsgn. Decimal Copy Sign VX-form
32058 IRExpr *sign_vb, *value_va;
32059 DIP("bcdcpsgn. v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
32061 zero =
32062 BCDstring_zero( binop( Iop_AndV128,
32063 binop( Iop_64HLtoV128,
32064 mkU64( 0xFFFFFFFFFFFFFFFF ),
32065 mkU64( 0xFFFFFFFFFFFFFFF0 ) ),
32066 mkexpr( vA ) ) );
32068 /* Sign codes of 0xA, 0xC, 0xE or 0xF are positive, sign
32069 * codes 0xB and 0xD are negative.
32071 sign = binop( Iop_And64, mkU64( 0xF ),
32072 unop( Iop_V128to64, mkexpr( vB ) ) );
32074 neg = mkOR1( binop( Iop_CmpEQ64,
32075 sign,
32076 mkU64 ( 0xB ) ),
32077 binop( Iop_CmpEQ64,
32078 sign,
32079 mkU64 ( 0xD ) ) );
32081 pos = mkNOT1( neg );
32083 /* invalid if vA or vB is not valid */
32084 valid =
32085 unop( Iop_64to32,
32086 binop( Iop_And64,
32087 is_BCDstring128( vbi,
32088 /*Signed*/True, mkexpr( vA ) ),
32089 is_BCDstring128( vbi,
32090 /*Signed*/True, mkexpr( vB ) ) ) );
32092 sign_vb = binop( Iop_AndV128,
32093 binop( Iop_64HLtoV128,
32094 mkU64( 0 ),
32095 mkU64( 0xF ) ),
32096 mkexpr( vB ) );
32098 value_va = binop( Iop_AndV128,
32099 binop( Iop_64HLtoV128,
32100 mkU64( 0xFFFFFFFFFFFFFFFF ),
32101 mkU64( 0xFFFFFFFFFFFFFFF0 ) ),
32102 mkexpr( vA ) );
32103 putVReg( vRT_addr, binop( Iop_OrV128, sign_vb, value_va ) );
32105 break;
32107 default:
32108 vex_printf("dis_av_bcd_misc(ppc)(opc2)\n");
32109 return False;
32112 /* set CR field 6 to:
32113 * 0b1000 if vB less than 0, i.e. vB is neg and not zero,
32114 * 0b0100 if vB greater than 0, i.e. vB is pos and not zero,
32115 * 0b0010 if vB equals 0,
32116 * 0b0001 if vB is invalid; overrides lt, gt, eq
32118 assign( eq_lt_gt,
32119 binop( Iop_Or32,
32120 binop( Iop_Shl32,
32121 unop( Iop_1Uto32,
32122 mkAND1( neg,
32123 mkNOT1( zero ) ) ),
32124 mkU8( 3 ) ),
32125 binop( Iop_Or32,
32126 binop( Iop_Shl32,
32127 unop( Iop_1Uto32,
32128 mkAND1( pos,
32129 mkNOT1( zero ) ) ),
32130 mkU8( 2 ) ),
32131 binop( Iop_Shl32,
32132 unop( Iop_1Uto32, zero ),
32133 mkU8( 1 ) ) ) ) );
32135 IRTemp valid_mask = newTemp( Ity_I32 );
32137 assign( valid_mask, unop( Iop_1Sto32, unop( Iop_32to1, valid ) ) );
32139 putGST_field( PPC_GST_CR,
32140 binop( Iop_Or32,
32141 binop( Iop_And32,
32142 mkexpr( valid_mask ),
32143 mkexpr( eq_lt_gt ) ),
32144 binop( Iop_And32,
32145 unop( Iop_Not32, mkexpr( valid_mask ) ),
32146 mkU32( 1 ) ) ),
32147 6 );
32148 return True;
32151 static Bool dis_av_bcd ( UInt prefix, UInt theInstr, const VexAbiInfo* vbi )
32153 /* VX-Form */
32154 UChar opc1 = ifieldOPC(theInstr);
32155 UChar vRT_addr = ifieldRegDS(theInstr);
32156 UChar vRA_addr = ifieldRegA(theInstr);
32157 UChar vRB_addr = ifieldRegB(theInstr);
32158 UChar ps = IFIELD( theInstr, 9, 1 );
32159 UInt opc2 = IFIELD( theInstr, 0, 9 );
32160 IRTemp vA = newTemp(Ity_V128);
32161 IRTemp vB = newTemp(Ity_V128);
32162 IRTemp dst = newTemp(Ity_V128);
32163 IRExpr *pos, *neg, *valid, *zero, *sign_digit, *in_range;
32164 IRTemp eq_lt_gt = newTemp( Ity_I32 );
32165 IRExpr *overflow, *value;
32167 /* There is no prefixed version of these instructions. */
32168 PREFIX_CHECK
32170 assign( vA, getVReg(vRA_addr));
32171 assign( vB, getVReg(vRB_addr));
32173 if (opc1 != 0x4) {
32174 vex_printf("dis_av_bcd(ppc)(instr)\n");
32175 return False;
32178 switch (opc2) {
32179 case 0x1: // bcdadd.
32180 case 0x41: // bcdsub.
32182 /* NOTE 64 bit compares are not supported in 32-bit mode. Use
32183 * 32-bit compares only.
32186 IRExpr *sign, *res_smaller;
32187 IRExpr *signA, *signB, *sign_digitA, *sign_digitB;
32188 IRExpr *zeroA, *zeroB, *posA, *posB, *negA, *negB;
32190 if ( opc2 == 0x1 ) {
32191 DIP("bcdadd. v%d,v%d,v%d,%u\n", vRT_addr, vRA_addr, vRB_addr, ps);
32192 assign( dst, bcd_sign_code_adjust( ps,
32193 binop( Iop_BCDAdd,
32194 mkexpr( vA ),
32195 mkexpr( vB ) ) ) );
32196 } else {
32197 DIP("bcdsub. v%d,v%d,v%d,%u\n", vRT_addr, vRA_addr, vRB_addr, ps);
32198 assign( dst, bcd_sign_code_adjust( ps,
32199 binop( Iop_BCDSub,
32200 mkexpr( vA ),
32201 mkexpr( vB ) ) ) );
32204 putVReg( vRT_addr, mkexpr( dst ) );
32205 /* set CR field 6 */
32206 /* result */
32207 zero = BCDstring_zero( binop( Iop_AndV128,
32208 binop( Iop_64HLtoV128,
32209 mkU64( 0xFFFFFFFFFFFFFFFF ),
32210 mkU64( 0xFFFFFFFFFFFFFFF0 ) ),
32211 mkexpr(dst) ) ); // ignore sign
32213 sign_digit = binop( Iop_And32, mkU32( 0xF ),
32214 unop( Iop_64to32,
32215 unop( Iop_V128to64, mkexpr( dst ) ) ) );
32217 sign = mkOR1( binop( Iop_CmpEQ32,
32218 sign_digit,
32219 mkU32 ( 0xB ) ),
32220 binop( Iop_CmpEQ32,
32221 sign_digit,
32222 mkU32 ( 0xD ) ) );
32223 neg = mkAND1( sign, mkNOT1( zero ) );
32225 /* Pos position AKA gt = 1 if ((not neg) & (not eq zero)) */
32226 pos = mkAND1( mkNOT1( sign ), mkNOT1( zero ) );
32227 valid = unop( Iop_64to32,
32228 binop( Iop_And64,
32229 is_BCDstring128( vbi,
32230 /*Signed*/True, mkexpr( vA ) ),
32231 is_BCDstring128( vbi,
32232 /*Signed*/True, mkexpr( vB ) )
32233 ) );
32235 /* src A */
32236 zeroA = BCDstring_zero( binop( Iop_AndV128,
32237 binop( Iop_64HLtoV128,
32238 mkU64( 0xFFFFFFFFFFFFFFFF ),
32239 mkU64( 0xFFFFFFFFFFFFFFF0 ) ),
32240 mkexpr( vA ) ) ); // ignore sign
32241 sign_digitA = binop( Iop_And32, mkU32( 0xF ),
32242 unop( Iop_64to32,
32243 unop( Iop_V128to64, mkexpr( vA ) ) ) );
32245 signA = mkOR1( binop( Iop_CmpEQ32,
32246 sign_digitA,
32247 mkU32 ( 0xB ) ),
32248 binop( Iop_CmpEQ32,
32249 sign_digitA,
32250 mkU32 ( 0xD ) ) );
32251 negA = mkAND1( signA, mkNOT1( zeroA ) );
32252 /* Pos position AKA gt = 1 if ((not neg) & (not eq zero)) */
32253 posA = mkAND1( mkNOT1( signA ), mkNOT1( zeroA ) );
32255 /* src B */
32256 zeroB = BCDstring_zero( binop( Iop_AndV128,
32257 binop( Iop_64HLtoV128,
32258 mkU64( 0xFFFFFFFFFFFFFFFF ),
32259 mkU64( 0xFFFFFFFFFFFFFFF0 ) ),
32260 mkexpr( vB ) ) ); // ignore sign
32261 sign_digitB = binop( Iop_And32, mkU32( 0xF ),
32262 unop( Iop_64to32,
32263 unop( Iop_V128to64, mkexpr( vB ) ) ) );
32265 signB = mkOR1( binop( Iop_CmpEQ32,
32266 sign_digitB,
32267 mkU32 ( 0xB ) ),
32268 binop( Iop_CmpEQ32,
32269 sign_digitB,
32270 mkU32 ( 0xD ) ) );
32271 negB = mkAND1( signB, mkNOT1( zeroB ) );
32274 /* Pos position AKA gt = 1 if ((not neg) & (not eq zero)) */
32275 posB = mkAND1( mkNOT1( signB ), mkNOT1( zeroB ) );
32278 if (mode64) {
32279 res_smaller = mkAND1( CmpGT128U( mkexpr( vA ), mkexpr( dst ) ),
32280 CmpGT128U( mkexpr( vB ), mkexpr( dst ) ) );
32282 } else {
32283 /* Have to do this with 32-bit compares, expensive */
32284 res_smaller = mkAND1( UNSIGNED_CMP_GT_V128( mkexpr( vA ),
32285 mkexpr( dst ) ),
32286 UNSIGNED_CMP_GT_V128( mkexpr( vB ),
32287 mkexpr( dst ) ) );
32290 if ( opc2 == 0x1) {
32291 /* Overflow for Add can only occur if the signs of the operands
32292 * are the same and the two operands are non-zero. On overflow,
32293 * the PPC hardware produces a result consisting of just the lower
32294 * digits of the result. So, if the result is less than both
32295 * operands and the sign of the operands are the same, overflow
32296 * occurred.
32298 overflow = mkOR1( mkAND1( res_smaller, mkAND1( negA, negB ) ),
32299 mkAND1( res_smaller, mkAND1( posA, posB ) ) );
32300 } else {
32301 /* Overflow for Sub can only occur if the signs of the operands
32302 * are different and the two operands are non-zero. On overflow,
32303 * the PPC hardware produces a result consisting of just the lower
32304 * digits of the result. So, if the result is less than both
32305 * operands and the signs of the operands are different, overflow
32306 * occurred.
32308 overflow = mkOR1( mkAND1( res_smaller, mkAND1( negA, posB ) ),
32309 mkAND1( res_smaller, mkAND1( posA, negB ) ) );
32312 break;
32314 case 0x081: // bcdus. Decimal Unsigned Shift VX-form
32315 case 0x0C1: // bcds. Decimal Shift VX-form
32316 case 0x1C1: // bcdsr. Decimal Shift and Round VX-form
32318 IRExpr *shift_dir;
32319 IRExpr *shift_mask, *result, *new_sign_val, *sign;
32320 IRExpr *not_excess_shift, *not_excess_shift_mask;
32321 IRTemp shift_dir_mask = newTemp( Ity_I64 );
32322 IRTemp shift_by = newTemp( Ity_I64 );
32323 IRTemp shift_field = newTemp( Ity_I64 );
32324 IRTemp shifted_out = newTemp( Ity_V128 );
32325 IRTemp value_shl = newTemp( Ity_V128 );
32326 IRTemp value_shr = newTemp( Ity_V128 );
32327 IRTemp round = newTemp( Ity_I32);
32329 ULong value_mask_low = 0;
32330 UInt max_shift = 0;
32332 if (opc2 == 0x0C1) {
32333 DIP("bcds. v%d,v%d,v%d,%d\n", vRT_addr, vRA_addr, vRB_addr, ps);
32334 value_mask_low = 0xFFFFFFFFFFFFFFF0;
32335 max_shift = 30 * 4; /* maximum without shifting all digits out */
32337 } else if (opc2 == 0x1C1) {
32338 DIP("bcdsr. v%d,v%d,v%d,%d\n", vRT_addr, vRA_addr, vRB_addr, ps);
32340 value_mask_low = 0xFFFFFFFFFFFFFFF0;
32341 max_shift = 30 * 4; /* maximum without shifting all digits out */
32343 } else {
32344 DIP("bcdus. v%d,v%d,v%d,%d\n", vRT_addr, vRA_addr,
32345 vRB_addr, ps);
32346 value_mask_low = 0xFFFFFFFFFFFFFFFF;
32347 max_shift = 31 * 4; /* maximum without shifting all digits out */
32350 value = binop( Iop_AndV128,
32351 binop( Iop_64HLtoV128,
32352 mkU64( 0xFFFFFFFFFFFFFFFF ),
32353 mkU64( value_mask_low ) ),
32354 mkexpr( vB ) );
32356 zero = BCDstring_zero( value );
32358 /* Shift field is 2's complement value */
32359 assign( shift_field, unop( Iop_V128to64,
32360 binop( Iop_ShrV128,
32361 binop( Iop_AndV128,
32362 binop( Iop_64HLtoV128,
32363 mkU64( 0xFF ),
32364 mkU64( 0x0) ),
32365 mkexpr( vA ) ),
32366 mkU8( 64 ) ) ) );
32368 /* if shift_dir = 0 shift left, otherwise shift right */
32369 shift_dir = binop( Iop_CmpEQ64,
32370 binop( Iop_Shr64,
32371 mkexpr( shift_field ),
32372 mkU8( 7 ) ),
32373 mkU64( 1 ) );
32375 assign( shift_dir_mask, unop( Iop_1Sto64, shift_dir ) );
32377 /* Shift field is stored in 2's complement form */
32378 assign(shift_by,
32379 binop( Iop_Mul64,
32380 binop( Iop_Or64,
32381 binop( Iop_And64,
32382 unop( Iop_Not64,
32383 mkexpr( shift_dir_mask ) ),
32384 mkexpr( shift_field ) ),
32385 binop( Iop_And64,
32386 mkexpr( shift_dir_mask ),
32387 binop( Iop_And64,
32388 binop( Iop_Add64,
32389 mkU64( 1 ),
32390 unop( Iop_Not64,
32391 mkexpr( shift_field ) ) ),
32392 mkU64( 0xFF ) ) ) ),
32393 mkU64( 4 ) ) );
32395 /* If the shift exceeds 128 bits, we need to force the result
32396 * to zero because Valgrind shift amount is only 7-bits. Otherwise,
32397 * we get a shift amount of mod(shift_by, 127)
32399 not_excess_shift = unop( Iop_1Sto64,
32400 binop( Iop_CmpLE64U,
32401 mkexpr( shift_by ),
32402 mkU64( max_shift ) ) );
32404 not_excess_shift_mask = binop( Iop_64HLtoV128,
32405 not_excess_shift,
32406 not_excess_shift );
32408 assign( value_shl,
32409 binop( Iop_ShlV128, value, unop( Iop_64to8,
32410 mkexpr( shift_by) ) ) );
32411 assign( value_shr,
32412 binop( Iop_AndV128,
32413 binop( Iop_64HLtoV128,
32414 mkU64( 0xFFFFFFFFFFFFFFFF ),
32415 mkU64( value_mask_low) ),
32416 binop( Iop_ShrV128,
32417 value,
32418 unop( Iop_64to8,
32419 mkexpr( shift_by ) ) ) ) );
32421 /* Overflow occurs if the shift amount is greater than zero, the
32422 * operation is a left shift and any non-zero digits are left
32423 * shifted out.
32425 assign( shifted_out,
32426 binop( Iop_OrV128,
32427 binop( Iop_ShrV128,
32428 value,
32429 unop( Iop_64to8,
32430 binop( Iop_Sub64,
32431 mkU64( 32*4 ),
32432 mkexpr( shift_by ) ) ) ),
32433 binop( Iop_AndV128,
32434 unop( Iop_NotV128,
32435 not_excess_shift_mask ),
32436 value ) ) );
32438 overflow = mkAND1( mkNOT1( BCDstring_zero( mkexpr( shifted_out ) ) ),
32439 mkAND1( mkNOT1( shift_dir ),
32440 binop( Iop_CmpNE64,
32441 mkexpr( shift_by ),
32442 mkU64( 0 ) ) ) );
32444 if ((opc2 == 0xC1) || (opc2 == 0x1C1)) {
32445 /* Sign codes of 0xA, 0xC, 0xE or 0xF are positive, sign
32446 * codes 0xB and 0xD are negative.
32448 sign_digit = binop( Iop_And64, mkU64( 0xF ),
32449 unop( Iop_V128to64, mkexpr( vB ) ) );
32451 sign = mkOR1( binop( Iop_CmpEQ64,
32452 sign_digit,
32453 mkU64 ( 0xB ) ),
32454 binop( Iop_CmpEQ64,
32455 sign_digit,
32456 mkU64 ( 0xD ) ) );
32457 neg = mkAND1( sign, mkNOT1( zero ) );
32459 /* Pos position AKA gt = 1 if ((not neg) & (not eq zero)) */
32460 pos = mkAND1( mkNOT1( sign ), mkNOT1( zero ) );
32462 valid =
32463 unop( Iop_64to32,
32464 is_BCDstring128( vbi, /* Signed */True, mkexpr( vB ) ) );
32466 } else {
32467 /* string is an unsigned BCD value */
32468 pos = mkU1( 1 );
32469 neg = mkU1( 0 );
32470 sign = mkU1( 0 );
32472 valid =
32473 unop( Iop_64to32,
32474 is_BCDstring128( vbi, /* Unsigned */False,
32475 mkexpr( vB ) ) );
32478 /* if PS = 0
32479 vB positive, sign is C
32480 vB negative, sign is D
32481 if PS = 1
32482 vB positive, sign is F
32483 vB negative, sign is D
32484 Note can't use pos or neg here since they are ANDed with zero,
32485 use sign instead.
32487 if (ps == 0) {
32488 new_sign_val = binop( Iop_Or64,
32489 unop( Iop_1Uto64, sign ),
32490 mkU64( 0xC ) );
32492 } else {
32493 new_sign_val = binop( Iop_Xor64,
32494 binop( Iop_Shl64,
32495 unop( Iop_1Uto64, sign ),
32496 mkU8( 1 ) ),
32497 mkU64( 0xF ) );
32500 shift_mask = binop( Iop_64HLtoV128,
32501 unop( Iop_1Sto64, shift_dir ),
32502 unop( Iop_1Sto64, shift_dir ) );
32504 result = binop( Iop_OrV128,
32505 binop( Iop_AndV128, mkexpr( value_shr ), shift_mask ),
32506 binop( Iop_AndV128,
32507 mkexpr( value_shl ),
32508 unop( Iop_NotV128, shift_mask ) ) );
32510 if (opc2 == 0xC1) { // bcds.
32511 putVReg( vRT_addr, binop( Iop_OrV128,
32512 binop( Iop_64HLtoV128,
32513 mkU64( 0 ),
32514 new_sign_val ),
32515 binop( Iop_AndV128,
32516 not_excess_shift_mask,
32517 result ) ) );
32518 } else if (opc2 == 0x1C1) { //bcdsr.
32519 /* If shifting right, need to round up if needed */
32520 assign( round, unop( Iop_1Uto32,
32521 mkAND1( shift_dir,
32522 check_BCD_round( value,
32523 shift_by ) ) ) );
32525 putVReg( vRT_addr,
32526 binop( Iop_OrV128,
32527 binop( Iop_64HLtoV128,
32528 mkU64( 0 ),
32529 new_sign_val ),
32530 binop( Iop_AndV128,
32531 not_excess_shift_mask,
32532 mkexpr( increment_BCDstring( vbi, result,
32533 mkexpr( round)
32534 ) ) ) ) );
32535 } else { // bcdus.
32536 putVReg( vRT_addr, binop( Iop_AndV128,
32537 not_excess_shift_mask,
32538 result ) );
32541 break;
32543 case 0x101: // bcdtrunc. Decimal Truncate VX-form
32544 case 0x141: // bcdutrunc. Decimal Unsigned Truncate VX-form
32546 IRTemp length = newTemp( Ity_I64 );
32547 IRTemp masked_out = newTemp( Ity_V128 );
32548 IRExpr *new_sign_val, *result, *shift;
32549 IRExpr *length_neq_128, *sign;
32550 ULong value_mask_low;
32551 Int max_digits;
32553 if ( opc2 == 0x101) { // bcdtrunc.
32554 value_mask_low = 0xFFFFFFFFFFFFFFF0;
32555 max_digits = 31;
32556 } else {
32557 value_mask_low = 0xFFFFFFFFFFFFFFFF;
32558 max_digits = 32;
32561 assign( length, binop( Iop_And64,
32562 unop( Iop_V128HIto64,
32563 mkexpr( vA ) ),
32564 mkU64( 0xFFFF ) ) );
32565 shift = unop( Iop_64to8,
32566 binop( Iop_Mul64,
32567 binop( Iop_Sub64,
32568 mkU64( max_digits ),
32569 mkexpr( length ) ),
32570 mkU64( 4 ) ) );
32572 /* Note ShrV128 gets masked by 127 so a shift of 128 results in
32573 * the value not being shifted. A shift of 128 sets the register
32574 * to zero. So if length+1 = 128, just set the value to 0.
32576 length_neq_128 = mkNOT1( binop( Iop_CmpEQ64,
32577 mkexpr( length),
32578 mkU64( 0x1F ) ) );
32580 assign( masked_out,
32581 binop( Iop_AndV128,
32582 binop( Iop_64HLtoV128,
32583 unop( Iop_1Sto64, length_neq_128 ),
32584 unop( Iop_1Sto64, length_neq_128 ) ),
32585 binop( Iop_ShrV128,
32586 mkexpr( vB ),
32587 unop( Iop_64to8,
32588 binop( Iop_Mul64,
32589 mkU64( 4 ),
32590 binop( Iop_Add64,
32591 mkU64( 1 ),
32592 mkexpr( length ) ) ) ) )
32593 ) );
32595 /* Overflow occurs if any of the leftmost 31-length digits of vB
32596 * are non-zero.
32598 overflow = mkNOT1( BCDstring_zero( mkexpr( masked_out ) ) );
32600 value = binop( Iop_AndV128,
32601 binop( Iop_64HLtoV128,
32602 mkU64( 0xFFFFFFFFFFFFFFFF ),
32603 mkU64( value_mask_low ) ),
32604 mkexpr( vB ) );
32607 if ( opc2 == 0x101 ) { // bcdtrunc.
32608 /* Check if all of the non-sign digits are zero */
32609 zero = BCDstring_zero( binop( Iop_AndV128,
32610 binop( Iop_64HLtoV128,
32611 mkU64( 0xFFFFFFFFFFFFFFFF ),
32612 mkU64( 0xFFFFFFFFFFFFFFF0 ) ),
32613 value ) );
32615 /* Sign codes of 0xA, 0xC, 0xE or 0xF are positive, sign
32616 * codes 0xB and 0xD are negative.
32618 sign_digit = binop( Iop_And64, mkU64( 0xF ),
32619 unop( Iop_V128to64, mkexpr( vB ) ) );
32621 sign = mkOR1( binop( Iop_CmpEQ64,
32622 sign_digit,
32623 mkU64 ( 0xB ) ),
32624 binop( Iop_CmpEQ64,
32625 sign_digit,
32626 mkU64 ( 0xD ) ) );
32627 neg = mkAND1( sign, mkNOT1( zero ) );
32629 /* Pos position AKA gt = 1 if ((not neg) & (not eq zero)) */
32630 pos = mkAND1( mkNOT1( sign ), mkNOT1( zero ) );
32632 /* Note can't use pos or neg here since they are ANDed with zero,
32633 use sign instead.
32635 if (ps == 0) {
32636 new_sign_val = binop( Iop_Or64,
32637 unop( Iop_1Uto64, sign ),
32638 mkU64( 0xC ) );
32639 } else {
32640 new_sign_val = binop( Iop_Xor64,
32641 binop( Iop_Shl64,
32642 unop( Iop_1Uto64, sign ),
32643 mkU8 ( 1 ) ),
32644 mkU64( 0xF ) );
32646 valid =
32647 unop( Iop_64to32,
32648 is_BCDstring128( vbi, /* Signed */True, mkexpr( vB ) ) );
32650 } else { // bcdutrunc.
32651 /* Check if all of the digits are zero */
32652 zero = BCDstring_zero( value );
32654 /* unsigned value, need to make CC code happy */
32655 neg = mkU1( 0 );
32657 /* Pos position AKA gt = 1 if (not eq zero) */
32658 pos = mkNOT1( zero );
32659 valid =
32660 unop( Iop_64to32,
32661 is_BCDstring128( vbi, /* Unsigned */False,
32662 mkexpr( vB ) ) );
32665 /* If vB is not valid, the result is undefined, but we need to
32666 * match the hardware so the output of the test suite will match.
32667 * Hardware sets result to 0x0.
32669 result = binop( Iop_AndV128,
32670 mkV128( 0xFFFF ),
32671 binop( Iop_ShrV128,
32672 binop( Iop_ShlV128, value, shift ),
32673 shift ) );
32675 if ( opc2 == 0x101) { // bcdtrunc.
32676 putVReg( vRT_addr, binop( Iop_OrV128,
32677 binop( Iop_64HLtoV128,
32678 mkU64( 0 ),
32679 new_sign_val ),
32680 result ) );
32681 } else {
32682 putVReg( vRT_addr, result );
32685 break;
32687 case 0x181: // bcdctz., bcdctn., bcdcfz., bcdcfn., bcdsetsgn.,
32688 // bcdcfsq., bcdctsq.
32690 UInt inst_select = IFIELD( theInstr, 16, 5);
32692 switch (inst_select) {
32693 case 0: // bcdctsq. (Decimal Convert to Signed Quadword VX-form)
32695 IRExpr *sign;
32697 /* The instruction takes a 32-bit integer in a vector source
32698 * register and returns the signed packed decimal number
32699 * in a vector register. The source integer needs to be moved
32700 * from the V128 to an I32 for the Iop.
32703 DIP("bcdctsq v%d, v%d\n", vRT_addr, vRB_addr);
32705 putVReg( vRT_addr, unop( Iop_BCD128toI128S, mkexpr( vB ) ) );
32707 sign = binop( Iop_And64,
32708 unop( Iop_V128to64, mkexpr( vB ) ),
32709 mkU64( 0xF ) );
32710 zero = mkAND1( binop( Iop_CmpEQ64,
32711 unop( Iop_V128HIto64, mkexpr( vB ) ),
32712 mkU64( 0x0 ) ),
32713 binop( Iop_CmpEQ64,
32714 binop( Iop_And64,
32715 unop( Iop_V128to64, mkexpr( vB ) ),
32716 mkU64( 0xFFFFFFF0 ) ),
32717 mkU64( 0x0 ) ) );
32718 pos = mkAND1( mkNOT1( zero ),
32719 mkOR1( mkOR1( binop( Iop_CmpEQ64,
32720 sign, mkU64( 0xA ) ),
32721 binop( Iop_CmpEQ64,
32722 sign, mkU64( 0xC ) ) ),
32723 mkOR1( binop( Iop_CmpEQ64,
32724 sign, mkU64( 0xE ) ),
32725 binop( Iop_CmpEQ64,
32726 sign, mkU64( 0xF ) ) ) ) );
32727 neg = mkAND1( mkNOT1( zero ),
32728 mkOR1( binop( Iop_CmpEQ64, sign, mkU64( 0xB ) ),
32729 binop( Iop_CmpEQ64, sign, mkU64( 0xD ) ) ) );
32731 /* Check each of the nibbles for a valid digit 0 to 9 */
32732 valid =
32733 unop( Iop_64to32,
32734 is_BCDstring128( vbi, /* Signed */True,
32735 mkexpr( vB ) ) );
32736 overflow = mkU1( 0 ); // not used
32738 break;
32740 case 2: // bcdcfsq. (Decimal Convert from Signed Quadword VX-form)
32742 IRExpr *pos_upper_gt, *pos_upper_eq, *pos_lower_gt;
32743 IRExpr *neg_upper_lt, *neg_upper_eq, *neg_lower_lt;
32745 DIP("bcdcfsq v%d, v%d, %d\n", vRT_addr, vRB_addr, ps);
32747 /* The instruction takes a signed packed decimal number and
32748 * returns the integer value in the vector register. The Iop
32749 * returns an I32 which needs to be moved to the destination
32750 * vector register.
32752 putVReg( vRT_addr,
32753 binop( Iop_I128StoBCD128, mkexpr( vB ), mkU8( ps ) ) );
32755 zero = mkAND1( binop( Iop_CmpEQ64, mkU64( 0 ),
32756 unop( Iop_V128to64, mkexpr( vB ) ) ),
32757 binop( Iop_CmpEQ64, mkU64( 0 ),
32758 unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
32759 pos = mkAND1( mkNOT1 ( zero ),
32760 binop( Iop_CmpEQ64, mkU64( 0 ),
32761 binop( Iop_And64,
32762 unop( Iop_V128HIto64,
32763 mkexpr( vB ) ),
32764 mkU64( 0x8000000000000000UL ) ) ) );
32765 neg = mkAND1( mkNOT1 ( zero ),
32766 binop( Iop_CmpEQ64, mkU64( 0x8000000000000000UL ),
32767 binop( Iop_And64,
32768 unop( Iop_V128HIto64,
32769 mkexpr( vB ) ),
32770 mkU64( 0x8000000000000000UL ) ) ) );
32772 /* Overflow occurs if: vB > 10^31-1 OR vB < -10^31-1
32773 * do not have a 128 bit compare. Will have to compare the
32774 * upper 64 bits and the lower 64 bits. If the upper 64-bits
32775 * are equal then overflow if the lower 64 bits of vB is greater
32776 * otherwise if the upper bits of vB is greater than the max
32777 * for the upper 64-bits then overflow
32779 * 10^31-1 = 0x7E37BE2022C0914B267FFFFFFF
32781 pos_upper_gt = binop( Iop_CmpLT64U,
32782 mkU64( 0x7E37BE2022 ),
32783 unop( Iop_V128HIto64, mkexpr( vB ) ) );
32784 pos_upper_eq = binop( Iop_CmpEQ64,
32785 unop( Iop_V128HIto64, mkexpr( vB ) ),
32786 mkU64( 0x7E37BE2022 ) );
32787 pos_lower_gt = binop( Iop_CmpLT64U,
32788 mkU64( 0x0914B267FFFFFFF ),
32789 unop( Iop_V128to64, mkexpr( vB ) ) );
32790 /* -10^31-1 = 0X81C841DFDD3F6EB4D97FFFFFFF */
32791 neg_upper_lt = binop( Iop_CmpLT64U,
32792 mkU64( 0X81C841DFDD ),
32793 unop( Iop_V128HIto64, mkexpr( vB ) ) );
32794 neg_upper_eq = binop( Iop_CmpEQ64,
32795 unop( Iop_V128HIto64, mkexpr( vB ) ),
32796 mkU64( 0X81C841DFDD ) );
32797 neg_lower_lt = binop( Iop_CmpLT64U,
32798 mkU64( 0x3F6EB4D97FFFFFFF ),
32799 unop( Iop_V128to64, mkexpr( vB ) ) );
32801 /* calculate overflow, masking for positive and negative */
32802 overflow = mkOR1( mkAND1( pos,
32803 mkOR1( pos_upper_gt,
32804 mkAND1( pos_upper_eq,
32805 pos_lower_gt ) ) ),
32806 mkAND1( neg,
32807 mkOR1( neg_upper_lt,
32808 mkAND1( neg_upper_eq,
32809 neg_lower_lt )
32810 ) ) );
32811 valid = mkU32( 1 );
32813 break;
32815 case 4: // bcdctz. (Decimal Convert to Zoned VX-form)
32817 IRExpr *ox_flag, *sign, *vrb_nibble30;
32818 int neg_bit_shift;
32819 unsigned int upper_byte, sign_byte;
32820 IRTemp tmp = newTemp( Ity_V128 );
32822 DIP("bcdctz. v%d,v%d,%d\n", vRT_addr, vRB_addr, ps);
32824 if (ps == 0) {
32825 upper_byte = 0x30;
32826 sign_byte = 0x30;
32827 neg_bit_shift = 4+2; /* sign byte is in bits [7:4] */
32828 } else {
32829 upper_byte = 0xF0;
32830 sign_byte = 0xC0;
32831 neg_bit_shift = 4+0;
32834 /* Grab vB bits[7:4]. It goes into bits [3:0] of the
32835 * result.
32837 vrb_nibble30 = binop( Iop_Shr64,
32838 binop( Iop_And64,
32839 unop( Iop_V128to64, mkexpr( vB ) ),
32840 mkU64( 0xF0 ) ),
32841 mkU8( 4 ) );
32843 /* Upper 24 hex digits of VB, i.e. hex digits vB[0:23],
32844 * must be zero for the value to be zero. This goes
32845 * in the overflow position of the condition code register.
32847 ox_flag = binop( Iop_CmpEQ64,
32848 binop( Iop_And64,
32849 unop( Iop_V128to64, mkexpr( vB ) ),
32850 mkU64( 0xFFFFFFFFFFFFFFFF ) ),
32851 mkU64( 0 ) );
32853 /* zero is the same as eq_flag */
32854 zero = mkAND1( binop( Iop_CmpEQ64,
32855 binop( Iop_And64,
32856 unop( Iop_V128HIto64, mkexpr( vB ) ),
32857 mkU64( 0xFFFFFFFFFFFFFFFF ) ),
32858 mkU64( 0 ) ),
32859 binop( Iop_CmpEQ64,
32860 binop( Iop_And64,
32861 unop( Iop_V128to64, mkexpr( vB ) ),
32862 mkU64( 0xFFFFFFFFFFFFFFF0 ) ),
32863 mkU64( 0 ) ) );
32865 /* Sign codes of 0xA, 0xC, 0xE or 0xF are positive, sign
32866 * codes 0xB and 0xD are negative.
32868 sign_digit = binop( Iop_And64, mkU64( 0xF ),
32869 unop( Iop_V128to64, mkexpr( vB ) ) );
32871 /* The negative value goes in the LT bit position of the
32872 * condition code register. Set neg if the sign of vB
32873 * is negative and zero is true.
32875 sign = mkOR1( binop( Iop_CmpEQ64,
32876 sign_digit,
32877 mkU64 ( 0xB ) ),
32878 binop( Iop_CmpEQ64,
32879 sign_digit,
32880 mkU64 ( 0xD ) ) );
32881 neg = mkAND1( sign, mkNOT1( zero ) );
32883 /* The positive value goes in the GT bit position of the
32884 * condition code register. Set positive if the sign of the
32885 * value is not negative.
32887 pos = mkAND1( mkNOT1( sign ), mkNOT1( zero ) );
32889 assign( tmp,
32890 convert_to_zoned( vbi, mkexpr( vB ),
32891 mkU64( upper_byte ) ) );
32893 /* Insert the sign based on ps and sign of vB
32894 * in the lower byte.
32896 putVReg( vRT_addr,
32897 binop( Iop_OrV128,
32898 binop( Iop_64HLtoV128,
32899 mkU64( 0 ),
32900 vrb_nibble30 ),
32901 binop( Iop_OrV128,
32902 mkexpr( tmp ),
32903 binop( Iop_64HLtoV128,
32904 mkU64( 0 ),
32905 binop( Iop_Or64,
32906 mkU64( sign_byte ),
32907 binop( Iop_Shl64,
32908 unop( Iop_1Uto64,
32909 sign ),
32910 mkU8( neg_bit_shift)
32911 ) ) ) ) ) );
32913 /* A valid number must have a value that is less than or
32914 * equal to 10^16 - 1. This is checked by making sure
32915 * bytes [31:16] of vB are zero.
32917 in_range = binop( Iop_CmpEQ64,
32918 binop( Iop_And64,
32919 mkU64( 0xFFFFFFFFFFFFFFF0 ),
32920 unop( Iop_V128HIto64, mkexpr( vB ) ) ),
32921 mkU64( 0 ) );
32923 /* overflow is set if ox_flag or not in_range. Setting is
32924 * ORed with the other condition code values.
32926 overflow = mkOR1( ox_flag, mkNOT1( in_range ) );
32928 /* The sign code must be between 0xA and 0xF and all digits are
32929 * between 0x0 and 0x9. The vB must be in range to be valid.
32930 * If not valid, condition code set to 0x0001.
32932 valid =
32933 unop( Iop_64to32,
32934 is_BCDstring128( vbi, /* Signed */True,
32935 mkexpr( vB ) ) );
32937 break;
32939 case 5: // bcdctn. (Decimal Convert to National VX-form)
32941 IRExpr *ox_flag, *sign;
32942 IRTemp tmp = newTemp( Ity_V128 );;
32944 DIP("bcdctn. v%d,v%d\n", vRT_addr, vRB_addr);
32946 value = binop( Iop_And64,
32947 mkU64( 0xFFFFFFFF ),
32948 unop( Iop_V128to64, mkexpr( vB ) ) );
32950 /* A valid number must have a value that is less than or
32951 * equal to 10^7 - 1. This is checked by making sure
32952 * bytes [31:8] of vB are zero.
32954 in_range = mkAND1( binop( Iop_CmpEQ64,
32955 unop( Iop_V128HIto64, mkexpr( vB ) ),
32956 mkU64( 0 ) ),
32957 binop( Iop_CmpEQ64,
32958 binop( Iop_Shr64,
32959 unop( Iop_V128to64,
32960 mkexpr( vB ) ),
32961 mkU8( 32 ) ),
32962 mkU64( 0 ) ) );
32964 /* The sign code must be between 0xA and 0xF and all digits are
32965 * between 0x0 and 0x9.
32967 valid =
32968 unop( Iop_64to32,
32969 is_BCDstring128( vbi, /* Signed */True,
32970 mkexpr( vB ) ) );
32972 /* Upper 24 hex digits of VB, i.e. hex digits vB[0:23],
32973 * must be zero for the ox_flag to be zero. This goes
32974 * in the LSB position (variable overflow) of the
32975 * condition code register.
32977 ox_flag =
32978 mkNOT1( mkAND1( binop( Iop_CmpEQ64,
32979 binop( Iop_And64,
32980 unop( Iop_V128HIto64,
32981 mkexpr( vB ) ),
32982 mkU64( 0xFFFFFFFFFFFFFFFF ) ),
32983 mkU64( 0 ) ),
32984 binop( Iop_CmpEQ64,
32985 binop( Iop_And64,
32986 unop( Iop_V128to64,
32987 mkexpr( vB ) ),
32988 mkU64( 0xFFFFFFFF00000000 ) ),
32989 mkU64( 0 ) ) ) );
32991 /* Set zero to 1 if all of the bytes in vB are zero. This is
32992 * used when setting the lt_flag (variable neg) and the gt_flag
32993 * (variable pos).
32995 zero = mkAND1( binop( Iop_CmpEQ64,
32996 binop( Iop_And64,
32997 unop( Iop_V128HIto64,
32998 mkexpr( vB ) ),
32999 mkU64( 0xFFFFFFFFFFFFFFFF ) ),
33000 mkU64( 0 ) ),
33001 binop( Iop_CmpEQ64,
33002 binop( Iop_And64,
33003 unop( Iop_V128to64, mkexpr( vB ) ),
33004 mkU64( 0xFFFFFFFFFFFFFFF0 ) ),
33005 mkU64( 0 ) ) );
33007 /* Sign codes of 0xA, 0xC, 0xE or 0xF are positive, sign
33008 * codes 0xB and 0xD are negative.
33010 sign_digit = binop( Iop_And64, mkU64( 0xF ), value );
33012 /* The negative value goes in the LT bit position of the
33013 * condition code register. Set neg if the sign of the
33014 * value is negative and the value is zero.
33016 sign = mkOR1( binop( Iop_CmpEQ64,
33017 sign_digit,
33018 mkU64 ( 0xB ) ),
33019 binop( Iop_CmpEQ64,
33020 sign_digit,
33021 mkU64 ( 0xD ) ) );
33022 neg = mkAND1( sign, mkNOT1( zero ) );
33024 /* The positive value goes in the GT bit position of the
33025 * condition code register. Set neg if the sign of the
33026 * value is not negative and the value is zero.
33028 pos = mkAND1( mkNOT1( sign ), mkNOT1( zero ) );
33030 assign( tmp,
33031 convert_to_national( vbi, mkexpr( vB ) ) );
33033 /* If vB is positive insert sign value 0x002B, otherwise
33034 * insert 0x002D for negative. Have to use sign not neg
33035 * because neg has been ANDed with zero. This is 0x29
33036 * OR'd with (sign << 1 | NOT sign) << 1.
33037 * sign = 1 if vB is negative.
33039 putVReg( vRT_addr,
33040 binop( Iop_OrV128,
33041 mkexpr( tmp ),
33042 binop( Iop_64HLtoV128,
33043 mkU64( 0 ),
33044 binop( Iop_Or64,
33045 mkU64( 0x29 ),
33046 binop( Iop_Or64,
33047 binop( Iop_Shl64,
33048 unop( Iop_1Uto64,
33049 sign ),
33050 mkU8( 2 ) ),
33051 binop( Iop_Shl64,
33052 unop( Iop_1Uto64,
33053 mkNOT1(sign)),
33054 mkU8( 1 ) )
33055 ) ) ) ) );
33058 /* The sign code must be between 0xA and 0xF and all digits are
33059 * between 0x0 and 0x9. The vB must be in range to be valid.
33061 valid =
33062 unop( Iop_64to32,
33063 is_BCDstring128( vbi, /* Signed */True,
33064 mkexpr( vB ) ) );
33066 overflow = ox_flag;
33068 break;
33070 case 6: // bcdcfz. (Decimal Convert From Zoned VX-form)
33072 IRExpr *sign;
33073 IRTemp tmp = newTemp( Ity_V128 );;
33075 DIP("bcdcfz. v%d,v%d,%d\n", vRT_addr, vRB_addr, ps);
33077 valid = unop( Iop_1Uto32, is_Zoned_decimal( vB, ps ) );
33079 assign( tmp,
33080 convert_from_zoned( vbi, mkexpr( vB ) ) );
33082 /* If the result of checking the lower 4 bits of each 8-bit
33083 * value is zero, then the "number" was zero.
33085 zero =
33086 binop( Iop_CmpEQ64,
33087 binop( Iop_Or64,
33088 binop( Iop_And64,
33089 unop( Iop_V128to64, mkexpr( vB ) ),
33090 mkU64( 0x0F0F0F0F0F0F0F0FULL ) ),
33091 binop( Iop_And64,
33092 unop( Iop_V128to64, mkexpr( vB ) ),
33093 mkU64( 0x0F0F0F0F0F0F0F0FULL ) ) ),
33094 mkU64( 0 ) );
33096 /* Sign bit is in bit 6 of vB. */
33097 sign_digit = binop( Iop_And64, mkU64( 0xF0 ),
33098 unop( Iop_V128to64, mkexpr( vB ) ) );
33100 if ( ps == 0 ) {
33101 /* sign will be equal to 0 for positive number */
33102 sign = binop( Iop_CmpEQ64,
33103 binop( Iop_And64,
33104 sign_digit,
33105 mkU64( 0x40 ) ),
33106 mkU64( 0x40 ) );
33107 } else {
33108 sign = mkOR1(
33109 binop( Iop_CmpEQ64, sign_digit, mkU64( 0xB0 ) ),
33110 binop( Iop_CmpEQ64, sign_digit, mkU64( 0xD0 ) ) );
33113 /* The negative value goes in the LT bit position of the
33114 * condition code register. Set neg if the sign of the
33115 * value is negative and the value is zero.
33117 neg = mkAND1( sign, mkNOT1( zero ) );
33119 /* The positive value goes in the GT bit position of the
33120 * condition code register. Set neg if the sign of the
33121 * value is not negative and the value is zero.
33123 pos = mkAND1( mkNOT1( sign ), mkNOT1( zero ) );
33125 /* sign of the result is 0xC for positive, 0xD for negative */
33126 putVReg( vRT_addr,
33127 binop( Iop_OrV128,
33128 mkexpr( tmp ),
33129 binop( Iop_64HLtoV128,
33130 mkU64( 0 ),
33131 binop( Iop_Or64,
33132 mkU64( 0xC ),
33133 unop( Iop_1Uto64, sign )
33134 ) ) ) );
33135 /* For this instruction the LSB position in the CC
33136 * field, the overflow position in the other instructions,
33137 * is given by 0. There is nothing to or with LT, EQ or GT.
33139 overflow = mkU1( 0 );
33141 break;
33143 case 7: // bcdcfn. (Decimal Convert From National VX-form)
33145 IRTemp hword_7 = newTemp( Ity_I64 );
33146 IRExpr *sign;
33147 IRTemp tmp = newTemp( Ity_I64 );;
33149 DIP("bcdcfn. v%d,v%d,%d\n", vRT_addr, vRB_addr, ps);
33151 /* check that the value is valid */
33152 valid = unop( Iop_1Uto32, is_National_decimal( vB ) );
33154 assign( hword_7, binop( Iop_And64,
33155 unop( Iop_V128to64, mkexpr( vB ) ),
33156 mkU64( 0xFFF ) ) );
33157 /* sign = 1 if vB is negative */
33158 sign = binop( Iop_CmpEQ64, mkexpr( hword_7 ), mkU64( 0x002D ) );
33160 assign( tmp, convert_from_national( vbi, mkexpr( vB ) ) );
33162 /* If the result of checking the lower 4 bits of each 16-bit
33163 * value is zero, then the "number" was zero.
33165 zero =
33166 binop( Iop_CmpEQ64,
33167 binop( Iop_Or64,
33168 binop( Iop_And64,
33169 unop( Iop_V128HIto64, mkexpr( vB ) ),
33170 mkU64( 0x000F000F000F000FULL ) ),
33171 binop( Iop_And64,
33172 unop( Iop_V128to64, mkexpr( vB ) ),
33173 mkU64( 0x000F000F000F0000ULL ) ) ),
33174 mkU64( 0 ) );
33177 /* The negative value goes in the LT bit position of the
33178 * condition code register. Set neg if the sign of the
33179 * value is negative and the value is zero.
33181 neg = mkAND1( sign, mkNOT1( zero ) );
33183 /* The positive value goes in the GT bit position of the
33184 * condition code register. Set neg if the sign of the
33185 * value is not negative and the value is zero.
33187 pos = mkAND1( mkNOT1( sign ), mkNOT1( zero ) );
33189 /* For this instruction the LSB position in the CC
33190 * field, the overflow position in the other instructions,
33191 * is given by invalid. There is nothing to OR with the valid
33192 * flag.
33194 overflow = mkU1( 0 );
33196 /* sign of the result is:
33197 ( 0b1100 OR neg) OR (ps OR (ps AND pos) << 1 )
33200 putVReg( vRT_addr,
33201 binop( Iop_64HLtoV128,
33202 mkU64( 0 ),
33203 binop( Iop_Or64,
33204 binop( Iop_Or64,
33205 binop( Iop_Shl64,
33206 binop( Iop_And64,
33207 mkU64( ps ),
33208 unop( Iop_1Uto64,
33209 mkNOT1(sign))),
33210 mkU8( 1 ) ),
33211 mkU64( ps ) ),
33212 binop( Iop_Or64,
33213 binop( Iop_Or64,
33214 mkU64( 0xC ),
33215 unop( Iop_1Uto64, sign ) ),
33216 mkexpr( tmp ) ) ) ) );
33219 break;
33221 case 31: // bcdsetsgn. (BCD set sign)
33223 IRExpr *new_sign_val, *sign;
33225 DIP("bcdsetsgn. v%d,v%d,%d\n", vRT_addr, vRB_addr, ps);
33227 value = binop( Iop_AndV128,
33228 binop( Iop_64HLtoV128,
33229 mkU64( 0xFFFFFFFFFFFFFFFF ),
33230 mkU64( 0xFFFFFFFFFFFFFFF0 ) ),
33231 mkexpr( vB ) );
33232 zero = BCDstring_zero( value );
33234 /* Sign codes of 0xA, 0xC, 0xE or 0xF are positive, sign
33235 * codes 0xB and 0xD are negative.
33237 sign_digit = binop( Iop_And64, mkU64( 0xF ),
33238 unop( Iop_V128to64, mkexpr( vB ) ) );
33240 sign = mkOR1( binop( Iop_CmpEQ64,
33241 sign_digit,
33242 mkU64 ( 0xB ) ),
33243 binop( Iop_CmpEQ64,
33244 sign_digit,
33245 mkU64 ( 0xD ) ) );
33246 neg = mkAND1( sign, mkNOT1( zero ) );
33248 pos = mkAND1( mkNOT1( sign ), mkNOT1( zero ) );
33250 valid =
33251 unop( Iop_64to32,
33252 is_BCDstring128( vbi, /* Signed */True,
33253 mkexpr( vB ) ) );
33255 /* if PS = 0
33256 vB positive, sign is C
33257 vB negative, sign is D
33258 if PS = 1
33259 vB positive, sign is F
33260 vB negative, sign is D
33261 Note can't use pos or neg here since they are ANDed with
33262 zero, use sign instead.
33264 if (ps == 0) {
33265 new_sign_val = binop( Iop_Or64,
33266 unop( Iop_1Uto64, sign ),
33267 mkU64( 0xC ) );
33269 } else {
33270 new_sign_val = binop( Iop_Xor64,
33271 binop( Iop_Shl64,
33272 unop( Iop_1Uto64, sign ),
33273 mkU8( 1 ) ),
33274 mkU64( 0xF ) );
33277 putVReg( vRT_addr, binop( Iop_OrV128,
33278 binop( Iop_64HLtoV128,
33279 mkU64( 0 ),
33280 new_sign_val ),
33281 value ) );
33282 /* For this instruction the LSB position in the CC
33283 * field, the overflow position in the other instructions,
33284 * is given by invalid.
33286 overflow = unop( Iop_32to1, unop( Iop_Not32, valid ) );
33288 break;
33290 default:
33291 vex_printf("dis_av_bcd(ppc)(invalid inst_select)\n");
33292 return False;
33295 break;
33297 default:
33298 vex_printf("dis_av_bcd(ppc)(opc2)\n");
33299 return False;
33302 IRTemp valid_mask = newTemp( Ity_I32 );
33304 assign( valid_mask, unop( Iop_1Sto32, unop( Iop_32to1, valid ) ) );
33306 /* set CR field 6 to:
33307 * 0b1000 if vB less than 0, i.e. vB is neg and not zero,
33308 * 0b0100 if vB greater than 0, i.e. vB is pos and not zero,
33309 * 0b0010 if vB equals 0,
33310 * 0b0001 if vB is invalid over rules lt, gt, eq
33312 assign( eq_lt_gt,
33313 binop( Iop_Or32,
33314 binop( Iop_Shl32,
33315 unop( Iop_1Uto32, neg ),
33316 mkU8( 3 ) ),
33317 binop( Iop_Or32,
33318 binop( Iop_Shl32,
33319 unop( Iop_1Uto32, pos ),
33320 mkU8( 2 ) ),
33321 binop( Iop_Shl32,
33322 unop( Iop_1Uto32, zero ),
33323 mkU8( 1 ) ) ) ) );
33324 /* valid is 1 if it is a valid number, complement and put in the
33325 * invalid bit location, overriding lt, eq, gt, overflow.
33327 putGST_field( PPC_GST_CR,
33328 binop( Iop_Or32,
33329 binop( Iop_And32,
33330 mkexpr( valid_mask ),
33331 binop( Iop_Or32,
33332 mkexpr( eq_lt_gt ),
33333 unop( Iop_1Uto32, overflow ) ) ),
33334 binop( Iop_And32,
33335 unop( Iop_Not32, mkexpr( valid_mask ) ),
33336 mkU32( 1 ) ) ),
33337 6 );
33338 return True;
33342 AltiVec Floating Point Arithmetic Instructions
33344 static Bool dis_av_fp_arith ( UInt prefix, UInt theInstr )
33346 /* VA-Form */
33347 UChar opc1 = ifieldOPC(theInstr);
33348 UChar vD_addr = ifieldRegDS(theInstr);
33349 UChar vA_addr = ifieldRegA(theInstr);
33350 UChar vB_addr = ifieldRegB(theInstr);
33351 UChar vC_addr = ifieldRegC(theInstr);
33352 UInt opc2=0;
33354 IRTemp vA = newTemp(Ity_V128);
33355 IRTemp vB = newTemp(Ity_V128);
33356 IRTemp vC = newTemp(Ity_V128);
33358 /* There is no prefixed version of these instructions. */
33359 PREFIX_CHECK
33361 assign( vA, getVReg(vA_addr));
33362 assign( vB, getVReg(vB_addr));
33363 assign( vC, getVReg(vC_addr));
33365 if (opc1 != 0x4) {
33366 vex_printf("dis_av_fp_arith(ppc)(instr)\n");
33367 return False;
33370 IRTemp rm = newTemp(Ity_I32);
33371 assign(rm, get_IR_roundingmode());
33373 opc2 = IFIELD( theInstr, 0, 6 );
33374 switch (opc2) {
33375 case 0x2E: // vmaddfp (Multiply Add FP, AV p177)
33376 DIP("vmaddfp v%d,v%d,v%d,v%d\n",
33377 vD_addr, vA_addr, vC_addr, vB_addr);
33378 putVReg( vD_addr,
33379 dnorm_adj_Vector(
33380 triop( Iop_Add32Fx4,
33381 mkU32( Irrm_NEAREST ),
33382 dnorm_adj_Vector( mkexpr( vB ) ),
33383 dnorm_adj_Vector( triop( Iop_Mul32Fx4,
33384 mkU32( Irrm_NEAREST ),
33385 dnorm_adj_Vector( mkexpr( vA ) ),
33386 dnorm_adj_Vector( mkexpr( vC ) ) )
33387 ) ) ) );
33388 return True;
33390 case 0x2F: { // vnmsubfp (Negative Multiply-Subtract FP, AV p215)
33391 DIP("vnmsubfp v%d,v%d,v%d,v%d\n",
33392 vD_addr, vA_addr, vC_addr, vB_addr);
33393 putVReg( vD_addr,
33394 negate_Vector( Ity_I32,
33395 dnorm_adj_Vector(
33396 triop( Iop_Sub32Fx4,
33397 mkU32( Irrm_NEAREST ),
33398 dnorm_adj_Vector(
33399 triop( Iop_Mul32Fx4,
33400 mkU32( Irrm_NEAREST ),
33401 dnorm_adj_Vector( mkexpr( vA ) ),
33402 dnorm_adj_Vector( mkexpr( vC ) ) ) ),
33403 dnorm_adj_Vector( mkexpr( vB ) ) ) ) ) );
33404 return True;
33407 default:
33408 break; // Fall through...
33411 opc2 = IFIELD( theInstr, 0, 11 );
33412 switch (opc2) {
33413 case 0x00A: // vaddfp (Add FP, AV p137)
33414 DIP("vaddfp v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
33415 putVReg( vD_addr,
33416 dnorm_adj_Vector( triop( Iop_Add32Fx4, mkU32( Irrm_NEAREST ),
33417 dnorm_adj_Vector( mkexpr( vA ) ),
33418 dnorm_adj_Vector( mkexpr( vB ) ) ) ) );
33419 return True;
33421 case 0x04A: // vsubfp (Subtract FP, AV p261)
33422 DIP("vsubfp v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
33423 putVReg( vD_addr,
33424 dnorm_adj_Vector( triop( Iop_Sub32Fx4, mkU32( Irrm_NEAREST ),
33425 dnorm_adj_Vector( mkexpr( vA ) ),
33426 dnorm_adj_Vector( mkexpr( vB ) ) ) ) );
33427 return True;
33429 case 0x40A: // vmaxfp (Maximum FP, AV p178)
33430 DIP("vmaxfp v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
33431 putVReg( vD_addr,
33432 dnorm_adj_Vector( binop( Iop_Max32Fx4,
33433 mkexpr( vA ), mkexpr( vB ) ) ) );
33434 return True;
33436 case 0x44A: // vminfp (Minimum FP, AV p187)
33437 DIP("vminfp v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
33438 putVReg( vD_addr,
33439 dnorm_adj_Vector( binop( Iop_Min32Fx4,
33440 mkexpr( vA ), mkexpr( vB ) ) ) );
33441 return True;
33443 default:
33444 break; // Fall through...
33448 if (vA_addr != 0) {
33449 vex_printf("dis_av_fp_arith(ppc)(vA_addr)\n");
33450 return False;
33453 switch (opc2) {
33454 case 0x10A: // vrefp (Reciprocal Esimate FP, AV p228)
33455 DIP("vrefp v%d,v%d\n", vD_addr, vB_addr);
33456 putVReg( vD_addr, dnorm_adj_Vector( unop( Iop_RecipEst32Fx4,
33457 dnorm_adj_Vector( mkexpr( vB ) ) ) ) );
33458 return True;
33460 case 0x14A: // vrsqrtefp (Reciprocal Sqrt Estimate FP, AV p237)
33461 DIP("vrsqrtefp v%d,v%d\n", vD_addr, vB_addr);
33462 putVReg( vD_addr, dnorm_adj_Vector( unop( Iop_RSqrtEst32Fx4,
33463 dnorm_adj_Vector( mkexpr( vB ) ) ) ) );
33464 return True;
33466 case 0x18A: // vexptefp (2 Raised to the Exp Est FP, AV p173)
33467 DIP("vexptefp v%d,v%d\n", vD_addr, vB_addr);
33468 /* NOTE, need to address dnormalized value handling when this is
33469 implemented. */
33470 putVReg( vD_addr,
33471 dnorm_adj_Vector( unop( Iop_Exp2_32Fx4,
33472 dnorm_adj_Vector( mkexpr( vB ) ) ) ) );
33473 return True;
33475 case 0x1CA: // vlogefp (Log2 Estimate FP, AV p175)
33476 DIP("vlogefp v%d,v%d\n", vD_addr, vB_addr);
33477 /* NOTE, need to address dnormalized value handling when this is
33478 implemented. */
33479 putVReg( vD_addr,
33480 dnorm_adj_Vector( unop( Iop_Log2_32Fx4,
33481 dnorm_adj_Vector( mkexpr( vB ) ) ) ) );
33482 return True;
33484 default:
33485 vex_printf("dis_av_fp_arith(ppc)(opc2=0x%x)\n",opc2);
33486 return False;
33488 return True;
33492 AltiVec Floating Point Compare Instructions
33494 static Bool dis_av_fp_cmp ( UInt prefix, UInt theInstr )
33496 /* VXR-Form */
33497 UChar opc1 = ifieldOPC(theInstr);
33498 UChar vD_addr = ifieldRegDS(theInstr);
33499 UChar vA_addr = ifieldRegA(theInstr);
33500 UChar vB_addr = ifieldRegB(theInstr);
33501 UChar flag_rC = ifieldBIT10(theInstr);
33502 UInt opc2 = IFIELD( theInstr, 0, 10 );
33504 Bool cmp_bounds = False;
33506 IRTemp vA = newTemp(Ity_V128);
33507 IRTemp vB = newTemp(Ity_V128);
33508 IRTemp vD = newTemp(Ity_V128);
33510 /* There is no prefixed version of these instructions. */
33511 PREFIX_CHECK
33513 assign( vA, getVReg(vA_addr));
33514 assign( vB, getVReg(vB_addr));
33516 if (opc1 != 0x4) {
33517 vex_printf("dis_av_fp_cmp(ppc)(instr)\n");
33518 return False;
33521 switch (opc2) {
33522 case 0x0C6: // vcmpeqfp (Compare Equal-to FP, AV p159)
33523 DIP("vcmpeqfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
33524 vD_addr, vA_addr, vB_addr);
33525 assign( vD, binop( Iop_CmpEQ32Fx4,
33526 dnorm_adj_Vector( mkexpr( vA ) ),
33527 dnorm_adj_Vector( mkexpr( vB ) ) ) );
33528 break;
33530 case 0x1C6: // vcmpgefp (Compare Greater-than-or-Equal-to, AV p163)
33531 DIP("vcmpgefp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
33532 vD_addr, vA_addr, vB_addr);
33533 assign( vD, binop( Iop_CmpGE32Fx4,
33534 dnorm_adj_Vector( mkexpr( vA ) ),
33535 dnorm_adj_Vector( mkexpr( vB ) ) ) );
33536 break;
33538 case 0x2C6: // vcmpgtfp (Compare Greater-than FP, AV p164)
33539 DIP("vcmpgtfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
33540 vD_addr, vA_addr, vB_addr);
33541 assign( vD, binop( Iop_CmpGT32Fx4,
33542 dnorm_adj_Vector( mkexpr( vA ) ),
33543 dnorm_adj_Vector( mkexpr( vB ) ) ) );
33544 break;
33546 case 0x3C6: { // vcmpbfp (Compare Bounds FP, AV p157)
33547 IRTemp gt = newTemp(Ity_V128);
33548 IRTemp lt = newTemp(Ity_V128);
33549 IRTemp zeros = newTemp(Ity_V128);
33550 IRTemp srcA = newTemp(Ity_V128);
33551 IRTemp srcB = newTemp(Ity_V128);
33553 DIP("vcmpbfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
33554 vD_addr, vA_addr, vB_addr);
33555 cmp_bounds = True;
33556 assign( zeros, unop(Iop_Dup32x4, mkU32(0)) );
33558 /* Note: making use of fact that the ppc backend for compare insns
33559 return zero'd lanes if either of the corresponding arg lanes is
33560 a nan.
33562 Perhaps better to have an irop Iop_isNan32Fx4, but then we'd
33563 need this for the other compares too (vcmpeqfp etc)...
33564 Better still, tighten down the spec for compare irops.
33566 assign ( srcA, dnorm_adj_Vector( mkexpr( vA ) ) );
33567 assign ( srcB, dnorm_adj_Vector( mkexpr( vB ) ) );
33569 assign( gt, unop( Iop_NotV128,
33570 binop( Iop_CmpLE32Fx4, mkexpr( srcA ),
33571 mkexpr( srcB ) ) ) );
33572 assign( lt, unop( Iop_NotV128,
33573 binop( Iop_CmpGE32Fx4, mkexpr( srcA ),
33574 triop( Iop_Sub32Fx4, mkU32( Irrm_NEAREST ),
33575 mkexpr( zeros ),
33576 mkexpr( srcB ) ) ) ) );
33578 // finally, just shift gt,lt to correct position
33579 assign( vD, binop(Iop_ShlN32x4,
33580 binop(Iop_OrV128,
33581 binop(Iop_AndV128, mkexpr(gt),
33582 unop(Iop_Dup32x4, mkU32(0x2))),
33583 binop(Iop_AndV128, mkexpr(lt),
33584 unop(Iop_Dup32x4, mkU32(0x1)))),
33585 mkU8(30)) );
33586 break;
33589 default:
33590 vex_printf("dis_av_fp_cmp(ppc)(opc2)\n");
33591 return False;
33594 putVReg( vD_addr, mkexpr(vD) );
33596 if (flag_rC) {
33597 set_AV_CR6( mkexpr(vD), !cmp_bounds );
33599 return True;
33603 AltiVec Floating Point Convert/Round Instructions
33605 static Bool dis_av_fp_convert ( UInt prefix, UInt theInstr )
33607 /* VX-Form */
33608 UChar opc1 = ifieldOPC(theInstr);
33609 UChar vD_addr = ifieldRegDS(theInstr);
33610 UChar UIMM_5 = ifieldRegA(theInstr);
33611 UChar vB_addr = ifieldRegB(theInstr);
33612 UInt opc2 = IFIELD( theInstr, 0, 11 );
33614 IRTemp vB = newTemp(Ity_V128);
33615 IRTemp vScale = newTemp(Ity_V128);
33616 IRTemp vInvScale = newTemp(Ity_V128);
33618 float scale, inv_scale;
33620 /* There is no prefixed version of these instructions. */
33621 PREFIX_CHECK
33623 assign( vB, getVReg(vB_addr));
33625 /* scale = 2^UIMM, cast to float, reinterpreted as uint */
33626 scale = (float)( (unsigned int) 1<<UIMM_5 );
33627 assign( vScale, unop(Iop_Dup32x4, mkU32( float_to_bits(scale) )) );
33628 inv_scale = 1/scale;
33629 assign( vInvScale,
33630 unop(Iop_Dup32x4, mkU32( float_to_bits(inv_scale) )) );
33632 if (opc1 != 0x4) {
33633 vex_printf("dis_av_fp_convert(ppc)(instr)\n");
33634 return False;
33637 switch (opc2) {
33638 case 0x30A: // vcfux (Convert from Unsigned Fixed-Point W, AV p156)
33639 DIP("vcfux v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
33640 putVReg( vD_addr, triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
33641 unop(Iop_I32UtoF32x4_DEP, mkexpr(vB)),
33642 mkexpr(vInvScale)) );
33643 return True;
33645 case 0x34A: // vcfsx (Convert from Signed Fixed-Point W, AV p155)
33646 DIP("vcfsx v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
33648 putVReg( vD_addr, triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
33649 unop(Iop_I32StoF32x4_DEP, mkexpr(vB)),
33650 mkexpr(vInvScale)) );
33651 return True;
33653 case 0x38A: // vctuxs (Convert to Unsigned Fixed-Point W Saturate, AV p172)
33654 DIP("vctuxs v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
33655 putVReg( vD_addr,
33656 unop(Iop_QF32toI32Ux4_RZ,
33657 triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
33658 mkexpr(vB), mkexpr(vScale))) );
33659 return True;
33661 case 0x3CA: // vctsxs (Convert to Signed Fixed-Point W Saturate, AV p171)
33662 DIP("vctsxs v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
33663 putVReg( vD_addr,
33664 unop(Iop_QF32toI32Sx4_RZ,
33665 triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
33666 mkexpr(vB), mkexpr(vScale))) );
33667 return True;
33669 default:
33670 break; // Fall through...
33673 if (UIMM_5 != 0) {
33674 vex_printf("dis_av_fp_convert(ppc)(UIMM_5)\n");
33675 return False;
33678 switch (opc2) {
33679 case 0x20A: // vrfin (Round to FP Integer Nearest, AV p231)
33680 DIP("vrfin v%d,v%d\n", vD_addr, vB_addr);
33681 putVReg( vD_addr, unop(Iop_RoundF32x4_RN,
33682 dnorm_adj_Vector( mkexpr( vB ) ) ) );
33683 break;
33685 case 0x24A: // vrfiz (Round to FP Integer toward zero, AV p233)
33686 DIP("vrfiz v%d,v%d\n", vD_addr, vB_addr);
33687 putVReg( vD_addr, unop(Iop_RoundF32x4_RZ,
33688 dnorm_adj_Vector( mkexpr( vB ) ) ) );
33689 break;
33691 case 0x28A: // vrfip (Round to FP Integer toward +inf, AV p232)
33692 DIP("vrfip v%d,v%d\n", vD_addr, vB_addr);
33693 putVReg( vD_addr, unop(Iop_RoundF32x4_RP,
33694 dnorm_adj_Vector( mkexpr( vB ) ) ) );
33695 break;
33697 case 0x2CA: // vrfim (Round to FP Integer toward -inf, AV p230)
33698 DIP("vrfim v%d,v%d\n", vD_addr, vB_addr);
33699 putVReg( vD_addr, unop(Iop_RoundF32x4_RM,
33700 dnorm_adj_Vector( mkexpr(vB ) ) ) );
33701 break;
33703 default:
33704 vex_printf("dis_av_fp_convert(ppc)(opc2)\n");
33705 return False;
33707 return True;
33710 static Bool dis_transactional_memory ( UInt prefix, UInt theInstr, UInt nextInstr,
33711 const VexAbiInfo* vbi,
33712 /*OUT*/DisResult* dres )
33714 UInt opc2 = IFIELD( theInstr, 1, 10 );
33716 /* There is no prefixed version of these instructions. */
33717 PREFIX_CHECK
33719 switch (opc2) {
33720 case 0x28E: { //tbegin.
33721 /* The current implementation is to just fail the tbegin and execute
33722 * the failure path. The failure path is assumed to be functionaly
33723 * equivalent to the transactional path with the needed data locking
33724 * to ensure correctness. The tend is just a noop and shouldn't
33725 * actually get executed.
33726 * 1) set cr0 to 0x2
33727 * 2) Initialize TFHAR to CIA+4
33728 * 3) Initialize TEXASR
33729 * 4) Initialize TFIAR (probably to CIA, ie, the address of tbegin.)
33730 * 5) Continue executing at the next instruction.
33732 UInt R = IFIELD( theInstr, 21, 1 );
33734 ULong tm_reason;
33735 UInt failure_code = 0; /* Forcing failure, will not be due to tabort
33736 * or treclaim.
33738 UInt persistent = 1; /* set persistent since we are always failing
33739 * the tbegin.
33741 UInt nest_overflow = 1; /* Alowed nesting depth overflow, we use this
33742 as the reason for failing the trasaction */
33743 UInt tm_exact = 1; /* have exact address for failure */
33745 DIP("tbegin. %u\n", R);
33747 /* Set the CR0 field to indicate the tbegin failed. Then let
33748 * the code do the branch to the failure path.
33750 * 000 || 0 Transaction initiation successful,
33751 * unnested (Transaction state of
33752 * Non-transactional prior to tbegin.)
33753 * 010 || 0 Transaction initiation successful, nested
33754 * (Transaction state of Transactional
33755 * prior to tbegin.)
33756 * 001 || 0 Transaction initiation unsuccessful,
33757 * (Transaction state of Suspended prior
33758 * to tbegin.)
33760 putCR321( 0, mkU8( 0x2 ) );
33762 tm_reason = generate_TMreason( failure_code, persistent,
33763 nest_overflow, tm_exact );
33765 storeTMfailure( guest_CIA_curr_instr, tm_reason,
33766 guest_CIA_curr_instr+4 );
33768 return True;
33770 break;
33773 case 0x2AE: { //tend.
33774 /* The tend. is just a noop. Do nothing */
33775 UInt A = IFIELD( theInstr, 25, 1 );
33777 DIP("tend. %u\n", A);
33778 break;
33781 case 0x2EE: { //tsr.
33782 /* The tsr. is just a noop. Do nothing */
33783 UInt L = IFIELD( theInstr, 21, 1 );
33785 DIP("tsr. %u\n", L);
33786 break;
33789 case 0x2CE: { //tcheck.
33790 /* The tcheck. is just a noop. Do nothing */
33791 UInt BF = IFIELD( theInstr, 25, 1 );
33793 DIP("tcheck. %u\n", BF);
33794 break;
33797 case 0x30E: { //tbortwc.
33798 /* The tabortwc. is just a noop. Do nothing */
33799 UInt TO = IFIELD( theInstr, 25, 1 );
33800 UInt RA = IFIELD( theInstr, 16, 5 );
33801 UInt RB = IFIELD( theInstr, 11, 5 );
33803 DIP("tabortwc. %u,%u,%u\n", TO, RA, RB);
33804 break;
33807 case 0x32E: { //tbortdc.
33808 /* The tabortdc. is just a noop. Do nothing */
33809 UInt TO = IFIELD( theInstr, 25, 1 );
33810 UInt RA = IFIELD( theInstr, 16, 5 );
33811 UInt RB = IFIELD( theInstr, 11, 5 );
33813 DIP("tabortdc. %u,%u,%u\n", TO, RA, RB);
33814 break;
33817 case 0x34E: { //tbortwci.
33818 /* The tabortwci. is just a noop. Do nothing */
33819 UInt TO = IFIELD( theInstr, 25, 1 );
33820 UInt RA = IFIELD( theInstr, 16, 5 );
33821 UInt SI = IFIELD( theInstr, 11, 5 );
33823 DIP("tabortwci. %u,%u,%u\n", TO, RA, SI);
33824 break;
33827 case 0x36E: { //tbortdci.
33828 /* The tabortdci. is just a noop. Do nothing */
33829 UInt TO = IFIELD( theInstr, 25, 1 );
33830 UInt RA = IFIELD( theInstr, 16, 5 );
33831 UInt SI = IFIELD( theInstr, 11, 5 );
33833 DIP("tabortdci. %u,%u,%u\n", TO, RA, SI);
33834 break;
33837 case 0x38E: { //tbort.
33838 /* The tabort. is just a noop. Do nothing */
33839 UInt RA = IFIELD( theInstr, 16, 5 );
33841 DIP("tabort. %u\n", RA);
33842 break;
33845 case 0x3AE: { //treclaim.
33846 /* The treclaim. is just a noop. Do nothing */
33847 UInt RA = IFIELD( theInstr, 16, 5 );
33849 DIP("treclaim. %u\n", RA);
33850 break;
33853 case 0x3EE: { //trechkpt.
33854 /* The trechkpt. is just a noop. Do nothing */
33855 DIP("trechkpt.\n");
33856 break;
33859 default:
33860 vex_printf("dis_transactional_memory(ppc): unrecognized instruction\n");
33861 return False;
33864 return True;
33868 /* The 0x3C primary opcode (VSX category) uses several different forms of
33869 * extended opcodes:
33870 * o XX2-form:
33871 * - [10:2] (IBM notation [21:29])
33872 * o XX3-form variants:
33873 * - variant 1: [10:3] (IBM notation [21:28])
33874 * - variant 2: [9:3] (IBM notation [22:28])
33875 * - variant 3: [7:3] (IBM notation [24:28])
33876 * o XX-4 form:
33877 * - [10:6] (IBM notation [21:25])
33879 * The XX2-form needs bit 0 masked from the standard extended opcode
33880 * as returned by ifieldOPClo10; the XX3-form needs bits 0 and 1 masked;
33881 * and the XX4-form needs bits 0, 1, and 2 masked. Additionally, the
33882 * XX4 and XX3 (variants 2 and 3) forms need certain bits masked on the
33883 * front end since their encoding does not begin at bit 21 like the standard
33884 * format.
33886 * The get_VSX60_opc2() function uses the vsx_insn array below to obtain the
33887 * secondary opcode for such VSX instructions.
33892 struct vsx_insn {
33893 UInt opcode;
33894 const HChar * name;
33897 // ATTENTION: Keep this array sorted on the opcocde!!!
33898 static struct vsx_insn vsx_xx2[] = {
33899 { 0x14, "xsrsqrtesp" },
33900 { 0x16, "xssqrtsp" },
33901 { 0x18, "xxsel" },
33902 { 0x34, "xsresp" },
33903 { 0x90, "xscvdpuxws" },
33904 { 0x92, "xsrdpi" },
33905 { 0x94, "xsrsqrtedp" },
33906 { 0x96, "xssqrtdp" },
33907 { 0xb0, "xscvdpsxws" },
33908 { 0xb2, "xsrdpiz" },
33909 { 0xb4, "xsredp" },
33910 { 0xd2, "xsrdpip" },
33911 { 0xd4, "xstsqrtdp" },
33912 { 0xd6, "xsrdpic" },
33913 { 0xf2, "xsrdpim" },
33914 { 0x112, "xvrspi" },
33915 { 0x116, "xvsqrtsp" },
33916 { 0x130, "xvcvspsxws" },
33917 { 0x132, "xvrspiz" },
33918 { 0x134, "xvresp" },
33919 { 0x148, "xxspltw" },
33920 { 0x14A, "xxextractuw" },
33921 { 0x150, "xvcvuxwsp" },
33922 { 0x152, "xvrspip" },
33923 { 0x154, "xvtsqrtsp" },
33924 { 0x156, "xvrspic" },
33925 { 0x16A, "xxinsertw" },
33926 { 0x170, "xvcvsxwsp" },
33927 { 0x172, "xvrspim" },
33928 { 0x190, "xvcvdpuxws" },
33929 { 0x192, "xvrdpi" },
33930 { 0x194, "xvrsqrtedp" },
33931 { 0x196, "xvsqrtdp" },
33932 { 0x1b0, "xvcvdpsxws" },
33933 { 0x1b2, "xvrdpiz" },
33934 { 0x1b4, "xvredp" },
33935 { 0x1d0, "xvcvuxwdp" },
33936 { 0x1d2, "xvrdpip" },
33937 { 0x1d4, "xvtsqrtdp" },
33938 { 0x1d6, "xvrdpic" },
33939 { 0x1f0, "xvcvsxwdp" },
33940 { 0x1f2, "xvrdpim" },
33941 { 0x212, "xscvdpsp" },
33942 { 0x216, "xscvdpspn" },
33943 { 0x232, "xxrsp" },
33944 { 0x250, "xscvuxdsp" },
33945 { 0x254, "xststdcsp" },
33946 { 0x270, "xscvsxdsp" },
33947 { 0x290, "xscvdpuxds" },
33948 { 0x292, "xscvspdp" },
33949 { 0x296, "xscvspdpn" },
33950 { 0x2b0, "xscvdpsxds" },
33951 { 0x2b2, "xsabsdp" },
33952 { 0x2b6, "xsxexpdp_xsxigdp" },
33953 { 0x2d0, "xscvuxddp" },
33954 { 0x2d2, "xsnabsdp" },
33955 { 0x2d4, "xststdcdp" },
33956 { 0x2e4, "xsnmsubmdp" },
33957 { 0x2f0, "xscvsxddp" },
33958 { 0x2f2, "xsnegdp" },
33959 { 0x310, "xvcvspuxds" },
33960 { 0x312, "xvcvdpsp" },
33961 { 0x330, "xvcvspsxds" },
33962 { 0x332, "xvabssp" },
33963 { 0x350, "xvcvuxdsp" },
33964 { 0x352, "xvnabssp" },
33965 { 0x370, "xvcvsxdsp" },
33966 { 0x372, "xvnegsp" },
33967 { 0x390, "xvcvdpuxds" },
33968 { 0x392, "xvcvspdp" },
33969 { 0x3b0, "xvcvdpsxds" },
33970 { 0x3b2, "xvabsdp" },
33971 { 0x3b6, "xxbr[h|w|d|q]|xvxexpdp|xvxexpsp|xvxsigdp|xvxsigsp|xvcvhpsp|xvcvsphp|xscvdphp|xscvhpdp|xvcvbf16spn|xvcvspbf16" },
33972 { 0x3d0, "xvcvuxddp" },
33973 { 0x3d2, "xvnabsdp" },
33974 { 0x3f2, "xvnegdp" }
33976 #define VSX_XX2_LEN (sizeof vsx_xx2 / sizeof *vsx_xx2)
// ATTENTION: Keep this array sorted on the opcode!!!
//
// XX3-form VSX extended opcodes mapped to their mnemonics.
// Binary-searched by findVSXextOpCode_xx3(), which is why the sort
// order matters.
static struct vsx_insn vsx_xx3[] = {
      { 0x0, "xsaddsp" },
      { 0x4, "xsmaddasp" },
      { 0x9, "xsmaddmsp" },
      { 0xC, "xscmpeqdp" },
      { 0x20, "xssubsp" },
      { 0x24, "xsmaddmsp" },
      { 0x2C, "xscmpgtdp" },
      { 0x3A, "xxpermr" },
      { 0x40, "xsmulsp" },
      { 0x44, "xsmsubasp" },
      { 0x48, "xxmrghw" },
      { 0x4C, "xscmpgedp" },
      { 0x60, "xsdivsp" },
      { 0x64, "xsmsubmsp" },
      { 0x68, "xxperm" },
      { 0x80, "xsadddp" },
      { 0x84, "xsmaddadp" },
      { 0x8c, "xscmpudp" },
      { 0xa0, "xssubdp" },
      { 0xa4, "xsmaddmdp" },
      { 0xac, "xscmpodp" },
      { 0xc0, "xsmuldp" },
      { 0xc4, "xsmsubadp" },
      { 0xc8, "xxmrglw" },
      { 0xd4, "xstsqrtdp" },
      { 0xe0, "xsdivdp" },
      { 0xe4, "xsmsubmdp" },
      { 0xe8, "xxpermr" },
      { 0xeC, "xscmpexpdp" },
      { 0xf4, "xstdivdp" },
      { 0x100, "xvaddsp" },
      { 0x104, "xvmaddasp" },
      { 0x10C, "xvcmpeqsp" },
      { 0x110, "xvcvspuxws" },
      { 0x114, "xvrsqrtesp" },
      { 0x120, "xvsubsp" },
      { 0x124, "xvmaddmsp" },
      { 0x130, "xvcvspsxws" },
      { 0x140, "xvmulsp" },
      { 0x144, "xvmsubasp" },
      { 0x14C, "xvcmpgesp", },
      { 0x160, "xvdivsp" },
      { 0x164, "xvmsubmsp" },
      { 0x174, "xvtdivsp" },
      { 0x180, "xvadddp" },
      { 0x184, "xvmaddadp" },
      { 0x18C, "xvcmpeqdp" },
      { 0x1a0, "xvsubdp" },
      { 0x1a4, "xvmaddmdp" },
      { 0x1aC, "xvcmpgtdp" },
      { 0x1c0, "xvmuldp" },
      { 0x1c4, "xvmsubadp" },
      { 0x1cc, "xvcmpgedp" },
      { 0x1e0, "xvdivdp" },
      { 0x1e4, "xvmsubmdp" },
      { 0x1f4, "xvtdivdp" },
      { 0x200, "xsmaxcdp" },
      { 0x204, "xsnmaddasp" },
      { 0x208, "xxland" },
      { 0x220, "xsmincdp" },
      { 0x224, "xsnmaddmsp" },
      { 0x228, "xxlandc" },
      { 0x244, "xsnmsubasp" },
      { 0x248, "xxlor" },
      { 0x264, "xsnmsubmsp" },
      { 0x268, "xxlxor" },
      { 0x280, "xsmaxdp" },
      { 0x284, "xsnmaddadp" },
      { 0x288, "xxlnor" },
      { 0x2a0, "xsmindp" },
      { 0x2a4, "xsnmaddmdp" },
      { 0x2a8, "xxlorc" },
      { 0x2c0, "xscpsgndp" },
      { 0x2c4, "xsnmsubadp" },
      { 0x2c8, "xxlnand" },
      { 0x2e4, "xsnmsubmdp" },
      { 0x2e8, "xxleqv" },
      { 0x300, "xvmaxsp" },
      { 0x304, "xvnmaddasp" },
      { 0x320, "xvminsp" },
      { 0x324, "xvnmaddmsp" },
      { 0x340, "xvcpsgnsp" },
      { 0x344, "xvnmsubasp" },
      { 0x360, "xviexpsp" },
      { 0x364, "xvnmsubmsp" },
      { 0x380, "xvmaxdp" },
      { 0x384, "xvnmaddadp" },
      { 0x3a0, "xvmindp" },
      { 0x3a4, "xvnmaddmdp" },
      { 0x3c0, "xvcpsgndp" },
      { 0x3c4, "xvnmsubadp" },
      { 0x3e0, "xviexpdp" },
      { 0x3e4, "xvnmsubmdp" },
      { 0x3f0, "xvcvsxddp" },
};
#define VSX_XX3_LEN (sizeof vsx_xx3 / sizeof *vsx_xx3)
34078 /* ATTENTION: These search functions assumes vsx_xx2 and vsx_xx3 arrays
34079 * are sorted.
34081 static Int findVSXextOpCode_xx2(UInt opcode)
34083 Int low, mid, high;
34084 low = 0;
34085 high = VSX_XX2_LEN - 1;
34086 while (low <= high) {
34087 mid = (low + high)/2;
34088 if (opcode < vsx_xx2[mid].opcode)
34089 high = mid - 1;
34090 else if (opcode > vsx_xx2[mid].opcode)
34091 low = mid + 1;
34092 else
34093 return mid;
34095 return -1;
34098 static Int findVSXextOpCode_xx3(UInt opcode)
34100 Int low, mid, high;
34101 low = 0;
34102 high = VSX_XX3_LEN - 1;
34103 while (low <= high) {
34104 mid = (low + high)/2;
34105 if (opcode < vsx_xx3[mid].opcode)
34106 high = mid - 1;
34107 else if (opcode > vsx_xx3[mid].opcode)
34108 low = mid + 1;
34109 else
34110 return mid;
34112 return -1;
/* The full 10-bit extended opcode retrieved via ifieldOPClo10 is
 * passed, and we then try to match it up with one of the VSX forms
 * below.
 */
static UInt get_VSX60_opc2(UInt opc2_full, UInt theInstr)
{
   /* Masks select the opcode bits significant for each VSX form; bits
      outside the mask carry operand/register information. */
#define XX2_1_MASK 0x000003FF  // xsiexpdp specific
#define XX2_2_MASK 0x000003FE
#define XX3_1_MASK 0x000003FC
#define XX3_2_MASK 0x000001FC
#define XX3_4_MASK 0x0000027C
#define XX3_5_MASK 0x000003DC
#define XX4_MASK   0x00000018

   Int  ret;
   UInt vsxExtOpcode = 0;

   /* Try the sorted tables first: XX2 forms, then the xsiexpdp special
      case, then XX3 forms. */
   if (( ret = findVSXextOpCode_xx2(opc2_full & XX2_2_MASK)) >= 0)
      return vsx_xx2[ret].opcode;
   else if ((opc2_full & XX2_1_MASK) == 0x396 )  // xsiexpdp
      return 0x396;
   else if (( ret = findVSXextOpCode_xx3(opc2_full & XX3_1_MASK)) >= 0)
      return vsx_xx3[ret].opcode;
   else {

      /* There are only a few codes in each of these cases, so it is
       * probably faster to check for the codes than to do the array
       * lookups.
       */
      vsxExtOpcode = opc2_full & XX3_2_MASK;

      switch (vsxExtOpcode) {
      case 0x10C: return vsxExtOpcode;   // xvcmpeqsp
      case 0x12C: return vsxExtOpcode;   // xvcmpgtsp, xvcmpgtsp.
      case 0x14C: return vsxExtOpcode;   // xvcmpgesp, xvcmpgesp.
      case 0x18C: return vsxExtOpcode;   // xvcmpeqdp, xvcmpeqdp.
      case 0x1AC: return vsxExtOpcode;   // xvcmpgtdp, xvcmpgtdp.
      case 0x1CC: return vsxExtOpcode;   // xvcmpgedp, xvcmpgedp.
      default: break;
      }

      vsxExtOpcode = opc2_full & XX3_4_MASK;

      switch (vsxExtOpcode) {
      case 0x8:  return vsxExtOpcode;    // xxsldwi
      case 0x28: return vsxExtOpcode;    // xxpermdi
      default: break;
      }

      vsxExtOpcode = opc2_full & XX3_5_MASK;

      switch (vsxExtOpcode) {
      case 0x354: return vsxExtOpcode;   // xvtstdcsp
      case 0x3D4: return vsxExtOpcode;   // xvtstdcdp
      default: break;
      }

      if (( opc2_full & XX4_MASK ) == XX4_MASK ) {   // xxsel
         vsxExtOpcode = 0x18;
         return vsxExtOpcode;
      }
   }

   /* Nothing matched: this is a decoder bug or an undefined opcode. */
   vex_printf( "Error: undefined opcode 0x %x, the instruction = 0x %x\n",
               opc2_full, theInstr );
   vpanic( "ERROR: get_VSX60_opc2()\n" );
   return 0;
}
/* Decode the VA-form "Vector Extract Double ... to VSR using GPR-specified
   index" instructions (vextdubvlx/vextdubvrx, vextduhvlx/vextduhvrx,
   vextduwvlx/vextduwvrx, vextddvlx/vextddvrx).  Each one extracts a
   byte/halfword/word/doubleword from the 32-byte concatenation vrA|vrB at
   a byte index taken from GPR rC, and places it (zero-extended) in the
   upper 64 bits of the target VSR.  Returns False if opc2 is not one of
   the handled sub-opcodes. */
static Bool dis_vec_extract_insert ( UInt prefix, UInt theInstr )
{
   /* VA-Form */
   UChar VRT = ifieldRegDS(theInstr);
   UChar VRA = ifieldRegA(theInstr);
   UChar VRB = ifieldRegB(theInstr);
   UChar rC_addr = ifieldRegC(theInstr);
   UChar opc2 = toUChar( IFIELD( theInstr, 0, 6 ) );
   UChar vT_addr = VRT + 32;   // VR n lives in VSR n+32
   UChar vA_addr = VRA + 32;
   UChar vB_addr = VRB + 32;

   IRTemp vA = newTemp(Ity_V128);
   IRTemp vB = newTemp(Ity_V128);
   IRTemp rC = newTemp(Ity_I64);
   IRTemp res_tmp = newTemp(Ity_I64);
   IRTemp byte_index = newTemp(Ity_I64);
   IRTemp index0 = newTemp(Ity_I64);

   UInt index_mask = 0x1F;
   UInt max_index_in_src = 31; /* src is vrA | vrB which is 32-bytes */

   assign( vA, getVSReg( vA_addr ) );
   assign( vB, getVSReg( vB_addr ) );
   assign( rC, getIReg( rC_addr ) );

   /* Get index of the element to extract: low 5 bits of GPR rC. */
   assign( byte_index, binop( Iop_And64,
                              getIReg(rC_addr),
                              mkU64( index_mask ) ) );

   /* NOTE(review): case 0x18 prints the architected VR numbers
      (VRT/VRA/VRB) while the remaining cases print the +32 VSR numbers
      (vT_addr/vA_addr/vB_addr).  These should presumably agree — confirm
      which form the disassembly output is meant to use. */
   switch (opc2) {

   case 0x18:
      // vextdubvlx, Vector Extract Double Unsigned Byte Left-indexed
      DIP("vextdubvlx v%u,v%u,v%u,%u\n", VRT, VRA, VRB, rC_addr);

      /* extractBytefromV256() assumes Right-index ordering */
      assign( index0,
              binop( Iop_Sub64,
                     mkU64( max_index_in_src ), mkexpr( byte_index ) ) );
      assign( res_tmp, extractBytefromV256( vA, vB, index0 ) );
      break;

   case 0x19:
      // vextdubvrx, Vector Extract Double Unsigned Byte Right-indexed
      DIP("vextdubvrx v%u,v%u,v%u,%u\n", vT_addr, vA_addr, vB_addr, rC_addr);

      assign( res_tmp, extractBytefromV256( vA, vB, byte_index ) );
      break;

   case 0x1A:
      {
         IRTemp index1 = newTemp(Ity_I64);

         // vextduhvlx, Vector Extract Double Unsigned Half-word Left-indexed
         DIP("vextduhvlx v%u,v%u,v%u,%u\n",
             vT_addr, vA_addr, vB_addr, rC_addr);

         /* extractBytefromV256() assumes Right-index ordering */
         assign( index0,
                 binop( Iop_Sub64,
                        mkU64( max_index_in_src ), mkexpr( byte_index ) ) );
         assign( index1, binop( Iop_Sub64, mkexpr( index0 ), mkU64( 1 ) ) );
         /* Assemble the two extracted bytes into a halfword. */
         assign( res_tmp,
                 binop( Iop_Or64,
                        extractBytefromV256( vA, vB, index1 ),
                        binop( Iop_Shl64,
                               extractBytefromV256( vA, vB, index0 ),
                               mkU8( 8 ) ) ) );
      }
      break;

   case 0x1B:
      {
         IRTemp index1 = newTemp(Ity_I64);

         // vextduhvrx, Vector Extract Double Unsigned Half-word Right-indexed
         DIP("vextduhvrx v%u,v%u,v%u,%u\n",
             vT_addr, vA_addr, vB_addr, rC_addr);

         assign( index0, mkexpr( byte_index ) );
         assign( index1, binop( Iop_Add64, mkU64( 1 ), mkexpr( index0 ) ) );
         /* Assemble the two extracted bytes into a halfword. */
         assign( res_tmp,
                 binop( Iop_Or64,
                        extractBytefromV256( vA, vB, index0 ),
                        binop( Iop_Shl64,
                               extractBytefromV256( vA, vB, index1 ),
                               mkU8( 8 ) ) ) );
      }
      break;

   case 0x1C:
      {
         IRTemp index1 = newTemp(Ity_I64);
         IRTemp index2 = newTemp(Ity_I64);
         IRTemp index3 = newTemp(Ity_I64);

         // vextduwvlx, Vector Extract Double Unsigned Word Left-indexed
         DIP("vextduwvlx v%u,v%u,v%u,%u\n",
             vT_addr, vA_addr, vB_addr, rC_addr);

         /* extractBytefromV256() assumes Right-index ordering */
         assign( index0,
                 binop( Iop_Sub64,
                        mkU64( max_index_in_src ), mkexpr( byte_index ) ) );
         assign( index1, binop( Iop_Sub64, mkexpr( index0 ), mkU64( 1 ) ) );
         assign( index2, binop( Iop_Sub64, mkexpr( index1 ), mkU64( 1 ) ) );
         assign( index3, binop( Iop_Sub64, mkexpr( index2 ), mkU64( 1 ) ) );
         /* Assemble the four extracted bytes into a word. */
         assign( res_tmp,
                 binop( Iop_Or64,
                        binop( Iop_Or64,
                               extractBytefromV256( vA, vB, index3 ),
                               binop( Iop_Shl64,
                                      extractBytefromV256( vA, vB, index2 ),
                                      mkU8( 8 ) ) ),
                        binop( Iop_Or64,
                               binop( Iop_Shl64,
                                      extractBytefromV256( vA, vB, index1 ),
                                      mkU8( 16 ) ),
                               binop( Iop_Shl64,
                                      extractBytefromV256( vA, vB, index0 ),
                                      mkU8( 24 ) ) ) ) );
      }
      break;

   case 0x1D:
      {
         IRTemp index1 = newTemp(Ity_I64);
         IRTemp index2 = newTemp(Ity_I64);
         IRTemp index3 = newTemp(Ity_I64);

         // vextduwvrx, Vector Extract Double Unsigned Word Right-indexed
         DIP("vextduwvrx v%u,v%u,v%u,%u\n",
             vT_addr, vA_addr, vB_addr, rC_addr);

         assign( index0, mkexpr( byte_index ) );
         assign( index1, binop( Iop_Add64, mkexpr( index0 ), mkU64( 1 ) ) );
         assign( index2, binop( Iop_Add64, mkexpr( index1 ), mkU64( 1 ) ) );
         assign( index3, binop( Iop_Add64, mkexpr( index2 ), mkU64( 1 ) ) );
         /* Assemble the four extracted bytes into a word. */
         assign( res_tmp,
                 binop( Iop_Or64,
                        binop( Iop_Or64,
                               extractBytefromV256( vA, vB, index0 ),
                               binop( Iop_Shl64,
                                      extractBytefromV256( vA, vB, index1 ),
                                      mkU8( 8 ) ) ),
                        binop( Iop_Or64,
                               binop( Iop_Shl64,
                                      extractBytefromV256( vA, vB, index2 ),
                                      mkU8( 16 ) ),
                               binop( Iop_Shl64,
                                      extractBytefromV256( vA, vB, index3 ),
                                      mkU8( 24 ) ) ) ) );
      }
      break;

   case 0x1E:
      {
         IRTemp index1 = newTemp(Ity_I64);
         IRTemp index2 = newTemp(Ity_I64);
         IRTemp index3 = newTemp(Ity_I64);
         IRTemp index4 = newTemp(Ity_I64);
         IRTemp index5 = newTemp(Ity_I64);
         IRTemp index6 = newTemp(Ity_I64);
         IRTemp index7 = newTemp(Ity_I64);

         // vextddvlx, Vector Extract Double Double-Word Left-indexed
         DIP("vextddvlx v%u,v%u,v%u,%u\n",
             vT_addr, vA_addr, vB_addr, rC_addr);

         /* extractBytefromV256() assumes Right-index ordering */
         assign( index0,
                 binop( Iop_Sub64,
                        mkU64( max_index_in_src ), mkexpr( byte_index ) ) );
         assign( index1, binop( Iop_Sub64, mkexpr( index0 ), mkU64( 1 ) ) );
         assign( index2, binop( Iop_Sub64, mkexpr( index1 ), mkU64( 1 ) ) );
         assign( index3, binop( Iop_Sub64, mkexpr( index2 ), mkU64( 1 ) ) );
         assign( index4, binop( Iop_Sub64, mkexpr( index3 ), mkU64( 1 ) ) );
         assign( index5, binop( Iop_Sub64, mkexpr( index4 ), mkU64( 1 ) ) );
         assign( index6, binop( Iop_Sub64, mkexpr( index5 ), mkU64( 1 ) ) );
         assign( index7, binop( Iop_Sub64, mkexpr( index6 ), mkU64( 1 ) ) );
         /* Assemble the eight extracted bytes into a doubleword. */
         assign( res_tmp,
                 binop( Iop_Or64,
                        binop( Iop_Or64,
                               binop( Iop_Or64,
                                      extractBytefromV256( vA, vB, index7 ),
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index6 ),
                                             mkU8( 8 ) ) ),
                               binop( Iop_Or64,
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index5 ),
                                             mkU8( 16 ) ),
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index4 ),
                                             mkU8( 24 ) ) ) ),
                        binop( Iop_Or64,
                               binop( Iop_Or64,
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index3 ),
                                             mkU8( 32 ) ),
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index2 ),
                                             mkU8( 40 ) ) ),
                               binop( Iop_Or64,
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index1 ),
                                             mkU8( 48 ) ),
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index0 ),
                                             mkU8( 56 ) ) ) ) ) );
      }
      break;

   case 0x1F:
      {
         IRTemp index1 = newTemp(Ity_I64);
         IRTemp index2 = newTemp(Ity_I64);
         IRTemp index3 = newTemp(Ity_I64);
         IRTemp index4 = newTemp(Ity_I64);
         IRTemp index5 = newTemp(Ity_I64);
         IRTemp index6 = newTemp(Ity_I64);
         IRTemp index7 = newTemp(Ity_I64);

         // vextddvrx, Vector Extract Double Doubleword Right-indexed
         DIP("vextddvrx v%u,v%u,v%u,%u\n",
             vT_addr, vA_addr, vB_addr, rC_addr);

         assign( index0, mkexpr( byte_index ) );
         assign( index1, binop( Iop_Add64, mkexpr( index0 ), mkU64( 1 ) ) );
         assign( index2, binop( Iop_Add64, mkexpr( index1 ), mkU64( 1 ) ) );
         assign( index3, binop( Iop_Add64, mkexpr( index2 ), mkU64( 1 ) ) );
         assign( index4, binop( Iop_Add64, mkexpr( index3 ), mkU64( 1 ) ) );
         assign( index5, binop( Iop_Add64, mkexpr( index4 ), mkU64( 1 ) ) );
         assign( index6, binop( Iop_Add64, mkexpr( index5 ), mkU64( 1 ) ) );
         assign( index7, binop( Iop_Add64, mkexpr( index6 ), mkU64( 1 ) ) );
         /* Assemble the eight extracted bytes into a doubleword. */
         assign( res_tmp,
                 binop( Iop_Or64,
                        binop( Iop_Or64,
                               binop( Iop_Or64,
                                      extractBytefromV256( vA, vB, index0 ),
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index1 ),
                                             mkU8( 8 ) ) ),
                               binop( Iop_Or64,
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index2 ),
                                             mkU8( 16 ) ),
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index3 ),
                                             mkU8( 24 ) ) ) ),
                        binop( Iop_Or64,
                               binop( Iop_Or64,
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index4 ),
                                             mkU8( 32 ) ),
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index5 ),
                                             mkU8( 40 ) ) ),
                               binop( Iop_Or64,
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index6 ),
                                             mkU8( 48 ) ),
                                      binop( Iop_Shl64,
                                             extractBytefromV256( vA, vB,
                                                                  index7 ),
                                             mkU8( 56 ) ) ) ) ) );
      }
      break;

   default:
      vex_printf("dis_vec_extract_insert\n");
      return False;
   }

   /* Extracted value goes in the upper 64 bits of the target VSR;
      the lower 64 bits are zeroed. */
   putVSReg( vT_addr, binop( Iop_64HLtoV128,
                             mkexpr( res_tmp ),
                             mkU64( 0 ) ) );

   return True;
}
34478 static Bool dis_string_isolate ( UInt prefix, UInt theInstr )
34480 UChar vT_addr = ifieldRegDS(theInstr);
34481 UChar vA_addr = ifieldRegA(theInstr);
34482 UChar vB_addr = ifieldRegB(theInstr);
34484 IRTemp vT = newTemp(Ity_V128);
34485 IRTemp index = newTemp(Ity_I32);
34486 IRTemp sh_index = newTemp(Ity_I32);
34487 IRTemp mask = newTemp(Ity_V128);
34488 IRTemp cc = newTemp(Ity_I32);
34489 UInt cc_field = 6;
34491 UInt Rc = IFIELD( theInstr, (31-21), 1 );
34493 UInt opc2 = IFIELD( theInstr, 0, 11 );
34494 Int inst_sel = IFIELD(theInstr, 16, 5);
34495 Int dir = 0; // 0 - index from left, 1 - index from right
34496 IROp shift_first, shift_second;
34498 assign( mask, binop( Iop_64HLtoV128,
34499 mkU64( 0xFFFFFFFFFFFFFFFF ),
34500 mkU64( 0xFFFFFFFFFFFFFFFF ) ) );
34502 if (opc2 == 0x18D)
34503 inst_sel = opc2;
34505 else if (opc2 == 0x1CD)
34506 inst_sel = opc2;
34508 switch(inst_sel) {
34509 case 0x0: // vstribl[.]
34510 case 0x1: // vstribr[.]
34512 IRTemp vB = newTemp(Ity_V128);
34514 if (inst_sel == 0) {
34515 DIP("vstribl%s v%u,v%u\n", Rc ? ".":"", vT_addr, vB_addr);
34516 shift_first = Iop_ShlV128;
34517 dir = 0;
34519 } else {
34520 DIP("vstribr%s v%u,v%u\n", Rc ? ".":"", vT_addr, vB_addr);
34521 shift_first = Iop_ShrV128;
34522 dir = 1;
34525 /* Get index of match of first byte from the left that matches zero.
34526 Index will be equal to max elements in vector if there is no match.
34527 If index is equal to the max, which is 16 in this case, set index
34528 to zero so the data mask will select all of the bits.
34530 assign( vB, getVReg( vB_addr ) );
34531 assign( index, unop( Iop_8Uto32,
34532 locate_vector_ele_eq( vB, mkU64( 0 ), dir,
34533 Ity_I8 ) ) );
34534 assign( sh_index,
34535 binop( Iop_And32,
34536 unop( Iop_1Sto32,
34537 binop( Iop_CmpLE32U,
34538 mkexpr( index ),
34539 mkU32( 16 ) ) ),
34540 binop( Iop_Sub32,
34541 mkU32( 16 ),
34542 mkexpr( index ) ) ) );
34544 /* Shift mask to select the bytes up to the match with zero */
34545 assign( vT, binop( Iop_AndV128,
34546 // binop( Iop_ShlV128,
34547 binop( shift_first,
34548 mkexpr( mask ),
34549 unop( Iop_32to8,
34550 binop( Iop_Mul32,
34551 mkU32( 8 ),
34552 mkexpr( sh_index ) ) ) ),
34553 mkexpr( vB ) ) );
34555 if (Rc)
34556 /* The returned index was between 1 and 16 if a null was found. */
34557 assign( cc, binop( Iop_Shl32,
34558 unop( Iop_1Uto32,
34559 binop( Iop_CmpLE32U,
34560 mkexpr( index ), mkU32( 16 ) ) ),
34561 mkU8( 1 ) ) );
34563 break;
34565 case 0x2: // vstrihl[.]
34566 case 0x3: // vstrihr[.]
34568 IRTemp vB = newTemp(Ity_V128);
34570 if (inst_sel == 2) {
34571 DIP("vstrihl%s v%u,v%u\n", Rc ? ".":"", vT_addr, vB_addr);
34572 shift_first = Iop_ShlV128;
34573 dir = 0;
34575 } else {
34576 DIP("vstrihr%s v%u,v%u\n", Rc ? ".":"", vT_addr, vB_addr);
34577 shift_first = Iop_ShrV128;
34578 dir = 1;
34581 assign( vB, getVReg( vB_addr ) );
34582 assign( index, unop( Iop_8Uto32,
34583 locate_vector_ele_eq( vB, mkU64( 0 ), dir,
34584 Ity_I16 ) ) );
34585 /* Get index of match of first half word from specified direction
34586 that matches zero. Index will be equal to max elements in vector
34587 if there is no match. If index is equal to the max, which is 8
34588 in this case, set index to zero so the data mask will select all
34589 of the bits.
34591 assign( sh_index,
34592 binop( Iop_And32,
34593 unop( Iop_1Sto32,
34594 binop( Iop_CmpLE32U,
34595 mkexpr( index ),
34596 mkU32( 8 ) ) ),
34597 binop( Iop_Sub32,
34598 mkU32( 8 ),
34599 mkexpr( index ) ) ) );
34601 /* Shift mask left to select the bytes up to the match with zero */
34602 assign( vT, binop( Iop_AndV128,
34603 // binop( Iop_ShlV128,
34604 binop( shift_first,
34605 mkexpr( mask ),
34606 unop( Iop_32to8,
34607 binop( Iop_Mul32,
34608 mkU32( 16 ),
34609 mkexpr( sh_index ) ) ) ),
34610 mkexpr( vB ) ) );
34612 if (Rc)
34613 /* The returned index was between 1 and 16 if a null was found. */
34614 assign( cc, binop( Iop_Shl32,
34615 unop( Iop_1Uto32,
34616 binop( Iop_CmpLE32U,
34617 mkexpr( index ), mkU32( 8 ) ) ),
34618 mkU8( 1 ) ) );
34620 break;
34622 case 0x18D: // vclrlb
34623 case 0x1CD: // vclrrb
34625 IRTemp rB = newTemp(Ity_I64);
34626 IRTemp vA = newTemp(Ity_V128);
34627 IRTemp shift = newTemp(Ity_I8);
34628 IRTemp clear_result = newTemp(Ity_I64);
34630 /* Note vB_addr actually refers to a GPR in this inst. */
34631 if (inst_sel == 0x18D) {
34632 DIP("vclrlb v%u,v%u,%u\n", vT_addr, vA_addr, vB_addr);
34633 shift_first = Iop_ShlV128;
34634 shift_second = Iop_ShrV128;
34636 } else {
34637 DIP("vclrrb v%u,v%u,%u\n", vT_addr, vA_addr, vB_addr);
34638 shift_first = Iop_ShrV128;
34639 shift_second = Iop_ShlV128;
34642 assign( vA, getVReg( vA_addr ) );
34643 assign( rB, getIReg( vB_addr ) );
34645 /* Clear left 16-rB bytes, if rb > 16, set shift to 0
34646 and clear_result to all 1's. */
34647 assign( shift,
34648 unop( Iop_32to8,
34649 binop( Iop_And32,
34650 binop( Iop_Mul32,
34651 mkU32( 8 ),
34652 binop( Iop_Sub32,
34653 mkU32( 16 ),
34654 unop( Iop_64to32,
34655 mkexpr( rB ) ) ) ),
34656 unop( Iop_Not32,
34657 unop( Iop_1Sto32,
34658 binop( Iop_CmpLT32S,
34659 mkU32( 16 ),
34660 unop( Iop_64to32,
34661 mkexpr( rB ) ) ) ) )
34662 ) ) );
34664 /* Clear all bits if rB > 16 */
34665 assign( clear_result,
34666 binop( Iop_Or64,
34667 unop( Iop_1Sto64,
34668 binop( Iop_CmpLE32S,
34669 unop( Iop_8Uto32, mkexpr( shift ) ),
34670 mkU32( 127 ) ) ),
34671 unop( Iop_1Sto64,
34672 binop( Iop_CmpLT32S,
34673 mkU32( 16 ),
34674 unop( Iop_64to32,
34675 mkexpr( rB ) ) ) ) ) );
34677 /* Clear bits by shifting mask, then shifting back by index. If
34678 * shift is >= 127, need to mask out result as underlying shift only
34679 * supports shifts up to 127 bits.
34681 assign( vT,
34682 binop( Iop_AndV128,
34683 binop( Iop_AndV128,
34684 binop( shift_second,
34685 binop( shift_first,
34686 mkexpr( mask ),
34687 mkexpr( shift ) ),
34688 mkexpr( shift ) ),
34689 mkexpr( vA ) ),
34690 binop( Iop_64HLtoV128, mkexpr( clear_result ),
34691 mkexpr( clear_result ) ) ) );
34693 break;
34695 default:
34696 vex_printf("dis_string_isolate(isnt_sel = %d)\n", inst_sel);
34697 return False;
34700 if (Rc)
34701 putGST_field( PPC_GST_CR, mkexpr( cc ), cc_field );
34703 putVReg( vT_addr, mkexpr( vT ) );
34704 return True;
/* Decode xvtlsbb (VSX Vector Test Least-Significant Bit by Byte).
   Examines bit 0 of each of the 16 bytes of VSR vB and sets CR field BF:
   bit 3 (0b1000) if every byte has its LSB set, bit 1 (0b0010) if every
   byte has its LSB clear.  Returns False if the opcode fields do not
   match xvtlsbb. */
static Bool dis_test_LSB_by_bit ( UInt prefix, UInt theInstr )
{
#define MAX_FIELDS 16
   UChar vB_addr = ifieldRegXB(theInstr);
   IRTemp vB = newTemp( Ity_V128 );
   UChar opc1 = ifieldOPC(theInstr);
   UInt opc2 = IFIELD(theInstr, (31-29), 9);          // bits[21:29]
   UInt inst_select = IFIELD( theInstr, (31-15), 5);  // bits[11:15]
   UInt BF = IFIELD( theInstr, (31-8), 3);            // bits[6:8]
   UInt i;
   /* Running conjunctions over the 16 bytes; entry [i+1] covers bytes
      0..i, entry [0] is the identity (true). */
   IRTemp all_true[MAX_FIELDS+1];
   IRTemp all_false[MAX_FIELDS+1];
   IRTemp tmp128[MAX_FIELDS];
   IRTemp cc = newTemp(Ity_I32);

   if (!((opc1 == 0x3C) && (opc2 == 0x1DB) && (inst_select == 2)))
      return False;

   DIP("xvtlsbb %u,v%u\n", BF, vB_addr);

   assign( vB, getVSReg( vB_addr ) );
   all_true[0] = newTemp( Ity_I1 );
   all_false[0] = newTemp( Ity_I1 );
   assign( all_true[0], mkU1( 1 ) );
   assign( all_false[0], mkU1( 1 ) );

   for (i = 0; i< MAX_FIELDS; i++) {
      tmp128[i] = newTemp( Ity_I64 );
      all_true[i+1] = newTemp( Ity_I1 );
      all_false[i+1] = newTemp( Ity_I1 );

      /* tmp128[i] = LSB of byte i (shift right by i*8, keep bit 0). */
      assign( tmp128[i], binop( Iop_And64,
                                mkU64( 0x1 ),
                                unop( Iop_V128to64,
                                      binop( Iop_ShrV128,
                                             mkexpr( vB ), mkU8( i*8 ) ) ) ) );
      assign( all_true[i+1], mkAND1 ( mkexpr( all_true[i] ),
                                      binop( Iop_CmpEQ64,
                                             mkU64( 1 ),
                                             mkexpr( tmp128[i] ) ) ) );
      assign( all_false[i+1], mkAND1 ( mkexpr( all_false[i] ),
                                       binop( Iop_CmpEQ64,
                                              mkU64( 0 ),
                                              mkexpr( tmp128[i] ) ) ) );
   }

   /* CR field value: all_true -> bit 3, all_false -> bit 1. */
   assign( cc, binop( Iop_Or32,
                      binop( Iop_Shl32,
                             unop( Iop_1Uto32,
                                   mkexpr( all_true[MAX_FIELDS] ) ),
                             mkU8( 3 ) ),
                      binop( Iop_Shl32,
                             unop( Iop_1Uto32,
                                   mkexpr( all_false[MAX_FIELDS] ) ),
                             mkU8( 1 ) ) ) );

   putGST_field( PPC_GST_CR, mkexpr( cc ), BF );
   return True;
#undef MAX_FIELDS
}
34768 static Bool dis_vsx_accumulator_prefix ( UInt prefix, UInt theInstr,
34769 const VexAbiInfo* vbi,
34770 Bool ACC_mapped_on_VSR )
34772 UChar opc1 = ifieldOPC(theInstr);
34773 UChar opc2 = IFIELD( theInstr, 1, 10);
34774 UInt bit11_15 = IFIELD( theInstr, (31-15), 5);
34775 UChar AT = ifieldAT(theInstr);
34776 Bool is_prefix = prefix_instruction( prefix );
34777 UChar rA_addr = ifieldRegA( theInstr );
34778 UChar rB_addr = ifieldRegB( theInstr );
34780 /* Note, not all of the instructions supported by this function are
34781 prefix instructions. */
34782 if ((opc1 == 0x3b)&& !is_prefix) {
34783 // Note these are not prefix instructions
34784 UInt XO = IFIELD( theInstr, 3, 8);
34785 UInt inst_prefix = 0;
34787 /* Note vsx_matrix_4bit_ger writes result to ACC register file. */
34788 switch ( XO ) {
34789 case XVI4GER8:
34790 DIP("xvi4ger8 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34791 vsx_matrix_ger( vbi, MATRIX_4BIT_INT_GER,
34792 getVSReg( rA_addr ), getVSReg( rB_addr ),
34793 AT, ( ( inst_prefix << 8 ) | XO ),
34794 ACC_mapped_on_VSR );
34795 break;
34796 case XVI4GER8PP:
34797 DIP("xvi4ger8pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34798 vsx_matrix_ger( vbi, MATRIX_4BIT_INT_GER,
34799 getVSReg( rA_addr ), getVSReg( rB_addr ),
34800 AT, ( ( inst_prefix << 8 ) | XO ),
34801 ACC_mapped_on_VSR );
34802 break;
34803 case XVI8GER4:
34804 DIP("xvi8ger4 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34805 vsx_matrix_ger( vbi, MATRIX_8BIT_INT_GER,
34806 getVSReg( rA_addr ), getVSReg( rB_addr ),
34807 AT, ( ( inst_prefix << 8 ) | XO ),
34808 ACC_mapped_on_VSR );
34809 break;
34810 case XVI8GER4PP:
34811 DIP("xvi8ger4pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34812 vsx_matrix_ger( vbi, MATRIX_8BIT_INT_GER,
34813 getVSReg( rA_addr ), getVSReg( rB_addr ),
34814 AT, ( ( inst_prefix << 8 ) | XO ),
34815 ACC_mapped_on_VSR );
34816 break;
34817 case XVI8GER4SPP:
34818 DIP("xvi8ger4spp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34819 vsx_matrix_ger( vbi, MATRIX_8BIT_INT_GER,
34820 getVSReg( rA_addr ), getVSReg( rB_addr ),
34821 AT, ( ( inst_prefix << 8 ) | XO ),
34822 ACC_mapped_on_VSR );
34823 break;
34824 case XVI16GER2S:
34825 DIP("xvi16ger2s %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34826 vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
34827 getVSReg( rA_addr ), getVSReg( rB_addr ),
34828 AT, ( ( inst_prefix << 8 ) | XO ),
34829 ACC_mapped_on_VSR );
34830 break;
34831 case XVI16GER2SPP:
34832 DIP("xvi16ger2pps %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34833 vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
34834 getVSReg( rA_addr ), getVSReg( rB_addr ),
34835 AT, ( ( inst_prefix << 8 ) | XO ),
34836 ACC_mapped_on_VSR );
34837 break;
34838 case XVI16GER2:
34839 DIP("xvi16ger2 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34840 vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
34841 getVSReg( rA_addr ), getVSReg( rB_addr ),
34842 AT, ( ( inst_prefix << 8 ) | XO ),
34843 ACC_mapped_on_VSR );
34844 break;
34845 case XVI16GER2PP:
34846 DIP("xvi16ger2pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34847 vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
34848 getVSReg( rA_addr ), getVSReg( rB_addr ),
34849 AT, ( ( inst_prefix << 8 ) | XO ),
34850 ACC_mapped_on_VSR );
34851 break;
34853 case XVF16GER2:
34854 DIP("xvf16ger2 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34855 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34856 getVSReg( rA_addr ),
34857 getVSReg( rB_addr ), AT,
34858 ( ( inst_prefix << 8 ) | XO ),
34859 ACC_mapped_on_VSR );
34860 break;
34861 case XVF16GER2PP:
34862 DIP("xvf16ger2pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34863 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34864 getVSReg( rA_addr ),
34865 getVSReg( rB_addr ), AT,
34866 ( ( inst_prefix << 8 ) | XO ),
34867 ACC_mapped_on_VSR );
34868 break;
34869 case XVF16GER2PN:
34870 DIP("xvf16ger2pn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34871 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34872 getVSReg( rA_addr ),
34873 getVSReg( rB_addr ), AT,
34874 ( ( inst_prefix << 8 ) | XO ),
34875 ACC_mapped_on_VSR );
34876 break;
34877 case XVF16GER2NP:
34878 DIP("xvf16ger2np %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34879 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34880 getVSReg( rA_addr ),
34881 getVSReg( rB_addr ), AT,
34882 ( ( inst_prefix << 8 ) | XO ),
34883 ACC_mapped_on_VSR );
34884 break;
34885 case XVF16GER2NN:
34886 DIP("xvf16ger2nn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34887 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34888 getVSReg( rA_addr ),
34889 getVSReg( rB_addr ), AT,
34890 ( ( inst_prefix << 8 ) | XO ),
34891 ACC_mapped_on_VSR );
34892 break;
34893 case XVBF16GER2:
34894 DIP("xvbf16ger2 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34895 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34896 getVSReg( rA_addr ),
34897 getVSReg( rB_addr ), AT,
34898 ( ( inst_prefix << 8 ) | XO ),
34899 ACC_mapped_on_VSR );
34900 break;
34901 case XVBF16GER2PP:
34902 DIP("xvbf16ger2pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34903 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34904 getVSReg( rA_addr ),
34905 getVSReg( rB_addr ), AT,
34906 ( ( inst_prefix << 8 ) | XO ),
34907 ACC_mapped_on_VSR );
34908 break;
34909 case XVBF16GER2PN:
34910 DIP("xvbf16ger2pn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34911 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34912 getVSReg( rA_addr ),
34913 getVSReg( rB_addr ), AT,
34914 ( ( inst_prefix << 8 ) | XO ),
34915 ACC_mapped_on_VSR );
34916 break;
34917 case XVBF16GER2NP:
34918 DIP("xvbf16ger2np %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34919 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34920 getVSReg( rA_addr ),
34921 getVSReg( rB_addr ), AT,
34922 ( ( inst_prefix << 8 ) | XO ),
34923 ACC_mapped_on_VSR );
34924 break;
34925 case XVBF16GER2NN:
34926 DIP("xvbf16ger2nn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34927 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
34928 getVSReg( rA_addr ),
34929 getVSReg( rB_addr ), AT,
34930 ( ( inst_prefix << 8 ) | XO ),
34931 ACC_mapped_on_VSR );
34932 break;
34933 case XVF32GER:
34934 DIP("xvf32ger %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34935 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
34936 getVSReg( rA_addr ),
34937 getVSReg( rB_addr ), AT,
34938 ( ( inst_prefix << 8 ) | XO ),
34939 ACC_mapped_on_VSR );
34940 break;
34941 case XVF32GERPP:
34942 DIP("xvf32gerpp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34943 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
34944 getVSReg( rA_addr ),
34945 getVSReg( rB_addr ), AT,
34946 ( ( inst_prefix << 8 ) | XO ),
34947 ACC_mapped_on_VSR );
34948 break;
34949 case XVF32GERPN:
34950 DIP("xvf32gerpn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34951 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
34952 getVSReg( rA_addr ),
34953 getVSReg( rB_addr ), AT,
34954 ( ( inst_prefix << 8 ) | XO ),
34955 ACC_mapped_on_VSR );
34956 break;
34957 case XVF32GERNP:
34958 DIP("xvf32gernp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34959 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
34960 getVSReg( rA_addr ),
34961 getVSReg( rB_addr ), AT,
34962 ( ( inst_prefix << 8 ) | XO ),
34963 ACC_mapped_on_VSR );
34964 break;
34965 case XVF32GERNN:
34966 DIP("xvf32gernn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34967 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
34968 getVSReg( rA_addr ),
34969 getVSReg( rB_addr ), AT,
34970 ( ( inst_prefix << 8 ) | XO ),
34971 ACC_mapped_on_VSR );
34972 break;
34973 case XVF64GER:
34974 DIP("xvf64ger %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34975 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
34976 getVSReg( rA_addr+1 ),
34977 getVSReg( rB_addr ), AT,
34978 ( ( inst_prefix << 8 ) | XO ),
34979 ACC_mapped_on_VSR );
34980 break;
34981 case XVF64GERPP:
34982 DIP("xvfd642gerpp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34983 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
34984 getVSReg( rA_addr+1 ),
34985 getVSReg( rB_addr ), AT,
34986 ( ( inst_prefix << 8 ) | XO ),
34987 ACC_mapped_on_VSR );
34988 break;
34989 case XVF64GERPN:
34990 DIP("xvf64gerpn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34991 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
34992 getVSReg( rA_addr+1 ),
34993 getVSReg( rB_addr ), AT,
34994 ( ( inst_prefix << 8 ) | XO ),
34995 ACC_mapped_on_VSR );
34996 break;
34997 case XVF64GERNP:
34998 DIP("xvf64gernp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
34999 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
35000 getVSReg( rA_addr+1 ),
35001 getVSReg( rB_addr ), AT,
35002 ( ( inst_prefix << 8 ) | XO ),
35003 ACC_mapped_on_VSR );
35004 break;
35005 case XVF64GERNN:
35006 DIP("xvf64gernn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35007 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
35008 getVSReg( rA_addr+1 ),
35009 getVSReg( rB_addr ), AT,
35010 ( ( inst_prefix << 8 ) | XO ),
35011 ACC_mapped_on_VSR );
35012 break;
35013 default:
35014 vex_printf("ERROR, dis_vsx_accumulator_prefix, Unknown X0 = 0x%x value.\n", XO);
35015 return False;
35018 } else if ((opc1 == 0x3b) && prefix) {
35019 // Note these are prefix instructions
35020 UInt XO = IFIELD( theInstr, 3, 8);
35021 UInt PMSK, XMSK, YMSK, MASKS;
35022 UInt inst_prefix = 0x1;
35023 MASKS = IFIELD( prefix, 0, 16);
35025 switch ( XO ) {
35026 case XVI4GER8:
35027 PMSK = IFIELD( prefix, 8, 8);
35028 XMSK = IFIELD( prefix, 4, 4);
35029 YMSK = IFIELD( prefix, 0, 4);
35031 DIP("pmxvi4ger8 %u,r%u, r%u,%u,%u,%u\n",
35032 AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
35033 vsx_matrix_ger( vbi, MATRIX_4BIT_INT_GER,
35034 getVSReg( rA_addr ), getVSReg( rB_addr ),
35036 ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO),
35037 ACC_mapped_on_VSR );
35038 break;
35039 case XVI4GER8PP:
35040 PMSK = IFIELD( prefix, 8, 8);
35041 XMSK = IFIELD( prefix, 4, 4);
35042 YMSK = IFIELD( prefix, 0, 4);
35043 DIP("pmxvi4ger8pp %u,r%u, r%u,%u,%u,%u\n",
35044 AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
35045 vsx_matrix_ger( vbi, MATRIX_4BIT_INT_GER,
35046 getVSReg( rA_addr ), getVSReg( rB_addr ),
35048 ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ),
35049 ACC_mapped_on_VSR );
35050 break;
35051 case XVI8GER4:
35052 PMSK = IFIELD( prefix, 12, 4);
35053 XMSK = IFIELD( prefix, 4, 4);
35054 YMSK = IFIELD( prefix, 0, 4);
35055 DIP("pmxvi8ger4 %u,r%u, r%u,%u,%u,%u\n",
35056 AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
35057 vsx_matrix_ger( vbi, MATRIX_8BIT_INT_GER,
35058 getVSReg( rA_addr ), getVSReg( rB_addr ),
35060 ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ),
35061 ACC_mapped_on_VSR );
35062 break;
35063 case XVI8GER4PP:
35064 PMSK = IFIELD( prefix, 12, 4);
35065 XMSK = IFIELD( prefix, 4, 4);
35066 YMSK = IFIELD( prefix, 0, 4);
35067 DIP("pmxvi8ger4pp %u,r%u, r%u,%u,%u,%u\n",
35068 AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
35069 vsx_matrix_ger( vbi, MATRIX_8BIT_INT_GER,
35070 getVSReg( rA_addr ), getVSReg( rB_addr ),
35072 ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ),
35073 ACC_mapped_on_VSR );
35074 break;
35075 case XVI8GER4SPP:
35076 PMSK = IFIELD( prefix, 12, 4);
35077 XMSK = IFIELD( prefix, 4, 4);
35078 YMSK = IFIELD( prefix, 0, 4);
35079 DIP("pmxvi8ger4spp %u,r%u, r%u,%u,%u,%u\n",
35080 AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
35081 vsx_matrix_ger( vbi, MATRIX_8BIT_INT_GER,
35082 getVSReg( rA_addr ), getVSReg( rB_addr ),
35084 ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ),
35085 ACC_mapped_on_VSR );
35086 break;
35087 case XVI16GER2:
35088 PMSK = IFIELD( prefix, 12, 4);
35089 XMSK = IFIELD( prefix, 4, 4);
35090 YMSK = IFIELD( prefix, 0, 4);
35091 DIP("pmxvi16ger2 %u,r%u, r%u,%u,%u,%u\n",
35092 AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
35093 vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
35094 getVSReg( rA_addr ), getVSReg( rB_addr ),
35096 ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ),
35097 ACC_mapped_on_VSR );
35098 break;
35099 case XVI16GER2PP:
35100 PMSK = IFIELD( prefix, 12, 4);
35101 XMSK = IFIELD( prefix, 4, 4);
35102 YMSK = IFIELD( prefix, 0, 4);
35103 DIP("pmxvi16ger2pp %u,r%u, r%u,%u,%u,%u\n",
35104 AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
35105 vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
35106 getVSReg( rA_addr ), getVSReg( rB_addr ),
35108 ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ),
35109 ACC_mapped_on_VSR );
35110 break;
35111 case XVI16GER2S:
35112 PMSK = IFIELD( prefix, 14, 2);
35113 XMSK = IFIELD( prefix, 4, 4);
35114 YMSK = IFIELD( prefix, 0, 4);
35115 DIP("pmxvi16ger2s %u,r%u, r%u,%u,%u,%u\n",
35116 AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
35117 vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
35118 getVSReg( rA_addr ), getVSReg( rB_addr ),
35120 ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ),
35121 ACC_mapped_on_VSR );
35122 break;
35123 case XVI16GER2SPP:
35124 PMSK = IFIELD( prefix, 14, 2);
35125 XMSK = IFIELD( prefix, 4, 4);
35126 YMSK = IFIELD( prefix, 0, 4);
35127 DIP("pmxvi16ger2pps %u,r%u, r%u,%u,%u,%u\n",
35128 AT, rA_addr, rB_addr, XMSK, YMSK, PMSK);
35129 vsx_matrix_ger( vbi, MATRIX_16BIT_INT_GER,
35130 getVSReg( rA_addr ), getVSReg( rB_addr ),
35132 ( (MASKS << 9 ) | ( inst_prefix << 8 ) | XO ),
35133 ACC_mapped_on_VSR );
35134 break;
35135 case XVBF16GER2:
35136 PMSK = IFIELD( prefix, 14, 2);
35137 XMSK = IFIELD( prefix, 4, 4);
35138 YMSK = IFIELD( prefix, 0, 4);
35139 DIP("pmxvbf16ger2 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35140 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35141 getVSReg( rA_addr ),
35142 getVSReg( rB_addr ),
35143 AT, ( (MASKS << 9 )
35144 | ( inst_prefix << 8 ) | XO ),
35145 ACC_mapped_on_VSR );
35146 break;
35147 case XVBF16GER2PP:
35148 PMSK = IFIELD( prefix, 14, 2);
35149 XMSK = IFIELD( prefix, 4, 4);
35150 YMSK = IFIELD( prefix, 0, 4);
35151 DIP("pmxvbf16ger2pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35152 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35153 getVSReg( rA_addr ),
35154 getVSReg( rB_addr ),
35155 AT, ( (MASKS << 9 )
35156 | ( inst_prefix << 8 ) | XO ),
35157 ACC_mapped_on_VSR );
35158 break;
35159 case XVBF16GER2PN:
35160 PMSK = IFIELD( prefix, 14, 2);
35161 XMSK = IFIELD( prefix, 4, 4);
35162 YMSK = IFIELD( prefix, 0, 4);
35163 DIP("pmxvbf16ger2pn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35164 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35165 getVSReg( rA_addr ),
35166 getVSReg( rB_addr ),
35167 AT, ( (MASKS << 9 )
35168 | ( inst_prefix << 8 ) | XO ),
35169 ACC_mapped_on_VSR );
35170 break;
35171 case XVBF16GER2NP:
35172 PMSK = IFIELD( prefix, 14, 2);
35173 XMSK = IFIELD( prefix, 4, 4);
35174 YMSK = IFIELD( prefix, 0, 4);
35175 DIP("pmxvbf16ger2np %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35176 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35177 getVSReg( rA_addr ),
35178 getVSReg( rB_addr ),
35179 AT, ( (MASKS << 9 )
35180 | ( inst_prefix << 8 ) | XO ),
35181 ACC_mapped_on_VSR );
35182 break;
35183 case XVBF16GER2NN:
35184 PMSK = IFIELD( prefix, 14, 2);
35185 XMSK = IFIELD( prefix, 4, 4);
35186 YMSK = IFIELD( prefix, 0, 4);
35187 DIP("pmxvbf16ger2nn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35188 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35189 getVSReg( rA_addr ),
35190 getVSReg( rB_addr ),
35191 AT, ( (MASKS << 9 )
35192 | ( inst_prefix << 8 ) | XO ),
35193 ACC_mapped_on_VSR );
35194 break;
35195 case XVF16GER2:
35196 PMSK = IFIELD( prefix, 14, 2);
35197 XMSK = IFIELD( prefix, 4, 4);
35198 YMSK = IFIELD( prefix, 0, 4);
35199 DIP("pmxvf16ger2 %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35200 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35201 getVSReg( rA_addr ),
35202 getVSReg( rB_addr ),
35203 AT, ( (MASKS << 9 )
35204 | ( inst_prefix << 8 ) | XO ),
35205 ACC_mapped_on_VSR );
35206 break;
35207 case XVF16GER2PP:
35208 PMSK = IFIELD( prefix, 14, 2);
35209 XMSK = IFIELD( prefix, 4, 4);
35210 YMSK = IFIELD( prefix, 0, 4);
35211 DIP("pmxvf16ger2pp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35212 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35213 getVSReg( rA_addr ),
35214 getVSReg( rB_addr ),
35215 AT, ( (MASKS << 9 )
35216 | ( inst_prefix << 8 ) | XO ),
35217 ACC_mapped_on_VSR );
35218 break;
35219 case XVF16GER2PN:
35220 PMSK = IFIELD( prefix, 14, 2);
35221 XMSK = IFIELD( prefix, 4, 4);
35222 YMSK = IFIELD( prefix, 0, 4);
35223 DIP("pmxvf16ger2pn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35224 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35225 getVSReg( rA_addr ),
35226 getVSReg( rB_addr ),
35227 AT, ( (MASKS << 9 )
35228 | ( inst_prefix << 8 ) | XO ),
35229 ACC_mapped_on_VSR );
35230 break;
35231 case XVF16GER2NP:
35232 PMSK = IFIELD( prefix, 14, 2);
35233 XMSK = IFIELD( prefix, 4, 4);
35234 YMSK = IFIELD( prefix, 0, 4);
35235 DIP("pmxvf16ger2np %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35236 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35237 getVSReg( rA_addr ),
35238 getVSReg( rB_addr ),
35239 AT, ( (MASKS << 9 )
35240 | ( inst_prefix << 8 ) | XO ),
35241 ACC_mapped_on_VSR );
35242 break;
35243 case XVF16GER2NN:
35244 PMSK = IFIELD( prefix, 14, 2);
35245 XMSK = IFIELD( prefix, 4, 4);
35246 YMSK = IFIELD( prefix, 0, 4);
35247 DIP("pmxvf16ger2nn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35248 vsx_matrix_ger( vbi, MATRIX_16BIT_FLOAT_GER,
35249 getVSReg( rA_addr ),
35250 getVSReg( rB_addr ),
35251 AT, ( (MASKS << 9 )
35252 | ( inst_prefix << 8 ) | XO ),
35253 ACC_mapped_on_VSR );
35254 break;
35255 case XVF32GER:
35256 PMSK = IFIELD( prefix, 14, 2);
35257 XMSK = IFIELD( prefix, 4, 4);
35258 YMSK = IFIELD( prefix, 0, 4);
35259 DIP("pmxvf32ger %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35260 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
35261 getVSReg( rA_addr ),
35262 getVSReg( rB_addr ), AT,
35263 ( ( MASKS << 9 ) | ( inst_prefix << 8 ) | XO ),
35264 ACC_mapped_on_VSR );
35265 break;
35266 case XVF32GERPP:
35267 PMSK = IFIELD( prefix, 14, 2);
35268 XMSK = IFIELD( prefix, 4, 4);
35269 YMSK = IFIELD( prefix, 0, 4);
35270 DIP("pmxvf32gerpp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35271 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
35272 getVSReg( rA_addr ),
35273 getVSReg( rB_addr ), AT,
35274 ( ( MASKS << 9) | ( inst_prefix << 8 ) | XO ),
35275 ACC_mapped_on_VSR );
35276 break;
35277 case XVF32GERPN:
35278 PMSK = 0;
35279 XMSK = IFIELD( prefix, 4, 4);
35280 YMSK = IFIELD( prefix, 0, 4);
35281 DIP("pmxvf32gerpn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35282 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
35283 getVSReg( rA_addr ),
35284 getVSReg( rB_addr ), AT,
35285 ( ( MASKS << 9) | ( inst_prefix << 8 ) | XO ),
35286 ACC_mapped_on_VSR );
35287 break;
35288 case XVF32GERNP:
35289 PMSK = 0;
35290 XMSK = IFIELD( prefix, 4, 4);
35291 YMSK = IFIELD( prefix, 0, 4);
35292 DIP("pmxvf32gernp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35293 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
35294 getVSReg( rA_addr ),
35295 getVSReg( rB_addr ), AT,
35296 ( ( MASKS << 9) | ( inst_prefix << 8 ) | XO ),
35297 ACC_mapped_on_VSR );
35298 break;
35299 case XVF32GERNN:
35300 PMSK = 0;
35301 XMSK = IFIELD( prefix, 4, 4);
35302 YMSK = IFIELD( prefix, 0, 4);
35303 DIP("pmxvf32gernn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35304 vsx_matrix_ger( vbi, MATRIX_32BIT_FLOAT_GER,
35305 getVSReg( rA_addr ),
35306 getVSReg( rB_addr ), AT,
35307 ( ( MASKS << 9) | ( inst_prefix << 8 ) | XO ),
35308 ACC_mapped_on_VSR );
35309 break;
35310 case XVF64GER:
35311 PMSK = 0;
35312 XMSK = IFIELD( prefix, 4, 4);
35313 YMSK = IFIELD( prefix, 2, 2);
35314 DIP("pmxvf64ger %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35315 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
35316 getVSReg( rA_addr+1 ),
35317 getVSReg( rB_addr ), AT,
35318 ( ( MASKS << 9) | ( inst_prefix << 8 )
35319 | XO ),
35320 ACC_mapped_on_VSR );
35321 break;
35322 case XVF64GERPP:
35323 PMSK = 0;
35324 XMSK = IFIELD( prefix, 4, 4);
35325 YMSK = IFIELD( prefix, 2, 2);
35326 DIP("pmxvf64gerpp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35327 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
35328 getVSReg( rA_addr+1 ),
35329 getVSReg( rB_addr ), AT,
35330 ( ( MASKS << 9) | ( inst_prefix << 8 )
35331 | XO ),
35332 ACC_mapped_on_VSR );
35333 break;
35334 case XVF64GERPN:
35335 PMSK = 0;
35336 XMSK = IFIELD( prefix, 4, 4);
35337 YMSK = IFIELD( prefix, 2, 2);
35338 DIP("pmxvf64gerpn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35339 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
35340 getVSReg( rA_addr+1 ),
35341 getVSReg( rB_addr ), AT,
35342 ( ( MASKS << 9) | ( inst_prefix << 8 )
35343 | XO ),
35344 ACC_mapped_on_VSR );
35345 break;
35346 case XVF64GERNP:
35347 PMSK = 0;
35348 XMSK = IFIELD( prefix, 4, 4);
35349 YMSK = IFIELD( prefix, 2, 2);
35350 DIP("pmxvf64gernp %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35351 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
35352 getVSReg( rA_addr+1 ),
35353 getVSReg( rB_addr ), AT,
35354 ( ( MASKS << 9) | ( inst_prefix << 8 )
35355 | XO ),
35356 ACC_mapped_on_VSR );
35357 break;
35358 case XVF64GERNN:
35359 PMSK = 0;
35360 XMSK = IFIELD( prefix, 4, 4);
35361 YMSK = IFIELD( prefix, 2, 2);
35362 DIP("pmxvf64gernn %u,r%u, r%u\n", AT, rA_addr, rB_addr);
35363 vsx_matrix_64bit_float_ger( vbi, getVSReg( rA_addr ),
35364 getVSReg( rA_addr+1 ),
35365 getVSReg( rB_addr ), AT,
35366 ( ( MASKS << 9) | ( inst_prefix << 8 )
35367 | XO ),
35368 ACC_mapped_on_VSR );
35369 break;
35370 default:
35371 return False;
35374 } else if ((opc1 == 0x1F) && (opc2 == 0xB1) && (bit11_15 == 0) && !prefix) {
35375 // FYI, this is not a prefix instruction
35376 DIP("xxmfacc %u\n", AT);
35378 putVSReg( 4*AT+0, getACC( AT, 0, ACC_mapped_on_VSR ) );
35379 putVSReg( 4*AT+1, getACC( AT, 1, ACC_mapped_on_VSR ) );
35380 putVSReg( 4*AT+2, getACC( AT, 2, ACC_mapped_on_VSR ) );
35381 putVSReg( 4*AT+3, getACC( AT, 3, ACC_mapped_on_VSR ) );
35383 } else if ((opc1 == 0x1F) && (opc2 == 0xB1) && (bit11_15 == 3) && !prefix) {
35384 // FYI, this is not a prefix instruction
35385 IRTemp zero128 = newTemp(Ity_V128);
35387 DIP("xxsetaccz %u\n", AT);
35389 assign( zero128, binop(Iop_64HLtoV128, mkU64( 0 ), mkU64( 0 ) ) );
35390 putACC( AT, 0, mkexpr( zero128 ), ACC_mapped_on_VSR );
35391 putACC( AT, 1, mkexpr( zero128 ), ACC_mapped_on_VSR );
35392 putACC( AT, 2, mkexpr( zero128 ), ACC_mapped_on_VSR );
35393 putACC( AT, 3, mkexpr( zero128 ), ACC_mapped_on_VSR );
35395 } else if ((opc1 == 0x1F) && (opc2 == 0xB1) && (bit11_15 == 1) && !prefix) {
35396 // FYI, this is not a prefix instruction
35397 DIP("xxmtacc %u\n", AT);
35399 putACC( AT, 0, getVSReg( 4*AT+0 ), ACC_mapped_on_VSR );
35400 putACC( AT, 1, getVSReg( 4*AT+1 ), ACC_mapped_on_VSR );
35401 putACC( AT, 2, getVSReg( 4*AT+2 ), ACC_mapped_on_VSR );
35402 putACC( AT, 3, getVSReg( 4*AT+3 ), ACC_mapped_on_VSR );
35404 } else {
35405 vex_printf("ERROR, dis_vsx_accumulator_prefix, Unknown instruction theInstr = 0x%x\n",
35406 theInstr);
35407 return False;
35410 return True;
35413 static Bool dis_vector_generate_pvc_from_mask ( UInt prefix,
35414 UInt theInstr,
35415 const VexAbiInfo* vbi )
35417 UChar XT_addr = ifieldRegXT(theInstr);
35418 UChar vB_addr = ifieldRegB(theInstr);
35419 IRTemp vB = newTemp( Ity_V128 );
35420 UInt opc2 = ifieldOPClo10(theInstr);
35421 UInt IMM = IFIELD(theInstr, (31-15), 5); // bits[11:15]
35423 assign( vB, getVReg( vB_addr ) );
35425 switch( opc2 ) {
35426 case 0x394:
35427 DIP("xxgenpcvbm v%u,v%u,%u\n", XT_addr, vB_addr, IMM);
35428 /* vector_gen_pvc_mask uses a dirty helper to calculate the result and
35429 write it to the VSX result register. */
35430 vector_gen_pvc_mask( vbi, mkexpr( vB ), IMM, opc2, XT_addr );
35431 break;
35433 case 0x395:
35434 DIP("xxgenpcvhm v%u,v%u,%u\n", XT_addr, vB_addr, IMM);
35435 /* vector_gen_pvc_mask uses a dirty helper to calculate the result and
35436 write it to the VSX result register. */
35437 vector_gen_pvc_mask( vbi, mkexpr( vB ), IMM, opc2, XT_addr );
35438 break;
35440 case 0x3B4:
35441 DIP("xxgenpcvwm v%u,v%u,%u\n", XT_addr, vB_addr, IMM);
35442 /* vector_gen_pvc_mask uses a dirty helper to calculate the result and
35443 write it to the VSX result register. */
35444 vector_gen_pvc_mask( vbi, mkexpr( vB ), IMM, opc2, XT_addr );
35445 break;
35447 case 0x3B5:
35448 DIP("xxgenpcvdm v%u,v%u,%u\n", XT_addr, vB_addr, IMM);
35449 /* vector_gen_pvc_mask uses a dirty helper to calculate the result and
35450 write it to the VSX result register. */
35451 vector_gen_pvc_mask( vbi, mkexpr( vB ), IMM, opc2, XT_addr );
35452 break;
35454 default:
35455 return False;
35458 return True;
static Int dis_copy_paste ( UInt prefix, UInt theInstr,
                            const VexAbiInfo* vbi )
{
   /* Decode the copy, paste. and cpabort instructions.  The actual data
      movement is performed by a dirty helper that issues the corresponding
      instruction on the host. */
   IRType ty = mode64 ? Ity_I64 : Ity_I32;
   Bool L = IFIELD( theInstr, 21, 1 );
   UInt bit0 = IFIELD( theInstr, 0, 1 );
   UInt opc2 = ifieldOPClo10( theInstr );
   UChar rA_addr = ifieldRegA(theInstr);
   UChar rB_addr = ifieldRegB(theInstr);
   IRTemp cr0 = newTemp( Ity_I8 );
   UInt operation = INVALD_INST;
   IRTemp EA_base = newTemp(ty);
   IRExpr** args;
   IRDirty* d;
   UInt mFx = Ifx_None;
   IRTemp helper_rtn = newTemp(Ity_I32);

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK

   assign( EA_base, ea_rAor0_idxd(rA_addr, rB_addr) );

   if (ty != Ity_I64) {
      vpanic( "ERROR PPC: copy, paste, cpabort only supported on 64-bit systems");
      return False;
   }

   /* The dirty helper is passed the EA_base for the 128-byte buffer and
      an operation, i.e. which instruction to issue on the host.  It returns
      a uint32_t result.  The result is condition code CR0.  Only for the
      paste instruction is the return value relevant and must be used to
      update the guest state. */

   if (( opc2 == 0x306 ) && ( L == 1 )) {   // copy
      DIP("copy %u,%u\n", rA_addr, rB_addr);
      operation = COPY_INST;
      mFx = Ifx_Read;

   } else if ( opc2 == 0x346 ) {            // cpabort
      DIP("cpabort\n");
      operation = CPABORT_INST;
      /* Abort data transfer if one is in progress. */
      /* cpabort does nothing to the guest state, just resets operation
         on the host. */

   } else if (( opc2 == 0x386 ) && ( bit0 == 1 )) {   // paste.

      /* The Ifx_Write will cause Memcheck to instrument the buffer.  If
         there is any undefinedness in the inputs, then all of the outputs
         will be undefined.  Hence:

           if EA_base or operation contain any undefined bits

           then the return value is undefined and the specified 128-byte
                memory area are undefined after the call

           else the return value is undefined and the specified 128-byte
                memory area are defined after the call */
      DIP("paste %u,%u\n", rA_addr, rB_addr);
      operation = PASTE_INST;
      mFx = Ifx_Write;

   } else {
      /* Unknown instruction, should never get here. */
      return False;
   }

   /* Call dirty helper to issue the copy, paste or cpabort instruction on
      the host. */
   args = mkIRExprVec_2( mkexpr(EA_base), mkU32(operation) );

   /* The dirty helper needs to return the 8-bit condition code result from
      the copy/paste instructions run on the host.  The following hack is
      used to get Memcheck to return an error if any of the bits in the
      128-byte copy-paste buffer are uninitialized.  The bottom 8-bits of
      helper_rtn contain the condition code CR0.  The upper bits must all
      be zero. */

   d = unsafeIRDirty_1_N (
          helper_rtn,
          0/*regparms*/,
          "copy_paste_abort_dirty_helper",
          fnptr_to_fnentry( vbi, &copy_paste_abort_dirty_helper ),
          args );

   /* As part of the hack, we must set mFx/mAddr/mSize so as to declare the
      memory area used by the copy/paste instructions. */
   d->mAddr = NULL;

   if (mFx != Ifx_None) {
      d->mFx = mFx;
      d->mAddr = mkexpr(EA_base);
      d->mSize = 128;  /* 128 byte memory region */
   }

   stmt( IRStmt_Dirty(d) );

   /* The following Exit state is inserted with a test that the IR
      optimization cannot remove. */
   stmt( IRStmt_Exit(
            binop(Iop_CmpNE32, binop( Iop_And32, mkexpr(helper_rtn),
                                      mkU32(0xFF00)),
                  mkU32(0)),
            Ijk_SigTRAP,
            mode64 ? IRConst_U64(guest_CIA_curr_instr) :
            IRConst_U32((UInt) guest_CIA_curr_instr),
            OFFB_CIA
            ));

   /* The effects of this hack are as follows:

      (1) the above stmt() asks the IR to exit, asking Valgrind to hand
      the program a SIGTRAP at this point, if the fake return value is
      nonzero, however ..

      (2) .. that never happens, because the actual return value is masked
      out and the upper bits of the return are always zero.

      (3) Memcheck will believe that any undefinedness in the copy/paste
      area read by the helper will be propagated through to the helper_rtn
      value, and will generate instrumentation to cause that to happen.

      (4) Memcheck will instrument the IRStmt_Exit to check the definedness
      computed by (3) and emit an error if helper_rtn value contains any
      undefined bits.  Hence Memcheck will generate a warning for undefined
      bits in the copy/paste buffer.

      (5) Note that the IR optimisation passes do not know what value the
      helper call will return.  Hence we are guaranteed that they can't
      optimise away the IRStmt_Exit and its associated check. */

   /* Need to extract the actual return value and put it into the guest
      state. */
   assign( cr0, unop(Iop_16to8,
                     unop(Iop_32to16, mkexpr(helper_rtn))));

   if (( opc2 == 0x386 ) && (bit0 == 1 )) {
      /* Only the paste instruction sets CR0.
         Update CR0 bits [3:1] with the copy/paste result with the host CR0
         result value.  CR0 bit 0 must match the guest XER_OV value. */
      putCR0 ( 0, binop(Iop_And8, mkU8( 1 ), getXER_OV() ) );
      putCR321( 0, binop(Iop_And8, mkU8( 0xE ), mkexpr(cr0) ) );
   }

   return True;
}
35606 static Int dis_nop_prefix ( UInt prefix, UInt theInstr )
35608 Bool is_prefix = prefix_instruction( prefix );
35609 UInt bit6_7 = IFIELD( prefix, 24, 2);
35610 UInt bit8_11 = IFIELD( prefix, 20, 4);
35611 UInt bit14_31 = IFIELD( prefix, 0, 18);
35612 UInt opc2 = ifieldOPClo10(theInstr);
35614 /* pnop instruction :
35615 must be a prefix instruction;
35616 prefix[6:7] = 3;
35617 prefix[8:11] = 0;
35618 prefix[14:31] = 0;
35619 theInstr[0:31] != Branch instruction
35620 The branch instruction (b) has opc2 = 18 (0x12)); */
35623 if (is_prefix && (bit6_7 == 3) && (bit8_11 == 0) && (bit14_31 == 0)) {
35624 if (opc2 == 0x12) {
35625 /* suffix is a branch instruction which is invalid. */
35626 vex_printf("INVALID pnop instruction. Exiting\n");
35627 return PREFIX_NOP_INVALID;
35630 /* valid */
35631 pDIP( is_prefix, "nop\n");
35632 return True;
35634 return False;
static Int dis_darn ( UInt prefix, UInt theInstr,
                      const VexAbiInfo* vbi )
{
   /* darn - Deliver A Random Number */
   UInt L = IFIELD( theInstr, 16, 2);
   UChar rD_addr = ifieldRegDS( theInstr );
   IRTemp rD = newTemp( Ity_I64 );
   IRDirty* d;

   /* L   Format of returned value
      0   0 || CRN_32bits
      1   CRN_64bits (0 to 0xFFFF_FFFF_FFFF_FFFE)
      2   RRN_64bits (0 to 0xFFFF_FFFF_FFFF_FFFE)
      3   reserved

      On error, return 0xFFFFFFFFFFFFFFFF
      A CRN value is a conditioned random number that was processed
      to reduce bias.  */

   /* There is no prefixed version of these instructions. */
   PREFIX_CHECK
   DIP("darn r%u,%u\n", rD_addr, L);

   if (L == 3)
      /* Hardware reports illegal instruction if L = 3. */
      return False;

   IRExpr** args = mkIRExprVec_1( mkU32( L ) );

   d = unsafeIRDirty_1_N (
          rD,
          0/*regparms*/,
          "darn_dirty_helper",
          fnptr_to_fnentry( vbi, &darn_dirty_helper ),
          args );

   /* Execute the dirty call, returning the result in rD.  The dirty
      helper calls the darn instruction on the host returning the
      random number generated by the darn instruction on the host.
      The dirty helper does not change the state of the guest or guest
      memory. */
   stmt( IRStmt_Dirty(d) );
   putIReg( rD_addr, mkexpr( rD ) );
   return True;
}
35684 /*------------------------------------------------------------*/
35685 /*--- Disassemble a single instruction ---*/
35686 /*------------------------------------------------------------*/
35688 /* ISA 3.1 introduced a new 8-byte instruction format called "prefixed
35689 instructions". All instructions up to ISA 3.1 were 4-byte instructions
35690 that are now called "word instructions".
35692 Disassemble a single instruction into IR. The instruction
35693 is located in host memory at &guest_code[delta]. */
35695 static
35696 DisResult disInstr_PPC_WRK (
35697 Long delta64,
35698 const VexArchInfo* archinfo,
35699 const VexAbiInfo* abiinfo,
35700 Bool sigill_diag
35703 UChar opc1;
35704 UInt opc2;
35705 UInt opc3;
35706 DisResult dres;
35707 UInt theInstr;
35708 UInt prefix;
35709 IRType ty = mode64 ? Ity_I64 : Ity_I32;
35710 UInt hwcaps = archinfo->hwcaps;
35711 UInt inst_size = WORD_INST_SIZE; //Default
35712 Long delta;
35713 Bool allow_F = False;
35714 Bool allow_V = False;
35715 Bool allow_FX = False;
35716 Bool allow_GX = False;
35717 Bool allow_VX = False; // Equates to "supports Power ISA 2.06
35718 Bool allow_DFP = False;
35719 Bool allow_isa_2_07 = False;
35720 Bool allow_isa_3_0 = False;
35721 Bool allow_isa_3_1 = False;
35722 Bool allow_scv = False;
35723 Bool is_prefix;
35725 /* In ISA 3.1 the ACC is implemented on top of the vsr0 thru vsr31.
      NOTE, ISA 3.1 says in the future the ACC implementation may change. It
35728 doesn't say how it might change but the assumption is the ACC might be
35729 implemented as a separate register file. If/when the ACC is implemented
35730 as a separate register file, ACC_mapped_on_VSR can be set to False, and
35731 Valgrind will instead utilize the separate register file. 2/8/2022
35733 For example, if ISA_3.2 implements the ACC as a separate register
35734 file, there will need to be a check after the if (mode64) statement below
35735 of the form: if (allow_isa_3_2) ACC_mapped_on_VSR = False;
35736 to set the flag to indicate the ACC is implemented as a separate register
35737 file. */
35738 Bool ACC_mapped_on_VSR = True;
35740 /* What insn variants are we supporting today? */
35741 if (mode64) {
35742 allow_F = True;
35743 allow_V = (0 != (hwcaps & VEX_HWCAPS_PPC64_V));
35744 allow_FX = (0 != (hwcaps & VEX_HWCAPS_PPC64_FX));
35745 allow_GX = (0 != (hwcaps & VEX_HWCAPS_PPC64_GX));
35746 allow_VX = (0 != (hwcaps & VEX_HWCAPS_PPC64_VX));
35747 allow_DFP = (0 != (hwcaps & VEX_HWCAPS_PPC64_DFP));
35748 allow_isa_2_07 = (0 != (hwcaps & VEX_HWCAPS_PPC64_ISA2_07));
35749 allow_isa_3_0 = (0 != (hwcaps & VEX_HWCAPS_PPC64_ISA3_0));
35750 allow_isa_3_1 = (0 != (hwcaps & VEX_HWCAPS_PPC64_ISA3_1));
35751 allow_scv = archinfo->ppc_scv_supported;
35752 } else {
35753 allow_F = (0 != (hwcaps & VEX_HWCAPS_PPC32_F));
35754 allow_V = (0 != (hwcaps & VEX_HWCAPS_PPC32_V));
35755 allow_FX = (0 != (hwcaps & VEX_HWCAPS_PPC32_FX));
35756 allow_GX = (0 != (hwcaps & VEX_HWCAPS_PPC32_GX));
35757 allow_VX = (0 != (hwcaps & VEX_HWCAPS_PPC32_VX));
35758 allow_DFP = (0 != (hwcaps & VEX_HWCAPS_PPC32_DFP));
35759 allow_isa_2_07 = (0 != (hwcaps & VEX_HWCAPS_PPC32_ISA2_07));
35760 allow_isa_3_0 = (0 != (hwcaps & VEX_HWCAPS_PPC32_ISA3_0));
35761 /* ISA 3.1 is not supported in 32-bit mode */
35762 /* The scv instruction is not supported in 32-bit mode */
   /* Enable writing the OV32 and CA32 bits added with ISA3.0 */
35766 OV32_CA32_supported = allow_isa_3_0;
35768 /* The running delta */
35769 delta = (Long)mkSzAddr(ty, (ULong)delta64);
35771 /* Set result defaults. */
35772 dres.whatNext = Dis_Continue;
35773 dres.len = 0;
35774 dres.jk_StopHere = Ijk_INVALID;
35775 dres.hint = Dis_HintNone;
35777 /* At least this is simple on PPC32: insns are all 4 bytes long, and
35778 4-aligned. So just fish the whole thing out of memory right now
35779 and have done. */
35780 theInstr = getUIntPPCendianly( &guest_code[delta] );
35781 prefix = 0; /* Reset the prefix so instruction flag */
35783 if (0) vex_printf("insn: 0x%x\n", theInstr);
35785 DIP("\t0x%llx: ", (ULong)guest_CIA_curr_instr);
35787 /* Spot "Special" instructions (see comment at top of file). */
35789 const UChar* code = guest_code + delta;
35790 /* Spot the 16-byte preamble:
35791 32-bit mode:
35792 5400183E rlwinm 0,0,3,0,31
35793 5400683E rlwinm 0,0,13,0,31
35794 5400E83E rlwinm 0,0,29,0,31
35795 5400983E rlwinm 0,0,19,0,31
35796 64-bit mode:
35797 78001800 rotldi 0,0,3
35798 78006800 rotldi 0,0,13
35799 7800E802 rotldi 0,0,61
35800 78009802 rotldi 0,0,51
35802 UInt word1 = mode64 ? 0x78001800 : 0x5400183E;
35803 UInt word2 = mode64 ? 0x78006800 : 0x5400683E;
35804 UInt word3 = mode64 ? 0x7800E802 : 0x5400E83E;
35805 UInt word4 = mode64 ? 0x78009802 : 0x5400983E;
35806 Bool is_special_preamble = False;
35807 if (getUIntPPCendianly(code+ 0) == word1 &&
35808 getUIntPPCendianly(code+ 4) == word2 &&
35809 getUIntPPCendianly(code+ 8) == word3 &&
35810 getUIntPPCendianly(code+12) == word4) {
35811 is_special_preamble = True;
35812 } else if (! mode64 &&
35813 getUIntPPCendianly(code+ 0) == 0x54001800 &&
35814 getUIntPPCendianly(code+ 4) == 0x54006800 &&
35815 getUIntPPCendianly(code+ 8) == 0x5400E800 &&
35816 getUIntPPCendianly(code+12) == 0x54009800) {
35817 static Bool reported = False;
35818 if (!reported) {
35819 vex_printf("disInstr(ppc): old ppc32 instruction magic detected. Code might clobber r0.\n");
35820 vex_printf("disInstr(ppc): source needs to be recompiled against latest valgrind.h.\n");
35821 reported = True;
35823 is_special_preamble = True;
35825 if (is_special_preamble) {
35826 /* Got a "Special" instruction preamble. Which one is it? */
35827 if (getUIntPPCendianly(code+16) == 0x7C210B78 /* or 1,1,1 */) {
35828 /* %R3 = client_request ( %R4 ) */
35829 DIP("r3 = client_request ( %%r4 )\n");
35830 delta += 20;
35831 putGST( PPC_GST_CIA, mkSzImm( ty, guest_CIA_bbstart + delta ));
35832 dres.jk_StopHere = Ijk_ClientReq;
35833 dres.whatNext = Dis_StopHere;
35834 goto decode_success;
35836 else
35837 if (getUIntPPCendianly(code+16) == 0x7C421378 /* or 2,2,2 */) {
35838 /* %R3 = guest_NRADDR */
35839 DIP("r3 = guest_NRADDR\n");
35840 delta += 20;
35841 dres.len = 20;
35842 putIReg(3, IRExpr_Get( OFFB_NRADDR, ty ));
35843 goto decode_success;
35845 else
35846 if (getUIntPPCendianly(code+16) == 0x7C631B78 /* or 3,3,3 */) {
35847 delta += 20;
35848 if (host_endness == VexEndnessLE) {
35849 /* branch-and-link-to-noredir %R12 */
35850 DIP("branch-and-link-to-noredir r12\n");
35851 putGST( PPC_GST_LR,
35852 mkSzImm(ty, guest_CIA_bbstart + (Long)delta) );
35853 putGST( PPC_GST_CIA, getIReg(12));
35854 } else {
35855 /* branch-and-link-to-noredir %R11 */
35856 DIP("branch-and-link-to-noredir r11\n");
35857 putGST( PPC_GST_LR,
35858 mkSzImm(ty, guest_CIA_bbstart + (Long)delta) );
35859 putGST( PPC_GST_CIA, getIReg(11));
35861 dres.jk_StopHere = Ijk_NoRedir;
35862 dres.whatNext = Dis_StopHere;
35863 goto decode_success;
35865 else
35866 if (getUIntPPCendianly(code+16) == 0x7C842378 /* or 4,4,4 */) {
35867 /* %R3 = guest_NRADDR_GPR2 */
35868 DIP("r3 = guest_NRADDR_GPR2\n");
35869 delta += 20;
35870 dres.len = 20;
35871 putIReg(3, IRExpr_Get( OFFB_NRADDR_GPR2, ty ));
35872 goto decode_success;
35874 else
35875 if (getUIntPPCendianly(code+16) == 0x7CA52B78 /* or 5,5,5 */) {
35876 DIP("IR injection\n");
35877 if (host_endness == VexEndnessBE)
35878 vex_inject_ir(irsb, Iend_BE);
35879 else
35880 vex_inject_ir(irsb, Iend_LE);
35882 delta += 20;
35883 dres.len = 20;
35885 // Invalidate the current insn. The reason is that the IRop we're
35886 // injecting here can change. In which case the translation has to
35887 // be redone. For ease of handling, we simply invalidate all the
35888 // time.
35890 stmt(IRStmt_Put(OFFB_CMSTART, mkSzImm(ty, guest_CIA_curr_instr)));
35891 stmt(IRStmt_Put(OFFB_CMLEN, mkSzImm(ty, 20)));
35893 putGST( PPC_GST_CIA, mkSzImm( ty, guest_CIA_bbstart + delta ));
35894 dres.whatNext = Dis_StopHere;
35895 dres.jk_StopHere = Ijk_InvalICache;
35896 goto decode_success;
35898 /* We don't know what it is. Set opc1/opc2 so decode_failure
35899 can print the insn following the Special-insn preamble. */
35900 theInstr = getUIntPPCendianly(code+16);
35901 opc1 = ifieldOPC(theInstr);
35902 opc2 = ifieldOPClo10(theInstr);
35903 goto decode_failure;
35904 /*NOTREACHED*/
35908 /* Determine if the instruction is a word instruction (4-bytes) or a
35909 prefix instruction (8-bytes).
      A prefix instruction basically consists of a 4-byte preamble followed
      by the 4-byte word instruction.  The preamble gives information on how
      the immediate fields are extended.  The following 4-bytes are basically
35914 the word instruction containing the opc1 and opc2 fields. */
35916 if (prefix_instruction ( theInstr )) {
35917 int ret;
35918 /* Save the current theInstr into the prefix. Fetch the next
35919 four bytes into theInstr and decode the instruction opc1 and opc2
35920 fields the same as a pre ISA 3.1 word instruction. */
35921 inst_size = PREFIX_INST_SIZE;
35922 delta += WORD_INST_SIZE; // Get next instruction word
35924 prefix = theInstr;
35925 theInstr = getUIntPPCendianly( &guest_code[delta] );
35927 /* Check for pnop instruction. Suffix field is allowed to be anything
35928 but a branch instruction. */
35929 ret = dis_nop_prefix( prefix, theInstr);
35930 if (ret == True)
35931 goto decode_success;
35932 else if (ret == PREFIX_NOP_INVALID)
35933 goto decode_failure;
35934 /* not a pnop instruction, try to decode */
35937 opc1 = ifieldOPC(theInstr);
35938 opc2 = ifieldOPClo10(theInstr);
35939 is_prefix = prefix_instruction( prefix );
35941 // Note: all 'reserved' bits must be cleared, else invalid
35942 switch (opc1) {
35944 /* Integer Arithmetic Instructions */
35945 case 0x0E: // addi
35946 ISA_3_1_PREFIX_CHECK
35947 if (dis_int_arith_prefix( prefix, theInstr )) goto decode_success;
35948 goto decode_failure;
35950 case 0x0C: case 0x0D: // addic, addic.
35951 case 0x0F: case 0x07: case 0x08: // addis, mulli, subfic
35952 if (dis_int_arith( prefix, theInstr )) goto decode_success;
35953 goto decode_failure;
35955 /* Integer Compare Instructions */
35956 case 0x0B: case 0x0A: // cmpi, cmpli
35957 if (dis_int_cmp( prefix, theInstr )) goto decode_success;
35958 goto decode_failure;
35960 /* Integer Logical Instructions */
35961 case 0x1C: case 0x1D: case 0x18: // andi., andis., ori
35962 case 0x1A: // xori
35963 if (dis_int_logic( prefix, theInstr )) goto decode_success;
35964 goto decode_failure;
35966 case 0x1B:
 35967       if ( !is_prefix ) {               // xoris
35968 if (dis_int_logic( prefix, theInstr )) goto decode_success;
35970 goto decode_failure;
35972 case 0x19:
35973 if ( !is_prefix ) { //oris
35974 if (dis_int_logic( prefix, theInstr ))
35975 goto decode_success;
35977 goto decode_failure;
35979 /* Integer Rotate Instructions */
35980 case 0x14: case 0x15: case 0x17: // rlwimi, rlwinm, rlwnm
35981 if (dis_int_rot( prefix, theInstr )) goto decode_success;
35982 goto decode_failure;
35984 /* 64bit Integer Rotate Instructions */
35985 case 0x1E: // rldcl, rldcr, rldic, rldicl, rldicr, rldimi
35986 if (!mode64) goto decode_failure;
35987 if (dis_int_rot( prefix, theInstr )) goto decode_success;
35988 goto decode_failure;
35990 /* Integer Load Instructions */
35991 case 0x20: // lwz
35993 UInt ptype = PrefixType(prefix);
35995 if (( ptype == 1) && prefix_instruction( prefix)) {
35996 // splat instructions: xxspltiw, xxspltidp, xxsplti32dx
35997 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
35998 if (dis_vector_splat_imm_prefix( prefix, theInstr ))
35999 goto decode_success;
36001 } else if ( is_prefix && (ptype == pType2) ) { // plwz
36002 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36003 if (dis_int_load_prefix( prefix, theInstr ))
36004 goto decode_success;
36006 } else { // lwz
36007 if (dis_int_load_prefix( prefix, theInstr ))
36008 goto decode_success;
36010 goto decode_failure;
36013 case 0x22: // lbz
36015 UInt ptype = PrefixType(prefix);
36017 if (is_prefix && ( ptype == pType1 ) ) {
36018 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36019 // splat instructions: xxpermx
36020 if (dis_vector_permute_prefix( prefix, theInstr, abiinfo ))
36021 goto decode_success;
36022 } else if (is_prefix && ( ptype == pType2 ) ) { // plbz: load instruction
36023 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36024 if (dis_int_load_prefix( prefix, theInstr ))
36025 goto decode_success;
36026 } else if (!is_prefix) { // lbz: load instruction
36027 if (dis_int_load_prefix( prefix, theInstr ))
36028 goto decode_success;
36030 goto decode_failure;
36033 case 0x21: case 0x23: // lwzu, lbzu
36034 if (prefix_instruction( prefix)) {
36035 // blend instructions: xxblendvb, xxblendvh, xxblendvw, xxblendvd
36036 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36037 if (dis_vector_blend_prefix( prefix, theInstr ))
36038 goto decode_success;
36039 } else {
36040 // lbzu, lhau, lhzu, lwzu
36041 if (dis_int_load( prefix, theInstr )) goto decode_success;
36042 goto decode_failure;
36044 goto decode_failure;
36046 /* Integer Store Instructions */
36047 case 0x24:
36048 if (is_prefix && (PrefixType(prefix) == pType2)) { // pstw
36049 ISA_3_1_PREFIX_CHECK
36050 if (dis_int_store_prefix( prefix, theInstr, abiinfo ))
36051 goto decode_success;
36052 } else if ( !is_prefix ) { // stw
36053 if (dis_int_store_prefix( prefix, theInstr, abiinfo ))
36054 goto decode_success;
36056 goto decode_failure;
36058 case 0x26:
36059 if (is_prefix && (PrefixType(prefix) == pType2)) { // pstb
36060 ISA_3_1_PREFIX_CHECK
36061 if (dis_int_store_prefix( prefix, theInstr, abiinfo ))
36062 goto decode_success;
36063 } else if ( !is_prefix ) { // stb
36064 if (dis_int_store_prefix( prefix, theInstr, abiinfo ))
36065 goto decode_success;
36067 goto decode_failure;
36069 case 0x2C:
36070 if (is_prefix && (PrefixType(prefix) == pType2)) { // psth
36071 ISA_3_1_PREFIX_CHECK
36072 if (dis_int_store_prefix( prefix, theInstr, abiinfo ))
36073 goto decode_success;
36074 } else if ( !is_prefix ) { //sth
36075 if (dis_int_store_prefix( prefix, theInstr, abiinfo ))
36076 goto decode_success;
36078 goto decode_failure;
36080 case 0x27: case 0x2D: case 0x25: // stbu, sthu, stwu
36081 if (dis_int_store( prefix, theInstr, abiinfo )) goto decode_success;
36082 goto decode_failure;
36084 case 0x28:
36085 if (is_prefix && (PrefixType(prefix) == pType2)) { // plhz
36086 ISA_3_1_PREFIX_CHECK
36087 if (dis_int_load_prefix( prefix, theInstr ))
36088 goto decode_success;
36089 } else if ( !is_prefix ) { // lhz
36090 if (dis_int_load_prefix( prefix, theInstr ))
36091 goto decode_success;
36093 goto decode_failure;
36095 case 0x29:
36096 if (is_prefix && (PrefixType(prefix) == pType0)) { // plwa
36097 ISA_3_1_PREFIX_CHECK
36098 // prefix inst: plwa
36099 if (dis_int_load_ds_form_prefix( prefix, theInstr ))
36100 goto decode_success;
36101 } else if ( !is_prefix ) { // lhzu
36102 if (dis_int_load( prefix, theInstr ))
36103 goto decode_success;
36105 goto decode_failure;
36107 case 0x2A: // lha, plha, plxsd
36109 if (is_prefix && (PrefixType(prefix) == pType0)) { // plxsd
36110 ISA_3_1_PREFIX_CHECK
36111 if (dis_fp_pair_prefix( prefix, theInstr ))
36112 goto decode_success;
36113 } else if (is_prefix && (PrefixType(prefix) == pType2)) { // plha
36114 ISA_3_1_PREFIX_CHECK
36115 if (dis_int_load_prefix( prefix, theInstr ))
36116 goto decode_success;
36117 } else if ( !is_prefix ) {
36118 if (dis_int_load_prefix( prefix, theInstr )) // lha
36119 goto decode_success;
36122 goto decode_failure;
36124 case 0x2B: // lhau, plxssp
36125 if (is_prefix && (PrefixType(prefix) == pType0)) { // plxssp
36126 ISA_3_1_PREFIX_CHECK
36127 if (dis_fp_pair_prefix( prefix, theInstr ))
36128 goto decode_success;
36129 } else if ( !is_prefix ) { // lhau
36130 if (dis_int_load( prefix, theInstr ))
36131 goto decode_success;
36133 goto decode_failure;
36135 /* Integer Load and Store Multiple Instructions */
36136 case 0x2E:
36137 if (is_prefix && (PrefixType(prefix) == pType0)) { // pstxsd
36138 ISA_3_1_PREFIX_CHECK
36139 if (dis_fp_pair_prefix( prefix, theInstr )) goto decode_success;
36140 } else if ( !is_prefix ) { // lmw,
36141 if (dis_int_ldst_mult( prefix, theInstr )) goto decode_success;
36143 goto decode_failure;
36145 case 0x2F:
36146 if (is_prefix && (PrefixType(prefix) == pType0)) { // pstxssp
36147 ISA_3_1_PREFIX_CHECK
36148 if (dis_fp_pair_prefix( prefix, theInstr )) goto decode_success;
36149 } else if ( !is_prefix ) { // stmw
36150 if (dis_int_ldst_mult( prefix, theInstr )) goto decode_success;
36152 goto decode_failure;
36154 /* Branch Instructions */
36155 case 0x12: case 0x10: // b, bc
36156 if (dis_branch( prefix, theInstr, abiinfo, &dres))
36157 goto decode_success;
36158 goto decode_failure;
36160 /* System Linkage Instructions */
36161 case 0x11: // sc, scv
36162 if (dis_syslink( prefix, theInstr, abiinfo, &dres, allow_scv,
36163 sigill_diag))
36164 goto decode_success;
36165 goto decode_failure;
36167 /* Trap Instructions */
36168 case 0x02: // tdi
36169 if (!mode64) goto decode_failure;
36170 if (dis_trapi( prefix, theInstr, &dres)) goto decode_success;
36171 goto decode_failure;
36173 case 0x03: // twi
36174 if (dis_trapi( prefix, theInstr, &dres)) goto decode_success;
36175 goto decode_failure;
36177 case 0x06: // lxvp, stxvp
36178 if (dis_fp_pair_prefix( prefix, theInstr ))
36179 goto decode_success;
36180 goto decode_failure;
36182 /* Floating Point Load Instructions */
36183 case 0x30:
36184 if (!allow_F) goto decode_noF;
36185 if (is_prefix && (PrefixType(prefix) == pType2)) { // plfs
36186 ISA_3_1_PREFIX_CHECK
36187 if (dis_fp_load_prefix( prefix, theInstr )) goto decode_success;
36188 } else if ( !is_prefix ) { // lfs
36189 if (dis_fp_load_prefix( prefix, theInstr )) goto decode_success;
36191 goto decode_failure;
36193 case 0x31: // lfsu
36194 if (!allow_F) goto decode_noF;
36195 if (dis_fp_load( prefix, theInstr )) goto decode_success;
36196 goto decode_failure;
36198 case 0x32:
36199 if (is_prefix && (PrefixType(prefix) == pType0)) { // plxv, TX bit = 0
36200 if (!allow_F) goto decode_noF;
36201 ISA_3_1_PREFIX_CHECK
36202 if (dis_fp_pair_prefix( prefix, theInstr ))
36203 goto decode_success;
36204 } else if (is_prefix && (PrefixType(prefix) == pType2)) { // plfd
36205 ISA_3_1_PREFIX_CHECK
36206 if (dis_fp_load_prefix( prefix, theInstr ))
36207 goto decode_success;
36208 } else if ( !is_prefix ) { // lfd
36209 if (dis_fp_load_prefix( prefix, theInstr ))
36210 goto decode_success;
36212 goto decode_failure;
36214 case 0x33:
36215 if (is_prefix && (PrefixType(prefix) == pType0)) { // plxv, TX bit = 1
36216 if (!allow_F) goto decode_noF;
36217 ISA_3_1_PREFIX_CHECK
36218 if (dis_fp_pair_prefix( prefix, theInstr ))
36219 goto decode_success;
36220 } else { // lfdu
36221 if (!allow_F) goto decode_noF;
36222 if (dis_fp_load( prefix, theInstr )) goto decode_success;
36224 goto decode_failure;
36226 /* Floating Point Store Instructions */
36227 case 0x34:
36228 if (!allow_F) goto decode_noF;
36229 if (is_prefix && (PrefixType(prefix) == pType2)) { // pstfs
36230 ISA_3_1_PREFIX_CHECK
36231 if (dis_fp_store_prefix( prefix, theInstr )) goto decode_success;
36232 } else if ( !is_prefix ) { // stfs
36233 if (dis_fp_store_prefix( prefix, theInstr )) goto decode_success;
36235 goto decode_failure;
36237 case 0x35: // stfsu
36238 if (!allow_F) goto decode_noF;
36239 if (dis_fp_store( prefix, theInstr )) goto decode_success;
36240 goto decode_failure;
36242 case 0x36:
36243 if (is_prefix && (PrefixType(prefix) == pType0)) { // pstxv, XS bit = 0
36244 ISA_3_1_PREFIX_CHECK
36245 if (dis_fp_pair_prefix( prefix, theInstr ))
36246 goto decode_success;
36247 } else if ( is_prefix && (PrefixType(prefix) == pType2)) { // pstfd
36248 ISA_3_1_PREFIX_CHECK
36249 if (dis_fp_store_prefix( prefix, theInstr ))
36250 goto decode_success;
36251 } else if ( !is_prefix ) { // stfd
36252 if (!allow_F) goto decode_noF;
36253 if (dis_fp_store_prefix( prefix, theInstr )) goto decode_success;
36255 goto decode_failure;
36257 case 0x37:
36258 if (is_prefix && (PrefixType(prefix) == pType0)) { // pstxv, XS bit = 1
36259 ISA_3_1_PREFIX_CHECK
36260 if (dis_fp_pair_prefix( prefix, theInstr ))
36261 goto decode_success;
36262 } else if ( !is_prefix ) { // stfdu
36263 if (!allow_F) goto decode_noF;
36264 if (dis_fp_store( prefix, theInstr )) goto decode_success;
36266 goto decode_failure;
36268 /* 128-bit Integer Load */
36269 case 0x38:
36270 if (is_prefix && (PrefixType(prefix) == pType0)) { // plq
36271 ISA_3_1_PREFIX_CHECK
36272 if (dis_int_load_prefix( prefix, theInstr )) goto decode_success;
36273 } else if ( !is_prefix) { // lq
36274 if (dis_int_load_prefix( prefix, theInstr )) goto decode_success;
36276 goto decode_failure;
36278 /* Floating Point Load Double Pair Instructions */
36279 case 0x39: // pld, lxsd, lxssp, lfdp
36281 UInt opc2tmp = ifieldOPC0o2(theInstr);
36282 if (!allow_F) goto decode_noF;
36283 if (prefix_instruction( prefix )) { // pld
36284 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36285 if (dis_int_load_ds_form_prefix( prefix, theInstr ))
36286 goto decode_success;
36288 } else {
36289 if ((opc2tmp == 2) || (opc2tmp == 3)) { // lxsd, lxssp
36290 if (dis_fp_pair_prefix( prefix, theInstr ))
36291 goto decode_success;
36293 } else if (opc2tmp == 0) { // lfdp
36294 if (dis_fp_pair( prefix, theInstr ))
36295 goto decode_success;
36298 goto decode_failure;
36301 case 0x3D:
36303 UInt bits1_0 = IFIELD( theInstr, 0, 2 );
36304 UInt bits2_0 = IFIELD( theInstr, 0, 3 );
36306 if (is_prefix && (PrefixType(prefix) == pType0)) { // pstd
36307 if (dis_int_store_ds_prefix( prefix, theInstr, abiinfo ))
36308 goto decode_success;
36310 } else if ( !is_prefix ) {
36311 if (bits2_0 == 0x1) { // lxv [29:31] = 1
36312 if (dis_fp_pair_prefix( prefix, theInstr ))
36313 goto decode_success;
36314 } else if (bits2_0 == 0x5) { // stxv [29:31] = 5
36315 if (dis_fp_pair_prefix( prefix, theInstr ))
36316 goto decode_success;
36317 } else if (bits1_0 == 0x0) { // stfdp [30:31] = 0
36318 if (dis_fp_pair( prefix, theInstr ))
36319 goto decode_success;
36320 } else if (bits1_0 == 0x2) { // stxsd [30:31] = 2
36321 if (dis_fp_pair_prefix( prefix, theInstr ))
36322 goto decode_success;
36323 } else if (bits1_0 == 0x3) { // stxssp [30:31] = 3
36324 if (dis_fp_pair_prefix( prefix, theInstr ))
36325 goto decode_success;
36328 goto decode_failure;
36331 /* 64bit Integer Loads */
36332 case 0x3A: // word inst: ld, ldu, lwa
36334 UChar b1_0 = IFIELD(theInstr, 0, 2);
36335 if (!mode64) goto decode_failure;
36337 if (is_prefix && (PrefixType(prefix) == pType0)) { // plxvp
36338 ISA_3_1_PREFIX_CHECK
36339 if (dis_fp_pair_prefix( prefix, theInstr ))
36340 goto decode_success;
36342 } else if ( !is_prefix && ( b1_0 != 0x3 )) {
36343 // ld [30:31] = 0
36344 // ldu [30:31] = 1
36345 // lwa [30:31] = 2
36346 /* Note, here we only deal with the non prefix versions
36347 of the instructions. Hence do not check for ISA 3.1. */
36348 if (dis_int_load_ds_form_prefix( prefix, theInstr ))
36349 goto decode_success;
36351 goto decode_failure;
36354 case 0x3B:
36355 if (!allow_F) goto decode_noF;
36356 opc2 = ifieldOPClo10(theInstr);
36358 switch (opc2) {
36359 case 0x2: // dadd - DFP Add
36360 case 0x202: // dsub - DFP Subtract
36361 case 0x22: // dmul - DFP Mult
36362 case 0x222: // ddiv - DFP Divide
36363 if (!allow_DFP) goto decode_noDFP;
36364 if (dis_dfp_arith( prefix, theInstr ) )
36365 goto decode_success;
36366 goto decode_failure;
36367 case 0x82: // dcmpo, DFP comparison ordered instruction
36368 case 0x282: // dcmpu, DFP comparison unordered instruction
36369 if (!allow_DFP) goto decode_noDFP;
36370 if (dis_dfp_compare( prefix, theInstr ) )
36371 goto decode_success;
36372 goto decode_failure;
36373 case 0x102: // dctdp - DFP convert to DFP long
36374 case 0x302: // drsp - DFP round to dfp short
36375 case 0x122: // dctfix - DFP convert to fixed
36376 if (!allow_DFP) goto decode_noDFP;
36377 if (dis_dfp_fmt_conv( prefix, theInstr ) )
36378 goto decode_success;
36379 goto decode_failure;
36380 case 0x322: // POWER 7 inst, dcffix - DFP convert from fixed
36381 if (!allow_VX)
36382 goto decode_failure;
36383 if (!allow_DFP) goto decode_noDFP;
36384 if (dis_dfp_fmt_conv( prefix, theInstr ) )
36385 goto decode_success;
36386 goto decode_failure;
36387 case 0x2A2: // dtstsf - DFP number of significant digits
36388 case 0x2A3: // dtstsfi - DFP number of significant digits Immediate
36389 if (!allow_DFP) goto decode_noDFP;
36390 if (dis_dfp_significant_digits( prefix, theInstr ) )
36391 goto decode_success;
36392 goto decode_failure;
36393 case 0x142: // ddedpd DFP Decode DPD to BCD
36394 case 0x342: // denbcd DFP Encode BCD to DPD
36395 if (!allow_DFP) goto decode_noDFP;
36396 if (dis_dfp_bcd( prefix, theInstr ) )
36397 goto decode_success;
36398 goto decode_failure;
36399 case 0x162: // dxex - Extract exponent
36400 case 0x362: // diex - Insert exponent
36401 if (!allow_DFP) goto decode_noDFP;
36402 if (dis_dfp_extract_insert( prefix, theInstr ) )
36403 goto decode_success;
36404 goto decode_failure;
36405 case 0x3CE: // fcfidus (implemented as native insn)
36406 if (!allow_VX)
36407 goto decode_noVX;
36408 if (dis_fp_round( prefix, theInstr ) )
36409 goto decode_success;
36410 goto decode_failure;
36411 case 0x34E: // fcfids
36412 if (dis_fp_round( prefix, theInstr ) )
36413 goto decode_success;
36414 goto decode_failure;
36417 opc2 = ifieldOPClo9( theInstr );
36418 switch (opc2) {
36419 case 0x42: // dscli, DFP shift left
36420 case 0x62: // dscri, DFP shift right
36421 if (!allow_DFP) goto decode_noDFP;
36422 if (dis_dfp_shift( prefix, theInstr ))
36423 goto decode_success;
36424 goto decode_failure;
36425 case 0xc2: // dtstdc, DFP test data class
36426 case 0xe2: // dtstdg, DFP test data group
36427 if (!allow_DFP) goto decode_noDFP;
36428 if (dis_dfp_class_test( prefix, theInstr ))
36429 goto decode_success;
36430 goto decode_failure;
36433 opc2 = ifieldOPClo8( theInstr );
36434 switch (opc2) {
36435 case 0x3: // dqua - DFP Quantize
36436 case 0x23: // drrnd - DFP Reround
36437 case 0x43: // dquai - DFP Quantize immediate
36438 if (!allow_DFP) goto decode_noDFP;
36439 if (dis_dfp_quantize_sig_rrnd( prefix, theInstr ) )
36440 goto decode_success;
36441 goto decode_failure;
36442 case 0xA2: // dtstex - DFP Test exponent
36443 if (!allow_DFP) goto decode_noDFP;
36444 if (dis_dfp_exponent_test( prefix, theInstr ) )
36445 goto decode_success;
36446 goto decode_failure;
36447 case 0x63: // drintx - Round to an integer value
36448 case 0xE3: // drintn - Round to an integer value
36449 if (!allow_DFP) goto decode_noDFP;
36450 if (dis_dfp_round( prefix, theInstr ) ) {
36451 goto decode_success;
36453 goto decode_failure;
36454 default:
36455 break; /* fall through to next opc2 check */
36458 opc2 = IFIELD(theInstr, 1, 5);
36459 switch (opc2) {
36460 /* Floating Point Arith Instructions */
36461 case 0x12: case 0x14: case 0x15: // fdivs, fsubs, fadds
36462 case 0x19: // fmuls
36463 if (dis_fp_arith( prefix, theInstr )) goto decode_success;
36464 goto decode_failure;
36465 case 0x16: // fsqrts
36466 if (!allow_FX) goto decode_noFX;
36467 if (dis_fp_arith( prefix, theInstr )) goto decode_success;
36468 goto decode_failure;
36469 case 0x18: // fres
36470 if (!allow_GX) goto decode_noGX;
36471 if (dis_fp_arith( prefix, theInstr )) goto decode_success;
36472 goto decode_failure;
36474 /* Floating Point Mult-Add Instructions */
36475 case 0x1C: case 0x1D: case 0x1E: // fmsubs, fmadds, fnmsubs
36476 case 0x1F: // fnmadds
36477 if (dis_fp_multadd( prefix, theInstr )) goto decode_success;
36478 goto decode_failure;
36480 case 0x1A: // frsqrtes
36481 if (!allow_GX) goto decode_noGX;
36482 if (dis_fp_arith( prefix, theInstr )) goto decode_success;
36483 goto decode_failure;
36485 default:
36486 ; // Fall thru to the next check
36489 if ( !prefix_instruction( prefix ) ) {
36490 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36491 opc2 = IFIELD( theInstr, 3, 8 );
36492 if ((opc2 == XVI4GER8) || // xvi4ger8
36493 (opc2 == XVI4GER8PP) || // xvi4ger8pp
36494 (opc2 == XVI8GER4) || // xvi8ger4
36495 (opc2 == XVI8GER4PP) || // xvi8ger4pp
36496 (opc2 == XVI8GER4SPP) || // xvi8ger4spp
36497 (opc2 == XVI16GER2) || // xvi16ger2
36498 (opc2 == XVI16GER2PP) || // xvi16ger2pp
36499 (opc2 == XVBF16GER2) || // xvbf16ger2
36500 (opc2 == XVBF16GER2PP) || // xvbf16ger2pp
36501 (opc2 == XVBF16GER2PN) || // xvbf16ger2pn
36502 (opc2 == XVBF16GER2NP) || // xvbf16ger2np
36503 (opc2 == XVBF16GER2NN) || // xvbf16ger2nn
36504 (opc2 == XVF16GER2) || // xvf16ger2
36505 (opc2 == XVF16GER2PP) || // xvf16ger2pp
36506 (opc2 == XVF16GER2PN) || // xvf16ger2pn
36507 (opc2 == XVF16GER2NP) || // xvf16ger2np
36508 (opc2 == XVF16GER2NN) || // xvf16ger2nn
36509 (opc2 == XVI16GER2S) || // xvi16ger2s
36510 (opc2 == XVI16GER2SPP) || // xvi16ger2spp
36511 (opc2 == XVF32GER) || // xvf32ger
36512 (opc2 == XVF32GERPP) || // xvf32gerpp
36513 (opc2 == XVF32GERPN) || // xvf32gerpn
36514 (opc2 == XVF32GERNP) || // xvf32gernp
36515 (opc2 == XVF32GERNN) || // xvf32gernn
36516 (opc2 == XVF64GER) || // xvf64ger
36517 (opc2 == XVF64GERPP) || // xvf64gerpp
36518 (opc2 == XVF64GERPN) || // xvf64gerpn
36519 (opc2 == XVF64GERNP) || // xvf64gernp
36520 (opc2 == XVF64GERNN)) { // xvf64gernn
36521 if (dis_vsx_accumulator_prefix( prefix, theInstr, abiinfo,
36522 ACC_mapped_on_VSR ) )
36523 goto decode_success;
36524 goto decode_failure;
36525 } else {
36526 vex_printf("ERROR, dis_vsx_accumulator_prefix, unknown opc2 = 0x%x\n",
36527 opc2);
36528 goto decode_failure;
36531 } else {
36532 // lxacc
36533 if (dis_vsx_accumulator_prefix( prefix, theInstr, abiinfo,
36534 ACC_mapped_on_VSR ) )
36535 goto decode_success;
36536 goto decode_failure;
36538 break;
36540 case 0x3C: // pstq, VSX instructions (except load/store)
36542 if ( is_prefix && (PrefixType(prefix) == pType0) ) {
36543 // pstq instruction
36544 ISA_3_1_PREFIX_CHECK
36545 if (dis_int_store_ds_prefix( prefix, theInstr, abiinfo ))
36546 goto decode_success;
36547 goto decode_failure;
36550 // All of these VSX instructions use some VMX facilities, so
36551 // if allow_V is not set, we'll skip trying to decode.
36552 if (!allow_V) goto decode_noVX;
36553 /* The xvtstdcdp and xvtstdcsp instructions do not have a
36554 contiguous opc2 field. The following vsxOpc2 = get_VSX60_opc2()
36555 doesn't correctly match these instructions for dc != 0. So,
36556 we will explicitly look for the two instructions. */
36557 opc2 = ifieldOPClo10(theInstr);
36558 UInt opc2hi = IFIELD(theInstr, 7, 4);
36559 UInt opc2lo = IFIELD(theInstr, 3, 3);
36560 UInt vsxOpc2;
36562 if (( opc2hi == 13 ) && ( opc2lo == 5)) { //xvtstdcsp
36563 if (dis_vxs_misc( prefix, theInstr, abiinfo, 0x354, allow_isa_3_0 ))
36564 goto decode_success;
36565 goto decode_failure;
36568 if (( opc2hi == 15 ) && ( opc2lo == 5)) { //xvtstdcdp
36569 if (dis_vxs_misc( prefix, theInstr, abiinfo, 0x3D4, allow_isa_3_0 ))
36570 goto decode_success;
36571 goto decode_failure;
36574 if ( ( opc2 == 0x168 ) && ( IFIELD( theInstr, 19, 2 ) == 0 ) )// xxspltib
36576 /* This is a special case of the XX1 form where the RA, RB
36577 * fields hold an immediate value.
36579 if (dis_vxs_misc( prefix, theInstr, abiinfo, opc2,
36580 allow_isa_3_0))
36581 goto decode_success;
36582 goto decode_failure;
36585 /* This is a special instruction where the opc2 field instr[21:30] = 360
36586 (0x168) and field instr[11:15] = 31 (0x1F) */
36587 if ( ( opc2 == 0x168 ) && ( IFIELD( theInstr, 16, 5 ) == 31 ) )// lxvlq
36589 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36590 if (dis_load_vector_special( prefix, theInstr, abiinfo, opc2,
36591 allow_isa_3_0))
36592 goto decode_success;
36593 goto decode_failure;
36596 if ( ( opc2 == 0x394 ) || // xxgenpcvbm
36597 ( opc2 == 0x395 ) || // xxgenpcvwm
36598 ( opc2 == 0x3B4 ) || // xxgenpcvhm
36599 ( opc2 == 0x3B5 ) ) { // xxgenpcvdm
36600 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36601 if (dis_vector_generate_pvc_from_mask( prefix, theInstr,
36602 abiinfo ))
36603 goto decode_success;
36604 goto decode_failure;
36607 /* The vsxOpc2 returned is the "normalized" value, representing the
36608 * instructions secondary opcode as taken from the standard secondary
 36609        * opcode field [21:30] (IBM notation), even if the actual field
36610 * is non-standard. These normalized values are given in the opcode
36611 * appendices of the ISA 2.06 document.
36613 vsxOpc2 = get_VSX60_opc2(opc2, theInstr);
36615 switch (vsxOpc2) {
36616 case 0x8: case 0x28: case 0x48: case 0xc8: // xxsldwi, xxpermdi, xxmrghw, xxmrglw
36617 case 0x068: case 0xE8: // xxperm, xxpermr
36618 case 0x018: case 0x148: // xxsel, xxspltw
36619 if (dis_vx_permute_misc( prefix, theInstr, vsxOpc2 ))
36620 goto decode_success;
36621 goto decode_failure;
36622 case 0xC: case 0x2C: case 0x4C: // xscmpeqdp, xscmpgtdp, xscmpgedp
36623 case 0x200: case 0x220: //xsmaxcdp, xsmincdp
36624 if (dis_vx_misc( prefix, theInstr, vsxOpc2 )) goto decode_success;
36625 goto decode_failure;
36626 case 0x268: case 0x248: case 0x288: // xxlxor, xxlor, xxlnor,
36627 case 0x208: case 0x228: // xxland, xxlandc
36628 case 0x2A8: case 0x2C8: case 0x2E8: // xxlorc, xxlnand, xxleqv
36629 if (dis_vx_logic( prefix, theInstr, vsxOpc2 )) goto decode_success;
36630 goto decode_failure;
36631 case 0x0ec: // xscmpexpdp
36632 case 0x14A: case 0x16A: // xxextractuw, xxinsertw
36633 case 0x2B2: case 0x2C0: // xsabsdp, xscpsgndp
36634 case 0x2D2: case 0x2F2: // xsnabsdp, xsnegdp
36635 case 0x280: case 0x2A0: // xsmaxdp, xsmindp
36636 case 0x0F2: case 0x0D2: // xsrdpim, xsrdpip
36637 case 0x034: case 0x014: // xsresp, xsrsqrtesp
36638 case 0x0B4: case 0x094: // xsredp, xsrsqrtedp
36639 case 0x0D6: case 0x0B2: // xsrdpic, xsrdpiz
36640 case 0x092: case 0x232: // xsrdpi, xsrsp
36641 case 0x2b6: // xsxexpdp, xsxsigdp
36642 case 0x254: case 0x2d4: // xststdcsp, xststdcdp
36643 case 0x354: // xvtstdcsp
36644 case 0x360:case 0x396: // xviexpsp, xsiexpdp
36645 case 0x3D4: case 0x3E0: // xvtstdcdp, xviexpdp
36646 if (dis_vxs_misc( prefix, theInstr, abiinfo, vsxOpc2,
36647 allow_isa_3_0 ))
36648 goto decode_success;
36649 goto decode_failure;
36651 case 0x3B6: {
36652 UInt inst_select = IFIELD( theInstr, 16, 5);
36654 if (inst_select == 2) { //xvtlsbb
36655 if (dis_test_LSB_by_bit( prefix, theInstr))
36656 goto decode_success;
36657 goto decode_failure;
36660 // xxbrh, xxbrw, xxbrd, xxbrq, xvxexpdp, xvxexpsp, xvxsigdp
36661 // xvxsigsp, xvcvhpsp, xvcvbf16spn, xvcvspbf16
36662 if (dis_vxs_misc( prefix, theInstr, abiinfo, vsxOpc2,
36663 allow_isa_3_0 ))
36664 goto decode_success;
36665 goto decode_failure;
36668 case 0x08C: case 0x0AC: // xscmpudp, xscmpodp
36669 if (dis_vx_cmp( prefix, theInstr, vsxOpc2 )) goto decode_success;
36670 goto decode_failure;
36672 case 0x0: case 0x020: // xsaddsp, xssubsp
36673 case 0x080: // xsadddp
36674 case 0x060: case 0x0E0: // xsdivsp, xsdivdp
36675 case 0x004: case 0x024: // xsmaddasp, xsmaddmsp
36676 case 0x084: case 0x0A4: // xsmaddadp, xsmaddmdp
36677 case 0x044: case 0x064: // xsmsubasp, xsmsubmsp
36678 case 0x0C4: case 0x0E4: // xsmsubadp, xsmsubmdp
36679 case 0x204: case 0x224: // xsnmaddasp, xsnmaddmsp
36680 case 0x284: case 0x2A4: // xsnmaddadp, xsnmaddmdp
36681 case 0x244: case 0x264: // xsnmsubasp, xsnmsubmsp
36682 case 0x2C4: case 0x2E4: // xsnmsubadp, xsnmsubmdp
36683 case 0x040: case 0x0C0: // xsmulsp, xsmuldp
36684 case 0x0A0: // xssubdp
36685 case 0x016: case 0x096: // xssqrtsp,xssqrtdp
36686 case 0x0F4: case 0x0D4: // xstdivdp, xstsqrtdp
36687 if (dis_vxs_arith( prefix, theInstr, vsxOpc2 )) goto decode_success;
36688 goto decode_failure;
36689 case 0x180: // xvadddp
36690 case 0x1E0: // xvdivdp
36691 case 0x1C0: // xvmuldp
36692 case 0x1A0: // xvsubdp
36693 case 0x184: case 0x1A4: // xvmaddadp, xvmaddmdp
36694 case 0x1C4: case 0x1E4: // xvmsubadp, xvmsubmdp
36695 case 0x384: case 0x3A4: // xvnmaddadp, xvnmaddmdp
36696 case 0x3C4: case 0x3E4: // xvnmsubadp, xvnmsubmdp
36697 case 0x1D4: case 0x1F4: // xvtsqrtdp, xvtdivdp
36698 case 0x196: // xvsqrtdp
36699 if (dis_vxv_dp_arith( prefix, theInstr, vsxOpc2 ))
36700 goto decode_success;
36701 goto decode_failure;
36702 case 0x100: // xvaddsp
36703 case 0x160: // xvdivsp
36704 case 0x140: // xvmulsp
36705 case 0x120: // xvsubsp
36706 case 0x104: case 0x124: // xvmaddasp, xvmaddmsp
36707 case 0x144: case 0x164: // xvmsubasp, xvmsubmsp
36708 case 0x304: case 0x324: // xvnmaddasp, xvnmaddmsp
36709 case 0x344: case 0x364: // xvnmsubasp, xvnmsubmsp
36710 case 0x154: case 0x174: // xvtsqrtsp, xvtdivsp
36711 case 0x116: // xvsqrtsp
36712 if (dis_vxv_sp_arith( prefix, theInstr, vsxOpc2 ))
36713 goto decode_success;
36714 goto decode_failure;
36716 case 0x250: // xscvuxdsp
36717 case 0x2D0: case 0x3d0: // xscvuxddp, xvcvuxddp
36718 case 0x350: case 0x1d0: // xvcvuxdsp, xvcvuxwdp
36719 case 0x090: // xscvdpuxws
36720 // The above VSX conversion instructions employ some ISA 2.06
36721 // floating point conversion instructions under the covers,
36722 // so if allow_VX (which means "supports ISA 2.06") is not set,
36723 // we'll skip the decode.
36724 if (!allow_VX) goto decode_noVX;
36725 if (dis_vx_conv( prefix, theInstr, vsxOpc2 )) goto decode_success;
36726 goto decode_failure;
36728 case 0x2B0: // xscvdpsxds
36729 case 0x270: case 0x2F0: // xscvsxdsp, xscvsxddp
36730 case 0x1b0: case 0x130: // xvcvdpsxws, xvcvspsxws
36731 case 0x0b0: case 0x290: // xscvdpsxws, xscvdpuxds
36732 case 0x212: case 0x216: // xscvdpsp, xscvdpspn
36733 case 0x292: case 0x296: // xscvspdp, xscvspdpn
36734 case 0x312: // xvcvdpsp
36735 case 0x390: case 0x190: // xvcvdpuxds, xvcvdpuxws
36736 case 0x3B0: case 0x310: // xvcvdpsxds, xvcvspuxds
36737 case 0x392: case 0x330: // xvcvspdp, xvcvspsxds
36738 case 0x110: case 0x3f0: // xvcvspuxws, xvcvsxddp
36739 case 0x370: case 0x1f0: // xvcvsxdsp, xvcvsxwdp
36740 case 0x170: case 0x150: // xvcvsxwsp, xvcvuxwsp
36741 if (dis_vx_conv( prefix, theInstr, vsxOpc2 )) goto decode_success;
36742 goto decode_failure;
36744 case 0x18C: // xvcmpeqdp[.]
36745 case 0x10C: // xvcmpeqsp[.]
36746 case 0x14C: // xvcmpgesp[.]
36747 case 0x12C: // xvcmpgtsp[.]
36748 case 0x1CC: // xvcmpgedp[.]
36749 case 0x1AC: // xvcmpgtdp[.]
36750 if (dis_vvec_cmp( prefix, theInstr, vsxOpc2 )) goto decode_success;
36751 goto decode_failure;
36753 case 0x134: // xvresp
36754 case 0x1B4: // xvredp
36755 case 0x194: case 0x114: // xvrsqrtedp, xvrsqrtesp
36756 case 0x372: // xvnegsp
36757 case 0x380: case 0x3A0: // xvmaxdp, xvmindp
36758 case 0x300: case 0x320: // xvmaxsp, xvminsp
36759 case 0x3C0: case 0x340: // xvcpsgndp, xvcpsgnsp
36760 case 0x3B2: case 0x332: // xvabsdp, xvabssp
36761 case 0x3D2: case 0x352: // xvnabsdp, xvnabssp
36762 case 0x192: case 0x1D6: // xvrdpi, xvrdpic
36763 case 0x1F2: case 0x1D2: // xvrdpim, xvrdpip
36764 case 0x1B2: case 0x3F2: // xvrdpiz, xvnegdp
36765 case 0x112: case 0x156: // xvrspi, xvrspic
36766 case 0x172: case 0x152: // xvrspim, xvrspip
36767 case 0x132: // xvrspiz
36768 if (dis_vxv_misc( prefix, theInstr, vsxOpc2 )) goto decode_success;
36769 goto decode_failure;
36771 default:
36772 goto decode_failure;
36774 break;
36777 /* 64bit Integer Stores */
36778 case 0x3E: // std, stdu, stq, pstxvp
36780 UChar b1_0 = IFIELD(theInstr, 2, 0);
36782 if (is_prefix && (PrefixType(prefix) == pType0)) { // pstxvp
36783 if (dis_fp_pair_prefix( prefix, theInstr ))
36784 goto decode_success;
36786 } else if ( !is_prefix && (b1_0 != 3)) {
36787 // std [30:31] = 0
36788 // stdu [30:31] = 1
36789 // stq [30:31] = 2
36790 if (dis_int_store_ds_prefix( prefix, theInstr, abiinfo ))
36791 goto decode_success;
36793 } else {
36794 vex_printf("No mapping for instruction, opc1 = 0x3E, theInstr = 0x%x\n",
36795 theInstr);
36798 goto decode_failure;
36800 case 0x3F:
36801 if ( prefix_instruction( prefix ) ) { // stxacc
36802 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36803 if (dis_vsx_accumulator_prefix( prefix, theInstr, abiinfo,
36804 ACC_mapped_on_VSR ) )
36805 goto decode_success;
36806 goto decode_failure;
36809 if (!allow_F) goto decode_noF;
36810 /* Instrs using opc[1:5] never overlap instrs using opc[1:10],
36811 so we can simply fall through the first switch statement */
36813 opc2 = IFIELD(theInstr, 1, 5);
36814 switch (opc2) {
36815 /* Floating Point Arith Instructions */
36816 case 0x12: case 0x14: case 0x15: // fdiv, fsub, fadd
36817 case 0x19: // fmul
36818 if (dis_fp_arith( prefix, theInstr )) goto decode_success;
36819 goto decode_failure;
36820 case 0x16: // fsqrt
36821 if (!allow_FX) goto decode_noFX;
36822 if (dis_fp_arith( prefix, theInstr )) goto decode_success;
36823 goto decode_failure;
36824 case 0x17: case 0x1A: // fsel, frsqrte
36825 if (!allow_GX) goto decode_noGX;
36826 if (dis_fp_arith( prefix, theInstr )) goto decode_success;
36827 goto decode_failure;
36829 /* Floating Point Mult-Add Instructions */
36830 case 0x1C: case 0x1D: case 0x1E: // fmsub, fmadd, fnmsub
36831 case 0x1F: // fnmadd
36832 if (dis_fp_multadd( prefix, theInstr )) goto decode_success;
36833 goto decode_failure;
36835 case 0x18: // fre
36836 if (!allow_GX) goto decode_noGX;
36837 if (dis_fp_arith( prefix, theInstr )) goto decode_success;
36838 goto decode_failure;
36840 default:
36841 break; // Fall through
36844 opc2 = IFIELD(theInstr, 1, 8);
36845 switch (opc2) {
36846 case 0x5: // xsrqpi, xsrqpix
36847 case 0x25: // xsrqpxp
36848 if ( !mode64 || !allow_isa_3_0 ) goto decode_failure;
36849 if ( dis_vx_Scalar_Round_to_quad_integer( prefix, theInstr, abiinfo ) )
36850 goto decode_success;
36851 goto decode_failure;
36852 default:
36853 break; // Fall through
36856 opc2 = IFIELD(theInstr, 1, 10);
36857 UInt inst_select = IFIELD( theInstr, 16, 5 );
36859 switch (opc2) {
36860 /* 128-bit DFP instructions */
36861 case 0x2: // daddq - DFP Add
36862 case 0x202: // dsubq - DFP Subtract
36863 case 0x22: // dmulq - DFP Mult
36864 case 0x222: // ddivq - DFP Divide
36865 if (!allow_DFP) goto decode_noDFP;
36866 if (dis_dfp_arithq( prefix, theInstr ))
36867 goto decode_success;
36868 goto decode_failure;
36869 case 0x162: // dxexq - DFP Extract exponent
36870 case 0x362: // diexq - DFP Insert exponent
36871 if (!allow_DFP) goto decode_noDFP;
36872 if (dis_dfp_extract_insertq( prefix, theInstr ))
36873 goto decode_success;
36874 goto decode_failure;
36876 case 0x82: // dcmpoq, DFP comparison ordered instruction
36877 case 0x282: // dcmpuq, DFP comparison unordered instruction
36878 if (!allow_DFP) goto decode_noDFP;
36879 if (dis_dfp_compare( prefix, theInstr ) )
36880 goto decode_success;
36881 goto decode_failure;
36883 case 0x3E2: // dcffixqq - DFP Convert From Fixed Quadword
36884 // dctfixqq - DFP Convert To Fixed Quadword
36885 if (!allow_DFP) goto decode_noDFP;
36886 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
36887 if (dis_dfp_fmt_convq( prefix, theInstr, abiinfo ))
36888 goto decode_success;
36889 goto decode_failure;
36891 case 0x102: // dctqpq - DFP convert to DFP extended
36892 case 0x302: // drdpq - DFP round to dfp Long
36893 case 0x122: // dctfixq - DFP convert to fixed quad
36894 case 0x322: // dcffixq - DFP convert from fixed quad
36895 if (!allow_DFP) goto decode_noDFP;
36896 if (dis_dfp_fmt_convq( prefix, theInstr, abiinfo ))
36897 goto decode_success;
36898 goto decode_failure;
36900 case 0x2A2: // dtstsfq - DFP number of significant digits
36901 case 0x2A3: // dtstsfiq - DFP number of significant digits Immediate
36902 if (!allow_DFP) goto decode_noDFP;
36903 if (dis_dfp_significant_digits( prefix, theInstr ))
36904 goto decode_success;
36905 goto decode_failure;
36907 case 0x142: // ddedpdq DFP Decode DPD to BCD
36908 case 0x342: // denbcdq DFP Encode BCD to DPD
36909 if (!allow_DFP) goto decode_noDFP;
36910 if (dis_dfp_bcdq( prefix, theInstr ))
36911 goto decode_success;
36912 goto decode_failure;
36914 /* Floating Point Compare Instructions */
36915 case 0x000: // fcmpu
36916 case 0x020: // fcmpo
36917 if (dis_fp_cmp( prefix, theInstr )) goto decode_success;
36918 goto decode_failure;
36920 case 0x080: // ftdiv
36921 case 0x0A0: // ftsqrt
36922 if (dis_fp_tests( prefix, theInstr )) goto decode_success;
36923 goto decode_failure;
36925 /* Floating Point Rounding/Conversion Instructions */
36926 case 0x00C: // frsp
36927 case 0x00E: // fctiw
36928 case 0x00F: // fctiwz
36929 case 0x32E: // fctid
36930 case 0x32F: // fctidz
36931 case 0x34E: // fcfid
36932 if (dis_fp_round( prefix, theInstr )) goto decode_success;
36933 goto decode_failure;
36934 case 0x3CE: case 0x3AE: case 0x3AF: // fcfidu, fctidu[z] (implemented as native insns)
36935 case 0x08F: case 0x08E: // fctiwu[z] (implemented as native insns)
36936 if (!allow_VX) goto decode_noVX;
36937 if (dis_fp_round( prefix, theInstr )) goto decode_success;
36938 goto decode_failure;
36940 /* Power6 rounding stuff */
36941 case 0x1E8: // frim
36942 case 0x1C8: // frip
36943 case 0x188: // frin
36944 case 0x1A8: // friz
36945 /* A hack to check for P6 capability . . . */
36946 if ((allow_F && allow_V && allow_FX && allow_GX) &&
36947 (dis_fp_round( prefix, theInstr )))
36948 goto decode_success;
36949 goto decode_failure;
36951 /* Floating Point Move Instructions */
36952 case 0x008: // fcpsgn
36953 case 0x028: // fneg
36954 case 0x048: // fmr
36955 case 0x088: // fnabs
36956 case 0x108: // fabs
36957 if (dis_fp_move( prefix, theInstr )) goto decode_success;
36958 goto decode_failure;
36960 case 0x3c6: case 0x346: // fmrgew, fmrgow
36961 if (dis_fp_merge( prefix, theInstr )) goto decode_success;
36962 goto decode_failure;
36964 /* Floating Point Status/Control Register Instructions */
36965 case 0x026: // mtfsb1
36966 case 0x040: // mcrfs
36967 case 0x046: // mtfsb0
36968 case 0x086: // mtfsfi
36969 case 0x247: // mffs, mffs., mffsce, mffscdrn, mffscdrni,
36970 // mffscrn, mffscrni, mffsl
36971 case 0x2C7: // mtfsf
36972 // Some of the above instructions need to know more about the
36973 // ISA level supported by the host.
36974 if (dis_fp_scr( prefix, theInstr, allow_GX )) goto decode_success;
36975 goto decode_failure;
36977 case 0x324: // xsabsqp, xsxexpqp,xsnabsqp, xsnegqp, xsxsigqp
36978 if ( inst_select == 27 ) { // xssqrtqp
36979 if ( dis_vx_Floating_Point_Arithmetic_quad_precision( prefix,
36980 theInstr,
36981 abiinfo ) )
36982 goto decode_success;
36984 /* fallthrough to dis_vx_scalar_quad_precision */
36986 /* Instructions implemented with Pre ISA 3.0 Iops */
36987 /* VSX Scalar Quad-Precision instructions */
36988 case 0x064: // xscpsgnqp
36989 case 0x0A4: // xscmpexpqp
36990 case 0x084: // xscmpoqp
36991 case 0x284: // xscmpuqp
36992 case 0x2C4: // xststdcqp
36993 case 0x364: // xsiexpqp
36994 if (dis_vx_scalar_quad_precision( prefix, theInstr ))
36995 goto decode_success;
36996 goto decode_failure;
36998 case 0x044: // xscmpeqqp
36999 case 0x0C4: // xscmpgeqp
37000 case 0x0E4: // xscmpgtqp
37001 case 0x2A4: // xsmaxcqp
37002 case 0x2E4: // xsmincqp
37003 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37004 if (dis_vx_scalar_quad_precision( prefix, theInstr ))
37005 goto decode_success;
37006 goto decode_failure;
37008 /* Instructions implemented using ISA 3.0 instructions */
37009 // xsaddqpo (VSX Scalar Add Quad-Precision [using round to ODD]
37010 case 0x004: // xsaddqp (VSX Scalar Add Quad-Precision [using RN mode]
37011 // xsmulqpo (VSX Scalar Multiply Quad-Precision [using round to ODD]
37012 case 0x024: // xsmulqp (VSX Scalar Multiply Quad-Precision [using RN mode]
37013 // xsmaddqpo (VSX Scalar Multiply Add Quad-Precision [using round to ODD]
37014 case 0x184: // xsmaddqp (VSX Scalar Multiply Add Quad-Precision [using RN mode]
37015 // xsmsubqpo (VSX Scalar Multiply Sub Quad-Precision [using round to ODD]
37016 case 0x1A4: // xsmsubqp (VSX Scalar Multiply Sub Quad-Precision [using RN mode]
37017 // xsnmaddqpo (VSX Scalar Negative Multiply Add Quad-Precision [using round to ODD]
37018 case 0x1C4: // xsnmaddqp (VSX Scalar Negative Multiply Add Quad-Precision [using RN mode]
37019 // xsnmsubqpo (VSX Scalar Negative Multiply Sub Quad-Precision [using round to ODD]
37020 case 0x1E4: // xsnmsubqp (VSX Scalar Negative Multiply Sub Quad-Precision [using RN mode]
37021 // xssubqpo (VSX Scalar Subtract Quad-Precision [using round to ODD]
37022 case 0x204: // xssubqp (VSX Scalar Subtract Quad-Precision [using RN mode]
37023 // xsdivqpo (VSX Scalar Divide Quad-Precision [using round to ODD]
37024 case 0x224: // xsdivqp (VSX Scalar Divide Quad-Precision [using RN mode]
37025 if ( dis_vx_Floating_Point_Arithmetic_quad_precision( prefix,
37026 theInstr,
37027 abiinfo ) )
37028 goto decode_success;
37029 goto decode_failure;
37031 case 0x344: // xscvudqp, xscvsdqp, xscvqpdp, xscvqpdpo, xsvqpdp
37032 // xscvqpswz, xscvqpuwz, xscvqpudz, xscvqpsdz
37033 /* ISA 3.1 instructions: xscvqpuqz, xscvuqqp, xscvqpsqz,
37034 xscvsqqp. */
37035 if (( IFIELD( theInstr, 16, 5) == 0 // xscvqpuqz
37036 || IFIELD( theInstr, 16, 5) == 3 // xscvuqqp
37037 || IFIELD( theInstr, 16, 5) == 8 // xscvqpsqz
37038 || IFIELD( theInstr, 16, 5) == 11 )) { // xscvsqqp
37039 if (!allow_isa_3_1)
37040 goto decode_noIsa3_1;
37042 if ( dis_vx_Floating_Point_Arithmetic_quad_precision( prefix,
37043 theInstr,
37044 abiinfo ) )
37045 goto decode_success;
37046 goto decode_failure;
37049 if ( !mode64 || !allow_isa_3_0 ) goto decode_failure;
37050 if ( dis_vx_Floating_Point_Arithmetic_quad_precision( prefix,
37051 theInstr,
37052 abiinfo ) )
37053 goto decode_success;
37054 goto decode_failure;
37056 default:
37057 break; // Fall through...
37060 opc2 = ifieldOPClo9( theInstr );
37061 switch (opc2) {
37062 case 0x42: // dscli, DFP shift left
37063 case 0x62: // dscri, DFP shift right
37064 if (!allow_DFP) goto decode_noDFP;
37065 if (dis_dfp_shiftq( prefix, theInstr ))
37066 goto decode_success;
37067 goto decode_failure;
37068 case 0xc2: // dtstdc, DFP test data class
37069 case 0xe2: // dtstdg, DFP test data group
37070 if (!allow_DFP) goto decode_noDFP;
37071 if (dis_dfp_class_test( prefix, theInstr ))
37072 goto decode_success;
37073 goto decode_failure;
37074 default:
37075 break;
37078 opc2 = ifieldOPClo8( theInstr );
37079 switch (opc2) {
37080 case 0x3: // dquaq - DFP Quantize Quad
37081 case 0x23: // drrndq - DFP Reround Quad
37082 case 0x43: // dquaiq - DFP Quantize immediate Quad
37083 if (!allow_DFP) goto decode_noDFP;
37084 if (dis_dfp_quantize_sig_rrndq( prefix, theInstr ))
37085 goto decode_success;
37086 goto decode_failure;
37087 case 0xA2: // dtstexq - DFP Test exponent Quad
37088 if (!allow_DFP) goto decode_noDFP;
37089 if (dis_dfp_exponent_test( prefix, theInstr ) )
37090 goto decode_success;
37091 goto decode_failure;
37092 case 0x63: // drintxq - DFP Round to an integer value
37093 case 0xE3: // drintnq - DFP Round to an integer value
37094 if (!allow_DFP) goto decode_noDFP;
37095 if (dis_dfp_roundq( prefix, theInstr ))
37096 goto decode_success;
37097 goto decode_failure;
37099 default:
37100 goto decode_failure;
37102 break;
37104 case 0x13:
37106 opc2 = ifieldOPClo5(theInstr);
37107 switch (opc2) {
37109 /* PC relative load/store */
37110 case 0x002: // addpcis
37111 if (dis_pc_relative( prefix, theInstr )) goto decode_success;
37112 goto decode_failure;
37114 /* fall through to the next opc2 field size */
37117 opc2 = ifieldOPClo10(theInstr);
37118 switch (opc2) {
37120 /* Condition Register Logical Instructions */
37121 case 0x101: case 0x081: case 0x121: // crand, crandc, creqv
37122 case 0x0E1: case 0x021: case 0x1C1: // crnand, crnor, cror
37123 case 0x1A1: case 0x0C1: case 0x000: // crorc, crxor, mcrf
37124 if (dis_cond_logic( prefix, theInstr )) goto decode_success;
37125 goto decode_failure;
37127 /* Branch Instructions */
37128 case 0x210: case 0x010: // bcctr, bclr
37129 if (dis_branch( prefix, theInstr, abiinfo, &dres))
37130 goto decode_success;
37131 goto decode_failure;
37133 /* Memory Synchronization Instructions */
37134 case 0x096: // isync
37135 if (dis_memsync( prefix, theInstr, allow_isa_3_0, allow_isa_3_1 ))
37136 goto decode_success;
37137 goto decode_failure;
37139 default:
37140 goto decode_failure;
37142 break;
37144 case 0x1F:
37145 if ( prefix_instruction( prefix ) ) { // stxacc
37146 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37147 if (dis_vsx_accumulator_prefix( prefix, theInstr, abiinfo,
37148 ACC_mapped_on_VSR ) )
37149 goto decode_success;
37150 goto decode_failure;
37153 /* For arith instns, bit10 is the OE flag (overflow enable) */
37155 opc2 = IFIELD(theInstr, 1, 9);
37156 switch (opc2) {
37157 /* Integer Arithmetic Instructions */
37158 case 0x10A: case 0x00A: case 0x08A: // add, addc, adde
37159 case 0x0AA: // addex
37160 case 0x0EA: case 0x0CA: case 0x1EB: // addme, addze, divw
37161 case 0x1CB: case 0x04B: case 0x00B: // divwu, mulhw, mulhwu
37162 case 0x0EB: case 0x068: case 0x028: // mullw, neg, subf
37163 case 0x008: case 0x088: case 0x0E8: // subfc, subfe, subfme
37164 case 0x0C8: // subfze
37165 if (dis_int_arith( prefix, theInstr )) goto decode_success;
37166 goto decode_failure;
37168 case 0x18B: // divweu (implemented as native insn)
37169 case 0x1AB: // divwe (implemented as native insn)
37170 if (!allow_VX) goto decode_noVX;
37171 if (dis_int_arith( prefix, theInstr )) goto decode_success;
37172 goto decode_failure;
37174 /* 64bit Integer Arithmetic */
37175 case 0x009: case 0x049: case 0x0E9: // mulhdu, mulhd, mulld
37176 case 0x1C9: case 0x1E9: // divdu, divd
37177 if (!mode64) goto decode_failure;
37178 if (dis_int_arith( prefix, theInstr )) goto decode_success;
37179 goto decode_failure;
37181 case 0x1A9: // divde (implemented as native insn)
37182 case 0x189: // divdeuo (implemented as native insn)
37183 if (!allow_VX) goto decode_noVX;
37184 if (!mode64) goto decode_failure;
37185 if (dis_int_arith( prefix, theInstr )) goto decode_success;
37186 goto decode_failure;
37188 case 0x1FC: // cmpb
37189 if (dis_int_logic( prefix, theInstr )) goto decode_success;
37190 goto decode_failure;
37192 case 0x180: case 0x1A0: // setbc, setbcr
37193 case 0x1C0: case 0x1E0: // setnbc, setnbcr
37194 if (!allow_isa_3_0) goto decode_noIsa3_1;
37195 if (dis_set_bool_condition( prefix, theInstr ))
37196 goto decode_success;
37197 goto decode_failure;
37199 case 0x14D: // lxvpx
37200 case 0x1CD: // stxvpx
37201 if (dis_vsx_vector_paired_load_store( prefix, theInstr ))
37202 goto decode_success;
37203 goto decode_failure;
37205 default:
37206 break; // Fall through...
37209 /* All remaining opcodes use full 10 bits. */
37211 opc2 = IFIELD(theInstr, 1, 10);
37212 switch (opc2) {
37213 case 0xB1: // xxmfacc, xxsetaccz
37215 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37216 if (dis_vsx_accumulator_prefix( prefix, theInstr, abiinfo,
37217 ACC_mapped_on_VSR ) )
37218 goto decode_success;
37219 goto decode_failure;
37222 case 0xDB: // brh
37223 case 0x9B: // brw
37224 case 0xBB: // brd
37225 if (dis_byte_reverse( prefix, theInstr )) goto decode_success;
37226 goto decode_failure;
37228 /* X-form instructions */
37229 case 0x03B: // cntlzdm, Count Leading Zeros Doubleword under bit Mask
37230 case 0x0BC: // pextd, Parallel Bits Extract Doubleword
37231 case 0x09C: // pdepd, Parallel Bits Deposit Doubleword
37232 case 0x23B: // cnttzdm, Count Trailing Zeros Doubleword under bit Mask
37233 case 0x0DC: // cfuged, Centrifuge Doubleword
37234 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37235 if (dis_logical_mask_bits( prefix, theInstr, abiinfo ) )
37236 goto decode_success;
37237 goto decode_failure;
37239 /* Integer miscellaneous instructions */
37240 case 0x01E: // wait RFC 2500
37241 if (dis_int_misc( prefix, theInstr )) goto decode_success;
37242 goto decode_failure;
37245 /* Integer Compare Instructions */
37246 case 0x000: case 0x020: case 0x080: // cmp, cmpl, setb
37247 if (dis_int_cmp( prefix, theInstr )) goto decode_success;
37248 goto decode_failure;
37250 case 0x0C0: case 0x0E0: // cmprb, cmpeqb
37251 if (dis_byte_cmp( prefix, theInstr )) goto decode_success;
37252 goto decode_failure;
37254 case 0x10B: case 0x30B: // moduw, modsw
37255 case 0x109: case 0x309: // modsd, modud
37256 if (dis_modulo_int( prefix, theInstr )) goto decode_success;
37257 goto decode_failure;
37259 case 0x21A: case 0x23A: // cnttzw, cnttzd
37260 if (dis_modulo_int( prefix, theInstr )) goto decode_success;
37261 goto decode_failure;
37263 /* Integer Logical Instructions */
37264 case 0x01C: case 0x03C: case 0x01A: // and, andc, cntlzw
37265 case 0x11C: case 0x3BA: case 0x39A: // eqv, extsb, extsh
37266 case 0x1DC: case 0x07C: case 0x1BC: // nand, nor, or
37267 case 0x19C: case 0x13C: // orc, xor
37268 case 0x2DF: case 0x25F: // mftgpr, mffgpr
37269 if (dis_int_logic( prefix, theInstr )) goto decode_success;
37270 goto decode_failure;
37272 case 0x2F3: // darn - Deliver A Random Number
37273 if (!allow_isa_3_0) goto decode_noP9;
37274 if (dis_darn( prefix, theInstr, abiinfo ))
37275 goto decode_success;
37276 goto decode_failure;
37278 case 0x28E: case 0x2AE: // tbegin., tend.
37279 case 0x2EE: case 0x2CE: case 0x30E: // tsr., tcheck., tabortwc.
37280 case 0x32E: case 0x34E: case 0x36E: // tabortdc., tabortwci., tabortdci.
37281 case 0x38E: case 0x3AE: case 0x3EE: // tabort., treclaim., trechkpt.
37282 if (dis_transactional_memory( prefix, theInstr,
37283 getUIntPPCendianly( &guest_code[delta + 4]),
37284 abiinfo, &dres))
37285 goto decode_success;
37286 goto decode_failure;
37288 /* 64bit Integer Logical Instructions */
37289 case 0x3DA: case 0x03A: // extsw, cntlzd
37290 if (!mode64) goto decode_failure;
37291 if (dis_int_logic( prefix, theInstr )) goto decode_success;
37292 goto decode_failure;
37294 /* 64bit Integer Parity Instructions */
37295 case 0xba: // prtyd
37296 if (!mode64) goto decode_failure;
37297 if (dis_int_parity( prefix, theInstr )) goto decode_success;
37298 goto decode_failure;
37300 case 0x9a: // prtyw
37301 if (dis_int_parity( prefix, theInstr )) goto decode_success;
37302 goto decode_failure;
37304 /* Integer Shift Instructions */
37305 case 0x018: case 0x318: case 0x338: // slw, sraw, srawi
37306 case 0x218: // srw
37307 if (dis_int_shift( prefix, theInstr, allow_isa_3_0 ))
37308 goto decode_success;
37309 goto decode_failure;
37311 /* 64bit Integer Shift Instructions */
37312 case 0x01B: case 0x31A: // sld, srad
37313 case 0x33A: case 0x33B: // sradi
37314 case 0x21B: // srd
37315 if (!mode64) goto decode_failure;
37316 if (dis_int_shift( prefix, theInstr, allow_isa_3_0 ))
37317 goto decode_success;
37318 goto decode_failure;
37320 /* Integer Load Instructions */
37321 case 0x057: case 0x077: case 0x157: // lbzx, lbzux, lhax
37322 case 0x177: case 0x117: case 0x137: // lhaux, lhzx, lhzux
37323 case 0x017: case 0x037: // lwzx, lwzux
37324 if (dis_int_load( prefix, theInstr )) goto decode_success;
37325 goto decode_failure;
37327 /* 64bit Integer Load Instructions */
37328 case 0x035: case 0x015: // ldux, ldx
37329 case 0x175: case 0x155: // lwaux, lwax
37330 if (!mode64) goto decode_failure;
37331 if (dis_int_load( prefix, theInstr )) goto decode_success;
37332 goto decode_failure;
37334 /* Integer Store Instructions */
37335 case 0x0F7: case 0x0D7: case 0x1B7: // stbux, stbx, sthux
37336 case 0x197: case 0x0B7: case 0x097: // sthx, stwux, stwx
37337 if (dis_int_store( prefix, theInstr, abiinfo )) goto decode_success;
37338 goto decode_failure;
37340 /* 64bit Integer Store Instructions */
37341 case 0x0B5: case 0x095: // stdux, stdx
37342 if (!mode64) goto decode_failure;
37343 if (dis_int_store( prefix, theInstr, abiinfo )) goto decode_success;
37344 goto decode_failure;
37346 /* Integer Load and Store with Byte Reverse Instructions */
37347 case 0x214: case 0x294: // ldbrx, stdbrx
37348 if (!mode64) goto decode_failure;
37349 if (dis_int_ldst_rev( prefix, theInstr )) goto decode_success;
37350 goto decode_failure;
37352 case 0x216: case 0x316: case 0x296: // lwbrx, lhbrx, stwbrx
37353 case 0x396: // sthbrx
37354 if (dis_int_ldst_rev( prefix, theInstr )) goto decode_success;
37355 goto decode_failure;
37357 /* Integer Load and Store String Instructions */
37358 case 0x255: case 0x215: case 0x2D5: // lswi, lswx, stswi
37359 case 0x295: { // stswx
37360 Bool stopHere = False;
37361 Bool ok = dis_int_ldst_str( prefix, theInstr, &stopHere );
37362 if (!ok) goto decode_failure;
37363 if (stopHere) {
37364 putGST( PPC_GST_CIA, mkSzImm(ty, nextInsnAddr()) );
37365 dres.jk_StopHere = Ijk_Boring;
37366 dres.whatNext = Dis_StopHere;
37368 goto decode_success;
37371 /* Memory Synchronization Instructions */
37372 case 0x034: case 0x074: // lbarx, lharx
37373 case 0x2B6: case 0x2D6: // stbcx, sthcx
37374 if (!allow_isa_2_07) goto decode_noP8;
37375 if (dis_memsync( prefix, theInstr, allow_isa_3_0, allow_isa_3_1 ))
37376 goto decode_success;
37377 goto decode_failure;
37379 case 0x356: case 0x014: case 0x096: // eieio, lwarx, stwcx.
37380 case 0x256: // sync
37381 if (dis_memsync( prefix, theInstr, allow_isa_3_0, allow_isa_3_1 ))
37382 goto decode_success;
37383 goto decode_failure;
37385 /* 64bit Memory Synchronization Instructions */
37386 case 0x054: case 0x0D6: // ldarx, stdcx.
37387 if (!mode64) goto decode_failure;
37388 if (dis_memsync( prefix, theInstr, allow_isa_3_0, allow_isa_3_1 ))
37389 goto decode_success;
37390 goto decode_failure;
37392 case 0x114: case 0x0B6: // lqarx, stqcx.
37393 if (dis_memsync( prefix, theInstr, allow_isa_3_0, allow_isa_3_1 ))
37394 goto decode_success;
37395 goto decode_failure;
37397 /* Processor Control Instructions */
37398 case 0x33: case 0x73: // mfvsrd, mfvsrwz
37399 case 0xB3: case 0xD3: case 0xF3: // mtvsrd, mtvsrwa, mtvsrwz
37400 case 0x200: case 0x013: case 0x153: // mcrxr, mfcr, mfspr
37401 case 0x173: case 0x090: case 0x1D3: // mftb, mtcrf, mtspr
37402 case 0x220: // mcrxrt
37403 case 0x240: // mcrxrx
37404 if (dis_proc_ctl( abiinfo, prefix, theInstr )) goto decode_success;
37405 goto decode_failure;
37407 /* Cache Management Instructions */
37408 case 0x2F6: case 0x056: case 0x036: // dcba, dcbf, dcbst
37409 case 0x116: case 0x0F6: case 0x3F6: // dcbt, dcbtst, dcbz
37410 case 0x3D6: // icbi
37411 if (dis_cache_manage( prefix, theInstr, &dres, allow_isa_3_1,
37412 archinfo ))
37413 goto decode_success;
37414 goto decode_failure;
37416 //zz /* External Control Instructions */
37417 //zz case 0x136: case 0x1B6: // eciwx, ecowx
37418 //zz DIP("external control op => not implemented\n");
37419 //zz goto decode_failure;
37421 /* Trap Instructions */
37422 case 0x004: // tw
37423 if (dis_trap( prefix, theInstr, &dres )) goto decode_success;
37424 goto decode_failure;
37426 case 0x044: // td
37427 if (!mode64) goto decode_failure;
37428 if (dis_trap( prefix, theInstr, &dres )) goto decode_success;
37429 goto decode_failure;
37431 /* Floating Point Load Instructions */
37432 case 0x217: case 0x237: case 0x257: // lfsx, lfsux, lfdx
37433 case 0x277: // lfdux
37434 if (!allow_F) goto decode_noF;
37435 if (dis_fp_load( prefix, theInstr )) goto decode_success;
37436 goto decode_failure;
37438 /* Floating Point Store Instructions */
37439 case 0x297: case 0x2B7: case 0x2D7: // stfs, stfsu, stfd
37440 case 0x2F7: // stfdu, stfiwx
37441 if (!allow_F) goto decode_noF;
37442 if (dis_fp_store( prefix, theInstr )) goto decode_success;
37443 goto decode_failure;
37444 case 0x3D7: // stfiwx
37445 if (!allow_F) goto decode_noF;
37446 if (!allow_GX) goto decode_noGX;
37447 if (dis_fp_store( prefix, theInstr )) goto decode_success;
37448 goto decode_failure;
37450 /* Floating Point Double Pair Indexed Instructions */
37451 case 0x317: // lfdpx (Power6)
37452 case 0x397: // stfdpx (Power6)
37453 if (!allow_F) goto decode_noF;
37454 if (dis_fp_pair( prefix, theInstr )) goto decode_success;
37455 goto decode_failure;
37457 case 0x357: // lfiwax
37458 if (!allow_F) goto decode_noF;
37459 if (dis_fp_load( prefix, theInstr )) goto decode_success;
37460 goto decode_failure;
37462 case 0x377: // lfiwzx
37463 if (!allow_F) goto decode_noF;
37464 if (dis_fp_load( prefix, theInstr )) goto decode_success;
37465 goto decode_failure;
37467 /* AltiVec instructions */
37469 /* AV Cache Control - Data streams */
37470 case 0x156: case 0x176: case 0x336: // dst, dstst, dss
37471 if (!allow_V) goto decode_noV;
37472 if (dis_av_datastream( prefix, theInstr )) goto decode_success;
37473 goto decode_failure;
37475 /* AV Load */
37476 case 0x006: case 0x026: // lvsl, lvsr
37477 case 0x007: case 0x027: case 0x047: // lvebx, lvehx, lvewx
37478 case 0x067: case 0x167: // lvx, lvxl
37479 if (!allow_V) goto decode_noV;
37480 if (dis_av_load( abiinfo, prefix, theInstr )) goto decode_success;
37481 goto decode_failure;
37483 /* AV Store */
37484 case 0x087: case 0x0A7: case 0x0C7: // stvebx, stvehx, stvewx
37485 case 0x0E7: case 0x1E7: // stvx, stvxl
37486 if (!allow_V) goto decode_noV;
37487 if (dis_av_store( prefix, theInstr )) goto decode_success;
37488 goto decode_failure;
37490 /* VSX Load */
37491 case 0x00C: // lxsiwzx
37492 case 0x04C: // lxsiwax
37493 case 0x10C: // lxvx
37494 case 0x10D: // lxvl
37495 case 0x12D: // lxvll
37496 case 0x16C: // lxvwsx
37497 case 0x20C: // lxsspx
37498 case 0x24C: // lxsdx
37499 case 0x32C: // lxvh8x
37500 case 0x30D: // lxsibzx
37501 case 0x32D: // lxsihzx
37502 case 0x34C: // lxvd2x
37503 case 0x36C: // lxvb16x
37504 case 0x14C: // lxvdsx
37505 case 0x30C: // lxvw4x
37506 // All of these VSX load instructions use some VMX facilities, so
37507 // if allow_V is not set, we'll skip trying to decode.
37508 if (!allow_V) goto decode_noV;
37510 if (dis_vx_load( prefix, theInstr )) goto decode_success;
37511 goto decode_failure;
37513 case 0x00D: // lxvrbx
37514 case 0x02D: // lxvrhx
37515 case 0x04D: // lxvrwx
37516 case 0x06D: // lxvrdx
37517 case 0x08D: // stxvrbx
37518 case 0x0AD: // stxvrhx
37519 case 0x0CD: // stxvrwx
37520 case 0x0ED: // stxvrdx
37521 // All of these VSX load instructions use some VMX facilities, so
37522 // if allow_V is not set, we'll skip trying to decode.
37523 if (!allow_V) goto decode_noV;
37524 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37525 if (dis_vx_load( prefix, theInstr )) goto decode_success;
37526 goto decode_failure;
37528 /* VSX Store */
37529 case 0x08C: // stxsiwx
37530 case 0x18C: // stxvx
37531 case 0x18D: // stxvl
37532 case 0x1AD: // stxvll
37533 case 0x28C: // stxsspx
37534 case 0x2CC: // stxsdx
37535 case 0x38C: // stxvw4x
37536 case 0x3CC: // stxvd2x
37537 case 0x38D: // stxsibx
37538 case 0x3AD: // stxsihx
37539 case 0x3AC: // stxvh8x
37540 case 0x3EC: // stxvb16x
37541 // All of these VSX store instructions use some VMX facilities, so
37542 // if allow_V is not set, we'll skip trying to decode.
37543 if (!allow_V) goto decode_noV;
37545 if (dis_vx_store( prefix, theInstr )) goto decode_success;
37546 goto decode_failure;
37548 case 0x133: case 0x193: case 0x1B3: // mfvsrld, mfvsrdd, mtvsrws
37549 // The move from/to VSX instructions use some VMX facilities, so
37550 // if allow_V is not set, we'll skip trying to decode.
37551 if (!allow_V) goto decode_noV;
37552 if (dis_vx_move( prefix, theInstr )) goto decode_success;
37553 goto decode_failure;
37555 /* Miscellaneous ISA 2.06 instructions */
37556 case 0x1FA: // popcntd
37557 if (!mode64) goto decode_failure;
37558 /* else fallthru */
37559 case 0x17A: // popcntw
37560 case 0x7A: // popcntb
37561 if (dis_int_logic( prefix, theInstr )) goto decode_success;
37562 goto decode_failure;
37564 case 0x0FC: // bpermd
37565 if (!mode64) goto decode_failure;
37566 if (dis_int_logic( prefix, theInstr )) goto decode_success;
37567 goto decode_failure;
37569 case 0x306: // copy
37570 if ( !mode64 || !allow_isa_3_0 ) goto decode_failure;
37571 if (dis_copy_paste( prefix, theInstr, abiinfo )) goto decode_success;
37572 goto decode_failure;
37574 case 0x346: // cpabort
37575 if ( !mode64 || !allow_isa_3_0 ) goto decode_failure;
37576 if (dis_copy_paste( prefix, theInstr, abiinfo )) goto decode_success;
37577 goto decode_failure;
37579 case 0x386: // paste.
37580 if ( !mode64 || !allow_isa_3_0 ) goto decode_failure;
37581 if (dis_copy_paste( prefix, theInstr, abiinfo )) goto decode_success;
37582 goto decode_failure;
37584 default:
37585 /* Deal with some other cases that we would otherwise have
37586 punted on. */
37587 /* --- ISEL (PowerISA_V2.05.pdf, p74) --- */
37588 /* only decode this insn when reserved bit 0 (31 in IBM's
37589 notation) is zero */
37590 if (IFIELD(theInstr, 0, 6) == (15<<1)) {
37591 UInt rT = ifieldRegDS( theInstr );
37592 UInt rA = ifieldRegA( theInstr );
37593 UInt rB = ifieldRegB( theInstr );
37594 UInt bi = ifieldRegC( theInstr );
37595 putIReg(
37597 IRExpr_ITE( binop(Iop_CmpNE32, getCRbit( bi ), mkU32(0)),
37598 rA == 0 ? (mode64 ? mkU64(0) : mkU32(0))
37599 : getIReg(rA),
37600 getIReg(rB))
37603 DIP("isel r%u,r%u,r%u,crb%u\n", rT,rA,rB,bi);
37604 goto decode_success;
37608 opc2 = IFIELD(theInstr, 2, 9);
37609 switch (opc2) {
37610 case 0x1BD:
37611 if (!mode64) goto decode_failure;
37612 if (dis_int_logic( prefix, theInstr )) goto decode_success;
37613 goto decode_failure;
37615 default:
37616 goto decode_failure;
37618 break;
37621 case 0x04:
37622 /* AltiVec instructions */
37624 opc2 = IFIELD(theInstr, 1, 5);
37625 switch (opc2) {
37626 case 0xA: // mtvsrbmi
37627 if (!allow_V) goto decode_noV;
37628 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37629 if (dis_VSR_byte_mask( prefix, theInstr, abiinfo ))
37630 goto decode_success;
37631 goto decode_failure;
37632 break;
37634 default:
37635 break; // Fall through...
37638 opc2 = IFIELD(theInstr, 0, 6);
37639 switch (opc2) {
37640 /* AV Mult-Add, Mult-Sum */
37641 case 0x16: // vsldbi/vsrdbi
37642 if (!allow_V) goto decode_noV;
37643 if (dis_av_shift( prefix, theInstr )) goto decode_success;
37644 goto decode_failure;
37646 case 0x17: // vmsumcud
37647 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37648 if (dis_vx_quadword_arith( prefix, theInstr )) {
37649 goto decode_success;
37651 goto decode_failure;
37653 case 0x18: case 0x19: // vextdubvlx, vextdubvrx
37654 case 0x1A: case 0x1B: // vextduhvlx, vextduhvrx
37655 case 0x1C: case 0x1D: // vextduwvlx, vextduwvrx
37656 case 0x1E: case 0x1F: // vextddvlx, vextddvrx
37657 if (!allow_V) goto decode_noV;
37658 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37659 if (dis_vec_extract_insert( prefix, theInstr ))
37660 goto decode_success;
37661 goto decode_failure;
37663 case 0x20: case 0x21: case 0x22: // vmhaddshs, vmhraddshs, vmladduhm
37664 case 0x23: // vmsumudm
37665 case 0x24: case 0x25: case 0x26: // vmsumubm, vmsummbm, vmsumuhm
37666 case 0x27: case 0x28: case 0x29: // vmsumuhs, vmsumshm, vmsumshs
37667 if (!allow_V) goto decode_noV;
37668 if (dis_av_multarith( prefix, theInstr )) goto decode_success;
37669 goto decode_failure;
37671 case 0x30: case 0x31: case 0x33: // maddhd, maddhdu, maddld
37672 if (!mode64) goto decode_failure;
37673 if (dis_int_mult_add( prefix, theInstr )) goto decode_success;
37674 goto decode_failure;
37676 /* AV Permutations */
37677 case 0x2A: // vsel
37678 case 0x2B: // vperm
37679 case 0x2C: // vsldoi
37680 if (!allow_V) goto decode_noV;
37681 if (dis_av_permute( prefix, theInstr )) goto decode_success;
37682 goto decode_failure;
37684 case 0x2D: // vpermxor
37685 case 0x3B: // vpermr
37686 if (!allow_isa_2_07) goto decode_noP8;
37687 if (dis_av_permute( prefix, theInstr )) goto decode_success;
37688 goto decode_failure;
37690 /* AV Floating Point Mult-Add/Sub */
37691 case 0x2E: case 0x2F: // vmaddfp, vnmsubfp
37692 if (!allow_V) goto decode_noV;
37693 if (dis_av_fp_arith( prefix, theInstr )) goto decode_success;
37694 goto decode_failure;
37696 case 0x3D: case 0x3C: // vaddecuq, vaddeuqm
37697 case 0x3F: case 0x3E: // vsubecuq, vsubeuqm
37698 if (!allow_V) goto decode_noV;
37699 if (dis_av_quad( prefix, theInstr, abiinfo)) goto decode_success;
37700 goto decode_failure;
37702 default:
37703 break; // Fall through...
37706 opc2 = IFIELD(theInstr, 0, 9);
37707 if (IFIELD(theInstr, 10, 1) == 1) {
37708 /* The following instructions have bit 21 set and a PS bit (bit 22)
37709 * Bit 21 distinguishes them from instructions with an 11 bit opc2
37710 * field.
37712 switch (opc2) {
37713 /* BCD arithmetic */
37714 case 0x001: case 0x041: // bcdadd, bcdsub
37715 case 0x101: case 0x141: // bcdtrunc., bcdutrunc.
37716 case 0x081: case 0x0C1: case 0x1C1: // bcdus., bcds., bcdsr.
37717 case 0x181: // bcdcfn., bcdcfz.
37718 // bcdctz., bcdcfsq., bcdctsq.
37719 if (!allow_isa_2_07) goto decode_noP8;
37720 if (dis_av_bcd( prefix, theInstr, abiinfo )) goto decode_success;
37721 goto decode_failure;
37722 default:
37723 break; // Fall through...
37727 opc2 = IFIELD(theInstr, 0, 10);
37728 opc3 = IFIELD(theInstr, 16, 5);
37730 if ((opc2 == 0x0D) & (opc3 < 4)) { // vstrihr, vstrihl, vstribr, vstribl
37731 /* Vector String Isolate instructions */
37732 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37733 if (dis_string_isolate( prefix, theInstr ))
37734 goto decode_success;
37735 goto decode_failure;
37738 opc2 = IFIELD(theInstr, 0, 11);
37740 switch (opc2) {
37741 /* Vector String Isolate instructions */
37742 case 0x18D: // vclrlb
37743 case 0x1CD: // vclrrb
37744 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37745 if (dis_string_isolate( prefix, theInstr ))
37746 goto decode_success;
37747 goto decode_failure;
37749 /* BCD manipulation */
37750 case 0x341: // bcdcpsgn
37751 if (!allow_isa_2_07) goto decode_noP8;
37752 if (dis_av_bcd_misc( prefix, theInstr, abiinfo ))
37753 goto decode_success;
37754 goto decode_failure;
37757 /* AV Arithmetic */
37758 case 0x180: // vaddcuw
37759 case 0x000: case 0x040: case 0x080: // vaddubm, vadduhm, vadduwm
37760 case 0x200: case 0x240: case 0x280: // vaddubs, vadduhs, vadduws
37761 case 0x300: case 0x340: case 0x380: // vaddsbs, vaddshs, vaddsws
37762 case 0x580: // vsubcuw
37763 case 0x400: case 0x440: case 0x480: // vsububm, vsubuhm, vsubuwm
37764 case 0x600: case 0x640: case 0x680: // vsububs, vsubuhs, vsubuws
37765 case 0x700: case 0x740: case 0x780: // vsubsbs, vsubshs, vsubsws
37766 case 0x402: case 0x442: case 0x482: // vavgub, vavguh, vavguw
37767 case 0x502: case 0x542: case 0x582: // vavgsb, vavgsh, vavgsw
37768 case 0x002: case 0x042: case 0x082: // vmaxub, vmaxuh, vmaxuw
37769 case 0x102: case 0x142: case 0x182: // vmaxsb, vmaxsh, vmaxsw
37770 case 0x202: case 0x242: case 0x282: // vminub, vminuh, vminuw
37771 case 0x302: case 0x342: case 0x382: // vminsb, vminsh, vminsw
37772 case 0x008: case 0x048: // vmuloub, vmulouh
37773 case 0x108: case 0x148: // vmulosb, vmulosh
37774 case 0x208: case 0x248: // vmuleub, vmuleuh
37775 case 0x308: case 0x348: // vmulesb, vmulesh
37776 case 0x608: case 0x708: case 0x648: // vsum4ubs, vsum4sbs, vsum4shs
37777 case 0x688: case 0x788: // vsum2sws, vsumsws
37778 if (!allow_V) goto decode_noV;
37779 if (dis_av_arith( prefix, theInstr )) goto decode_success;
37780 goto decode_failure;
37782 case 0x0C8: case 0x1C8: case 0x2C8: // vmuloud, vmulosd, vmuleud
37783 case 0x3C8: // vmulesd
37784 if (!allow_V) goto decode_noV;
37785 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37786 if (dis_av_arith( prefix, theInstr )) goto decode_success;
37787 goto decode_failure;
37789 case 0x08B: case 0x18B: // vdivuw, vdivsw
37790 case 0x289: case 0x389: // vmulhuw, vmulhsw
37791 case 0x28B: case 0x38B: // vdiveuw, vdivesw
37792 case 0x68B: case 0x78B: // vmoduw, vmodsw
37793 case 0x1c9: // vmulld
37794 case 0x2C9: case 0x3C9: // vmulhud, vmulhsd
37795 case 0x0CB: case 0x1CB: // vdivud, vdivsd
37796 case 0x2CB: case 0x3CB: // vdiveud, vdivesd
37797 case 0x6CB: case 0x7CB: // vmodud, vmodsd
37798 if (!allow_V) goto decode_noV;
37799 if (dis_av_arith( prefix, theInstr )) goto decode_success;
37800 goto decode_failure;
37802 case 0x005: // vrlq
37803 case 0x00B: case 0x10B: // vdivuq, vdivsq
37804 case 0x045: // vrlqmi
37805 case 0x101: case 0x141: // vcmpuq, vcmpsq
37806 case 0x105: case 0x145: // vslq, vrlqnm
37807 case 0x1C7: case 0x5C7: // vcmpequq, vcmpequq.
37808 case 0x205: // vsrq
37809 case 0x20B: case 0x30B: // vdivueq, vdivesq
37810 case 0x287: case 0x687: // vcmpgtuq, vcmpgtuq.
37811 case 0x305: // vsraq
37812 case 0x387: case 0x787: // vcmpgtsq, vcmpgtsq.
37813 case 0x60B: case 0x70B: // vmoduq, vmodsq
37814 if (!allow_V) goto decode_noV;
37815 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37816 if (dis_vx_quadword_arith( prefix, theInstr ))
37817 goto decode_success;
37818 goto decode_failure;
37820 case 0x088: case 0x089: // vmulouw, vmuluwm
37821 case 0x0C0: case 0x0C2: // vaddudm, vmaxud
37822 case 0x1C2: case 0x2C2: case 0x3C2: // vmaxsd, vminud, vminsd
37823 case 0x188: case 0x288: case 0x388: // vmulosw, vmuleuw, vmulesw
37824 case 0x4C0: // vsubudm
37825 if (!allow_isa_2_07) goto decode_noP8;
37826 if (dis_av_arith( prefix, theInstr )) goto decode_success;
37827 goto decode_failure;
37829 /* AV Polynomial Vector Multiply Add */
37830 case 0x408: case 0x448: // vpmsumb, vpmsumd
37831 case 0x488: case 0x4C8: // vpmsumw, vpmsumh
37832 if (!allow_isa_2_07) goto decode_noP8;
37833 if (dis_av_polymultarith( prefix, theInstr )) goto decode_success;
37834 goto decode_failure;
37836 /* AV Rotate, Shift */
37837 case 0x004: case 0x044: case 0x084: // vrlb, vrlh, vrlw
37838 case 0x104: case 0x144: case 0x184: // vslb, vslh, vslw
37839 case 0x204: case 0x244: case 0x284: // vsrb, vsrh, vsrw
37840 case 0x304: case 0x344: case 0x384: // vsrab, vsrah, vsraw
37841 case 0x1C4: case 0x2C4: // vsl, vsr
37842 case 0x40C: case 0x44C: // vslo, vsro
37843 if (!allow_V) goto decode_noV;
37844 if (dis_av_shift( prefix, theInstr )) goto decode_success;
37845 goto decode_failure;
37847 case 0x0C4: // vrld
37848 case 0x3C4: case 0x5C4: case 0x6C4: // vsrad, vsld, vsrd
37849 if (!allow_isa_2_07) goto decode_noP8;
37850 if (dis_av_shift( prefix, theInstr )) goto decode_success;
37851 goto decode_failure;
37853 /* AV Logic */
37854 case 0x404: case 0x444: case 0x484: // vand, vandc, vor
37855 case 0x4C4: case 0x504: // vxor, vnor
37856 if (!allow_V) goto decode_noV;
37857 if (dis_av_logic( prefix, theInstr )) goto decode_success;
37858 goto decode_failure;
37860 case 0x544: // vorc
37861 case 0x584: case 0x684: // vnand, veqv
37862 if (!allow_isa_2_07) goto decode_noP8;
37863 if (dis_av_logic( prefix, theInstr )) goto decode_success;
37864 goto decode_failure;
37866 /* AV Rotate */
37867 case 0x085: case 0x185: // vrlwmi, vrlwnm
37868 case 0x0C5: case 0x1C5: // vrldmi, vrldnm
37869 if (!allow_V) goto decode_noV;
37870 if (dis_av_rotate( prefix, theInstr )) goto decode_success;
37871 goto decode_failure;
37873 /* AV Processor Control */
37874 case 0x604: case 0x644: // mfvscr, mtvscr
37875 if (!allow_V) goto decode_noV;
37876 if (dis_av_procctl( prefix, theInstr )) goto decode_success;
37877 goto decode_failure;
37879 /* AV Vector Insert Element instructions */
37880 case 0x00F: case 0x10F: // vinsbvlx, vinsbvrx
37881 case 0x04F: case 0x14F: // vinshvlx, vinshvrx
37882 case 0x08F: case 0x18F: // vinswvlx, vinswvrx
37883 case 0x0CF: case 0x1CF: // vinsw, vinsw
37884 case 0x20F: case 0x30F: // vinsblx, vinsbrx
37885 case 0x24F: case 0x34F: // vinshlx, vinshrx
37886 case 0x28F: case 0x38F: // vinswlx, vinswrx
37887 case 0x2CF: case 0x3CF: // vinsdlx, vinsdrx
37888 if (!allow_V) goto decode_noV;
37889 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
37890 if (dis_av_insert_element( prefix, theInstr ))
37891 goto decode_success;
37892 goto decode_failure;
37894 /* AV Vector Extract Element instructions */
37895 case 0x60D: case 0x64D: case 0x68D: // vextublx, vextuhlx, vextuwlx
37896 case 0x70D: case 0x74D: case 0x78D: // vextubrx, vextuhrx, vextuwrx
37897 if (!allow_V) goto decode_noV;
37898 if (dis_av_extract_element( prefix, theInstr )) goto decode_success;
37899 goto decode_failure;
37902 /* AV Floating Point Arithmetic */
37903 case 0x00A: case 0x04A: // vaddfp, vsubfp
37904 case 0x10A: case 0x14A: case 0x18A: // vrefp, vrsqrtefp, vexptefp
37905 case 0x1CA: // vlogefp
37906 case 0x40A: case 0x44A: // vmaxfp, vminfp
37907 if (!allow_V) goto decode_noV;
37908 if (dis_av_fp_arith( prefix, theInstr )) goto decode_success;
37909 goto decode_failure;
37911 /* AV Floating Point Round/Convert */
37912 case 0x20A: case 0x24A: case 0x28A: // vrfin, vrfiz, vrfip
37913 case 0x2CA: // vrfim
37914 case 0x30A: case 0x34A: case 0x38A: // vcfux, vcfsx, vctuxs
37915 case 0x3CA: // vctsxs
37916 if (!allow_V) goto decode_noV;
37917 if (dis_av_fp_convert( prefix, theInstr )) goto decode_success;
37918 goto decode_failure;
37920 /* AV Merge, Splat, Extract, Insert */
37921 case 0x00C: case 0x04C: case 0x08C: // vmrghb, vmrghh, vmrghw
37922 case 0x10C: case 0x14C: case 0x18C: // vmrglb, vmrglh, vmrglw
37923 case 0x20C: case 0x24C: case 0x28C: // vspltb, vsplth, vspltw
37924 case 0x20D: case 0x24D: // vextractub, vextractuh,
37925 case 0x28D: case 0x2CD: // vextractuw, vextractd,
37926 case 0x30D: case 0x34D: // vinsertb, vinserth
37927 case 0x38D: case 0x3CD: // vinsertw, vinsertd
37928 case 0x30C: case 0x34C: case 0x38C: // vspltisb, vspltish, vspltisw
37929 if (!allow_V) goto decode_noV;
37930 if (dis_av_permute( prefix, theInstr )) goto decode_success;
37931 goto decode_failure;
37933 case 0x68C: case 0x78C: // vmrgow, vmrgew
37934 if (!allow_isa_2_07) goto decode_noP8;
37935 if (dis_av_permute( prefix, theInstr )) goto decode_success;
37936 goto decode_failure;
37938 /* AltiVec 128 bit integer multiply by 10 Instructions */
37939 case 0x201: case 0x001: //vmul10uq, vmul10cuq
37940 case 0x241: case 0x041: //vmul10euq, vmul10ceuq
37941 if (!allow_V) goto decode_noV;
37942 if (!allow_isa_3_0) goto decode_noP9;
37943 if (dis_av_mult10( prefix, theInstr )) goto decode_success;
37944 goto decode_failure;
37946 /* AV Pack, Unpack */
37947 case 0x00E: case 0x04E: case 0x08E: // vpkuhum, vpkuwum, vpkuhus
37948 case 0x0CE: // vpkuwus
37949 case 0x10E: case 0x14E: case 0x18E: // vpkshus, vpkswus, vpkshss
37950 case 0x1CE: // vpkswss
37951 case 0x20E: case 0x24E: case 0x28E: // vupkhsb, vupkhsh, vupklsb
37952 case 0x2CE: // vupklsh
37953 case 0x30E: case 0x34E: case 0x3CE: // vpkpx, vupkhpx, vupklpx
37954 if (!allow_V) goto decode_noV;
37955 if (dis_av_pack( prefix, theInstr )) goto decode_success;
37956 goto decode_failure;
37958 case 0x403: case 0x443: case 0x483: // vabsdub, vabsduh, vabsduw
37959 if (!allow_V) goto decode_noV;
37960 if (dis_abs_diff( prefix, theInstr )) goto decode_success;
37961 goto decode_failure;
37963 case 0x44E: case 0x4CE: case 0x54E: // vpkudum, vpkudus, vpksdus
37964 case 0x5CE: case 0x64E: case 0x6cE: // vpksdss, vupkhsw, vupklsw
37965 if (!allow_isa_2_07) goto decode_noP8;
37966 if (dis_av_pack( prefix, theInstr )) goto decode_success;
37967 goto decode_failure;
37969 case 0x508: case 0x509: // vcipher, vcipherlast
37970 case 0x548: case 0x549: // vncipher, vncipherlast
37971 case 0x5C8: // vsbox
37972 if (!allow_isa_2_07) goto decode_noP8;
37973 if (dis_av_cipher( prefix, theInstr )) goto decode_success;
37974 goto decode_failure;
37976 /* AV Vector Extend Sign Instructions and
37977 * Vector Count Leading/Trailing zero Least-Significant bits Byte.
37978 * Vector Integer Negate Instructions
37980 case 0x602: // vextsb2w, vextsh2w, vextsb2d, vextsh2d, vextsw2d
37981 // vclzlsbb and vctzlsbb
37982 // vnegw, vnegd
37983 // vprtybw, vprtybd, vprtybq
37984 // vctzb, vctzh, vctzw, vctzd
37985 // vextsd2q
37986 if (!allow_V) goto decode_noV;
37987 if ( !(allow_isa_3_1)
37988 && (ifieldRegA( theInstr ) == 27) ) // vextsd2q
37989 goto decode_noIsa3_1;
37990 if (dis_av_extend_sign_count_zero( prefix, theInstr,
37991 allow_isa_3_0 ))
37993 goto decode_success;
37994 goto decode_failure;
37996 case 0x642: // mtvsrbm, mtvsrhm, mtvsrwm, mtvsrdm, mtvsrqm, mtvsrbmi
37997 // vcntmbb, vcntmbh, vcntmbw, vcntmbd
37998 // vexpandbm, vexpandhm, vexpandwm, vexpanddm, vexpandqm
37999 // vextractbm, vextracthm, vextractwm, vextractdm, vextractqm
38000 if (!allow_V) goto decode_noV;
38001 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
38002 if (dis_VSR_byte_mask( prefix, theInstr, abiinfo))
38003 goto decode_success;
38004 goto decode_failure;
38006 case 0x6C2: case 0x682: // vshasigmaw, vshasigmad
38007 if (!allow_isa_2_07) goto decode_noP8;
38008 if (dis_av_hash( prefix, theInstr )) goto decode_success;
38009 goto decode_failure;
38011 case 0x702: case 0x742: // vclzb, vclzh
38012 case 0x782: case 0x7c2: // vclzw, vclzd
38013 if (!allow_isa_2_07) goto decode_noP8;
38014 if (dis_av_count_bitTranspose( prefix, theInstr, opc2 ))
38015 goto decode_success;
38016 goto decode_failure;
38018 case 0x4CC: case 0x54D: // vgnb, vcfuged
38019 case 0x58D: case 0x5CD: // vpextd, vpdepd
38020 case 0x784: case 0x7C4: // vclzdm, vctzdm
38021 if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
38022 if (dis_vector_logical_mask_bits( prefix, theInstr, opc2,
38023 abiinfo ))
38024 goto decode_success;
38025 goto decode_failure;
38027 case 0x703: case 0x743: // vpopcntb, vpopcnth
38028 case 0x783: case 0x7c3: // vpopcntw, vpopcntd
38029 if (!allow_isa_2_07) goto decode_noP8;
38030 if (dis_av_count_bitTranspose( prefix, theInstr, opc2 ))
38031 goto decode_success;
38032 goto decode_failure;
38034 case 0x50c: // vgbbd
38035 case 0x5cc: // vbpermd
38036 if (!allow_isa_2_07) goto decode_noP8;
38037 if (dis_av_count_bitTranspose( prefix, theInstr, opc2 ))
38038 goto decode_success;
38039 goto decode_failure;
38041 case 0x140: case 0x100: // vaddcuq, vadduqm
38042 case 0x540: case 0x500: // vsubcuq, vsubuqm
38043 case 0x54C: // vbpermq
38044 if (!allow_V) goto decode_noV;
38045 if (dis_av_quad( prefix, theInstr, abiinfo)) goto decode_success;
38046 goto decode_failure;
38048 default:
38049 break; // Fall through...
38052 opc2 = IFIELD(theInstr, 0, 10);
38053 switch (opc2) {
38055 /* AV Compare */
38056 case 0x006: case 0x007: case 0x107: // vcmpequb, vcmpneb, vcmpnezb
38057 case 0x046: case 0x047: case 0x147: // vcmpequh, vcmpneh, vcmpnezh
38058 case 0x086: case 0x087: case 0x187: // vcmpequw, vcmpnew, vcmpnezw
38059 case 0x206: case 0x246: case 0x286: // vcmpgtub, vcmpgtuh, vcmpgtuw
38060 case 0x306: case 0x346: case 0x386: // vcmpgtsb, vcmpgtsh, vcmpgtsw
38061 if (!allow_V) goto decode_noV;
38062 if (dis_av_cmp( prefix, theInstr )) goto decode_success;
38063 goto decode_failure;
38065 case 0x0C7: // vcmpequd
38066 case 0x2C7: // vcmpgtud
38067 case 0x3C7: // vcmpgtsd
38068 if (!allow_isa_2_07) goto decode_noP8;
38069 if (dis_av_cmp( prefix, theInstr )) goto decode_success;
38070 goto decode_failure;
38072 /* AV Floating Point Compare */
38073 case 0x0C6: case 0x1C6: case 0x2C6: // vcmpeqfp, vcmpgefp, vcmpgtfp
38074 case 0x3C6: // vcmpbfp
38075 if (!allow_V) goto decode_noV;
38076 if (dis_av_fp_cmp( prefix, theInstr ))
38077 goto decode_success;
38078 goto decode_failure;
38080 default:
38081 goto decode_failure;
38083 break;
38085 default:
38086 goto decode_failure;
38088 decode_noF:
38089 vassert(!allow_F);
38090 if (sigill_diag)
38091 vex_printf("disInstr(ppc): found the Floating Point instruction 0x%x that\n"
38092 "can't be handled by Valgrind on this host. This instruction\n"
38093 "requires a host that supports Floating Point instructions.\n",
38094 theInstr);
38095 goto not_supported;
38096 decode_noV:
38097 vassert(!allow_V);
38098 if (sigill_diag)
38099 vex_printf("disInstr(ppc): found an AltiVec or an e500 instruction 0x%x\n"
38100 "that can't be handled by Valgrind. If this instruction is an\n"
38101 "Altivec instruction, Valgrind must be run on a host that supports\n"
38102 "AltiVec instructions. If the application was compiled for e500, then\n"
38103 "unfortunately Valgrind does not yet support e500 instructions.\n",
38104 theInstr);
38105 goto not_supported;
38106 decode_noVX:
38107 vassert(!allow_VX);
38108 if (sigill_diag)
38109 vex_printf("disInstr(ppc): found the instruction 0x%x that is defined in the\n"
38110 "Power ISA 2.06 ABI but can't be handled by Valgrind on this host.\n"
38111 "This instruction requires a host that supports the ISA 2.06 ABI.\n",
38112 theInstr);
38113 goto not_supported;
38114 decode_noFX:
38115 vassert(!allow_FX);
38116 if (sigill_diag)
38117 vex_printf("disInstr(ppc): found the General Purpose-Optional instruction 0x%x\n"
38118 "that can't be handled by Valgrind on this host. This instruction\n"
38119 "requires a host that supports the General Purpose-Optional instructions.\n",
38120 theInstr);
38121 goto not_supported;
38122 decode_noGX:
38123 vassert(!allow_GX);
38124 if (sigill_diag)
38125 vex_printf("disInstr(ppc): found the Graphics-Optional instruction 0x%x\n"
38126 "that can't be handled by Valgrind on this host. This instruction\n"
38127 "requires a host that supports the Graphics-Optional instructions.\n",
38128 theInstr);
38129 goto not_supported;
38130 decode_noDFP:
38131 vassert(!allow_DFP);
38132 if (sigill_diag)
38133 vex_printf("disInstr(ppc): found the decimal floating point (DFP) instruction 0x%x\n"
38134 "that can't be handled by Valgrind on this host. This instruction\n"
38135 "requires a host that supports DFP instructions.\n",
38136 theInstr);
38137 goto not_supported;
38138 decode_noP8:
38139 vassert(!allow_isa_2_07);
38140 if (sigill_diag)
38141 vex_printf("disInstr(ppc): found the Power 8 instruction 0x%x that can't be handled\n"
38142 "by Valgrind on this host. This instruction requires a host that\n"
38143 "supports Power 8 instructions.\n",
38144 theInstr);
38145 goto not_supported;
38147 decode_noP9:
38148 vassert(!allow_isa_3_0);
38149 if (sigill_diag)
38150 vex_printf("disInstr(ppc): found the Power 9 instruction 0x%x that can't be handled\n"
38151 "by Valgrind on this host. This instruction requires a host that\n"
38152 "supports Power 9 instructions.\n",
38153 theInstr);
38154 goto not_supported;
38156 decode_noIsa3_1:
38157 vassert(!allow_isa_3_1);
38158 if (sigill_diag)
38159 vex_printf("disInstr(ppc): found the Power 10 instruction 0x%x that can't be handled\n"
38160 "by Valgrind on this host. This instruction requires a host that\n"
38161 "supports ISA 3.1 instructions.\n", theInstr);
38162 goto not_supported;
38164 decode_failure:
38165 /* All decode failures end up here. */
38166 opc1 = ifieldOPC(theInstr);
38167 opc2 = (theInstr) & 0x7FF;
38168 if (sigill_diag) {
38170 if (prefix_instruction( prefix )) {
38171 vex_printf("disInstr(ppc): unhandled prefix instruction: "
38172 "prefix = 0x%x, theInstr 0x%x\n", prefix, theInstr);
38173 vex_printf(" primary %d(0x%x), secondary %u(0x%x)\n",
38174 opc1, opc1, opc2, opc2);
38176 } else {
38177 vex_printf("disInstr(ppc): unhandled instruction: "
38178 "0x%x\n", theInstr);
38179 vex_printf(" primary %d(0x%x), secondary %u(0x%x)\n",
38180 opc1, opc1, opc2, opc2);
38184 not_supported:
38185 /* Tell the dispatcher that this insn cannot be decoded, and so has
38186 not been executed, and (is currently) the next to be executed.
38187 CIA should be up-to-date since it is made so at the start of each
38188 insn, but nevertheless be paranoid and update it again right
38189 now. */
38190 putGST( PPC_GST_CIA, mkSzImm(ty, guest_CIA_curr_instr) );
38191 dres.len = 0;
38192 dres.whatNext = Dis_StopHere;
38193 dres.jk_StopHere = Ijk_NoDecode;
38194 return dres;
38195 } /* switch (opc) for the main (primary) opcode switch. */
38197 decode_success:
38198 /* All decode successes end up here. */
38199 switch (dres.whatNext) {
38200 case Dis_Continue:
38201 /* Update the guest current instruction address (CIA) by size of
38202 the instruction just executed. */
38203 putGST( PPC_GST_CIA, mkSzImm(ty, guest_CIA_curr_instr + inst_size));
38204 break;
38205 case Dis_StopHere:
38206 break;
38207 default:
38208 vassert(0);
38210 DIP("\n");
38212 if (dres.len == 0) {
38213 dres.len = inst_size; //Tell Valgrind the size of the instruction just executed
38214 } else {
38215 vassert(dres.len == 20);
38217 return dres;
38220 #undef DIP
38221 #undef DIS
38224 /*------------------------------------------------------------*/
38225 /*--- Top-level fn ---*/
38226 /*------------------------------------------------------------*/
38228 /* Disassemble a single instruction into IR. The instruction
38229 is located in host memory at &guest_code[delta]. */
38231 DisResult disInstr_PPC ( IRSB* irsb_IN,
38232 const UChar* guest_code_IN,
38233 Long delta,
38234 Addr guest_IP,
38235 VexArch guest_arch,
38236 const VexArchInfo* archinfo,
38237 const VexAbiInfo* abiinfo,
38238 VexEndness host_endness_IN,
38239 Bool sigill_diag_IN )
38241 IRType ty;
38242 DisResult dres;
38243 UInt mask32, mask64;
38244 UInt hwcaps_guest = archinfo->hwcaps;
38246 vassert(guest_arch == VexArchPPC32 || guest_arch == VexArchPPC64);
38248 /* global -- ick */
38249 mode64 = guest_arch == VexArchPPC64;
38250 ty = mode64 ? Ity_I64 : Ity_I32;
38251 if (!mode64 && (host_endness_IN == VexEndnessLE)) {
38252 vex_printf("disInstr(ppc): Little Endian 32-bit mode is not supported\n");
38253 dres.len = 0;
38254 dres.whatNext = Dis_StopHere;
38255 dres.jk_StopHere = Ijk_NoDecode;
38256 dres.hint = Dis_HintNone;
38257 return dres;
38260 /* do some sanity checks */
38261 mask32 = VEX_HWCAPS_PPC32_F | VEX_HWCAPS_PPC32_V
38262 | VEX_HWCAPS_PPC32_FX | VEX_HWCAPS_PPC32_GX | VEX_HWCAPS_PPC32_VX
38263 | VEX_HWCAPS_PPC32_DFP | VEX_HWCAPS_PPC32_ISA2_07;
38265 mask64 = VEX_HWCAPS_PPC64_V | VEX_HWCAPS_PPC64_FX
38266 | VEX_HWCAPS_PPC64_GX | VEX_HWCAPS_PPC64_VX | VEX_HWCAPS_PPC64_DFP
38267 | VEX_HWCAPS_PPC64_ISA2_07 | VEX_HWCAPS_PPC64_ISA3_0
38268 | VEX_HWCAPS_PPC64_ISA3_1;
38270 if (mode64) {
38271 vassert((hwcaps_guest & mask32) == 0);
38272 } else {
38273 vassert((hwcaps_guest & mask64) == 0);
38276 /* Set globals (see top of this file) */
38277 guest_code = guest_code_IN;
38278 irsb = irsb_IN;
38279 host_endness = host_endness_IN;
38281 guest_CIA_curr_instr = mkSzAddr(ty, guest_IP);
38282 guest_CIA_bbstart = mkSzAddr(ty, guest_IP - delta);
38284 dres = disInstr_PPC_WRK ( delta, archinfo, abiinfo, sigill_diag_IN );
38286 return dres;
38289 /*--------------------------------------------------------------------*/
38290 /*--- end guest_ppc_toIR.c ---*/
38291 /*--------------------------------------------------------------------*/