1 # e500 core instructions, for PSIM, the PowerPC simulator.
3 # Copyright 2003 Free Software Foundation, Inc.
5 # Contributed by Red Hat Inc; developed under contract from Motorola.
6 # Written by matthew green <mrg@redhat.com>.
8 # This file is part of GDB.
10 # This program is free software; you can redistribute it and/or modify
11 # it under the terms of the GNU General Public License as published by
12 # the Free Software Foundation; either version 2, or (at your option)
15 # This program is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 # GNU General Public License for more details.
20 # You should have received a copy of the GNU General Public License
21 # along with this program; see the file COPYING. If not, write to
22 # the Free Software Foundation, 59 Temple Place - Suite 330,
23 # Boston, MA 02111-1307, USA.
26 # e500 Core Complex Instructions
29 :cache:e500::signed_word *:rAh:RA:(cpu_registers(processor)->e500.gprh + RA)
30 :cache:e500::signed_word *:rSh:RS:(cpu_registers(processor)->e500.gprh + RS)
31 :cache:e500::signed_word *:rBh:RB:(cpu_registers(processor)->e500.gprh + RB)
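# The 64-bit SPE GPRs keep their upper 32 bits in the separate e500.gprh
# array; the rAh/rSh/rBh cache entries above name those high halves, while
# the ordinary rA/rS/rB entries (defined elsewhere) name the architected
# low 32 bits.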
35 #define PPC_INSN_INT_SPR(OUT_MASK, IN_MASK, SPR) \
37 if (CURRENT_MODEL_ISSUE > 0) \
38 ppc_insn_int_spr(MY_INDEX, cpu_model(processor), OUT_MASK, IN_MASK, SPR); \
41 # Schedule an instruction that takes two integer registers and produces a special-purpose output register plus an integer output register
42 void::model-function::ppc_insn_int_spr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned nSPR
43 const unsigned32 int_mask = out_mask | in_mask;
46 while ((model_ptr->int_busy & int_mask) != 0 || model_ptr->spr_busy[nSPR] != 0) {
47 if (WITH_TRACE && ppc_trace[trace_model])
48 model_trace_busy_p(model_ptr, int_mask, 0, 0, nSPR);
50 model_ptr->nr_stalls_data++;
51 model_new_cycle(model_ptr);
54 busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
55 busy_ptr->int_busy |= out_mask;
56 model_ptr->int_busy |= out_mask;
57 busy_ptr->spr_busy = nSPR;
58 model_ptr->spr_busy[nSPR] = 1;
59 busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_mask)) ? 3 : 2;
60 TRACE(trace_model,("Making register %s busy.\n", spr_name(nSPR)));
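# For instance, evmhossf below ends with
#   PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
# which, when CURRENT_MODEL_ISSUE > 0, becomes a call along the lines of
#   ppc_insn_int_spr (MY_INDEX, cpu_model (processor),
#                     RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
# marking RS busy as an integer output and SPEFSCR busy as an SPR.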
63 # SPE Modulo Fractional Multiplication handling support
65 :function:e500::unsigned64:ev_multiply16_smf:signed16 a, signed16 b, int *sat
66 signed32 a32 = a, b32 = b, rv32;
68 *sat = (rv32 & (3<<30)) == (3<<30);
69 return (signed64)rv32 << 1;
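# A small worked example of the 1.15 modulo fractional format assumed here:
# with a = b = 0x4000 (0.5), a32 * b32 is 0x10000000, and the value
# returned is 0x10000000 << 1 = 0x20000000, i.e. 0.25 in 1.31 format.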
71 :function:e500::unsigned64:ev_multiply32_smf:signed32 a, signed32 b, int *sat
72 signed64 rv64, a64 = a, b64 = b;
74 *sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
75 /* Loses top sign bit. */
78 # SPE Saturation handling support
80 :function:e500::signed32:ev_multiply16_ssf:signed16 a, signed16 b, int *sat
82 if (a == 0xffff8000 && b == 0xffff8000)
90 signed32 a32 = a, b32 = b;
93 * sat = (rv32 & (3<<30)) == (3<<30);
94 return (signed64)rv32 << 1;
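# The only 1.15 product that overflows is -1.0 * -1.0 = +1.0, which is why
# the pair of 0xffff8000 operands (sign-extended 0x8000, i.e. -1.0) is
# special-cased above and reported through *sat.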
97 :function:e500::signed64:ev_multiply32_ssf:signed32 a, signed32 b, int *sat
99 if (a == 0x80000000 && b == 0x80000000)
101 rv64 = 0x7fffffffffffffffLL;
107 signed64 a64 = a, b64 = b;
109 *sat = (rv64 & ((signed64)3<<62)) == ((signed64)3<<62);
110 /* Loses top sign bit. */
115 # SPE FP handling support
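# ev_check_guard below inspects the guard bits of a sim_fpu result and,
# when any remain, raises the caller-supplied SPEFSCR guard (fg) and
# inexact (fx) bits for the element being computed.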
118 :function:e500::void:ev_check_guard:sim_fpu *a, int fg, int fx, cpu *processor
120 guard = sim_fpu_guard(a, 0);
122 EV_SET_SPEFSCR_BITS(fg);
124 EV_SET_SPEFSCR_BITS(fx);
126 :function:e500::void:booke_sim_fpu_32to:sim_fpu *dst, unsigned32 packed
127 sim_fpu_32to (dst, packed);
129 /* Set normally unused fields to allow booke arithmetic. */
130 if (dst->class == sim_fpu_class_infinity)
132 dst->normal_exp = 128;
133 dst->fraction = ((unsigned64)1 << 60);
135 else if (dst->class == sim_fpu_class_qnan
136 || dst->class == sim_fpu_class_snan)
138 dst->normal_exp = 128;
139 /* The fraction is set, but without the implicit bit, so we have to OR
140 in the implicit bit. */
141 dst->fraction |= ((unsigned64)1 << 60);
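/* normal_exp 128 is one above the largest single-precision exponent and
   bit 60 is the implicit-one position in sim_fpu's fraction, so after
   this fix-up Inf/NaN behave like out-of-range normal numbers in the
   booke add/overflow checks below.  */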
144 :function:e500::int:booke_sim_fpu_add:sim_fpu *d, sim_fpu *a, sim_fpu *b, int inv, int over, int under, cpu *processor
145 int invalid_operand, overflow_result, underflow_result;
150 underflow_result = 0;
152 /* Treat NaN, Inf, and denorm like normal numbers, and signal invalid
153 operand if it hasn't already been done. */
154 if (EV_IS_INFDENORMNAN (a))
156 a->class = sim_fpu_class_number;
158 EV_SET_SPEFSCR_BITS (inv);
161 if (EV_IS_INFDENORMNAN (b))
163 b->class = sim_fpu_class_number;
165 if (! invalid_operand)
167 EV_SET_SPEFSCR_BITS (inv);
172 sim_fpu_add (d, a, b);
174 dest_exp = booke_sim_fpu_exp (d);
175 /* If this is a denorm, force to zero, and signal underflow if
176 we haven't already indicated invalid operand. */
177 if (dest_exp <= -127)
183 if (! invalid_operand)
185 EV_SET_SPEFSCR_BITS (under);
186 underflow_result = 1;
189 /* If this is Inf/NaN, force to pmax/nmax, and signal overflow if
190 we haven't already indicated invalid operand. */
191 else if (dest_exp >= 127)
197 if (! invalid_operand)
199 EV_SET_SPEFSCR_BITS (over);
203 /* Destination sign is sign of operand with larger magnitude, or
204 the sign of the first operand if operands have the same
205 magnitude. Thus if the result is zero, we force it to have
206 the sign of the first operand. */
207 else if (d->fraction == 0)
210 return invalid_operand || overflow_result || underflow_result;
212 :function:e500::unsigned32:ev_fs_add:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
217 booke_sim_fpu_32to (&a, aa);
218 booke_sim_fpu_32to (&b, bb);
220 exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
223 sim_fpu_to32 (&w, &d);
225 ev_check_guard(&d, fg, fx, processor);
228 :function:e500::unsigned32:ev_fs_sub:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
233 booke_sim_fpu_32to (&a, aa);
234 booke_sim_fpu_32to (&b, bb);
236 /* Invert sign of second operand, and add. */
238 exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
241 sim_fpu_to32 (&w, &d);
243 ev_check_guard(&d, fg, fx, processor);
246 # sim_fpu_exp leaves the normal_exp field undefined for Inf and NaN.
247 # The booke algorithms require exp values, so we fake them here.
248 # fixme: sim_fpu_exp apparently leaves it undefined for zero as well, but should not.
249 :function:e500::unsigned32:booke_sim_fpu_exp:sim_fpu *x
250 int y = sim_fpu_is (x);
251 if (y == SIM_FPU_IS_PZERO || y == SIM_FPU_IS_NZERO)
253 else if (y == SIM_FPU_IS_SNAN || y == SIM_FPU_IS_QNAN
254 || y == SIM_FPU_IS_NINF || y == SIM_FPU_IS_PINF)
257 return sim_fpu_exp (x);
259 :function:e500::unsigned32:ev_fs_mul:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int fg, int fx, cpu *processor
262 int sa, sb, ea, eb, ei;
263 sim_fpu_32to (&a, aa);
264 sim_fpu_32to (&b, bb);
265 sa = sim_fpu_sign(&a);
266 sb = sim_fpu_sign(&b);
267 ea = booke_sim_fpu_exp(&a);
268 eb = booke_sim_fpu_exp(&b);
270 if (sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
275 EV_SET_SPEFSCR_BITS(over);
278 sim_fpu_to32 (&w, &d);
279 w &= 0x7fffffff; /* Clear sign bit. */
286 EV_SET_SPEFSCR_BITS(over);
289 sim_fpu_to32 (&w, &d);
290 w |= 0x80000000; /* Set sign bit. */
293 if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
294 EV_SET_SPEFSCR_BITS(inv);
295 sim_fpu_mul (&d, &a, &b);
296 sim_fpu_to32 (&w, &d);
301 :function:e500::unsigned32:ev_fs_div:unsigned32 aa, unsigned32 bb, int inv, int over, int under, int dbz, int fg, int fx, cpu *processor
304 int sa, sb, ea, eb, ei;
306 sim_fpu_32to (&a, aa);
307 sim_fpu_32to (&b, bb);
308 sa = sim_fpu_sign(&a);
309 sb = sim_fpu_sign(&b);
310 ea = booke_sim_fpu_exp(&a);
311 eb = booke_sim_fpu_exp(&b);
314 /* Special cases to handle behaviour of e500 hardware.
316 if (sim_fpu_is_nan (&a) || sim_fpu_is_nan (&b)
317 || sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
319 if (sim_fpu_is_snan (&a) || sim_fpu_is_snan (&b))
321 if (bb == 0x3f800000)
323 else if (aa == 0x7fc00001)
331 else if (sim_fpu_is_infinity (&a) && sim_fpu_is_infinity (&b))
334 sim_fpu_32to (&d, 0x3f800000);
336 sim_fpu_32to (&d, 0xbf800000);
337 sim_fpu_to32 (&w, &d);
342 EV_SET_SPEFSCR_BITS(over);
343 } else if (ei <= 1) {
345 sim_fpu_to32 (&w, &d);
346 w &= 0x7fffffff; /* Clear sign bit. */
353 EV_SET_SPEFSCR_BITS(over);
354 } else if (ei <= 1) {
356 sim_fpu_to32 (&w, &d);
357 w |= 0x80000000; /* Set sign bit. */
360 if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
361 EV_SET_SPEFSCR_BITS(inv);
362 if (sim_fpu_is_zero (&b))
364 if (sim_fpu_is_zero (&a))
365 EV_SET_SPEFSCR_BITS(dbz);
367 EV_SET_SPEFSCR_BITS(inv);
368 w = sa ? EV_NMAX : EV_PMAX;
372 sim_fpu_div (&d, &a, &b);
373 sim_fpu_to32 (&w, &d);
374 ev_check_guard(&d, fg, fx, processor);
382 # A.2.7 Integer SPE Simple Instructions
385 0.4,6.RS,11.RA,16.RB,21.512:X:e500:evaddw %RS,%RA,%RB:Vector Add Word
389 EV_SET_REG2(*rSh, *rS, w1, w2);
390 //printf("evaddw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
391 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
393 0.4,6.RS,11.IMM,16.RB,21.514:X:e500:evaddiw %RS,%RB,%IMM:Vector Add Immediate Word
397 EV_SET_REG2(*rSh, *rS, w1, w2);
398 //printf("evaddiw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
399 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
401 0.4,6.RS,11.RA,16.RB,21.516:X:e500:evsubfw %RS,%RA,%RB:Vector Subtract from Word
405 EV_SET_REG2(*rSh, *rS, w1, w2);
406 //printf("evsubfw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
407 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
409 0.4,6.RS,11.IMM,16.RB,21.518:X:e500:evsubifw %RS,%RB,%IMM:Vector Subtract Immediate from Word
413 EV_SET_REG2(*rSh, *rS, w1, w2);
414 //printf("evsubifw: *rSh = %08x; *rS = %08x; IMM = %d\n", *rSh, *rS, IMM);
415 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
417 0.4,6.RS,11.RA,16.0,21.520:X:e500:evabs %RS,%RA:Vector Absolute Value
420 if (w1 < 0 && w1 != 0x80000000)
423 if (w2 < 0 && w2 != 0x80000000)
425 EV_SET_REG2(*rSh, *rS, w1, w2);
426 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
428 0.4,6.RS,11.RA,16.0,21.521:X:e500:evneg %RS,%RA:Vector Negate
431 /* Negating the most negative number yields the most negative number. */
432 if (w1 != 0x80000000)
435 if (w2 != 0x80000000)
437 EV_SET_REG2(*rSh, *rS, w1, w2);
438 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
440 0.4,6.RS,11.RA,16.0,21.522:X:e500:evextsb %RS,%RA:Vector Extend Signed Byte
448 EV_SET_REG2(*rSh, *rS, w1, w2);
449 PPC_INSN_INT(RS_BITMASK, RA_BITMASK , 0);
451 0.4,6.RS,11.RA,16.0,21.523:X:e500:evextsh %RS,%RA:Vector Extend Signed Half Word
459 EV_SET_REG2(*rSh, *rS, w1, w2);
460 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
462 0.4,6.RS,11.RA,16.RB,21.529:X:e500:evand %RS,%RA,%RB:Vector AND
466 EV_SET_REG2(*rSh, *rS, w1, w2);
467 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
469 0.4,6.RS,11.RA,16.RB,21.535:X:e500:evor %RS,%RA,%RB:Vector OR
473 EV_SET_REG2(*rSh, *rS, w1, w2);
474 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
476 0.4,6.RS,11.RA,16.RB,21.534:X:e500:evxor %RS,%RA,%RB:Vector XOR
480 EV_SET_REG2(*rSh, *rS, w1, w2);
481 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
483 0.4,6.RS,11.RA,16.RB,21.542:X:e500:evnand %RS,%RA,%RB:Vector NAND
487 EV_SET_REG2(*rSh, *rS, w1, w2);
488 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
490 0.4,6.RS,11.RA,16.RB,21.536:X:e500:evnor %RS,%RA,%RB:Vector NOR
494 EV_SET_REG2(*rSh, *rS, w1, w2);
495 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
497 0.4,6.RS,11.RA,16.RB,21.537:X:e500:eveqv %RS,%RA,%RB:Vector Equivalent
501 EV_SET_REG2(*rSh, *rS, w1, w2);
502 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
504 0.4,6.RS,11.RA,16.RB,21.530:X:e500:evandc %RS,%RA,%RB:Vector AND with Complement
508 EV_SET_REG2(*rSh, *rS, w1, w2);
509 //printf("evandc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
510 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
512 0.4,6.RS,11.RA,16.RB,21.539:X:e500:evorc %RS,%RA,%RB:Vector OR with Complement
516 EV_SET_REG2(*rSh, *rS, w1, w2);
517 //printf("evorc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
518 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
520 0.4,6.RS,11.RA,16.RB,21.552:X:e500:evrlw %RS,%RA,%RB:Vector Rotate Left Word
521 unsigned32 nh, nl, w1, w2;
524 w1 = ((unsigned32)*rAh) << nh | ((unsigned32)*rAh) >> (32 - nh);
525 w2 = ((unsigned32)*rA) << nl | ((unsigned32)*rA) >> (32 - nl);
526 EV_SET_REG2(*rSh, *rS, w1, w2);
527 //printf("evrlw: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
528 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
530 0.4,6.RS,11.RA,16.UIMM,21.554:X:e500:evrlwi %RS,%RA,%UIMM:Vector Rotate Left Word Immediate
531 unsigned32 w1, w2, imm;
532 imm = (unsigned32)UIMM;
533 w1 = ((unsigned32)*rAh) << imm | ((unsigned32)*rAh) >> (32 - imm);
534 w2 = ((unsigned32)*rA) << imm | ((unsigned32)*rA) >> (32 - imm);
535 EV_SET_REG2(*rSh, *rS, w1, w2);
536 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
538 0.4,6.RS,11.RA,16.RB,21.548:X:e500:evslw %RS,%RA,%RB:Vector Shift Left Word
539 unsigned32 nh, nl, w1, w2;
542 w1 = ((unsigned32)*rAh) << nh;
543 w2 = ((unsigned32)*rA) << nl;
544 EV_SET_REG2(*rSh, *rS, w1, w2);
545 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
547 0.4,6.RS,11.RA,16.UIMM,21.550:X:e500:evslwi %RS,%RA,%UIMM:Vector Shift Left Word Immediate
548 unsigned32 w1, w2, imm = UIMM;
549 w1 = ((unsigned32)*rAh) << imm;
550 w2 = ((unsigned32)*rA) << imm;
551 EV_SET_REG2(*rSh, *rS, w1, w2);
552 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
554 0.4,6.RS,11.RA,16.RB,21.545:X:e500:evsrws %RS,%RA,%RB:Vector Shift Right Word Signed
559 w1 = ((signed32)*rAh) >> nh;
560 w2 = ((signed32)*rA) >> nl;
561 EV_SET_REG2(*rSh, *rS, w1, w2);
562 //printf("evsrws: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
563 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
565 0.4,6.RS,11.RA,16.RB,21.544:X:e500:evsrwu %RS,%RA,%RB:Vector Shift Right Word Unsigned
566 unsigned32 w1, w2, nh, nl;
569 w1 = ((unsigned32)*rAh) >> nh;
570 w2 = ((unsigned32)*rA) >> nl;
571 EV_SET_REG2(*rSh, *rS, w1, w2);
572 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
574 0.4,6.RS,11.RA,16.UIMM,21.547:X:e500:evsrwis %RS,%RA,%UIMM:Vector Shift Right Word Immediate Signed
576 unsigned32 imm = UIMM;
577 w1 = ((signed32)*rAh) >> imm;
578 w2 = ((signed32)*rA) >> imm;
579 EV_SET_REG2(*rSh, *rS, w1, w2);
580 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
582 0.4,6.RS,11.RA,16.UIMM,21.546:X:e500:evsrwiu %RS,%RA,%UIMM:Vector Shift Right Word Immediate Unsigned
583 unsigned32 w1, w2, imm = UIMM;
584 w1 = ((unsigned32)*rAh) >> imm;
585 w2 = ((unsigned32)*rA) >> imm;
586 EV_SET_REG2(*rSh, *rS, w1, w2);
587 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
589 0.4,6.RS,11.RA,16.0,21.525:X:e500:evcntlzw %RS,%RA:Vector Count Leading Zeros Word
590 unsigned32 w1, w2, mask, c1, c2;
591 for (c1 = 0, mask = 0x80000000, w1 = *rAh;
592 !(w1 & mask) && mask != 0; mask >>= 1)
594 for (c2 = 0, mask = 0x80000000, w2 = *rA;
595 !(w2 & mask) && mask != 0; mask >>= 1)
597 EV_SET_REG2(*rSh, *rS, c1, c2);
598 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
600 0.4,6.RS,11.RA,16.0,21.526:X:e500:evcntlsw %RS,%RA:Vector Count Leading Sign Bits Word
601 unsigned32 w1, w2, mask, sign_bit, c1, c2;
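/* Count how many leading bits equal the sign bit: sign_bit is shifted
   right in step with mask, so each iteration compares the next bit of
   the word with its original sign.  */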
602 for (c1 = 0, mask = 0x80000000, w1 = *rAh, sign_bit = w1 & mask;
603 ((w1 & mask) == sign_bit) && mask != 0;
604 mask >>= 1, sign_bit >>= 1)
606 for (c2 = 0, mask = 0x80000000, w2 = *rA, sign_bit = w2 & mask;
607 ((w2 & mask) == sign_bit) && mask != 0;
608 mask >>= 1, sign_bit >>= 1)
610 EV_SET_REG2(*rSh, *rS, c1, c2);
611 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
613 0.4,6.RS,11.RA,16.0,21.524:X:e500:evrndw %RS,%RA:Vector Round Word
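/* Round each 32-bit word to the nearest multiple of 0x10000 (ties round
   up), clearing the low half word.  */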
615 w1 = ((unsigned32)*rAh + 0x8000) & 0xffff0000;
616 w2 = ((unsigned32)*rA + 0x8000) & 0xffff0000;
617 EV_SET_REG2(*rSh, *rS, w1, w2);
618 //printf("evrndw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
619 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
621 0.4,6.RS,11.RA,16.RB,21.556:X:e500:evmergehi %RS,%RA,%RB:Vector Merge High
625 EV_SET_REG2(*rSh, *rS, w1, w2);
626 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
628 0.4,6.RS,11.RA,16.RB,21.557:X:e500:evmergelo %RS,%RA,%RB:Vector Merge Low
632 EV_SET_REG2(*rSh, *rS, w1, w2);
633 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
635 0.4,6.RS,11.RA,16.RB,21.559:X:e500:evmergelohi %RS,%RA,%RB:Vector Merge Low/High
639 EV_SET_REG2(*rSh, *rS, w1, w2);
640 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
642 0.4,6.RS,11.RA,16.RB,21.558:X:e500:evmergehilo %RS,%RA,%RB:Vector Merge High/Low
646 EV_SET_REG2(*rSh, *rS, w1, w2);
647 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
649 0.4,6.RS,11.SIMM,16.0,21.553:X:e500:evsplati %RS,%SIMM:Vector Splat Immediate
654 EV_SET_REG2(*rSh, *rS, w, w);
655 PPC_INSN_INT(RS_BITMASK, 0, 0);
657 0.4,6.RS,11.SIMM,16.0,21.555:X:e500:evsplatfi %RS,%SIMM:Vector Splat Fractional Immediate
660 EV_SET_REG2(*rSh, *rS, w, w);
661 PPC_INSN_INT(RS_BITMASK, 0, 0);
663 0.4,6.BF,9.0,11.RA,16.RB,21.561:X:e500:evcmpgts %BF,%RA,%RB:Vector Compare Greater Than Signed
664 signed32 ah, al, bh, bl;
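/* Pack the two per-word results into the 4-bit value w: ch (high word)
   in the most significant bit, cl (low word) next, then their OR, then
   their AND in the least significant bit.  */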
678 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
680 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
682 0.4,6.BF,9.0,11.RA,16.RB,21.560:X:e500:evcmpgtu %BF,%RA,%RB:Vector Compare Greater Than Unsigned
683 unsigned32 ah, al, bh, bl;
697 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
699 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
701 0.4,6.BF,9.0,11.RA,16.RB,21.563:X:e500:evcmplts %BF,%RA,%RB:Vector Compare Less Than Signed
702 signed32 ah, al, bh, bl;
716 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
718 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
720 0.4,6.BF,9.0,11.RA,16.RB,21.562:X:e500:evcmpltu %BF,%RA,%RB:Vector Compare Less Than Unsigned
721 unsigned32 ah, al, bh, bl;
735 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
737 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
739 0.4,6.BF,9.0,11.RA,16.RB,21.564:X:e500:evcmpeq %BF,%RA,%RB:Vector Compare Equal
740 unsigned32 ah, al, bh, bl;
754 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
756 //printf("evcmpeq: ch %d cl %d BF %d, CR is now %08x\n", ch, cl, BF, CR);
757 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
759 0.4,6.RS,11.RA,16.RB,21.79,29.CRFS:X:e500:evsel %RS,%RA,%RB,%CRFS:Vector Select
771 EV_SET_REG2(*rSh, *rS, w1, w2);
772 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
774 0.4,6.RS,11.RA,16.RB,21.527:X:e500:brinc %RS,%RA,%RB:Bit Reversed Increment
775 unsigned32 w1, w2, a, d, mask;
776 mask = (*rB) & 0xffff;
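/* Bit-reversed increment: force the bits outside the mask to one so
   that, once the value is bit-reversed, the added one carries straight
   through them into the reversed masked field, then reverse back.
   This is the address stepping used for FFT-style bit-reversed
   indexing.  */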
778 d = EV_BITREVERSE16(1 + EV_BITREVERSE16(a | ~mask));
779 *rS = ((*rA) & 0xffff0000) | (d & 0xffff);
780 //printf("brinc: *rS = %08x\n", *rS);
781 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
784 # A.2.8 Integer SPE Complex Instructions
787 0.4,6.RS,11.RA,16.RB,21.1031:EVX:e500:evmhossf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional
788 signed16 al, ah, bl, bh;
792 al = (signed16) EV_LOHALF (*rA);
793 ah = (signed16) EV_LOHALF (*rAh);
794 bl = (signed16) EV_LOHALF (*rB);
795 bh = (signed16) EV_LOHALF (*rBh);
796 tl = ev_multiply16_ssf (al, bl, &movl);
797 th = ev_multiply16_ssf (ah, bh, &movh);
798 EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
799 EV_SATURATE (movl, 0x7fffffff, tl));
800 EV_SET_SPEFSCR_OV (movl, movh);
801 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
803 0.4,6.RS,11.RA,16.RB,21.1063:EVX:e500:evmhossfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional Accumulate
804 signed16 al, ah, bl, bh;
808 al = (signed16) EV_LOHALF (*rA);
809 ah = (signed16) EV_LOHALF (*rAh);
810 bl = (signed16) EV_LOHALF (*rB);
811 bh = (signed16) EV_LOHALF (*rBh);
812 tl = ev_multiply16_ssf (al, bl, &movl);
813 th = ev_multiply16_ssf (ah, bh, &movh);
814 EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
815 EV_SATURATE (movl, 0x7fffffff, tl));
816 EV_SET_SPEFSCR_OV (movl, movh);
817 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
819 0.4,6.RS,11.RA,16.RB,21.1039:EVX:e500:evmhosmf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional
820 signed16 al, ah, bl, bh;
824 al = (signed16) EV_LOHALF (*rA);
825 ah = (signed16) EV_LOHALF (*rAh);
826 bl = (signed16) EV_LOHALF (*rB);
827 bh = (signed16) EV_LOHALF (*rBh);
828 tl = ev_multiply16_smf (al, bl, & dummy);
829 th = ev_multiply16_smf (ah, bh, & dummy);
830 EV_SET_REG2 (*rSh, *rS, th, tl);
831 PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
833 0.4,6.RS,11.RA,16.RB,21.1071:EVX:e500:evmhosmfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional Accumulate
834 signed32 al, ah, bl, bh;
838 al = (signed16) EV_LOHALF (*rA);
839 ah = (signed16) EV_LOHALF (*rAh);
840 bl = (signed16) EV_LOHALF (*rB);
841 bh = (signed16) EV_LOHALF (*rBh);
842 tl = ev_multiply16_smf (al, bl, & dummy);
843 th = ev_multiply16_smf (ah, bh, & dummy);
844 EV_SET_REG2_ACC (*rSh, *rS, th, tl);
845 PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
847 0.4,6.RS,11.RA,16.RB,21.1037:EVX:e500:evmhosmi %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer
848 signed32 al, ah, bl, bh, tl, th;
849 al = (signed32)(signed16)EV_LOHALF(*rA);
850 ah = (signed32)(signed16)EV_LOHALF(*rAh);
851 bl = (signed32)(signed16)EV_LOHALF(*rB);
852 bh = (signed32)(signed16)EV_LOHALF(*rBh);
855 EV_SET_REG2(*rSh, *rS, th, tl);
856 //printf("evmhosmi: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
857 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
859 0.4,6.RS,11.RA,16.RB,21.1069:EVX:e500:evmhosmia %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer Accumulate
860 signed32 al, ah, bl, bh, tl, th;
861 al = (signed32)(signed16)EV_LOHALF(*rA);
862 ah = (signed32)(signed16)EV_LOHALF(*rAh);
863 bl = (signed32)(signed16)EV_LOHALF(*rB);
864 bh = (signed32)(signed16)EV_LOHALF(*rBh);
867 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
868 //printf("evmhosmia: ACC = %08x; *rSh = %08x; *rS = %08x\n", ACC, *rSh, *rS);
869 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
871 0.4,6.RS,11.RA,16.RB,21.1036:EVX:e500:evmhoumi %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer
872 unsigned32 al, ah, bl, bh, tl, th;
873 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
874 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
875 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
876 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
879 EV_SET_REG2(*rSh, *rS, th, tl);
880 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
882 0.4,6.RS,11.RA,16.RB,21.1068:EVX:e500:evmhoumia %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer Accumulate
883 unsigned32 al, ah, bl, bh, tl, th;
884 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
885 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
886 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
887 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
890 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
891 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
893 0.4,6.RS,11.RA,16.RB,21.1027:EVX:e500:evmhessf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional
894 signed16 al, ah, bl, bh;
898 al = (signed16) EV_HIHALF (*rA);
899 ah = (signed16) EV_HIHALF (*rAh);
900 bl = (signed16) EV_HIHALF (*rB);
901 bh = (signed16) EV_HIHALF (*rBh);
902 tl = ev_multiply16_ssf (al, bl, &movl);
903 th = ev_multiply16_ssf (ah, bh, &movh);
904 EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
905 EV_SATURATE (movl, 0x7fffffff, tl));
906 EV_SET_SPEFSCR_OV (movl, movh);
907 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
909 0.4,6.RS,11.RA,16.RB,21.1059:EVX:e500:evmhessfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional Accumulate
910 signed16 al, ah, bl, bh;
914 al = (signed16) EV_HIHALF (*rA);
915 ah = (signed16) EV_HIHALF (*rAh);
916 bl = (signed16) EV_HIHALF (*rB);
917 bh = (signed16) EV_HIHALF (*rBh);
918 tl = ev_multiply16_ssf (al, bl, &movl);
919 th = ev_multiply16_ssf (ah, bh, &movh);
920 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
921 EV_SATURATE (movl, 0x7fffffff, tl));
922 EV_SET_SPEFSCR_OV (movl, movh);
923 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
925 0.4,6.RS,11.RA,16.RB,21.1035:EVX:e500:evmhesmf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional
926 signed16 al, ah, bl, bh;
930 al = (signed16) EV_HIHALF (*rA);
931 ah = (signed16) EV_HIHALF (*rAh);
932 bl = (signed16) EV_HIHALF (*rB);
933 bh = (signed16) EV_HIHALF (*rBh);
934 tl = ev_multiply16_smf (al, bl, &movl);
935 th = ev_multiply16_smf (ah, bh, &movh);
936 EV_SET_REG2 (*rSh, *rS, th, tl);
937 EV_SET_SPEFSCR_OV (movl, movh);
938 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
940 0.4,6.RS,11.RA,16.RB,21.1067:EVX:e500:evmhesmfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional Accumulate
941 signed16 al, ah, bl, bh;
945 al = (signed16) EV_HIHALF (*rA);
946 ah = (signed16) EV_HIHALF (*rAh);
947 bl = (signed16) EV_HIHALF (*rB);
948 bh = (signed16) EV_HIHALF (*rBh);
949 tl = ev_multiply16_smf (al, bl, & dummy);
950 th = ev_multiply16_smf (ah, bh, & dummy);
951 EV_SET_REG2_ACC (*rSh, *rS, th, tl);
952 PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
954 0.4,6.RS,11.RA,16.RB,21.1033:EVX:e500:evmhesmi %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer
955 signed16 al, ah, bl, bh;
958 al = (signed16) EV_HIHALF (*rA);
959 ah = (signed16) EV_HIHALF (*rAh);
960 bl = (signed16) EV_HIHALF (*rB);
961 bh = (signed16) EV_HIHALF (*rBh);
964 EV_SET_REG2 (*rSh, *rS, th, tl);
965 PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
967 0.4,6.RS,11.RA,16.RB,21.1065:EVX:e500:evmhesmia %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer Accumulate
968 signed32 al, ah, bl, bh, tl, th;
969 al = (signed32)(signed16)EV_HIHALF(*rA);
970 ah = (signed32)(signed16)EV_HIHALF(*rAh);
971 bl = (signed32)(signed16)EV_HIHALF(*rB);
972 bh = (signed32)(signed16)EV_HIHALF(*rBh);
975 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
976 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
978 0.4,6.RS,11.RA,16.RB,21.1032:EVX:e500:evmheumi %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer
979 unsigned32 al, ah, bl, bh, tl, th;
980 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
981 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
982 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
983 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
986 EV_SET_REG2(*rSh, *rS, th, tl);
987 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
989 0.4,6.RS,11.RA,16.RB,21.1064:EVX:e500:evmheumia %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer Accumulate
990 unsigned32 al, ah, bl, bh, tl, th;
991 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
992 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
993 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
994 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
997 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
998 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1000 0.4,6.RS,11.RA,16.RB,21.1287:EVX:e500:evmhossfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate into Words
1001 signed16 al, ah, bl, bh;
1004 int movl, movh, ovl, ovh;
1006 al = (signed16) EV_LOHALF (*rA);
1007 ah = (signed16) EV_LOHALF (*rAh);
1008 bl = (signed16) EV_LOHALF (*rB);
1009 bh = (signed16) EV_LOHALF (*rBh);
1010 t1 = ev_multiply16_ssf (ah, bh, &movh);
1011 t2 = ev_multiply16_ssf (al, bl, &movl);
1012 th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
1013 tl = EV_ACCLOW + EV_SATURATE (movl, 0x7fffffff, t2);
1014 ovh = EV_SAT_P_S32 (th);
1015 ovl = EV_SAT_P_S32 (tl);
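/* EV_SATURATE_ACC presumably clamps to 0x80000000 or 0x7fffffff,
   according to the sign of its second argument, when the overflow flag
   is set, and otherwise passes the final argument through unchanged.  */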
1016 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
1017 EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
1018 EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
1019 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1021 0.4,6.RS,11.RA,16.RB,21.1285:EVX:e500:evmhossiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate into Words
1022 signed32 al, ah, bl, bh;
1023 signed64 t1, t2, tl, th;
1025 al = (signed32)(signed16)EV_LOHALF(*rA);
1026 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1027 bl = (signed32)(signed16)EV_LOHALF(*rB);
1028 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1031 th = EV_ACCHIGH + t1;
1032 tl = EV_ACCLOW + t2;
1033 ovh = EV_SAT_P_S32(th);
1034 ovl = EV_SAT_P_S32(tl);
1035 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1036 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1037 //printf("evmhossiaaw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
1038 //printf("evmhossiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1039 EV_SET_SPEFSCR_OV(ovl, ovh);
1040 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1042 0.4,6.RS,11.RA,16.RB,21.1295:EVX:e500:evmhosmfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate into Words
1043 signed32 al, ah, bl, bh;
1044 signed64 t1, t2, tl, th;
1045 al = (signed32)(signed16)EV_LOHALF(*rA);
1046 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1047 bl = (signed32)(signed16)EV_LOHALF(*rB);
1048 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1049 t1 = ((signed64)ah * bh) << 1;
1050 t2 = ((signed64)al * bl) << 1;
1051 th = EV_ACCHIGH + (t1 & 0xffffffff);
1052 tl = EV_ACCLOW + (t2 & 0xffffffff);
1053 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1054 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1056 0.4,6.RS,11.RA,16.RB,21.1293:EVX:e500:evmhosmiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate into Words
1057 signed32 al, ah, bl, bh;
1058 signed64 t1, t2, tl, th;
1059 al = (signed32)(signed16)EV_LOHALF(*rA);
1060 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1061 bl = (signed32)(signed16)EV_LOHALF(*rB);
1062 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1065 th = EV_ACCHIGH + t1;
1066 tl = EV_ACCLOW + t2;
1067 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1068 //printf("evmhosmiaaw: al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", al, ah, bl, bh, t1, t2, tl, th);
1069 //printf("evmhosmiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1070 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1072 0.4,6.RS,11.RA,16.RB,21.1284:EVX:e500:evmhousiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate into Words
1073 unsigned32 al, ah, bl, bh;
1077 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1078 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
1079 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1080 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
1083 th = (signed64)EV_ACCHIGH + (signed64)t1;
1084 tl = (signed64)EV_ACCLOW + (signed64)t2;
1085 ovh = EV_SAT_P_U32(th);
1086 ovl = EV_SAT_P_U32(tl);
1087 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
1088 EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
1089 //printf("evmhousiaaw: al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qu th %qu\n", al, ah, bl, bh, t1, t2, tl, th);
1090 //printf("evmhousiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1091 EV_SET_SPEFSCR_OV(ovl, ovh);
1092 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1094 0.4,6.RS,11.RA,16.RB,21.1292:EVX:e500:evmhoumiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate into Words
1095 unsigned32 al, ah, bl, bh;
1098 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1099 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
1100 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1101 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
1104 th = EV_ACCHIGH + t1;
1105 tl = EV_ACCLOW + t2;
1106 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1107 //printf("evmhoumiaaw: al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qu th %qu\n", al, ah, bl, bh, t1, t2, tl, th);
1108 //printf("evmhoumiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1109 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1111 0.4,6.RS,11.RA,16.RB,21.1283:EVX:e500:evmhessfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate into Words
1112 signed16 al, ah, bl, bh;
1115 int movl, movh, ovl, ovh;
1117 al = (signed16) EV_HIHALF (*rA);
1118 ah = (signed16) EV_HIHALF (*rAh);
1119 bl = (signed16) EV_HIHALF (*rB);
1120 bh = (signed16) EV_HIHALF (*rBh);
1121 t1 = ev_multiply16_ssf (ah, bh, &movh);
1122 t2 = ev_multiply16_ssf (al, bl, &movl);
1123 th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
1124 tl = EV_ACCLOW + EV_SATURATE (movl, 0x7fffffff, t2);
1125 ovh = EV_SAT_P_S32 (th);
1126 ovl = EV_SAT_P_S32 (tl);
1127 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
1128 EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
1129 EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
1130 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1132 0.4,6.RS,11.RA,16.RB,21.1281:EVX:e500:evmhessiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate into Words
1133 signed32 al, ah, bl, bh;
1134 signed64 t1, t2, tl, th;
1136 al = (signed32)(signed16)EV_HIHALF(*rA);
1137 ah = (signed32)(signed16)EV_HIHALF(*rAh);
1138 bl = (signed32)(signed16)EV_HIHALF(*rB);
1139 bh = (signed32)(signed16)EV_HIHALF(*rBh);
1142 th = EV_ACCHIGH + t1;
1143 tl = EV_ACCLOW + t2;
1144 ovh = EV_SAT_P_S32(th);
1145 ovl = EV_SAT_P_S32(tl);
1146 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1147 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1148 //printf("evmhessiaaw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
1149 //printf("evmhessiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1150 EV_SET_SPEFSCR_OV(ovl, ovh);
1151 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1153 0.4,6.RS,11.RA,16.RB,21.1291:EVX:e500:evmhesmfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate into Words
1154 signed16 al, ah, bl, bh;
1155 signed32 t1, t2, th, tl;
1158 al = (signed16)EV_HIHALF(*rA);
1159 ah = (signed16)EV_HIHALF(*rAh);
1160 bl = (signed16)EV_HIHALF(*rB);
1161 bh = (signed16)EV_HIHALF(*rBh);
1162 t1 = ev_multiply16_smf (ah, bh, &dummy);
1163 t2 = ev_multiply16_smf (al, bl, &dummy);
1164 th = EV_ACCHIGH + t1;
1165 tl = EV_ACCLOW + t2;
1166 EV_SET_REG2_ACC(*rSh, *rS, th, tl);
1167 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1169 0.4,6.RS,11.RA,16.RB,21.1289:EVX:e500:evmhesmiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate into Words
1170 signed32 al, ah, bl, bh;
1171 signed64 t1, t2, tl, th;
1172 al = (signed32)(signed16)EV_HIHALF(*rA);
1173 ah = (signed32)(signed16)EV_HIHALF(*rAh);
1174 bl = (signed32)(signed16)EV_HIHALF(*rB);
1175 bh = (signed32)(signed16)EV_HIHALF(*rBh);
1178 th = EV_ACCHIGH + t1;
1179 tl = EV_ACCLOW + t2;
1180 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1181 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1183 0.4,6.RS,11.RA,16.RB,21.1280:EVX:e500:evmheusiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate into Words
1184 unsigned32 al, ah, bl, bh;
1188 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1189 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
1190 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1191 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
1194 th = (signed64)EV_ACCHIGH + (signed64)t1;
1195 tl = (signed64)EV_ACCLOW + (signed64)t2;
1196 ovh = EV_SAT_P_U32(th);
1197 ovl = EV_SAT_P_U32(tl);
1198 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
1199 EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
1200 EV_SET_SPEFSCR_OV(ovl, ovh);
1201 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1203 0.4,6.RS,11.RA,16.RB,21.1288:EVX:e500:evmheumiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate into Words
1204 unsigned32 al, ah, bl, bh;
1207 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1208 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
1209 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1210 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
1213 th = EV_ACCHIGH + t1;
1214 tl = EV_ACCLOW + t2;
1215 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1216 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1219 0.4,6.RS,11.RA,16.RB,21.1415:EVX:e500:evmhossfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate Negative into Words
1220 signed16 al, ah, bl, bh;
1223 int movl, movh, ovl, ovh;
1225 al = (signed16) EV_LOHALF (*rA);
1226 ah = (signed16) EV_LOHALF (*rAh);
1227 bl = (signed16) EV_LOHALF (*rB);
1228 bh = (signed16) EV_LOHALF (*rBh);
1229 t1 = ev_multiply16_ssf (ah, bh, &movh);
1230 t2 = ev_multiply16_ssf (al, bl, &movl);
1231 th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
1232 tl = EV_ACCLOW - EV_SATURATE (movl, 0x7fffffff, t2);
1233 ovh = EV_SAT_P_S32 (th);
1234 ovl = EV_SAT_P_S32 (tl);
1235 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
1236 EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
1237 EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
1238 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1240 0.4,6.RS,11.RA,16.RB,21.1413:EVX:e500:evmhossianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate Negative into Words
1241 signed32 al, ah, bl, bh;
1242 signed64 t1, t2, tl, th;
1244 al = (signed32)(signed16)EV_LOHALF(*rA);
1245 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1246 bl = (signed32)(signed16)EV_LOHALF(*rB);
1247 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1250 th = EV_ACCHIGH - t1;
1251 tl = EV_ACCLOW - t2;
1252 ovh = EV_SAT_P_S32(th);
1253 ovl = EV_SAT_P_S32(tl);
1254 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1255 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1256 EV_SET_SPEFSCR_OV(ovl, ovh);
1257 //printf("evmhossianw: ACC = %08x; *rSh = %08x; *rS = %08x\n", ACC, *rSh, *rS);
1258 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1260 0.4,6.RS,11.RA,16.RB,21.1423:EVX:e500:evmhosmfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate Negative into Words
1261 signed32 al, ah, bl, bh;
1262 signed64 t1, t2, tl, th;
1263 al = (signed32)(signed16)EV_LOHALF(*rA);
1264 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1265 bl = (signed32)(signed16)EV_LOHALF(*rB);
1266 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1267 t1 = ((signed64)ah * bh) << 1;
1268 t2 = ((signed64)al * bl) << 1;
1269 th = EV_ACCHIGH - (t1 & 0xffffffff);
1270 tl = EV_ACCLOW - (t2 & 0xffffffff);
1271 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1272 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1274 0.4,6.RS,11.RA,16.RB,21.1421:EVX:e500:evmhosmianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate Negative into Words
1275 signed32 al, ah, bl, bh;
1276 signed64 t1, t2, tl, th;
1277 al = (signed32)(signed16)EV_LOHALF(*rA);
1278 ah = (signed32)(signed16)EV_LOHALF(*rAh);
1279 bl = (signed32)(signed16)EV_LOHALF(*rB);
1280 bh = (signed32)(signed16)EV_LOHALF(*rBh);
1283 th = EV_ACCHIGH - t1;
1284 tl = EV_ACCLOW - t2;
1285 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1286 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1288 0.4,6.RS,11.RA,16.RB,21.1412:EVX:e500:evmhousianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate Negative into Words
1289 unsigned32 al, ah, bl, bh;
1293 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1294 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
1295 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1296 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
1299 th = (signed64)EV_ACCHIGH - (signed64)t1;
1300 tl = (signed64)EV_ACCLOW - (signed64)t2;
1301 ovl = EV_SAT_P_U32(tl);
1302 ovh = EV_SAT_P_U32(th);
1303 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
1304 EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
1305 //printf("evmhousianw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
1306 //printf("evmoussianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1307 EV_SET_SPEFSCR_OV(ovl, ovh);
1308 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1310 0.4,6.RS,11.RA,16.RB,21.1420:EVX:e500:evmhoumianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate Negative into Words
1311 unsigned32 al, ah, bl, bh;
1314 al = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1315 ah = (unsigned32)(unsigned16)EV_LOHALF(*rAh);
1316 bl = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1317 bh = (unsigned32)(unsigned16)EV_LOHALF(*rBh);
1320 th = EV_ACCHIGH - t1;
1321 tl = EV_ACCLOW - t2;
1322 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1323 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1325 0.4,6.RS,11.RA,16.RB,21.1411:EVX:e500:evmhessfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate Negative into Words
1326 signed16 al, ah, bl, bh;
1329 int movl, movh, ovl, ovh;
1331 al = (signed16) EV_HIHALF (*rA);
1332 ah = (signed16) EV_HIHALF (*rAh);
1333 bl = (signed16) EV_HIHALF (*rB);
1334 bh = (signed16) EV_HIHALF (*rBh);
1335 t1 = ev_multiply16_ssf (ah, bh, &movh);
1336 t2 = ev_multiply16_ssf (al, bl, &movl);
1337 th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
1338 tl = EV_ACCLOW - EV_SATURATE (movl, 0x7fffffff, t2);
1339 ovh = EV_SAT_P_S32 (th);
1340 ovl = EV_SAT_P_S32 (tl);
1341 EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
1342 EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
1343 EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
1344 PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1346 0.4,6.RS,11.RA,16.RB,21.1409:EVX:e500:evmhessianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate Negative into Words
1347 signed32 al, ah, bl, bh;
1348 signed64 t1, t2, tl, th;
1350 al = (signed32)(signed16)EV_HIHALF(*rA);
1351 ah = (signed32)(signed16)EV_HIHALF(*rAh);
1352 bl = (signed32)(signed16)EV_HIHALF(*rB);
1353 bh = (signed32)(signed16)EV_HIHALF(*rBh);
1356 th = EV_ACCHIGH - t1;
1357 tl = EV_ACCLOW - t2;
1358 ovh = EV_SAT_P_S32(th);
1359 ovl = EV_SAT_P_S32(tl);
1360 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1361 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1362 EV_SET_SPEFSCR_OV(ovl, ovh);
1363 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1365 0.4,6.RS,11.RA,16.RB,21.1419:EVX:e500:evmhesmfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate Negative into Words
1366 signed32 al, ah, bl, bh;
1367 signed64 t1, t2, tl, th;
1368 al = (signed32)(signed16)EV_HIHALF(*rA);
1369 ah = (signed32)(signed16)EV_HIHALF(*rAh);
1370 bl = (signed32)(signed16)EV_HIHALF(*rB);
1371 bh = (signed32)(signed16)EV_HIHALF(*rBh);
1372 t1 = ((signed64)ah * bh) << 1;
1373 t2 = ((signed64)al * bl) << 1;
1374 th = EV_ACCHIGH - (t1 & 0xffffffff);
1375 tl = EV_ACCLOW - (t2 & 0xffffffff);
1376 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1377 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1379 0.4,6.RS,11.RA,16.RB,21.1417:EVX:e500:evmhesmianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate Negative into Words
1380 signed32 al, ah, bl, bh;
1381 signed64 t1, t2, tl, th;
1382 al = (signed32)(signed16)EV_HIHALF(*rA);
1383 ah = (signed32)(signed16)EV_HIHALF(*rAh);
1384 bl = (signed32)(signed16)EV_HIHALF(*rB);
1385 bh = (signed32)(signed16)EV_HIHALF(*rBh);
1388 th = EV_ACCHIGH - t1;
1389 tl = EV_ACCLOW - t2;
1390 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1391 //printf("evmhesmianw: al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", al, ah, bl, bh, t1, t2, tl, th);
1392 //printf("evmhesmianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1393 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1395 0.4,6.RS,11.RA,16.RB,21.1408:EVX:e500:evmheusianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate Negative into Words
1396 unsigned32 al, ah, bl, bh;
1400 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1401 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
1402 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1403 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
1406 th = (signed64)EV_ACCHIGH - (signed64)t1;
1407 tl = (signed64)EV_ACCLOW - (signed64)t2;
1408 ovl = EV_SAT_P_U32(tl);
1409 ovh = EV_SAT_P_U32(th);
1410 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
1411 EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
1412 //printf("evmheusianw: ovh %d ovl %d al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
1413 //printf("evmheusianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1414 EV_SET_SPEFSCR_OV(ovl, ovh);
1415 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1417 0.4,6.RS,11.RA,16.RB,21.1416:EVX:e500:evmheumianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate Negative into Words
1418 unsigned32 al, ah, bl, bh;
1421 al = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1422 ah = (unsigned32)(unsigned16)EV_HIHALF(*rAh);
1423 bl = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1424 bh = (unsigned32)(unsigned16)EV_HIHALF(*rBh);
1427 th = EV_ACCHIGH - t1;
1428 tl = EV_ACCLOW - t2;
1429 EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
1430 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1432 0.4,6.RS,11.RA,16.RB,21.1327:EVX:e500:evmhogsmfaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate
1435 a = (signed32)(signed16)EV_LOHALF(*rA);
1436 b = (signed32)(signed16)EV_LOHALF(*rB);
1437 t1 = EV_MUL16_SSF(a, b);
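/* EV_MUL16_SSF apparently yields an (at most 33-bit) fractional
   product; if bit 32 is set, propagate that sign into the upper bits
   so the 64-bit accumulation below sees a properly signed value.  */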
1438 if (t1 & ((unsigned64)1 << 32))
1439 t1 |= 0xfffffffe00000000;
1441 EV_SET_REG1_ACC(*rSh, *rS, t2);
1442 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1444 0.4,6.RS,11.RA,16.RB,21.1325:EVX:e500:evmhogsmiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate
1447 a = (signed32)(signed16)EV_LOHALF(*rA);
1448 b = (signed32)(signed16)EV_LOHALF(*rB);
1449 t1 = (signed64)a * (signed64)b;
1450 t2 = (signed64)ACC + t1;
1451 EV_SET_REG1_ACC(*rSh, *rS, t2);
1452 //printf("evmhogsmiaa: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
1453 //printf("evmhogsmiaa: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1454 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1456 0.4,6.RS,11.RA,16.RB,21.1324:EVX:e500:evmhogumiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate
1459 a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1460 b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1463 EV_SET_REG1_ACC(*rSh, *rS, t2);
1464 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1466 0.4,6.RS,11.RA,16.RB,21.1323:EVX:e500:evmhegsmfaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate
1469 a = (signed32)(signed16)EV_HIHALF(*rA);
1470 b = (signed32)(signed16)EV_HIHALF(*rB);
1471 t1 = EV_MUL16_SSF(a, b);
1472 if (t1 & ((unsigned64)1 << 32))
1473 t1 |= 0xfffffffe00000000;
1475 EV_SET_REG1_ACC(*rSh, *rS, t2);
1476 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1478 0.4,6.RS,11.RA,16.RB,21.1321:EVX:e500:evmhegsmiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate
1481 a = (signed32)(signed16)EV_HIHALF(*rA);
1482 b = (signed32)(signed16)EV_HIHALF(*rB);
1483 t1 = (signed64)(a * b);
1485 EV_SET_REG1_ACC(*rSh, *rS, t2);
1486 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1488 0.4,6.RS,11.RA,16.RB,21.1320:EVX:e500:evmhegumiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate
1491 a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1492 b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1495 EV_SET_REG1_ACC(*rSh, *rS, t2);
1496 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1499 0.4,6.RS,11.RA,16.RB,21.1455:EVX:e500:evmhogsmfan %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate Negative
1502 a = (signed32)(signed16)EV_LOHALF(*rA);
1503 b = (signed32)(signed16)EV_LOHALF(*rB);
1504 t1 = EV_MUL16_SSF(a, b);
1505 if (t1 & ((unsigned64)1 << 32))
1506 t1 |= 0xfffffffe00000000;
1508 EV_SET_REG1_ACC(*rSh, *rS, t2);
1509 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1511 0.4,6.RS,11.RA,16.RB,21.1453:EVX:e500:evmhogsmian %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate Negative
1514 a = (signed32)(signed16)EV_LOHALF(*rA);
1515 b = (signed32)(signed16)EV_LOHALF(*rB);
1516 t1 = (signed64)a * (signed64)b;
1518 EV_SET_REG1_ACC(*rSh, *rS, t2);
1519 //printf("evmhogsmian: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
1520 //printf("evmhogsmian: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1521 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1523 0.4,6.RS,11.RA,16.RB,21.1452:EVX:e500:evmhogumian %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate Negative
1526 a = (unsigned32)(unsigned16)EV_LOHALF(*rA);
1527 b = (unsigned32)(unsigned16)EV_LOHALF(*rB);
1528 t1 = (unsigned64)a * (unsigned64)b;
1530 EV_SET_REG1_ACC(*rSh, *rS, t2);
1531 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1533 0.4,6.RS,11.RA,16.RB,21.1451:EVX:e500:evmhegsmfan %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate Negative
1536 a = (signed32)(signed16)EV_HIHALF(*rA);
1537 b = (signed32)(signed16)EV_HIHALF(*rB);
1538 t1 = EV_MUL16_SSF(a, b);
1539 if (t1 & ((unsigned64)1 << 32))
1540 t1 |= 0xfffffffe00000000;
1542 EV_SET_REG1_ACC(*rSh, *rS, t2);
1543 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1545 0.4,6.RS,11.RA,16.RB,21.1449:EVX:e500:evmhegsmian %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate Negative
1548 a = (signed32)(signed16)EV_HIHALF(*rA);
1549 b = (signed32)(signed16)EV_HIHALF(*rB);
1550 t1 = (signed64)a * (signed64)b;
1552 EV_SET_REG1_ACC(*rSh, *rS, t2);
1553 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1555 0.4,6.RS,11.RA,16.RB,21.1448:EVX:e500:evmhegumian %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate Negative
1558 a = (unsigned32)(unsigned16)EV_HIHALF(*rA);
1559 b = (unsigned32)(unsigned16)EV_HIHALF(*rB);
1560 t1 = (unsigned64)a * (unsigned64)b;
1562 EV_SET_REG1_ACC(*rSh, *rS, t2);
1563 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1566 0.4,6.RS,11.RA,16.RB,21.1095:EVX:e500:evmwhssf %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional
1567 signed32 al, ah, bl, bh;
1574 t1 = ev_multiply32_ssf(al, bl, &movl);
1575 t2 = ev_multiply32_ssf(ah, bh, &movh);
1576 EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
1577 EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
1578 EV_SET_SPEFSCR_OV(movl, movh);
1579 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1581 0.4,6.RS,11.RA,16.RB,21.1127:EVX:e500:evmwhssfa %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional and Accumulate
1582 signed32 al, ah, bl, bh;
1589 t1 = ev_multiply32_ssf(al, bl, &movl);
1590 t2 = ev_multiply32_ssf(ah, bh, &movh);
1591 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
1592 EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
1593 EV_SET_SPEFSCR_OV(movl, movh);
1594 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1596 0.4,6.RS,11.RA,16.RB,21.1103:EVX:e500:evmwhsmf %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional
1597 signed32 al, ah, bl, bh;
1603 t1 = EV_MUL32_SSF(al, bl);
1604 t2 = EV_MUL32_SSF(ah, bh);
1605 EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
1606 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1608 0.4,6.RS,11.RA,16.RB,21.1135:EVX:e500:evmwhsmfa %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional and Accumulate
1609 signed32 al, ah, bl, bh;
1615 t1 = EV_MUL32_SSF(al, bl);
1616 t2 = EV_MUL32_SSF(ah, bh);
1617 EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
1618 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1620 0.4,6.RS,11.RA,16.RB,21.1101:EVX:e500:evmwhsmi %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer
1621 signed32 al, ah, bl, bh;
1627 t1 = (signed64)al * (signed64)bl;
1628 t2 = (signed64)ah * (signed64)bh;
1629 EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
1630 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1632 0.4,6.RS,11.RA,16.RB,21.1133:EVX:e500:evmwhsmia %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer and Accumulate
1633 signed32 al, ah, bl, bh;
1639 t1 = (signed64)al * (signed64)bl;
1640 t2 = (signed64)ah * (signed64)bh;
1641 EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
1642 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1644 0.4,6.RS,11.RA,16.RB,21.1100:EVX:e500:evmwhumi %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer
1645 unsigned32 al, ah, bl, bh;
1651 t1 = (unsigned64)al * (unsigned64)bl;
1652 t2 = (unsigned64)ah * (unsigned64)bh;
1653 EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
1654 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1656 0.4,6.RS,11.RA,16.RB,21.1132:EVX:e500:evmwhumia %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer and Accumulate
1657 unsigned32 al, ah, bl, bh;
1663 t1 = (unsigned64)al * (unsigned64)bl;
1664 t2 = (unsigned64)ah * (unsigned64)bh;
1665 EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
1666 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1669 0.4,6.RS,11.RA,16.RB,21.1091:EVX:e500:evmwlssf %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional
1670 signed32 al, ah, bl, bh;
1677 t1 = ev_multiply32_ssf(al, bl, &movl);
1678 t2 = ev_multiply32_ssf(ah, bh, &movh);
1679 EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
1680 EV_SATURATE(movl, 0xffffffff, t1));
1681 EV_SET_SPEFSCR_OV(movl, movh);
1682 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1684 0.4,6.RS,11.RA,16.RB,21.1123:EVX:e500:evmwlssfa %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate
1685 signed32 al, ah, bl, bh;
1692 t1 = ev_multiply32_ssf(al, bl, &movl);
1693 t2 = ev_multiply32_ssf(ah, bh, &movh);
1694 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
1695 EV_SATURATE(movl, 0xffffffff, t1));
1696 EV_SET_SPEFSCR_OV(movl, movh);
1697 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1699 0.4,6.RS,11.RA,16.RB,21.1099:EVX:e500:evmwlsmf %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional
1700 signed32 al, ah, bl, bh;
1706 t1 = EV_MUL32_SSF(al, bl);
1707 t2 = EV_MUL32_SSF(ah, bh);
1708 EV_SET_REG2(*rSh, *rS, t2, t1);
1709 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1711 0.4,6.RS,11.RA,16.RB,21.1131:EVX:e500:evmwlsmfa %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate
1712 signed32 al, ah, bl, bh;
1718 t1 = EV_MUL32_SSF(al, bl);
1719 t2 = EV_MUL32_SSF(ah, bh);
1720 EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
1721 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1723 0.4,6.RS,11.RA,16.RB,21.1096:EVX:e500:evmwlumi %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer
1724 unsigned32 al, ah, bl, bh;
1730 t1 = (unsigned64)al * (unsigned64)bl;
1731 t2 = (unsigned64)ah * (unsigned64)bh;
1732 EV_SET_REG2(*rSh, *rS, t2, t1);
1733 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1735 0.4,6.RS,11.RA,16.RB,21.1128:EVX:e500:evmwlumia %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate
1736 unsigned32 al, ah, bl, bh;
1742 t1 = (unsigned64)al * (unsigned64)bl;
1743 t2 = (unsigned64)ah * (unsigned64)bh;
1744 EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
1745 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1748 0.4,6.RS,11.RA,16.RB,21.1347:EVX:e500:evmwlssfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate in Words
1749 signed32 al, ah, bl, bh;
1750 signed64 t1, t2, tl, th;
1751 int movl, movh, ovl, ovh;
1756 t1 = ev_multiply32_ssf(ah, bh, &movh);
1757 t2 = ev_multiply32_ssf(al, bl, &movl);
1758 th = EV_ACCHIGH + EV_SATURATE(movh, 0xffffffff, t1);
1759 tl = EV_ACCLOW + EV_SATURATE(movl, 0xffffffff, t2);
1760 ovh = EV_SAT_P_S32(th);
1761 ovl = EV_SAT_P_S32(tl);
1762 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1763 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1764 EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
1765 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1767 0.4,6.RS,11.RA,16.RB,21.1345:EVX:e500:evmwlssiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate in Words
1768 signed32 al, ah, bl, bh;
1769 signed64 t1, t2, tl, th;
1775 t1 = (signed64)ah * (signed64)bh;
1776 t2 = (signed64)al * (signed64)bl;
1777 th = EV_ACCHIGH + (t1 & 0xffffffff);
1778 tl = EV_ACCLOW + (t2 & 0xffffffff);
1779 ovh = EV_SAT_P_S32(th);
1780 ovl = EV_SAT_P_S32(tl);
1781 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1782 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1783 EV_SET_SPEFSCR_OV(ovl, ovh);
1784 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1786 0.4,6.RS,11.RA,16.RB,21.1355:EVX:e500:evmwlsmfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate in Words
1787 signed32 al, ah, bl, bh;
1794 t1 = ev_multiply32_smf(ah, bh, &mov);
1795 t2 = ev_multiply32_smf(al, bl, &mov);
1796 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
1797 EV_ACCLOW + (t2 & 0xffffffff));
1798 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1800 0.4,6.RS,11.RA,16.RB,21.1353:EVX:e500:evmwlsmiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate in Words
1801 signed32 al, ah, bl, bh;
1807 t1 = (signed64)ah * (signed64)bh;
1808 t2 = (signed64)al * (signed64)bl;
1809 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
1810 EV_ACCLOW + (t2 & 0xffffffff));
1811 //printf("evmwlsmiaaw: al %d ah %d bl %d bh %d t1 %qd t2 %qd\n", al, ah, bl, bh, t1, t2);
1812 //printf("evmwlsmiaaw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
1813 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1815 0.4,6.RS,11.RA,16.RB,21.1344:EVX:e500:evmwlusiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate in Words
1816 unsigned32 al, ah, bl, bh;
1817 unsigned64 t1, t2, tl, th;
1823 t1 = (unsigned64)ah * (unsigned64)bh;
1824 t2 = (unsigned64)al * (unsigned64)bl;
1825 th = EV_ACCHIGH + (t1 & 0xffffffff);
1826 tl = EV_ACCLOW + (t2 & 0xffffffff);
1829 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
1830 EV_SATURATE(ovl, 0xffffffff, tl));
1831 EV_SET_SPEFSCR_OV(ovl, ovh);
1832 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1834 0.4,6.RS,11.RA,16.RB,21.1352:EVX:e500:evmwlumiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate in Words
1835 unsigned32 al, ah, bl, bh;
1841 t1 = (unsigned64)ah * (unsigned64)bh;
1842 t2 = (unsigned64)al * (unsigned64)bl;
1843 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
1844 EV_ACCLOW + (t2 & 0xffffffff));
1845 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1848 0.4,6.RS,11.RA,16.RB,21.1475:EVX:e500:evmwlssfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate Negative in Words
1849 signed32 al, ah, bl, bh;
1850 signed64 t1, t2, tl, th;
1851 int movl, movh, ovl, ovh;
1856 t1 = ev_multiply32_ssf(ah, bh, &movh);
1857 t2 = ev_multiply32_ssf(al, bl, &movl);
1858 th = EV_ACCHIGH - EV_SATURATE(movh, 0xffffffff, t1);
1859 tl = EV_ACCLOW - EV_SATURATE(movl, 0xffffffff, t2);
1860 ovh = EV_SAT_P_S32(th);
1861 ovl = EV_SAT_P_S32(tl);
1862 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1863 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1864 EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
1865 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1867 0.4,6.RS,11.RA,16.RB,21.1473:EVX:e500:evmwlssianw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate Negative in Words
1868 signed32 al, ah, bl, bh;
1869 signed64 t1, t2, tl, th;
1875 t1 = (signed64)ah * (signed64)bh;
1876 t2 = (signed64)al * (signed64)bl;
1877 th = EV_ACCHIGH - (t1 & 0xffffffff);
1878 tl = EV_ACCLOW - (t2 & 0xffffffff);
1879 ovh = EV_SAT_P_S32(th);
1880 ovl = EV_SAT_P_S32(tl);
1881 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
1882 EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
1883 EV_SET_SPEFSCR_OV(ovl, ovh);
1884 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1886 0.4,6.RS,11.RA,16.RB,21.1483:EVX:e500:evmwlsmfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate Negative in Words
1887 signed32 al, ah, bl, bh;
1894 t1 = ev_multiply32_smf(ah, bh, &mov);
1895 t2 = ev_multiply32_smf(al, bl, &mov);
1896 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
1897 EV_ACCLOW - (t2 & 0xffffffff));
1898 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1900 0.4,6.RS,11.RA,16.RB,21.1481:EVX:e500:evmwlsmianw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate Negative in Words
1901 signed32 al, ah, bl, bh;
1907 t1 = (signed64)ah * (signed64)bh;
1908 t2 = (signed64)al * (signed64)bl;
1909 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
1910 EV_ACCLOW - (t2 & 0xffffffff));
1911 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1913 0.4,6.RS,11.RA,16.RB,21.1472:EVX:e500:evmwlusianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate Negative in Words
1914 unsigned32 al, ah, bl, bh;
1915 unsigned64 t1, t2, tl, th;
1921 t1 = (unsigned64)ah * (unsigned64)bh;
1922 t2 = (unsigned64)al * (unsigned64)bl;
1923 th = EV_ACCHIGH - (t1 & 0xffffffff);
1924 tl = EV_ACCLOW - (t2 & 0xffffffff);
1927 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
1928 EV_SATURATE(ovl, 0xffffffff, tl));
1929 //printf("evmwlusianw: ovl %d ovh %d al %d ah %d bl %d bh %d t1 %qd t2 %qd th %qd tl %qd\n", ovl, ovh, al, ah, al, bh, t1, t2, th, tl);
1930 //printf("evmwlusianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
1931 EV_SET_SPEFSCR_OV(ovl, ovh);
1932 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1934 0.4,6.RS,11.RA,16.RB,21.1480:EVX:e500:evmwlumianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate Negative in Words
1935 unsigned32 al, ah, bl, bh;
1941 t1 = (unsigned64)ah * (unsigned64)bh;
1942 t2 = (unsigned64)al * (unsigned64)bl;
1943 EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
1944 EV_ACCLOW - (t2 & 0xffffffff));
1945 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1948 0.4,6.RS,11.RA,16.RB,21.1107:EVX:e500:evmwssf %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional
1954 t = ev_multiply32_ssf(a, b, &movl);
1955 EV_SET_REG1(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
1956 EV_SET_SPEFSCR_OV(movl, 0);
1957 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1959 0.4,6.RS,11.RA,16.RB,21.1139:EVX:e500:evmwssfa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate
1965 t = ev_multiply32_ssf(a, b, &movl);
1966 EV_SET_REG1_ACC(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
1967 EV_SET_SPEFSCR_OV(movl, 0);
1968 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
1970 0.4,6.RS,11.RA,16.RB,21.1115:EVX:e500:evmwsmf %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional
1976 t = ev_multiply32_smf(a, b, &movl);
1977 EV_SET_REG1(*rSh, *rS, t);
1978 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1980 0.4,6.RS,11.RA,16.RB,21.1147:EVX:e500:evmwsmfa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate
1986 t = ev_multiply32_smf(a, b, &movl);
1987 EV_SET_REG1_ACC(*rSh, *rS, t);
1988 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1990 0.4,6.RS,11.RA,16.RB,21.1113:EVX:e500:evmwsmi %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer
1996 t = (signed64)a * (signed64)b;
1997 EV_SET_REG1(*rSh, *rS, t);
1998 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2000 0.4,6.RS,11.RA,16.RB,21.1145:EVX:e500:evmwsmia %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate
2006 t = (signed64)a * (signed64)b;
2007 EV_SET_REG1_ACC(*rSh, *rS, t);
2008 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2010 0.4,6.RS,11.RA,16.RB,21.1112:EVX:e500:evmwumi %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer
2016 t = (unsigned64)a * (unsigned64)b;
2017 EV_SET_REG1(*rSh, *rS, t);
2018 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2020 0.4,6.RS,11.RA,16.RB,21.1144:EVX:e500:evmwumia %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate
2026 t = (unsigned64)a * (unsigned64)b;
2027 EV_SET_REG1_ACC(*rSh, *rS, t);
2028 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2031 0.4,6.RS,11.RA,16.RB,21.1363:EVX:e500:evmwssfaa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional Add and Accumulate
2037 t1 = ev_multiply32_ssf(a, b, &movl);
2038 t2 = ACC + EV_SATURATE(movl, 0x7fffffffffffffff, t1);
2039 EV_SET_REG1_ACC(*rSh, *rS, t2);
2040 EV_SET_SPEFSCR_OV(movl, 0);
2041 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2043 0.4,6.RS,11.RA,16.RB,21.1371:EVX:e500:evmwsmfaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional Add and Accumulate
2049 t1 = ev_multiply32_smf(a, b, &movl);
2051 EV_SET_REG1_ACC(*rSh, *rS, t2);
2052 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2054 0.4,6.RS,11.RA,16.RB,21.1369:EVX:e500:evmwsmiaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer Add and Accumulate
2059 t1 = (signed64)a * (signed64)b;
2061 EV_SET_REG1_ACC(*rSh, *rS, t2);
2062 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2064 0.4,6.RS,11.RA,16.RB,21.1368:EVX:e500:evmwumiaa %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer Add and Accumulate
2069 t1 = (unsigned64)a * (unsigned64)b;
2071 EV_SET_REG1_ACC(*rSh, *rS, t2);
2072 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2075 0.4,6.RS,11.RA,16.RB,21.1491:EVX:e500:evmwssfan %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate Negative
2081 t1 = ev_multiply32_ssf(a, b, &movl);
2082 t2 = ACC - EV_SATURATE(movl, 0x7fffffffffffffff, t1);
2083 EV_SET_REG1_ACC(*rSh, *rS, t2);
2084 EV_SET_SPEFSCR_OV(movl, 0);
2085 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2087 0.4,6.RS,11.RA,16.RB,21.1499:EVX:e500:evmwsmfan %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate Negative
2093 t1 = ev_multiply32_smf(a, b, &movl);
2095 EV_SET_REG1_ACC(*rSh, *rS, t2);
2096 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2098 0.4,6.RS,11.RA,16.RB,21.1497:EVX:e500:evmwsmian %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate Negative
2103 t1 = (signed64)a * (signed64)b;
2105 EV_SET_REG1_ACC(*rSh, *rS, t2);
2106 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2108 0.4,6.RS,11.RA,16.RB,21.1496:EVX:e500:evmwumian %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate Negative
2113 t1 = (unsigned64)a * (unsigned64)b;
2115 EV_SET_REG1_ACC(*rSh, *rS, t2);
2116 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
2119 0.4,6.RS,11.RA,16.0,21.1217:EVX:e500:evaddssiaaw %RS,%RA:Vector Add Signed Saturate Integer to Accumulator Word
2125 t1 = (signed64)EV_ACCHIGH + (signed64)ah;
2126 t2 = (signed64)EV_ACCLOW + (signed64)al;
2127 ovh = EV_SAT_P_S32(t1);
2128 ovl = EV_SAT_P_S32(t2);
2129 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t1),
2130 EV_SATURATE_ACC(ovl, t2 & ((unsigned64)1 << 32), 0x80000000, 0x7fffffff, t2));
2131 EV_SET_SPEFSCR_OV(ovl, ovh);
2132 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2134 0.4,6.RS,11.RA,16.0,21.1225:EVX:e500:evaddsmiaaw %RS,%RA:Vector Add Signed Modulo Integer to Accumulator Word
2139 t1 = (signed64)EV_ACCHIGH + (signed64)ah;
2140 t2 = (signed64)EV_ACCLOW + (signed64)al;
2141 EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2142 //printf("evaddsmiaaw: al %d ah %d t1 %qd t2 %qd\n", al, ah, t1, t2);
2143 //printf("evaddsmiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
2144 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2146 0.4,6.RS,11.RA,16.0,21.1216:EVX:e500:evaddusiaaw %RS,%RA:Vector Add Unsigned Saturate Integer to Accumulator Word
2152 t1 = (signed64)EV_ACCHIGH + (signed64)ah;
2153 t2 = (signed64)EV_ACCLOW + (signed64)al;
2154 ovh = EV_SAT_P_U32(t1);
2155 ovl = EV_SAT_P_U32(t2);
2156 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, t1),
2157 EV_SATURATE(ovl, 0xffffffff, t2));
2158 //printf("evaddusiaaw: ovl %d ovh %d al %d ah %d t1 %qd t2 %qd\n", ovl, ovh, al, ah, t1, t2);
2159 //printf("evaddusiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
2160 EV_SET_SPEFSCR_OV(ovl, ovh);
2161 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2163 0.4,6.RS,11.RA,16.0,21.1224:EVX:e500:evaddumiaaw %RS,%RA:Vector Add Unsigned Modulo Integer to Accumulator Word
2168 t1 = (unsigned64)EV_ACCHIGH + (unsigned64)ah;
2169 t2 = (unsigned64)EV_ACCLOW + (unsigned64)al;
2170 EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2171 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2174 0.4,6.RS,11.RA,16.0,21.1219:EVX:e500:evsubfssiaaw %RS,%RA:Vector Subtract Signed Saturate Integer to Accumulator Word
2180 t1 = (signed64)EV_ACCHIGH - (signed64)ah;
2181 t2 = (signed64)EV_ACCLOW - (signed64)al;
2182 ovh = EV_SAT_P_S32(t1);
2183 ovl = EV_SAT_P_S32(t2);
2184 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1, 0x80000000, 0x7fffffff, t1),
2185 EV_SATURATE_ACC(ovl, t2, 0x80000000, 0x7fffffff, t2));
2186 EV_SET_SPEFSCR_OV(ovl, ovh);
2187 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2189 0.4,6.RS,11.RA,16.0,21.1227:EVX:e500:evsubfsmiaaw %RS,%RA:Vector Subtract Signed Modulo Integer to Accumulator Word
2194 t1 = (signed64)EV_ACCHIGH - (signed64)ah;
2195 t2 = (signed64)EV_ACCLOW - (signed64)al;
2196 EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2197 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2199 0.4,6.RS,11.RA,16.0,21.1218:EVX:e500:evsubfusiaaw %RS,%RA:Vector Subtract Unsigned Saturate Integer to Accumulator Word
2206 t1 = (signed64)EV_ACCHIGH - (signed64)ah;
2207 t2 = (signed64)EV_ACCLOW - (signed64)al;
2208 ovh = EV_SAT_P_U32(t1);
2209 ovl = EV_SAT_P_U32(t2);
2210 EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0, t1),
2211 EV_SATURATE(ovl, 0, t2));
2212 EV_SET_SPEFSCR_OV(ovl, ovh);
2213 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
2215 0.4,6.RS,11.RA,16.0,21.1226:EVX:e500:evsubfumiaaw %RS,%RA:Vector Subtract Unsigned Modulo Integer to Accumulator Word
2220 t1 = (unsigned64)EV_ACCHIGH - (unsigned64)ah;
2221 t2 = (unsigned64)EV_ACCLOW - (unsigned64)al;
2222 EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
2223 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2226 0.4,6.RS,11.RA,16.0,21.1220:EVX:e500:evmra %RS,%RA:Initialize Accumulator
2227 EV_SET_REG2_ACC(*rSh, *rS, *rAh, *rA);
2228 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2230 0.4,6.RS,11.RA,16.RB,21.1222:EVX:e500:evdivws %RS,%RA,%RB:Vector Divide Word Signed
2231 signed32 dividendh, dividendl, divisorh, divisorl;
2238 if (dividendh < 0 && divisorh == 0) {
2241 } else if (dividendh > 0 && divisorh == 0) {
2244 } else if (dividendh == 0x80000000 && divisorh == -1) {
2248 w1 = dividendh / divisorh;
2251 if (dividendl < 0 && divisorl == 0) {
2254 } else if (dividendl > 0 && divisorl == 0) {
2257 } else if (dividendl == 0x80000000 && divisorl == -1) {
2261 w2 = dividendl / divisorl;
2264 EV_SET_REG2(*rSh, *rS, w1, w2);
2265 EV_SET_SPEFSCR_OV(ovl, ovh);
2266 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
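# evdivws guards the two cases a signed 32-bit divide cannot express directly:
# division by zero and 0x80000000 / -1.  The elided branch bodies above are
# not shown here; this sketch assumes they clamp to the nearest representable
# result and raise the per-lane overflow flag, which is what the visible
# structure suggests.  Standalone C restatement under that assumption:
#
#   #include <stdint.h>
#
#   static int32_t div_w_signed (int32_t n, int32_t d, int *ov)
#   {
#     *ov = 1;
#     if (d == 0)
#       return (n < 0) ? INT32_MIN : INT32_MAX;  /* saturate on divide by zero */
#     if (n == INT32_MIN && d == -1)
#       return INT32_MAX;                        /* quotient would overflow */
#     *ov = 0;
#     return n / d;
#   }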
2269 0.4,6.RS,11.RA,16.RB,21.1223:EVX:e500:evdivwu %RS,%RA,%RB:Vector Divide Word Unsigned
2270 unsigned32 dividendh, dividendl, divisorh, divisorl;
2277 if (divisorh == 0) {
2281 w1 = dividendh / divisorh;
2284 if (divisorl == 0) {
2288 w2 = dividendl / divisorl;
2291 EV_SET_REG2(*rSh, *rS, w1, w2);
2292 EV_SET_SPEFSCR_OV(ovl, ovh);
2293 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2297 # A.2.9 Floating Point SPE Instructions
2300 0.4,6.RS,11.RA,16.0,21.644:EVX:e500:evfsabs %RS,%RA:Vector Floating-Point Absolute Value
2302 w1 = *rAh & 0x7fffffff;
2303 w2 = *rA & 0x7fffffff;
2304 EV_SET_REG2(*rSh, *rS, w1, w2);
2305 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2307 0.4,6.RS,11.RA,16.0,21.645:EVX:e500:evfsnabs %RS,%RA:Vector Floating-Point Negative Absolute Value
2309 w1 = *rAh | 0x80000000;
2310 w2 = *rA | 0x80000000;
2311 EV_SET_REG2(*rSh, *rS, w1, w2);
2312 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2314 0.4,6.RS,11.RA,16.0,21.646:EVX:e500:evfsneg %RS,%RA:Vector Floating-Point Negate
2318 w1 = (w1 & 0x7fffffff) | ((~w1) & 0x80000000);
2319 w2 = (w2 & 0x7fffffff) | ((~w2) & 0x80000000);
2320 EV_SET_REG2(*rSh, *rS, w1, w2);
2321 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2323 0.4,6.RS,11.RA,16.RB,21.640:EVX:e500:evfsadd %RS,%RA,%RB:Vector Floating-Point Add
2325 w1 = ev_fs_add (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
2326 w2 = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
2327 EV_SET_REG2(*rSh, *rS, w1, w2);
2328 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2330 0.4,6.RS,11.RA,16.RB,21.641:EVX:e500:evfssub %RS,%RA,%RB:Vector Floating-Point Subtract
2332 w1 = ev_fs_sub (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
2333 w2 = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
2334 EV_SET_REG2(*rSh, *rS, w1, w2);
2335 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2337 0.4,6.RS,11.RA,16.RB,21.648:EVX:e500:evfsmul %RS,%RA,%RB:Vector Floating-Point Multiply
2339 w1 = ev_fs_mul (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
2340 w2 = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
2341 EV_SET_REG2(*rSh, *rS, w1, w2);
2342 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2344 0.4,6.RS,11.RA,16.RB,21.649:EVX:e500:evfsdiv %RS,%RA,%RB:Vector Floating-Point Divide
2346 w1 = ev_fs_div (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fdbzh, spefscr_fgh, spefscr_fxh, processor);
2347 w2 = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
2348 EV_SET_REG2(*rSh, *rS, w1, w2);
2349 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2351 0.4,6.BF,9./,11.RA,16.RB,21.652:EVX:e500:evfscmpgt %BF,%RA,%RB:Vector Floating-Point Compare Greater Than
2352 sim_fpu al, ah, bl, bh;
2354 sim_fpu_32to (&al, *rA);
2355 sim_fpu_32to (&ah, *rAh);
2356 sim_fpu_32to (&bl, *rB);
2357 sim_fpu_32to (&bh, *rBh);
2358 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2359 EV_SET_SPEFSCR_BITS(spefscr_finv);
2360 if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
2361 EV_SET_SPEFSCR_BITS(spefscr_finvh);
2362 if (sim_fpu_is_gt(&ah, &bh))
2366 if (sim_fpu_is_gt(&al, &bl))
2370 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2372 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2374 0.4,6.BF,9./,11.RA,16.RB,21.653:EVX:e500:evfscmplt %BF,%RA,%RB:Vector Floating-Point Compare Less Than
2375 sim_fpu al, ah, bl, bh;
2377 sim_fpu_32to (&al, *rA);
2378 sim_fpu_32to (&ah, *rAh);
2379 sim_fpu_32to (&bl, *rB);
2380 sim_fpu_32to (&bh, *rBh);
2381 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2382 EV_SET_SPEFSCR_BITS(spefscr_finv);
2383 if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
2384 EV_SET_SPEFSCR_BITS(spefscr_finvh);
2385 if (sim_fpu_is_lt(&ah, &bh))
2389 if (sim_fpu_is_lt(&al, &bl))
2393 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2395 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2397 0.4,6.BF,9./,11.RA,16.RB,21.654:EVX:e500:evfscmpeq %BF,%RA,%RB:Vector Floating-Point Compare Equal
2398 sim_fpu al, ah, bl, bh;
2400 sim_fpu_32to (&al, *rA);
2401 sim_fpu_32to (&ah, *rAh);
2402 sim_fpu_32to (&bl, *rB);
2403 sim_fpu_32to (&bh, *rBh);
2404 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2405 EV_SET_SPEFSCR_BITS(spefscr_finv);
2406 if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
2407 EV_SET_SPEFSCR_BITS(spefscr_finvh);
2408 if (sim_fpu_is_eq(&ah, &bh))
2412 if (sim_fpu_is_eq(&al, &bl))
2416 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2418 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2420 0.4,6.BF,9./,11.RA,16.RB,21.668:EVX:e500:evfststgt %BF,%RA,%RB:Vector Floating-Point Test Greater Than
2421 sim_fpu al, ah, bl, bh;
2423 sim_fpu_32to (&al, *rA);
2424 sim_fpu_32to (&ah, *rAh);
2425 sim_fpu_32to (&bl, *rB);
2426 sim_fpu_32to (&bh, *rBh);
2427 if (sim_fpu_is_gt(&ah, &bh))
2431 if (sim_fpu_is_gt(&al, &bl))
2435 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2437 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2439 0.4,6.BF,9./,11.RA,16.RB,21.669:EVX:e500:evfststlt %BF,%RA,%RB:Vector Floating-Point Test Less Than
2440 sim_fpu al, ah, bl, bh;
2442 sim_fpu_32to (&al, *rA);
2443 sim_fpu_32to (&ah, *rAh);
2444 sim_fpu_32to (&bl, *rB);
2445 sim_fpu_32to (&bh, *rBh);
2446 if (sim_fpu_is_lt(&ah, &bh))
2450 if (sim_fpu_is_lt(&al, &bl))
2454 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2456 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2458 0.4,6.BF,9./,11.RA,16.RB,21.670:EVX:e500:evfststeq %BF,%RA,%RB:Vector Floating-Point Test Equal
2459 sim_fpu al, ah, bl, bh;
2461 sim_fpu_32to (&al, *rA);
2462 sim_fpu_32to (&ah, *rAh);
2463 sim_fpu_32to (&bl, *rB);
2464 sim_fpu_32to (&bh, *rBh);
2465 if (sim_fpu_is_eq(&ah, &bh))
2469 if (sim_fpu_is_eq(&al, &bl))
2473 w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
2475 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
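# All six vector compare/test forms above pack their two per-lane results into
# one 4-bit CR field the same way: the high-lane result in the most-significant
# bit of the field, the low-lane result next, then "either lane", then "both
# lanes".  A trivial C restatement of that packing (illustrative only):
#
#   /* ch, cl are 0 or 1 */
#   static unsigned cr_field_from_lanes (unsigned ch, unsigned cl)
#   {
#     return (ch << 3) | (cl << 2) | ((ch | cl) << 1) | (ch & cl);
#   }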
2477 0.4,6.RS,11.0,16.RB,21.656:EVX:e500:evfscfui %RS,%RB:Vector Convert Floating-Point from Unsigned Integer
2478 unsigned32 f, w1, w2;
2481 sim_fpu_u32to (&b, *rBh, sim_fpu_round_default);
2482 sim_fpu_to32 (&w1, &b);
2483 sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
2484 sim_fpu_to32 (&w2, &b);
2486 EV_SET_REG2(*rSh, *rS, w1, w2);
2487 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2489 0.4,6.RS,11.0,16.RB,21.664:EVX:e500:evfsctuiz %RS,%RB:Vector Convert Floating-Point to Unsigned Integer with Round toward Zero
2493 sim_fpu_32to (&b, *rBh);
2494 sim_fpu_to32u (&w1, &b, sim_fpu_round_zero);
2495 sim_fpu_32to (&b, *rB);
2496 sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
2498 EV_SET_REG2(*rSh, *rS, w1, w2);
2499 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2501 0.4,6.RS,11.0,16.RB,21.657:EVX:e500:evfscfsi %RS,%RB:Vector Convert Floating-Point from Signed Integer
2505 sim_fpu_i32to (&b, *rBh, sim_fpu_round_default);
2506 sim_fpu_to32 (&w1, &b);
2507 sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
2508 sim_fpu_to32 (&w2, &b);
2510 EV_SET_REG2(*rSh, *rS, w1, w2);
2511 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2513 0.4,6.RS,11.0,16.RB,21.658:EVX:e500:evfscfuf %RS,%RB:Vector Convert Floating-Point from Unsigned Fraction
2514 unsigned32 w1, w2, bh, bl;
2517 if (bh == 0xffffffff)
2518 sim_fpu_to32 (&w1, &sim_fpu_one);
2520 sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2521 sim_fpu_u32to (&y, bh, sim_fpu_round_default);
2522 sim_fpu_div (&b, &y, &x);
2523 sim_fpu_to32 (&w1, &b);
2526 if (bl == 0xffffffff)
2527 sim_fpu_to32 (&w2, &sim_fpu_one);
2529 sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2530 sim_fpu_u32to (&y, bl, sim_fpu_round_default);
2531 sim_fpu_div (&b, &y, &x);
2532 sim_fpu_to32 (&w2, &b);
2534 EV_SET_REG2(*rSh, *rS, w1, w2);
2535 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
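# evfscfuf treats each 32-bit word as an unsigned fraction in [0, 1): the
# result is word / 2^32, with the all-ones pattern mapped straight to 1.0 as
# the special case above shows.  The same arithmetic in plain C, using the
# host's double as a stand-in for the sim_fpu intermediate (illustrative
# sketch only; ufrac_to_float() is a hypothetical helper):
#
#   #include <stdint.h>
#
#   static float ufrac_to_float (uint32_t w)
#   {
#     if (w == 0xffffffffu)
#       return 1.0f;                                 /* special-cased, as above */
#     return (float) ((double) w / 4294967296.0);    /* w / 2^32 */
#   }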
2537 0.4,6.RS,11.0,16.RB,21.659:EVX:e500:evfscfsf %RS,%RB:Vector Convert Floating-Point from Signed Fraction
2541 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2542 sim_fpu_i32to (&y, *rBh, sim_fpu_round_default);
2543 sim_fpu_div (&b, &y, &x);
2544 sim_fpu_to32 (&w1, &b);
2546 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2547 sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
2548 sim_fpu_div (&b, &y, &x);
2549 sim_fpu_to32 (&w2, &b);
2551 EV_SET_REG2(*rSh, *rS, w1, w2);
2552 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2554 0.4,6.RS,11.0,16.RB,21.660:EVX:e500:evfsctui %RS,%RB:Vector Convert Floating-Point to Unsigned Integer
2558 sim_fpu_32to (&b, *rBh);
2559 sim_fpu_to32u (&w1, &b, sim_fpu_round_default);
2560 sim_fpu_32to (&b, *rB);
2561 sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
2563 EV_SET_REG2(*rSh, *rS, w1, w2);
2564 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2566 0.4,6.RS,11.0,16.RB,21.661:EVX:e500:evfsctsi %RS,%RB:Vector Convert Floating-Point to Signed Integer
2570 sim_fpu_32to (&b, *rBh);
2571 sim_fpu_to32i (&w1, &b, sim_fpu_round_default);
2572 sim_fpu_32to (&b, *rB);
2573 sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
2575 EV_SET_REG2(*rSh, *rS, w1, w2);
2576 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2578 0.4,6.RS,11.0,16.RB,21.666:EVX:e500:evfsctsiz %RS,%RB:Vector Convert Floating-Point to Signed Integer with Round toward Zero
2582 sim_fpu_32to (&b, *rBh);
2583 sim_fpu_to32i (&w1, &b, sim_fpu_round_zero);
2584 sim_fpu_32to (&b, *rB);
2585 sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
2587 EV_SET_REG2(*rSh, *rS, w1, w2);
2588 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2590 0.4,6.RS,11.0,16.RB,21.662:EVX:e500:evfsctuf %RS,%RB:Vector Convert Floating-Point to Unsigned Fraction
2594 sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2595 sim_fpu_32to (&y, *rBh);
2596 sim_fpu_mul (&b, &y, &x);
2597 sim_fpu_to32u (&w1, &b, sim_fpu_round_default);
2599 sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2600 sim_fpu_32to (&y, *rB);
2601 sim_fpu_mul (&b, &y, &x);
2602 sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
2604 EV_SET_REG2(*rSh, *rS, w1, w2);
2605 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2607 0.4,6.RS,11.0,16.RB,21.663:EVX:e500:evfsctsf %RS,%RB:Vector Convert Floating-Point to Signed Fraction
2611 sim_fpu_32to (&y, *rBh);
2612 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2613 sim_fpu_mul (&b, &y, &x);
2614 sim_fpu_to32i (&w1, &b, sim_fpu_round_near);
2616 sim_fpu_32to (&y, *rB);
2617 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2618 sim_fpu_mul (&b, &y, &x);
2619 sim_fpu_to32i (&w2, &b, sim_fpu_round_near);
2621 EV_SET_REG2(*rSh, *rS, w1, w2);
2622 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2625 0.4,6.RS,11.RA,16.0,21.708:EVX:e500:efsabs %RS,%RA:Floating-Point Absolute Value
2628 w2 = *rA & 0x7fffffff;
2629 EV_SET_REG2(*rSh, *rS, w1, w2);
2630 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2632 0.4,6.RS,11.RA,16.0,21.709:EVX:e500:efsnabs %RS,%RA:Floating-Point Negative Absolute Value
2635 w2 = *rA | 0x80000000;
2636 EV_SET_REG2(*rSh, *rS, w1, w2);
2637 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2639 0.4,6.RS,11.RA,16.0,21.710:EVX:e500:efsneg %RS,%RA:Floating-Point Negate
2642 w2 = (*rA & 0x7fffffff) | ((~*rA) & 0x80000000);
2643 EV_SET_REG2(*rSh, *rS, w1, w2);
2644 PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
2646 0.4,6.RS,11.RA,16.RB,21.704:EVX:e500:efsadd %RS,%RA,%RB:Floating-Point Add
2648 w = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
2650 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2652 0.4,6.RS,11.RA,16.RB,21.705:EVX:e500:efssub %RS,%RA,%RB:Floating-Point Subtract
2654 w = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
2656 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2658 0.4,6.RS,11.RA,16.RB,21.712:EVX:e500:efsmul %RS,%RA,%RB:Floating-Point Multiply
2660 w = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
2662 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2664 0.4,6.RS,11.RA,16.RB,21.713:EVX:e500:efsdiv %RS,%RA,%RB:Floating-Point Divide
2666 w = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
2668 PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
2670 0.4,6.BF,9./,11.RA,16.RB,21.716:EVX:e500:efscmpgt %BF,%RA,%RB:Floating-Point Compare Greater Than
2673 sim_fpu_32to (&a, *rA);
2674 sim_fpu_32to (&b, *rB);
2675 if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
2676 EV_SET_SPEFSCR_BITS(spefscr_finv);
2677 if (sim_fpu_is_gt(&a, &b))
2681 w = cl << 2 | cl << 1;
2683 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2685 0.4,6.BF,9./,11.RA,16.RB,21.717:EVX:e500:efscmplt %BF,%RA,%RB:Floating-Point Compare Less Than
2688 sim_fpu_32to (&al, *rA);
2689 sim_fpu_32to (&bl, *rB);
2690 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2691 EV_SET_SPEFSCR_BITS(spefscr_finv);
2692 if (sim_fpu_is_lt(&al, &bl))
2696 w = cl << 2 | cl << 1;
2698 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2700 0.4,6.BF,9./,11.RA,16.RB,21.718:EVX:e500:efscmpeq %BF,%RA,%RB:Floating-Point Compare Equal
2703 sim_fpu_32to (&al, *rA);
2704 sim_fpu_32to (&bl, *rB);
2705 if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
2706 EV_SET_SPEFSCR_BITS(spefscr_finv);
2707 if (sim_fpu_is_eq(&al, &bl))
2711 w = cl << 2 | cl << 1;
2713 PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
2715 0.4,6.BF,9./,11.RA,16.RB,21.732:EVX:e500:efststgt %BF,%RA,%RB:Floating-Point Test Greater Than
2718 sim_fpu_32to (&al, *rA);
2719 sim_fpu_32to (&bl, *rB);
2720 if (sim_fpu_is_gt(&al, &bl))
2724 w = cl << 2 | cl << 1;
2726 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2728 0.4,6.BF,9./,11.RA,16.RB,21.733:EVX:e500:efststlt %BF,%RA,%RB:Floating-Point Test Less Than
2731 sim_fpu_32to (&al, *rA);
2732 sim_fpu_32to (&bl, *rB);
2733 if (sim_fpu_is_lt(&al, &bl))
2737 w = cl << 2 | cl << 1;
2739 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2741 0.4,6.BF,9./,11.RA,16.RB,21.734:EVX:e500:efststeq %BF,%RA,%RB:Floating-Point Test Equal
2744 sim_fpu_32to (&al, *rA);
2745 sim_fpu_32to (&bl, *rB);
2746 if (sim_fpu_is_eq(&al, &bl))
2750 w = cl << 2 | cl << 1;
2752 PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
2754 0.4,6.RS,11.0,16.RB,21.721:EVX:e500:efscfsi %RS,%RB:Convert Floating-Point from Signed Integer
2758 sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
2759 sim_fpu_to32 (&w2, &b);
2760 EV_SET_REG2(*rSh, *rS, w1, w2);
2761 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2763 0.4,6.RS,11.0,16.RB,21.720:EVX:e500:efscfui %RS,%RB:Convert Floating-Point from Unsigned Integer
2767 sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
2768 sim_fpu_to32 (&w2, &b);
2769 EV_SET_REG2(*rSh, *rS, w1, w2);
2770 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2772 0.4,6.RS,11.0,16.RB,21.723:EVX:e500:efscfsf %RS,%RB:Convert Floating-Point from Signed Fraction
2776 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2777 sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
2778 sim_fpu_div (&b, &y, &x);
2779 sim_fpu_to32 (&w2, &b);
2780 EV_SET_REG2(*rSh, *rS, w1, w2);
2781 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2783 0.4,6.RS,11.0,16.RB,21.722:EVX:e500:efscfuf %RS,%RB:Convert Floating-Point from Unsigned Fraction
2784 unsigned32 w1, w2, bl;
2788 if (bl == 0xffffffff)
2789 sim_fpu_to32 (&w2, &sim_fpu_one);
2791 sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2792 sim_fpu_u32to (&y, bl, sim_fpu_round_default);
2793 sim_fpu_div (&b, &y, &x);
2794 sim_fpu_to32 (&w2, &b);
2796 EV_SET_REG2(*rSh, *rS, w1, w2);
2797 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2799 0.4,6.RS,11.0,16.RB,21.725:EVX:e500:efsctsi %RS,%RB:Convert Floating-Point to Signed Integer
2804 sim_fpu_32to (&b, *rB);
2805 sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
2806 EV_SET_REG2(*rSh, *rS, w1, w2);
2807 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2809 0.4,6.RS,11.0,16.RB,21.730:EVX:e500:efsctsiz %RS,%RB:Convert Floating-Point to Signed Integer with Round toward Zero
2814 sim_fpu_32to (&b, *rB);
2815 sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
2816 EV_SET_REG2(*rSh, *rS, w1, w2);
2817 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2819 0.4,6.RS,11.0,16.RB,21.724:EVX:e500:efsctui %RS,%RB:Convert Floating-Point to Unsigned Integer
2824 sim_fpu_32to (&b, *rB);
2825 sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
2826 EV_SET_REG2(*rSh, *rS, w1, w2);
2827 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2829 0.4,6.RS,11.0,16.RB,21.728:EVX:e500:efsctuiz %RS,%RB:Convert Floating-Point to Unsigned Integer with Round toward Zero
2834 sim_fpu_32to (&b, *rB);
2835 sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
2836 EV_SET_REG2(*rSh, *rS, w1, w2);
2837 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2839 0.4,6.RS,11.0,16.RB,21.727:EVX:e500:efsctsf %RS,%RB:Convert Floating-Point to Signed Fraction
2843 sim_fpu_32to (&y, *rB);
2844 sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
2845 sim_fpu_mul (&b, &y, &x);
2846 sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
2848 EV_SET_REG2(*rSh, *rS, w1, w2);
2849 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2851 0.4,6.RS,11.0,16.RB,21.726:EVX:e500:efsctuf %RS,%RB:Convert Floating-Point to Unsigned Fraction
2855 sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
2856 sim_fpu_32to (&y, *rB);
2857 sim_fpu_mul (&b, &y, &x);
2858 sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
2859 EV_SET_REG2(*rSh, *rS, w1, w2);
2860 PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
2864 # A.2.10 Vector Load/Store Instructions
2867 0.4,6.RS,11.RA,16.UIMM,21.769:EVX:e500:evldd %RS,%RA,%UIMM:Vector Load Double Word into Double Word
2873 EA = b + (UIMM << 3);
2874 m = MEM(unsigned, EA, 8);
2875 EV_SET_REG1(*rSh, *rS, m);
2876 //printf("evldd(%d<-%d + %u): m %08x.%08x, *rSh %x *rS %x\n", RS, RA, UIMM, (int)(m >> 32), (int)m, *rSh, *rS);
2877 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2879 0.4,6.RS,11.RA,16.RB,21.768:EVX:e500:evlddx %RS,%RA,%RB:Vector Load Double Word into Double Word Indexed
2886 m = MEM(unsigned, EA, 8);
2887 EV_SET_REG1(*rSh, *rS, m);
2888 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2890 0.4,6.RS,11.RA,16.UIMM,21.771:EVX:e500:evldw %RS,%RA,%UIMM:Vector Load Double into Two Words
2896 EA = b + (UIMM << 3);
2897 w1 = MEM(unsigned, EA, 4);
2898 w2 = MEM(unsigned, EA + 4, 4);
2899 EV_SET_REG2(*rSh, *rS, w1, w2);
2900 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2902 0.4,6.RS,11.RA,16.RB,21.770:EVX:e500:evldwx %RS,%RA,%RB:Vector Load Double into Two Words Indexed
2909 w1 = MEM(unsigned, EA, 4);
2910 w2 = MEM(unsigned, EA + 4, 4);
2911 EV_SET_REG2(*rSh, *rS, w1, w2);
2912 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2914 0.4,6.RS,11.RA,16.UIMM,21.773:EVX:e500:evldh %RS,%RA,%UIMM:Vector Load Double into 4 Half Words
2917 unsigned16 h1, h2, h3, h4;
2920 EA = b + (UIMM << 3);
2921 h1 = MEM(unsigned, EA, 2);
2922 h2 = MEM(unsigned, EA + 2, 2);
2923 h3 = MEM(unsigned, EA + 4, 2);
2924 h4 = MEM(unsigned, EA + 6, 2);
2925 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2926 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2928 0.4,6.RS,11.RA,16.RB,21.772:EVX:e500:evldhx %RS,%RA,%RB:Vector Load Double into 4 Half Words Indexed
2931 unsigned16 h1, h2, h3, h4;
2935 h1 = MEM(unsigned, EA, 2);
2936 h2 = MEM(unsigned, EA + 2, 2);
2937 h3 = MEM(unsigned, EA + 4, 2);
2938 h4 = MEM(unsigned, EA + 6, 2);
2939 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2940 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2942 0.4,6.RS,11.RA,16.UIMM,21.785:EVX:e500:evlwhe %RS,%RA,%UIMM:Vector Load Word into Two Half Words Even
2945 unsigned16 h1, h2, h3, h4;
2948 EA = b + (UIMM << 2);
2949 h1 = MEM(unsigned, EA, 2);
2951 h3 = MEM(unsigned, EA + 2, 2);
2953 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2954 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2956 0.4,6.RS,11.RA,16.RB,21.784:EVX:e500:evlwhex %RS,%RA,%RB:Vector Load Word into Two Half Words Even Indexed
2959 unsigned16 h1, h2, h3, h4;
2963 h1 = MEM(unsigned, EA, 2);
2965 h3 = MEM(unsigned, EA + 2, 2);
2967 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2968 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2970 0.4,6.RS,11.RA,16.UIMM,21.789:EVX:e500:evlwhou %RS,%RA,%UIMM:Vector Load Word into Two Half Words Odd Unsigned zero-extended
2973 unsigned16 h1, h2, h3, h4;
2976 EA = b + (UIMM << 2);
2978 h2 = MEM(unsigned, EA, 2);
2980 h4 = MEM(unsigned, EA + 2, 2);
2981 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2982 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
2984 0.4,6.RS,11.RA,16.RB,21.788:EVX:e500:evlwhoux %RS,%RA,%RB:Vector Load Word into Two Half Words Odd Unsigned Indexed zero-extended
2987 unsigned16 h1, h2, h3, h4;
2992 h2 = MEM(unsigned, EA, 2);
2994 h4 = MEM(unsigned, EA + 2, 2);
2995 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
2996 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
2998 0.4,6.RS,11.RA,16.UIMM,21.791:EVX:e500:evlwhos %RS,%RA,%UIMM:Vector Load Word into Half Words Odd Signed with sign extension
3001 unsigned16 h1, h2, h3, h4;
3004 EA = b + (UIMM << 2);
3005 h2 = MEM(unsigned, EA, 2);
3010 h4 = MEM(unsigned, EA + 2, 2);
3015 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
3016 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3018 0.4,6.RS,11.RA,16.RB,21.790:EVX:e500:evlwhosx %RS,%RA,%RB:Vector Load Word into Half Words Odd Signed Indexed with sign extension
3021 unsigned16 h1, h2, h3, h4;
3025 h2 = MEM(unsigned, EA, 2);
3030 h4 = MEM(unsigned, EA + 2, 2);
3035 EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
3036 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3038 0.4,6.RS,11.RA,16.UIMM,21.793:EVX:e500:evlwwsplat %RS,%RA,%UIMM:Vector Load Word into Word and Splat
3044 EA = b + (UIMM << 2);
3045 w1 = MEM(unsigned, EA, 4);
3046 EV_SET_REG2(*rSh, *rS, w1, w1);
3047 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3049 0.4,6.RS,11.RA,16.RB,21.792:EVX:e500:evlwwsplatx %RS,%RA,%RB:Vector Load Word into Word and Splat Indexed
3056 w1 = MEM(unsigned, EA, 4);
3057 EV_SET_REG2(*rSh, *rS, w1, w1);
3058 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3060 0.4,6.RS,11.RA,16.UIMM,21.797:EVX:e500:evlwhsplat %RS,%RA,%UIMM:Vector Load Word into 2 Half Words and Splat
3066 EA = b + (UIMM << 2);
3067 h1 = MEM(unsigned, EA, 2);
3068 h2 = MEM(unsigned, EA + 2, 2);
3069 EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
3070 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3072 0.4,6.RS,11.RA,16.RB,21.796:EVX:e500:evlwhsplatx %RS,%RA,%RB:Vector Load Word into 2 Half Words and Splat Indexed
3079 h1 = MEM(unsigned, EA, 2);
3080 h2 = MEM(unsigned, EA + 2, 2);
3081 EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
3082 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3084 0.4,6.RS,11.RA,16.UIMM,21.777:EVX:e500:evlhhesplat %RS,%RA,%UIMM:Vector Load Half Word into Half Words Even and Splat
3090 EA = b + (UIMM << 1);
3091 h = MEM(unsigned, EA, 2);
3092 EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
3093 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3095 0.4,6.RS,11.RA,16.RB,21.776:EVX:e500:evlhhesplatx %RS,%RA,%RB:Vector Load Half Word into Half Words Even and Splat Indexed
3102 h = MEM(unsigned, EA, 2);
3103 EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
3104 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3106 0.4,6.RS,11.RA,16.UIMM,21.781:EVX:e500:evlhhousplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Unsigned and Splat
3112 EA = b + (UIMM << 1);
3113 h = MEM(unsigned, EA, 2);
3114 EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
3115 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3117 0.4,6.RS,11.RA,16.RB,21.780:EVX:e500:evlhhousplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed
3124 h = MEM(unsigned, EA, 2);
3125 EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
3126 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3128 0.4,6.RS,11.RA,16.UIMM,21.783:EVX:e500:evlhhossplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Signed and Splat
3134 EA = b + (UIMM << 1);
3135 h2 = MEM(unsigned, EA, 2);
3140 EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
3141 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3143 0.4,6.RS,11.RA,16.RB,21.782:EVX:e500:evlhhossplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Signed and Splat Indexed
3150 h2 = MEM(unsigned, EA, 2);
3155 EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
3156 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3159 0.4,6.RS,11.RA,16.UIMM,21.801:EVX:e500:evstdd %RS,%RA,%UIMM:Vector Store Double of Double
3164 EA = b + (UIMM << 3);
3165 STORE(EA, 4, (*rSh));
3166 STORE(EA + 4, 4, (*rS));
3167 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3169 0.4,6.RS,11.RA,16.RB,21.800:EVX:e500:evstddx %RS,%RA,%RB:Vector Store Double of Double Indexed
3175 STORE(EA, 4, (*rSh));
3176 STORE(EA + 4, 4, (*rS));
3177 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3179 0.4,6.RS,11.RA,16.UIMM,21.803:EVX:e500:evstdw %RS,%RA,%UIMM:Vector Store Double of Two Words
3185 EA = b + (UIMM << 3);
3188 STORE(EA + 0, 4, w1);
3189 STORE(EA + 4, 4, w2);
3190 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3192 0.4,6.RS,11.RA,16.RB,21.802:EVX:e500:evstdwx %RS,%RA,%RB:Vector Store Double of Two Words Indexed
3201 STORE(EA + 0, 4, w1);
3202 STORE(EA + 4, 4, w2);
3203 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3205 0.4,6.RS,11.RA,16.UIMM,21.805:EVX:e500:evstdh %RS,%RA,%UIMM:Vector Store Double of Four Half Words
3208 unsigned16 h1, h2, h3, h4;
3211 EA = b + (UIMM << 3);
3212 h1 = EV_HIHALF(*rSh);
3213 h2 = EV_LOHALF(*rSh);
3214 h3 = EV_HIHALF(*rS);
3215 h4 = EV_LOHALF(*rS);
3216 STORE(EA + 0, 2, h1);
3217 STORE(EA + 2, 2, h2);
3218 STORE(EA + 4, 2, h3);
3219 STORE(EA + 6, 2, h4);
3220 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3222 0.4,6.RS,11.RA,16.RB,21.804:EVX:e500:evstdhx %RS,%RA,%RB:Vector Store Double of Four Half Words Indexed
3225 unsigned16 h1, h2, h3, h4;
3229 h1 = EV_HIHALF(*rSh);
3230 h2 = EV_LOHALF(*rSh);
3231 h3 = EV_HIHALF(*rS);
3232 h4 = EV_LOHALF(*rS);
3233 STORE(EA + 0, 2, h1);
3234 STORE(EA + 2, 2, h2);
3235 STORE(EA + 4, 2, h3);
3236 STORE(EA + 6, 2, h4);
3237 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
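# evstdh/evstdhx split the 64-bit register pair into four halfwords stored in
# big-endian order: the high and low 16 bits of rSh, then the high and low 16
# bits of rS.  EV_HIHALF/EV_LOHALF are presumably simple shift/mask macros;
# this sketch just restates the split in plain C (split_halfwords() is a
# hypothetical helper):
#
#   #include <stdint.h>
#
#   static void split_halfwords (uint32_t hi_word, uint32_t lo_word,
#                                uint16_t out[4])
#   {
#     out[0] = (uint16_t) (hi_word >> 16);     /* EV_HIHALF(*rSh) */
#     out[1] = (uint16_t) (hi_word & 0xffff);  /* EV_LOHALF(*rSh) */
#     out[2] = (uint16_t) (lo_word >> 16);     /* EV_HIHALF(*rS)  */
#     out[3] = (uint16_t) (lo_word & 0xffff);  /* EV_LOHALF(*rS)  */
#   }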
3239 0.4,6.RS,11.RA,16.UIMM,21.825:EVX:e500:evstwwe %RS,%RA,%UIMM:Vector Store Word of Word from Even
3245 EA = b + (UIMM << 3);
3248 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3250 0.4,6.RS,11.RA,16.RB,21.824:EVX:e500:evstwwex %RS,%RA,%RB:Vector Store Word of Word from Even Indexed
3259 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3261 0.4,6.RS,11.RA,16.UIMM,21.829:EVX:e500:evstwwo %RS,%RA,%UIMM:Vector Store Word of Word from Odd
3267 EA = b + (UIMM << 3);
3270 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3272 0.4,6.RS,11.RA,16.RB,21.828:EVX:e500:evstwwox %RS,%RA,%RB:Vector Store Word of Word from Odd Indexed
3281 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3283 0.4,6.RS,11.RA,16.UIMM,21.817:EVX:e500:evstwhe %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Even
3289 EA = b + (UIMM << 3);
3290 h1 = EV_HIHALF(*rSh);
3291 h2 = EV_HIHALF(*rS);
3292 STORE(EA + 0, 2, h1);
3293 STORE(EA + 2, 2, h2);
3294 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3296 0.4,6.RS,11.RA,16.RB,21.816:EVX:e500:evstwhex %RS,%RA,%RB:Vector Store Word of Two Half Words from Even Indexed
3303 h1 = EV_HIHALF(*rSh);
3304 h2 = EV_HIHALF(*rS);
3305 STORE(EA + 0, 2, h1);
3306 STORE(EA + 2, 2, h2);
3307 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3309 0.4,6.RS,11.RA,16.UIMM,21.821:EVX:e500:evstwho %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Odd
3315 EA = b + (UIMM << 3);
3316 h1 = EV_LOHALF(*rSh);
3317 h2 = EV_LOHALF(*rS);
3318 STORE(EA + 0, 2, h1);
3319 STORE(EA + 2, 2, h2);
3320 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
3322 0.4,6.RS,11.RA,16.RB,21.820:EVX:e500:evstwhox %RS,%RA,%RB:Vector Store Word of Two Half Words from Odd Indexed
3329 h1 = EV_LOHALF(*rSh);
3330 h2 = EV_LOHALF(*rS);
3331 STORE(EA + 0, 2, h1);
3332 STORE(EA + 2, 2, h2);
3333 PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
3337 # 4.5.1 Integer Select Instruction
3340 0.31,6.RS,11.RA,16.RB,21.CRB,26.30:X:e500:isel %RS,%RA,%RB,%CRB:Integer Select
3341 if (CR & (1 << (31 - (unsigned)CRB)))
3343 EV_SET_REG1(*rSh, *rS, 0);
3345 EV_SET_REG2(*rSh, *rS, *rAh, *rA);
3347 EV_SET_REG2(*rSh, *rS, *rBh, *rB);
3348 PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
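# isel selects one of two register operands based on the tested CR bit: if
# CR[CRB] is set the result is rA (with the RA = 0 case reading as the value
# zero, as the EV_SET_REG1(..., 0) arm above suggests), otherwise rB.  A
# minimal per-word C sketch of that selection, a restatement of the intent
# rather than of the exact elided branches (isel_word() is a hypothetical
# helper):
#
#   #include <stdint.h>
#
#   /* a is 0 when the RA field is 0, otherwise GPR[RA]; b is GPR[RB]. */
#   static uint32_t isel_word (uint32_t cr, unsigned crb, uint32_t a, uint32_t b)
#   {
#     return (cr & (1u << (31 - crb))) ? a : b;
#   }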