4 restore AVX_512 ; this ensures that symbol cannot be forward-referenced
\r
11 element AVX_512.r128 : AVX_512.reg + 16
\r
12 element AVX_512.r256 : AVX_512.reg + 32
\r
13 element AVX_512.r512 : AVX_512.reg + 64
\r
16 element zmm#i? : AVX_512.r512 + i
\r
21 element xmm#i? : AVX_512.r128 + i
\r
22 element ymm#i? : AVX_512.r256 + i
\r
25 element zmm#i? : AVX_512.r512 + i
\r
29 element AVX_512.maskreg
\r
32 element k#i? : AVX_512.maskreg + i
\r
35 define x86.dqqword? :64
\r
36 define x86.zword? :64
\r
40 EVEX_REQUIRED = 1 shl 10
\r
41 EVEX_FORBIDDEN = 1 shl 2
\r
46 calminstruction AVX_512.parse_operand namespace, operand
\r
48 local size, type, mod, rm, imm
\r
51 asmcmd =x86.=parse_operand namespace, operand
\r
53 arrange type, namespace.=type
\r
54 arrange size, namespace.=size
\r
55 arrange imm, namespace.=imm
\r
57 check type = 'reg' & size = 1 & rm >= 4 & (~ defined x86.REX_FORBIDDEN | rm and x86.REX_FORBIDDEN)
\r
58 jyes invalid_operand
\r
59 check type = 'imm' & size = 0
\r
62 check imm eq 1 elementof imm & 1 metadataof imm relativeto SSE.reg
\r
64 check imm eq 1 elementof imm & 1 metadataof imm relativeto AVX.reg
\r
66 check 1 metadataof (1 metadataof imm) relativeto AVX_512.reg & imm eq 1 elementof imm
\r
68 check imm eq 1 elementof imm & 1 metadataof imm relativeto AVX_512.maskreg
\r
73 asmcmd =err 'invalid operand'
\r
77 compute rm, 1 metadataof imm - SSE.reg
\r
84 compute rm, 1 metadataof imm - AVX.reg
\r
91 compute rm, 1 metadataof imm - AVX_512.maskreg
\r
93 compute type, 'maskreg'
\r
99 compute rm, 1 metadataof imm - 1 elementof (1 metadataof imm)
\r
100 compute size, 1 metadataof (1 metadataof imm) - AVX_512.reg
\r
104 compute type, 'mmreg'
\r
110 arrange sym, namespace.=mod
\r
113 arrange sym, namespace.=rm
\r
116 arrange sym, namespace.=type
\r
119 arrange sym, namespace.=size
\r
126 arrange sym, namespace.=mask
\r
129 arrange sym, namespace.=evex_b
\r
132 arrange sym, namespace.=memsize
\r
135 end calminstruction
\r
137 calminstruction AVX_512.parse_operand_k1z namespace,operand
\r
139 local k1, z, mask, sym
\r
141 match operand {k1} { =z? }, operand
\r
143 match operand {k1}, operand
\r
145 asmcmd =AVX_512.=parse_operand namespace, operand
\r
153 asmcmd =AVX_512.=parse_operand namespace, operand
\r
154 arrange sym, namespace.=type
\r
155 check z & sym = 'mem'
\r
157 check k1 eq 1 elementof k1 & 1 metadataof k1 relativeto AVX_512.maskreg & 1 metadataof k1 - AVX_512.maskreg > 0
\r
159 compute mask, (1 metadataof k1 - AVX_512.maskreg) or z
\r
160 arrange sym, namespace.=mask
\r
164 asmcmd =err 'invalid mask'
\r
166 end calminstruction
\r
168 calminstruction AVX_512.parse_operand_k1 namespace,operand
\r
170 local k1, mask, sym
\r
172 match operand {k1}, operand
\r
174 asmcmd =AVX_512.=parse_operand namespace, operand
\r
177 asmcmd =AVX_512.=parse_operand namespace, operand
\r
178 arrange sym, namespace.=type
\r
179 check k1 eq 1 elementof k1 & 1 metadataof k1 relativeto AVX_512.maskreg & 1 metadataof k1 - AVX_512.maskreg > 0
\r
181 compute mask, 1 metadataof k1 - AVX_512.maskreg
\r
182 arrange sym, namespace.=mask
\r
186 asmcmd =err 'invalid mask'
\r
188 end calminstruction
\r
190 calminstruction AVX_512.parse_operand_bcst namespace,operand,unit
\r
192 local broadcast, memsize, size, b
\r
196 match operand {broadcast}, operand
\r
198 asmcmd =AVX_512.=parse_operand namespace, operand
\r
202 asmcmd =err 'invalid operand'
\r
205 asmcmd =AVX_512.=parse_operand namespace, operand
\r
206 arrange sym, namespace.=type
\r
208 jno invalid_operand
\r
210 arrange size, namespace.=size
\r
211 compute memsize, unit
\r
215 jno operand_size_not_specified
\r
216 compute memsize, size
\r
218 operand_size_not_specified:
\r
219 asmcmd =err 'operand size not specified'
\r
222 check size and not memsize
\r
224 asmcmd =err 'invalid operand size'
\r
228 match =1to2?, broadcast
\r
230 match =1to4?, broadcast
\r
232 match =1to8?, broadcast
\r
234 match =1to16?, broadcast
\r
236 asmcmd =err 'invalid broadcast'
\r
239 compute broadcast, 2
\r
242 compute broadcast, 4
\r
245 compute broadcast, 8
\r
248 compute broadcast, 16
\r
250 compute size, memsize * broadcast
\r
253 arrange sym, namespace.=memsize
\r
254 publish sym, memsize
\r
256 arrange sym, namespace.=size
\r
259 arrange sym, namespace.=broadcast
\r
260 publish sym, broadcast
\r
262 arrange sym, namespace.=evex_b
\r
265 end calminstruction
\r
267 calminstruction AVX_512.parse_er namespace,operand,vsize:64
\r
269 local type, size, rounding, b
\r
272 arrange type, namespace.=type
\r
273 arrange size, namespace.=size
\r
274 check type = 'mem' | size <> vsize
\r
275 jyes invalid_operand
\r
277 match { =rn?-=sae? }, operand
\r
279 match { =rd?-=sae? }, operand
\r
281 match { =ru?-=sae? }, operand
\r
283 match { =rz?-=sae? }, operand
\r
286 asmcmd =err 'invalid operand'
\r
289 compute rounding, 0
\r
292 compute rounding, 1
\r
295 compute rounding, 2
\r
298 compute rounding, 3
\r
304 arrange sym, namespace.=rounding
\r
305 publish sym, rounding
\r
307 arrange sym, namespace.=evex_b
\r
310 end calminstruction
\r
312 calminstruction AVX_512.parse_sae namespace,operand
\r
314 local type, rounding, b
\r
317 arrange type, namespace.=type
\r
319 jyes invalid_operand
\r
321 match { =sae? }, operand
\r
322 jno invalid_operand
\r
325 compute rounding, -1
\r
327 arrange sym, namespace.=rounding
\r
328 publish sym, rounding
\r
330 arrange sym, namespace.=evex_b
\r
336 asmcmd =err 'invalid operand'
\r
338 end calminstruction
\r
340 calminstruction AVX_512.store_instruction vsize*,vex_mpw*,evex_f*,opcode*,rm_operand*,mask*,reg*,vreg:0,imm_size:0,imm
\r
342 local evex, evex_flags
\r
343 local segment_prefix, evex_b, rounding, memsize
\r
344 local mode, mod, rm
\r
345 local scale, index, base
\r
346 local displacement, displacement_size, auto_relative
\r
347 local evex_displacement_size, compressed_displacement
\r
350 arrange segment_prefix, rm_operand.=segment_prefix
\r
351 arrange evex_b, rm_operand.=evex_b
\r
352 arrange rounding, rm_operand.=rounding
\r
353 arrange memsize, rm_operand.=memsize
\r
355 arrange mode, rm_operand.=mode
\r
356 arrange mod, rm_operand.=mod
\r
357 arrange rm, rm_operand.=rm
\r
359 arrange scale, rm_operand.=scale
\r
360 arrange index, rm_operand.=index
\r
361 arrange base, rm_operand.=base
\r
363 arrange displacement_size, rm_operand.=displacement_size
\r
364 arrange displacement, rm_operand.=displacement
\r
365 arrange auto_relative, rm_operand.=auto_relative
\r
367 check segment_prefix
\r
368 jno segment_prefix_ok
\r
371 jyes segment_in_long_mode
\r
372 check mode = 16 & ( rm = 2 | rm = 3 | ( mod > 0 & rm = 6 ) )
\r
373 jyes ss_segment_default
\r
374 check mode = 32 & ( ( mod > 0 & rm = 5 ) | ( rm = 4 & base = 4 ) | ( mod > 0 & rm = 4 & base = 5 ) )
\r
375 jyes ss_segment_default
\r
377 ds_segment_default:
\r
378 check segment_prefix = 3Eh
\r
379 jyes segment_prefix_ok
\r
380 jump store_segment_prefix
\r
381 ss_segment_default:
\r
382 check segment_prefix = 36h
\r
383 jyes segment_prefix_ok
\r
384 jump store_segment_prefix
\r
385 segment_in_long_mode:
\r
386 check segment_prefix < 64h
\r
387 jyes segment_prefix_ok
\r
388 store_segment_prefix:
\r
389 asmcmd =db segment_prefix
\r
392 check mod <> 11b & mode <> x86.mode
\r
393 jno addressing_prefix_ok
\r
394 check mode = 64 | (mode = 16 & x86.mode = 64)
\r
395 jno store_addressing_prefix
\r
396 asmcmd =err 'illegal addressing mode'
\r
397 store_addressing_prefix:
\r
399 addressing_prefix_ok:
\r
401 compute evex, vex_mpw
\r
402 compute evex_flags, evex_f
\r
406 compute evex, evex or evex_b shl 20
\r
408 check mod = 11b & evex_b & rounding >= 0
\r
412 check evex_flags and EVEX_VL & AVX512VL = 0
\r
414 compute evex_flags, evex_flags or EVEX_FORBIDDEN
\r
418 compute evex, evex or 1 shl 21
\r
421 compute evex, evex or 1 shl 22
\r
424 compute evex, evex or rounding shl 21
\r
429 compute evex, evex or mask shl 16
\r
432 check rm and 10000b | (mod <> 11b & mode > 16 & rm = 4 & index and 1000b)
\r
434 compute evex, evex or 1 shl 6
\r
436 check rm and 1000b | (mod <> 11b & mode > 16 & rm = 4 & base and 1000b)
\r
438 compute evex, evex or 1 shl 5
\r
440 check reg and 10000b
\r
442 compute evex, evex or 1 shl 4
\r
444 check reg and 1000b
\r
446 compute evex, evex or 1 shl 7
\r
448 check vreg and 10000b
\r
450 compute evex, evex or 1 shl 19
\r
452 compute evex, evex or (vreg and 1111b) shl 11
\r
454 check x86.mode < 64 & evex and 00001000_01000000_11110000b
\r
456 asmcmd =err 'instruction requires long mode'
\r
459 check displacement_size
\r
460 jno no_displacement_compression
\r
461 compute displacement, displacement
\r
463 jyes displacement_compression
\r
464 compute memsize, vsize
\r
465 displacement_compression:
\r
466 check displacement relativeto 0 & displacement mod? memsize = 0
\r
467 jno displacement_incompressible
\r
468 compute compressed_displacement, displacement / memsize
\r
469 check compressed_displacement < 80h & compressed_displacement >= -80h
\r
470 jyes displacement_compressed
\r
471 check compressed_displacement - 1 shl mode >= -80h & compressed_displacement < 1 shl mode
\r
472 jno displacement_incompressible
\r
473 compute compressed_displacement, compressed_displacement - 1 shl mode
\r
474 displacement_compressed:
\r
475 compute evex_displacement_size, 1
\r
477 displacement_incompressible:
\r
478 compute evex_displacement_size, 4
\r
481 compute evex_displacement_size, 2
\r
483 no_displacement_compression:
\r
484 compute evex_displacement_size, displacement_size
\r
487 check evex_flags and EVEX_REQUIRED | evex and 11011111_00000000_00010000b | rm and 10000b
\r
489 check ~ evex_flags and EVEX_FORBIDDEN & evex_displacement_size + 1 < displacement_size
\r
493 check evex_flags and EVEX_FORBIDDEN
\r
495 asmcmd =err 'invalid operand'
\r
499 compute vex, evex and 11111011_11111111b or (evex and 1 shl 21) shr (21-10)
\r
500 check vex and 10000000_01111111b <> 1
\r
503 compute byte2, ((vex and 10000000b) or ((vex shr 8) and 1111111b)) xor 11111000b
\r
504 asmcmd =db 0C5h,byte2
\r
507 compute byte2, (vex and 11111111b) xor 11100000b
\r
508 compute byte3, (vex shr 8) xor 01111000b
\r
509 asmcmd =db 0C4h,byte2,byte3
\r
513 compute evex, evex or 1 shl 10
\r
514 check evex_flags and EVEX_W1
\r
516 compute evex, evex or 1 shl 15
\r
518 compute evex, 62h + (evex xor 00001000_01111000_11110000b) shl 8
\r
520 check mod <> 11b & mod <> 0 & evex_displacement_size > 0
\r
522 compute displacement_size, evex_displacement_size
\r
523 check evex_displacement_size = 1
\r
524 jyes evex_compressed_displacement
\r
527 evex_compressed_displacement:
\r
528 arrange sym, rm_operand.=displacement
\r
529 publish sym, compressed_displacement
\r
535 compute modrm, mod shl 6 + (reg and 111b) shl 3 + rm and 111b
\r
536 asmcmd =db opcode, modrm
\r
538 check mod <> 11b & rm = 4 & mode <> 16
\r
540 compute sib, (bsf scale) shl 6 + (index and 111b) shl 3 + base and 111b
\r
544 check displacement_size = 1
\r
545 jyes displacement_8bit
\r
546 check displacement_size = 2
\r
547 jyes displacement_16bit
\r
548 check displacement_size = 4 | displacement_size = 8
\r
549 jno displacement_ok
\r
551 check auto_relative
\r
552 jno auto_relative_ok
\r
554 jyes adjust_auto_relative_displacement
\r
555 compute displacement, displacement - ($ + 4 + 4)
\r
556 jump auto_relative_ok
\r
557 adjust_auto_relative_displacement:
\r
558 compute displacement, displacement - ($ + 4 + imm_size)
\r
561 check mode = 64 & displacement relativeto 0
\r
562 jno displacement_ready
\r
563 check displacement - 1 shl 64 >= -80000000h & displacement < 1 shl 64
\r
564 jyes adjust_displacement_wrap
\r
565 check displacement >= -80000000h & displacement < 80000000h
\r
566 jyes displacement_ready
\r
567 asmcmd =err 'address value out of signed range'
\r
568 adjust_displacement_wrap:
\r
569 compute displacement, displacement - 1 shl 64
\r
570 displacement_ready:
\r
572 arrange sym, rm_operand.=displacement
\r
573 publish sym, displacement
\r
575 asmcmd =dd rm_operand.=displacement
\r
577 jump displacement_ok
\r
578 displacement_16bit:
\r
579 asmcmd =dw rm_operand.=displacement
\r
580 jump displacement_ok
\r
582 asmcmd =db rm_operand.=displacement
\r
586 jyes immediate_8bit
\r
588 jyes immediate_16bit
\r
590 jyes immediate_32bit
\r
593 asmcmd =x86.=simm32 imm
\r
605 end calminstruction
\r
607 macro AVX_512.basic_instruction_bcst_er vex_mpw,evex_f,opcode,unit,dest,src,src_er&
\r
608 AVX_512.parse_operand_k1z @dest,dest
\r
609 AVX_512.parse_operand @src,src
\r
610 match src2=,er, src_er
\r
611 AVX_512.parse_operand @src2,src2
\r
612 AVX_512.parse_er @src2,er
\r
614 AVX_512.parse_operand_bcst @src2,src_er,unit
\r
616 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
617 if @src.size <> @dest.size | @src2.size and not @dest.size
\r
618 err 'operand sizes do not match'
\r
620 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
622 err 'invalid combination of operands'
\r
626 macro AVX_512.basic_instruction_bcst_sae vex_mpw,evex_f,opcode,unit,dest,src,src_sae&
\r
627 AVX_512.parse_operand_k1z @dest,dest
\r
628 AVX_512.parse_operand @src,src
\r
629 match src2=,sae, src_sae
\r
630 AVX_512.parse_operand @src2,src2
\r
631 AVX_512.parse_sae @src2,sae
\r
633 AVX_512.parse_operand_bcst @src2,src_sae,unit
\r
635 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
636 if @src.size <> @dest.size | @src2.size and not @dest.size
\r
637 err 'operand sizes do not match'
\r
639 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
641 err 'invalid combination of operands'
\r
645 macro AVX_512.basic_instruction_bcst_sae_imm8 vex_mpw,evex_f,opcode,unit,dest,src,src2,aux&
\r
646 AVX_512.parse_operand_k1z @dest,dest
\r
647 AVX_512.parse_operand @src,src
\r
648 AVX_512.parse_operand_bcst @src2,src2,unit
\r
649 match sae=,imm, aux
\r
650 AVX_512.parse_sae @src2,sae
\r
651 x86.parse_operand @aux,imm
\r
653 x86.parse_operand @aux,aux
\r
655 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
656 if @src.size <> @dest.size | @src2.size and not @dest.size | @aux.size and not 1
\r
657 err 'operand sizes do not match'
\r
659 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
661 err 'invalid combination of operands'
\r
665 macro AVX_512.basic_instruction_bcst vex_mpw,evex_f,opcode,unit,dest,src,src2
\r
666 AVX_512.parse_operand_k1z @dest,dest
\r
667 AVX_512.parse_operand @src,src
\r
668 AVX_512.parse_operand_bcst @src2,src2,unit
\r
669 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
670 if @src.size <> @dest.size | @src2.size and not @dest.size
\r
671 err 'operand sizes do not match'
\r
673 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
675 err 'invalid combination of operands'
\r
679 macro AVX_512.basic_instruction_bcst_imm8 vex_mpw,evex_f,opcode,unit,dest,src,src2,aux
\r
680 AVX_512.parse_operand_k1z @dest,dest
\r
681 AVX_512.parse_operand @src,src
\r
682 AVX_512.parse_operand_bcst @src2,src2,unit
\r
683 x86.parse_operand @aux,aux
\r
684 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
685 if @aux.size and not 1
\r
686 err 'invalid operand size'
\r
687 else if @src.size <> @dest.size | @src2.size and not @dest.size
\r
688 err 'operand sizes do not match'
\r
690 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
692 err 'invalid combination of operands'
\r
696 macro AVX_512.basic_instruction_er vex_mpw,evex_f,opcode,unit,dest,src,src_er&
\r
697 AVX_512.parse_operand_k1z @dest,dest
\r
698 AVX_512.parse_operand @src,src
\r
699 match src2=,er, src_er
\r
700 AVX_512.parse_operand @src2,src2
\r
701 AVX_512.parse_er @src2,er,(unit-1) and not 15 + 16
\r
703 AVX_512.parse_operand @src2,src_er
\r
705 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
706 if unit & ( @dest.size <> (unit-1) and not 15 + 16 | (@src2.type = 'mem' & @src2.size and not unit) )
\r
707 err 'invalid operand size'
\r
708 else if @dest.size <> @src.size | (@src2.size and not @dest.size & (unit = 0 | @src2.type = 'mmreg'))
\r
709 err 'operand sizes do not match'
\r
711 @src2.memsize = unit
\r
712 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
714 err 'invalid combination of operands'
\r
718 macro AVX_512.basic_instruction_sae vex_mpw,evex_f,opcode,unit,dest,src,src_sae&
\r
719 AVX_512.parse_operand_k1z @dest,dest
\r
720 AVX_512.parse_operand @src,src
\r
721 match src2=,sae, src_sae
\r
722 AVX_512.parse_operand @src2,src2
\r
723 AVX_512.parse_sae @src2,sae
\r
725 AVX_512.parse_operand @src2,src_sae
\r
727 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
728 if unit & ( @dest.size <> (unit-1) and not 15 + 16 | (@src2.type = 'mem' & @src2.size and not unit) )
\r
729 err 'invalid operand size'
\r
730 else if @dest.size <> @src.size | (@src2.size and not @dest.size & (unit = 0 | @src2.type = 'mmreg'))
\r
731 err 'operand sizes do not match'
\r
733 @src2.memsize = unit
\r
734 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
736 err 'invalid combination of operands'
\r
740 macro AVX_512.basic_instruction_sae_imm8 vex_mpw,evex_f,opcode,unit,dest,src,src2,aux&
\r
741 AVX_512.parse_operand_k1z @dest,dest
\r
742 AVX_512.parse_operand @src,src
\r
743 AVX_512.parse_operand @src2,src2
\r
744 match sae=,imm, aux
\r
745 AVX_512.parse_sae @src2,sae
\r
746 x86.parse_operand @aux,imm
\r
748 x86.parse_operand @aux,aux
\r
750 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
751 if ( unit & ( @dest.size <> (unit-1) and not 15 + 16 | (@src2.type = 'mem' & @src2.size and not unit) ) ) | @aux.size and not 1
\r
752 err 'invalid operand size'
\r
753 else if @dest.size <> @src.size | (@src2.size and not @dest.size & (unit = 0 | @src2.type = 'mmreg'))
\r
754 err 'operand sizes do not match'
\r
756 @src2.memsize = unit
\r
757 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
759 err 'invalid combination of operands'
\r
763 macro AVX_512.basic_instruction vex_mpw,evex_f,opcode,unit,dest,src,src2
\r
764 AVX_512.parse_operand_k1z @dest,dest
\r
765 AVX_512.parse_operand @src,src
\r
766 AVX_512.parse_operand @src2,src2
\r
767 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
768 if unit & ( @dest.size <> (unit-1) and not 15 + 16 | (@src2.type = 'mem' & @src2.size and not unit) )
\r
769 err 'invalid operand size'
\r
770 else if @dest.size <> @src.size | (@src2.size and not @dest.size & (unit = 0 | @src2.type = 'mmreg'))
\r
771 err 'operand sizes do not match'
\r
773 @src2.memsize = unit
\r
774 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
776 err 'invalid combination of operands'
\r
780 macro AVX_512.basic_instruction_imm8 vex_mpw,evex_f,opcode,unit,dest,src,src2,aux&
\r
781 AVX_512.parse_operand_k1z @dest,dest
\r
782 AVX_512.parse_operand @src,src
\r
783 AVX_512.parse_operand @src2,src2
\r
784 x86.parse_operand @aux,aux
\r
785 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
786 if ( unit & ( @dest.size <> (unit-1) and not 15 + 16 | (@src2.type = 'mem' & @src2.size and not unit) ) ) | @aux.size and not 1
\r
787 err 'invalid operand size'
\r
788 else if @dest.size <> @src.size | (@src2.size and not @dest.size & (unit = 0 | @src2.type = 'mmreg'))
\r
789 err 'operand sizes do not match'
\r
791 @src2.memsize = unit
\r
792 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
794 err 'invalid combination of operands'
\r
798 macro AVX_512.single_source_instruction_bcst_er vex_mpw,evex_f,opcode,unit,dest,src_er&
\r
799 AVX_512.parse_operand_k1z @dest,dest
\r
800 match src=,er, src_er
\r
801 AVX_512.parse_operand @src,src
\r
802 AVX_512.parse_er @src,er
\r
804 AVX_512.parse_operand_bcst @src,src_er,unit
\r
806 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
807 if @src.size and not @dest.size
\r
808 err 'operand sizes do not match'
\r
810 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src,@dest.mask,@dest.rm
\r
812 err 'invalid combination of operands'
\r
816 macro AVX_512.single_source_instruction_bcst_sae vex_mpw,evex_f,opcode,unit,dest,src_sae&
\r
817 AVX_512.parse_operand_k1z @dest,dest
\r
818 match src=,sae, src_sae
\r
819 AVX_512.parse_operand @src,src
\r
820 AVX_512.parse_sae @src,sae
\r
822 AVX_512.parse_operand_bcst @src,src_sae,unit
\r
824 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
825 if @src.size and not @dest.size
\r
826 err 'operand sizes do not match'
\r
828 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src,@dest.mask,@dest.rm
\r
830 err 'invalid combination of operands'
\r
834 macro AVX_512.single_source_instruction_bcst_sae_imm8 vex_mpw,evex_f,opcode,unit,dest,src,aux&
\r
835 AVX_512.parse_operand_k1z @dest,dest
\r
836 AVX_512.parse_operand_bcst @src,src,unit
\r
837 match sae=,imm, aux
\r
838 AVX_512.parse_sae @src,sae
\r
839 x86.parse_operand @aux,imm
\r
841 x86.parse_operand @aux,aux
\r
843 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg') & @aux.type = 'imm'
\r
844 if @aux.size and not 1
\r
845 err 'invalid operand size'
\r
846 else if @src.size and not @dest.size
\r
847 err 'operand sizes do not match'
\r
849 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src,@dest.mask,@dest.rm,,1,@aux.imm
\r
851 err 'invalid combination of operands'
\r
855 macro AVX_512.single_source_instruction_bcst vex_mpw,evex_f,opcode,unit,dest,src&
\r
856 AVX_512.parse_operand_k1z @dest,dest
\r
857 AVX_512.parse_operand_bcst @src,src,unit
\r
858 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
859 if @src.size and not @dest.size
\r
860 err 'operand sizes do not match'
\r
862 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src,@dest.mask,@dest.rm
\r
864 err 'invalid combination of operands'
\r
868 macro AVX_512.single_source_instruction vex_mpw,evex_f,opcode,unit,dest,src
\r
869 AVX_512.parse_operand_k1z @dest,dest
\r
870 AVX_512.parse_operand @src,src
\r
871 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
872 if unit & ( @dest.size <> (unit-1) and not 15 + 16 | (@src.type = 'mem' & @src.size and not unit) )
\r
873 err 'invalid operand size'
\r
874 else if @src.size and not @dest.size & (unit = 0 | @src.type = 'mmreg')
\r
875 err 'operand sizes do not match'
\r
877 @src.memsize = unit
\r
878 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src,@dest.mask,@dest.rm
\r
880 err 'invalid combination of operands'
\r
884 iterate <instr,opcode>, add,58h, mul,59h, sub,5Ch, div,5Eh
\r
886 macro v#instr#pd? dest*,src*,src2*&
\r
887 AVX_512.basic_instruction_bcst_er VEX_66_0F_W0,EVEX_W1+EVEX_VL,opcode,8,dest,src,src2
\r
890 macro v#instr#ps? dest*,src*,src2*&
\r
891 AVX_512.basic_instruction_bcst_er VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,opcode,4,dest,src,src2
\r
894 macro v#instr#sd? dest*,src*,src2*&
\r
895 AVX_512.basic_instruction_er VEX_F2_0F_W0,EVEX_W1,opcode,8,dest,src,src2
\r
898 macro v#instr#ss? dest*,src*,src2*&
\r
899 AVX_512.basic_instruction_er VEX_F3_0F_W0,EVEX_AS_VEX,opcode,4,dest,src,src2
\r
904 iterate <instr,opcode>, min,5Dh, max,5Fh
\r
906 macro v#instr#pd? dest*,src*,src2*&
\r
907 AVX_512.basic_instruction_bcst_sae VEX_66_0F_W0,EVEX_W1+EVEX_VL,opcode,8,dest,src,src2
\r
910 macro v#instr#ps? dest*,src*,src2*&
\r
911 AVX_512.basic_instruction_bcst_sae VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,opcode,4,dest,src,src2
\r
914 macro v#instr#sd? dest*,src*,src2*&
\r
915 AVX_512.basic_instruction_sae VEX_F2_0F_W0,EVEX_W1,opcode,8,dest,src,src2
\r
918 macro v#instr#ss? dest*,src*,src2*&
\r
919 AVX_512.basic_instruction_sae VEX_F3_0F_W0,EVEX_AS_VEX,opcode,4,dest,src,src2
\r
924 iterate <instr,opcode>, unpckh,15h, unpckl,14h
\r
926 macro v#instr#pd? dest*,src*,src2*&
\r
927 AVX_512.basic_instruction_bcst VEX_66_0F_W0,EVEX_W1+EVEX_VL,opcode,8,dest,src,src2
\r
930 macro v#instr#ps? dest*,src*,src2*&
\r
931 AVX_512.basic_instruction_bcst VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,opcode,4,dest,src,src2
\r
936 macro vsqrtpd? dest*,src*&
\r
937 AVX_512.single_source_instruction_bcst_er VEX_66_0F_W0,EVEX_W1+EVEX_VL,51h,8,dest,src
\r
940 macro vsqrtps? dest*,src*&
\r
941 AVX_512.single_source_instruction_bcst_er VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,51h,4,dest,src
\r
944 macro vsqrtsd? dest*,src*,src2*&
\r
945 AVX_512.basic_instruction_er VEX_F2_0F_W0,EVEX_W1,51h,8,dest,src,src2
\r
948 macro vsqrtss? dest*,src*,src2*&
\r
949 AVX_512.basic_instruction_er VEX_F3_0F_W0,EVEX_AS_VEX,51h,4,dest,src,src2
\r
952 macro vshufpd? dest*,src*,src2*,aux*&
\r
953 AVX_512.basic_instruction_bcst_imm8 VEX_66_0F_W0,EVEX_W1+EVEX_VL,0C6h,8,dest,src,src2,aux
\r
956 macro vshufps? dest*,src*,src2*,aux*&
\r
957 AVX_512.basic_instruction_bcst_imm8 VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,0C6h,4,dest,src,src2,aux
\r
960 macro vbroadcastss? dest*,src*
\r
961 AVX_512.parse_operand_k1z @dest,dest
\r
962 AVX_512.parse_operand @src,src
\r
963 if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
\r
964 if (@src.type = 'mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not 4)
\r
965 err 'invalid operand size'
\r
968 AVX_512.store_instruction @dest.size,VEX_66_0F38_W0,EVEX_AS_VEX+EVEX_VL,18h,@src,@dest.mask,@dest.rm
\r
970 err 'invalid combination of operands'
\r
974 macro vbroadcastsd? dest*,src*
\r
975 AVX_512.parse_operand_k1z @dest,dest
\r
976 AVX_512.parse_operand @src,src
\r
977 if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
\r
978 if @dest.size = 16 | (@src.type = 'mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not 8)
\r
979 err 'invalid operand size'
\r
982 AVX_512.store_instruction @dest.size,VEX_66_0F38_W0,EVEX_W1+EVEX_VL,19h,@src,@dest.mask,@dest.rm
\r
984 err 'invalid combination of operands'
\r
988 iterate <instr,opcode,opcode_g,msize>, vpbroadcastd,58h,7Ch,4, vpbroadcastq,59h,7Ch,8
\r
990 macro instr? dest*,src*
\r
991 AVX_512.parse_operand_k1z @dest,dest
\r
992 AVX_512.parse_operand @src,src
\r
993 if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
\r
994 if (@src.type='mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not msize)
\r
995 err 'invalid operand size'
\r
997 @src.memsize = msize
\r
998 AVX_512.store_instruction @dest.size,VEX_66_0F38_W0,EVEX_AS_VEX,opcode,@src,@dest.mask,@dest.rm
\r
999 else if @dest.type = 'mmreg' & @src.type = 'reg'
\r
1000 if @src.size <> msize & (@src.size <> 4 | msize = 8)
\r
1001 err 'invalid operand size'
\r
1003 @src.memsize = msize
\r
1005 AVX_512.store_instruction @dest.size,VEX_66_0F38_W1,EVEX_REQUIRED+EVEX_VL,opcode_g,@src,@dest.mask,@dest.rm
\r
1007 AVX_512.store_instruction @dest.size,VEX_66_0F38_W0,EVEX_REQUIRED+EVEX_VL,opcode_g,@src,@dest.mask,@dest.rm
\r
1010 err 'invalid combination of operands'
\r
1016 iterate <instr,vex_mpw,opcode,msize>, vbroadcastf32x4,VEX_66_0F38_W0,1Ah,16, vbroadcastf64x4,VEX_66_0F38_W1,1Bh,32, \
\r
1017 vbroadcasti32x4,VEX_66_0F38_W0,5Ah,16, vbroadcasti64x4,VEX_66_0F38_W1,5Bh,32
\r
1019 macro instr? dest*,src*
\r
1020 AVX_512.parse_operand_k1z @dest,dest
\r
1021 AVX_512.parse_operand @src,src
\r
1022 if @dest.type = 'mmreg' & @src.type = 'mem'
\r
1023 if @dest.size <= msize | @src.size and not msize
\r
1024 err 'invalid operand size'
\r
1026 @src.memsize = msize
\r
1027 AVX_512.store_instruction @dest.size,vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,@src,@dest.mask,@dest.rm
\r
1029 err 'invalid combination of operands'
\r
1035 iterate <instr,vex_mpw,opcode,unit>, vshuff32x4,VEX_66_0F3A_W0,23h,4, vshuff64x2,VEX_66_0F3A_W1,23h,4, \
\r
1036 vshufi32x4,VEX_66_0F3A_W0,43h,4, vshufi64x2,VEX_66_0F3A_W1,43h,4
\r
1038 macro instr? dest*,src*,src2*,aux*
\r
1039 AVX_512.parse_operand_k1z @dest,dest
\r
1040 AVX_512.parse_operand @src,src
\r
1041 AVX_512.parse_operand_bcst @src2,src2,unit
\r
1042 x86.parse_operand @aux,aux
\r
1043 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
1044 if @dest.size < 32 | @aux.size and not 1
\r
1045 err 'invalid operand size'
\r
1046 else if @src.size <> @dest.size | @src2.size and not @dest.size
\r
1047 err 'operand sizes do not match'
\r
1049 AVX_512.store_instruction @dest.size,vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
1051 err 'invalid combination of operands'
\r
1057 macro vextractps? dest*,src*,aux*
\r
1058 AVX_512.parse_operand @dest,dest
\r
1059 AVX_512.parse_operand @src,src
\r
1060 x86.parse_operand @aux,aux
\r
1061 if (@dest.type = 'reg' | @dest.type = 'mem') & @src.type = 'mmreg' & @aux.type = 'imm'
\r
1062 if @dest.size and not 4 | @src.size <> 16 | @aux.size and not 1
\r
1063 err 'invalid operand size'
\r
1066 AVX_512.store_instruction 16,VEX_66_0F3A_W0,EVEX_AS_VEX,17h,@dest,0,@src.rm,,1,@aux.imm
\r
1068 err 'invalid combination of operands'
\r
1072 macro vinsertps? dest*,src*,src2*,aux*
\r
1073 AVX_512.parse_operand @dest,dest
\r
1074 AVX_512.parse_operand @src,src
\r
1075 AVX_512.parse_operand @src2,src2
\r
1076 x86.parse_operand @aux,aux
\r
1077 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mmreg' | @src2.type = 'mem') & @aux.type = 'imm'
\r
1078 if @dest.size <> 16 | @src.size <> 16 | (@src2.type = 'mmreg' & @src2.size <> 16) | (@src2.type = 'mem' & @src2.size and not 4) | @aux.size and not 1
\r
1079 err 'invalid operand size'
\r
1082 AVX_512.store_instruction 16,VEX_66_0F3A_W0,EVEX_AS_VEX,21h,@src2,0,@dest.rm,@src.rm,1,@aux.imm
\r
1084 err 'invalid combination of operands'
\r
1088 iterate <instr,vex_mpw,opcode,msize>, vextractf32x4,VEX_66_0F3A_W0,19h,16, vextractf64x4,VEX_66_0F3A_W1,1Bh,32, \
\r
1089 vextracti32x4,VEX_66_0F3A_W0,39h,16, vextracti64x4,VEX_66_0F3A_W1,3Bh,32
\r
1091 macro instr? dest*,src*,aux*
\r
1092 AVX_512.parse_operand_k1z @dest,dest
\r
1093 AVX_512.parse_operand @src,src
\r
1094 x86.parse_operand @aux,aux
\r
1095 if (@dest.type = 'mmreg' | @dest.type = 'mem') & @src.type = 'mmreg' & @aux.type = 'imm'
\r
1096 if @dest.size and not msize | @src.size <= msize | @aux.size and not 1
\r
1097 err 'invalid operand size'
\r
1099 @dest.memsize = msize
\r
1100 AVX_512.store_instruction @src.size,vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,@dest,@dest.mask,@src.rm,,1,@aux.imm
\r
1102 err 'invalid combination of operands'
\r
1108 iterate <instr,vex_mpw,opcode,msize>, vinsertf32x4,VEX_66_0F3A_W0,18h,16, vinsertf64x4,VEX_66_0F3A_W1,1Ah,32, \
\r
1109 vinserti32x4,VEX_66_0F3A_W0,38h,16, vinserti64x4,VEX_66_0F3A_W1,3Ah,32
\r
1111 macro instr? dest*,src*,src2*,aux*
\r
1112 AVX_512.parse_operand_k1z @dest,dest
\r
1113 AVX_512.parse_operand @src,src
\r
1114 AVX_512.parse_operand @src2,src2
\r
1115 x86.parse_operand @aux,aux
\r
1116 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mmreg' | @src2.type = 'mem') & @aux.type = 'imm'
\r
1117 if @dest.size <= msize | @src.size <= msize | @src2.size and not msize | @aux.size and not 1
\r
1118 err 'invalid operand size'
\r
1120 @src2.memsize = msize
\r
1121 AVX_512.store_instruction @dest.size,vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
1123 err 'invalid combination of operands'
\r
1129 iterate <instr,vex_mpw,evex_mpw,unit>, vcmpps,VEX_0F_W0,VEX_0F_W0,4, vcmppd,VEX_66_0F_W0,VEX_66_0F_W1,8
\r
1131 macro instr? dest*,src*,src2*,aux*&
\r
1132 AVX_512.parse_operand_k1 @dest,dest
\r
1133 AVX_512.parse_operand @src,src
\r
1134 AVX_512.parse_operand_bcst @src2,src2,unit
\r
1135 match sae=,imm, aux
\r
1136 AVX_512.parse_sae @src2,sae
\r
1137 x86.parse_operand @aux,imm
\r
1139 x86.parse_operand @aux,aux
\r
1141 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
1142 if @aux.size and not 1
\r
1143 err 'invalid operand size'
\r
1144 else if @src.size <> @dest.size | @src2.size and not @dest.size
\r
1145 err 'operand sizes do not match'
\r
1147 AVX_512.store_instruction @src.size,vex_mpw,EVEX_FORBIDDEN,0C2h,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
1148 else if @dest.type = 'maskreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
1149 if @aux.size and not 1
\r
1150 err 'invalid operand size'
\r
1151 else if @src2.size and not @src.size
\r
1152 err 'operand sizes do not match'
\r
1154 AVX_512.store_instruction @src.size,evex_mpw,EVEX_REQUIRED+EVEX_VL,0C2h,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
1156 err 'invalid combination of operands'
\r
1162 iterate <instr,vex_mpw,evex_mpw,unit>, vcmpss,VEX_F3_0F_W0,VEX_F3_0F_W0,4, vcmpsd,VEX_F2_0F_W0,VEX_F2_0F_W1,8
\r
1164 macro instr? dest*,src*,src2*,aux*&
\r
1165 AVX_512.parse_operand_k1 @dest,dest
\r
1166 AVX_512.parse_operand @src,src
\r
1167 AVX_512.parse_operand @src2,src2
\r
1168 match sae=,imm, aux
\r
1169 AVX_512.parse_sae @src2,sae
\r
1170 x86.parse_operand @aux,imm
\r
1172 x86.parse_operand @aux,aux
\r
1174 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
1175 if @dest.size <> 16 | (@src2.type = 'mem' & @src2.size and not unit)
\r
1176 err 'invalid operand size'
\r
1177 else if @dest.size <> @src.size | (@src2.type = 'mmreg' & @src2.size <> @dest.size)
\r
1178 err 'operand sizes do not match'
\r
1180 @src2.memsize = unit
\r
1181 AVX_512.store_instruction @src.size,vex_mpw,EVEX_FORBIDDEN,0C2h,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
1182 else if @dest.type = 'maskreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
1183 if @src.size <> 16 | (@src2.type = 'mem' & @src2.size and not unit) | @aux.size and not 1
\r
1184 err 'invalid operand size'
\r
1185 else if @src2.type = 'mmreg' & @src2.size <> @src.size
\r
1186 err 'operand sizes do not match'
\r
1188 @src2.memsize = unit
\r
1189 AVX_512.store_instruction @src.size,evex_mpw,EVEX_REQUIRED,0C2h,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
1191 err 'invalid combination of operands'
\r
; Pseudo-op aliases for the VCMP* instructions: for each of the 32 predicate
; codes defined by AVX (0..1Fh), generate vcmp<cond>pd/ps/sd/ss macros that
; forward to the generic vcmppd/vcmpps/vcmpsd/vcmpss with the predicate
; supplied as the trailing immediate.
1197 iterate <cond,code>, eq,0, lt,1, le,2, unord,3, neq,4, nlt,5, nle,6, ord,7, \
\r
1198 eq_uq,8, nge,9, ngt,0Ah, false,0Bh, neq_qq,0Ch, ge,0Dh, gt,0Eh, true,0Fh, \
\r
1199 eq_os,10h, lt_oq,11h, le_oq,12h, unord_s,13h, neq_us,14h, nlt_uq,15h, nle_uq,16h, ord_s,17h, \
\r
1200 eq_us,18h, nge_uq,19h, ngt_uq,1Ah, false_os,1Bh, neq_os,1Ch, ge_oq,1Dh, gt_oq,1Eh, true_us,1Fh
\r
; src2 is greedy (&) so a trailing ",{sae}" decorator still reaches the
; underlying macro intact.
1202 macro vcmp#cond#pd? dest*,src*,src2*&
\r
1203 vcmppd dest,src,src2,code
\r
1206 macro vcmp#cond#ps? dest*,src*,src2*&
\r
1207 vcmpps dest,src,src2,code
\r
1210 macro vcmp#cond#sd? dest*,src*,src2*&
\r
1211 vcmpsd dest,src,src2,code
\r
1214 macro vcmp#cond#ss? dest*,src*,src2*&
\r
1215 vcmpss dest,src,src2,code
\r
1220 iterate <instr,vex_mpw,evex_f,opcode,unit>, vcomiss,VEX_0F_W0,EVEX_AS_VEX,2Fh,4, vcomisd,VEX_66_0F_W0,EVEX_W1,2Fh,8, vucomiss,VEX_0F_W0,EVEX_AS_VEX,2Eh,4, vucomisd,VEX_66_0F_W0,EVEX_W1,2Eh,8
\r
1222 macro instr? dest*,src_sae*&
\r
1223 AVX_512.parse_operand_k1z @dest,dest
\r
1224 match src=,sae, src_sae
\r
1225 AVX_512.parse_operand @src,src
\r
1226 AVX_512.parse_sae @src,sae
\r
1228 AVX_512.parse_operand @src,src_sae
\r
1230 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
1231 if unit & ( @dest.size <> (unit-1) and not 15 + 16 | (@src.type = 'mem' & @src.size and not unit) )
\r
1232 err 'invalid operand size'
\r
1233 else if @src.size and not @dest.size & (unit = 0 | @src.type = 'mmreg')
\r
1234 err 'operand sizes do not match'
\r
1236 @src.memsize = unit
\r
1237 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src,@dest.mask,@dest.rm
\r
1239 err 'invalid combination of operands'
\r
; Three-operand opmask logic instructions: k-dest = k-src OP k-src2.
; NOTE(review): removed knotw,44h from this list — KNOTW is a two-operand
; instruction and is defined in the two-operand group that follows; keeping
; it here generated a spurious three-operand form that the later definition
; then had to shadow.
1245 iterate <instr,opcode>, kandw,41h, kandnw,42h, korw,45h, kxnorw,46h, kxorw,47h
\r
1247 macro instr? dest*,src*,src2*
\r
1248 AVX_512.parse_operand @dest,dest
\r
1249 AVX_512.parse_operand @src,src
\r
1250 AVX_512.parse_operand @src2,src2
\r
; All three operands must be mask registers (k0-k7).
1251 if @dest.type = 'maskreg' & @src.type = 'maskreg' & @src2.type = 'maskreg'
\r
; Size 32 selects VEX.L=1, which these opcodes require for the k-register form.
1252 AVX.store_instruction 32,VEX_0F_W0,opcode,@src2,@dest.rm,@src.rm
\r
1253 err 'invalid combination of operands'
\r
1260 iterate <instr,opcode>, knotw,44h, kortestw,98h
\r
1262 macro instr? dest*,src*
\r
1263 AVX_512.parse_operand @dest,dest
\r
1264 AVX_512.parse_operand @src,src
\r
1265 if @dest.type = 'maskreg' & @src.type = 'maskreg'
\r
1266 AVX.store_instruction 16,VEX_0F_W0,opcode,@src,@dest.rm
\r
1268 err 'invalid combination of operands'
\r
1274 macro kmovw? dest*,src*
\r
1275 AVX_512.parse_operand @dest,dest
\r
1276 AVX_512.parse_operand @src,src
\r
1277 if @dest.type = 'maskreg' & (@src.type = 'maskreg' | @src.type = 'mem')
\r
1278 if @src.type = 'mem' & @src.size and not 2
\r
1279 err 'invalid operand size'
\r
1281 AVX.store_instruction 16,VEX_0F_W0,90h,@src,@dest.rm
\r
1282 else if @dest.type = 'mem' & @src.type = 'maskreg'
\r
1283 if @dest.size and not 2
\r
1284 err 'invalid operand size'
\r
1286 AVX.store_instruction 16,VEX_0F_W0,91h,@dest,@src.rm
\r
1287 else if @dest.type = 'maskreg' & @src.type = 'reg'
\r
1289 err 'invalid operand size'
\r
1291 AVX.store_instruction 16,VEX_0F_W0,92h,@src,@dest.rm
\r
1292 else if @dest.type = 'reg' & @src.type = 'maskreg'
\r
1293 if @dest.size <> 4
\r
1294 err 'invalid operand size'
\r
1296 AVX.store_instruction 16,VEX_0F_W0,93h,@src,@dest.rm
\r
1298 err 'invalid combination of operands'
\r
1302 iterate <instr,vex_mpw,opcode>, kshiftrw,VEX_66_0F3A_W1,30h, kshiftlw,VEX_66_0F3A_W1,32h
\r
1304 macro instr? dest*,src*,aux*
\r
1305 AVX_512.parse_operand @dest,dest
\r
1306 AVX_512.parse_operand @src,src
\r
1307 x86.parse_operand @aux,aux
\r
1308 if @dest.type = 'maskreg' & @src.type = 'maskreg' & @aux.type = 'imm'
\r
1309 if @aux.size and not 1
\r
1310 err 'invalid operand size'
\r
1312 AVX.store_instruction 16,vex_mpw,opcode,@src,@dest.rm,,1,@aux.imm
\r
1314 err 'invalid combination of operands'
\r
1320 macro kunpckbw? dest*,src*,src2*
\r
1321 AVX_512.parse_operand @dest,dest
\r
1322 AVX_512.parse_operand @src,src
\r
1323 AVX_512.parse_operand @src2,src2
\r
1324 if @dest.type = 'maskreg' & @src.type = 'maskreg' & @src2.type = 'maskreg'
\r
1325 AVX.store_instruction 32,VEX_66_0F_W0,4Bh,@src2,@dest.rm,@src.rm
\r
1327 err 'invalid combination of operands'
\r
1331 iterate <instr,evex_f,opcode>, vcvtdq2pd,EVEX_AS_VEX+EVEX_VL,0E6h, vcvtudq2pd,EVEX_REQUIRED+EVEX_VL,7Ah
\r
1333 macro instr? dest*,src*
\r
1334 AVX_512.parse_operand_k1z @dest,dest
\r
1335 AVX_512.parse_operand_bcst @src,src,4
\r
1336 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
1337 if (@src.type = 'mem' & @src.size and not (@dest.size shr 1)) | (@src.type = 'mmreg' & (@dest.size shr 1 - 1) and not 15 + 16 <> @src.size)
\r
1338 err 'invalid operand size'
\r
1340 if @src.memsize = 0
\r
1341 @src.memsize = @dest.size shr 1
\r
1343 AVX_512.store_instruction @dest.size,VEX_F3_0F_W0,evex_f,opcode,@src,@dest.mask,@dest.rm
\r
1345 err 'invalid combination of operands'
\r
1351 iterate <instr,vex_mpw,evex_f,opcode>, vcvtpd2dq,VEX_F2_0F_W0,EVEX_W1+EVEX_VL,0E6h, vcvtpd2ps,VEX_66_0F_W0,EVEX_W1+EVEX_VL,5Ah, vcvtpd2udq,VEX_0F_W1,EVEX_REQUIRED+EVEX_VL,79h
\r
1353 macro instr? dest*,src_er*&
\r
1354 AVX_512.parse_operand_k1z @dest,dest
\r
1355 match src=,er, src_er
\r
1356 AVX_512.parse_operand @src,src
\r
1357 AVX_512.parse_er @src,er
\r
1359 AVX_512.parse_operand_bcst @src,src_er,8
\r
1361 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
1363 if @dest.size = 16
\r
1364 err 'operand size not specified'
\r
1369 if (@src.size shr 1 - 1) and not 15 + 16 <> @dest.size | @src.size > 64
\r
1370 err 'invalid operand size'
\r
1372 AVX_512.store_instruction @src.size,vex_mpw,evex_f,opcode,@src,@dest.mask,@dest.rm
\r
1374 err 'invalid combination of operands'
\r
1380 iterate <instr,vex_mpw,evex_f,opcode>, vcvtps2pd,VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,5Ah
\r
1382 macro instr? dest*,src_sae*&
\r
1383 AVX_512.parse_operand_k1z @dest,dest
\r
1384 match src=,sae, src_sae
\r
1385 AVX_512.parse_operand @src,src
\r
1386 AVX_512.parse_sae @src,sae
\r
1388 AVX_512.parse_operand_bcst @src,src_sae,4
\r
1390 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
1391 if (@src.type = 'mem' & @src.size and not (@dest.size shr 1)) | (@src.type = 'mmreg' & (@dest.size shr 1 - 1) and not 15 + 16 <> @src.size)
\r
1392 err 'invalid operand size'
\r
1394 if @src.memsize = 0
\r
1395 @src.memsize = @dest.size shr 1
\r
1397 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode,@src,@dest.mask,@dest.rm
\r
1399 err 'invalid combination of operands'
\r
1405 iterate <instr,vex_mpw,evex_f,opcode>, vcvttpd2dq,VEX_66_0F_W0,EVEX_W1+EVEX_VL,0E6h, vcvttpd2udq,VEX_0F_W1,EVEX_REQUIRED+EVEX_VL,78h
\r
1407 macro instr? dest*,src_sae*&
\r
1408 AVX_512.parse_operand_k1z @dest,dest
\r
1409 match src=,sae, src_sae
\r
1410 AVX_512.parse_operand @src,src
\r
1411 AVX_512.parse_sae @src,sae
\r
1413 AVX_512.parse_operand_bcst @src,src_sae,8
\r
1415 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
1417 if @dest.size = 16
\r
1418 err 'operand size not specified'
\r
1423 if (@src.size shr 1 - 1) and not 15 + 16 <> @dest.size | @src.size > 64
\r
1424 err 'invalid operand size'
\r
1426 AVX_512.store_instruction @src.size,vex_mpw,evex_f,opcode,@src,@dest.mask,@dest.rm
\r
1428 err 'invalid combination of operands'
\r
1434 iterate <instr,vex_mpw,evex_f,opcode>, vcvtdq2ps,VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,5Bh, vcvtudq2ps,VEX_F2_0F_W0,EVEX_REQUIRED+EVEX_VL,7Ah, \
\r
1435 vcvtps2dq,VEX_66_0F_W0,EVEX_AS_VEX+EVEX_VL,5Bh, vcvtps2udq,VEX_0F_W0,EVEX_REQUIRED+EVEX_VL,79h
\r
1437 macro instr? dest*,src*&
\r
1438 AVX_512.single_source_instruction_bcst_er vex_mpw,evex_f,opcode,4,dest,src
\r
1444 iterate <instr,vex_mpw,evex_f,opcode>, vcvttps2dq,VEX_F3_0F_W0,EVEX_AS_VEX+EVEX_VL,5Bh, vcvttps2udq,VEX_0F_W0,EVEX_REQUIRED+EVEX_VL,78h
\r
1446 macro instr? dest*,src*&
\r
1447 AVX_512.single_source_instruction_bcst_sae vex_mpw,evex_f,opcode,4,dest,src
\r
1452 macro vcvtph2ps? dest*,src_sae*&
\r
1453 AVX_512.parse_operand_k1z @dest,dest
\r
1454 match src=,sae, src_sae
\r
1455 AVX_512.parse_operand @src,src
\r
1456 AVX_512.parse_sae @src,sae
\r
1458 AVX_512.parse_operand @src,src_sae
\r
1460 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
1461 if (@src.type = 'mem' & @src.size and not (@dest.size shr 1)) | (@src.type = 'mmreg' & (@dest.size shr 1 - 1) and not 15 + 16 <> @src.size)
\r
1462 err 'invalid operand size'
\r
1464 if @src.memsize = 0
\r
1465 @src.memsize = @dest.size shr 1
\r
1467 AVX_512.store_instruction @dest.size,VEX_66_0F38_W0,EVEX_AS_VEX+EVEX_VL,13h,@src,@dest.mask,@dest.rm
\r
1469 err 'invalid combination of operands'
\r
1473 macro vcvtps2ph? dest*,src*,aux*&
\r
1474 AVX_512.parse_operand_k1z @dest,dest
\r
1475 AVX_512.parse_operand @src,src
\r
1476 match sae=,imm, aux
\r
1477 AVX_512.parse_sae @src,sae
\r
1478 x86.parse_operand @aux,imm
\r
1480 x86.parse_operand @aux,aux
\r
1482 if (@dest.type = 'mem' | @dest.type = 'mmreg') & @src.type = 'mmreg'
\r
1483 if (@dest.type = 'mem' & @dest.size and not (@src.size shr 1)) | (@dest.type = 'mmreg' & (@src.size shr 1 - 1) and not 15 + 16 <> @dest.size)
\r
1484 err 'invalid operand size'
\r
1486 if @dest.memsize = 0
\r
1487 @dest.memsize = @src.size shr 1
\r
1489 AVX_512.store_instruction @src.size,VEX_66_0F3A_W0,EVEX_AS_VEX+EVEX_VL,1Dh,@dest,@dest.mask,@src.rm,,1,@aux.imm
\r
1491 err 'invalid combination of operands'
\r
1495 iterate <instr,vex_mp,evex_f,opcode,msize>, vcvtsd2si,VEX_F2_0F,EVEX_AS_VEX,2Dh,8, vcvtss2si,VEX_F3_0F,EVEX_AS_VEX,2Dh,4, \
\r
1496 vcvtsd2usi,VEX_F2_0F,EVEX_REQUIRED,79h,8, vcvtss2usi,VEX_F3_0F,EVEX_REQUIRED,79h,4
\r
1498 macro instr? dest*,src_er*&
\r
1499 x86.parse_operand @dest,dest
\r
1500 match src=,er, src_er
\r
1501 AVX_512.parse_operand @src,src
\r
1502 AVX_512.parse_er @src,er,16
\r
1504 AVX_512.parse_operand @src,src_er
\r
1506 if @dest.type = 'reg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
1507 if (@dest.size <> 4 & @dest.size <> 8) | (@src.type = 'mem' & @src.size and not msize) | (@src.type = 'mmreg' & @src.size <> 16)
\r
1508 err 'invalid operand size'
\r
1512 err 'instruction requires long mode'
\r
1514 AVX_512.store_instruction 16,vex_mp#_W1,evex_f,opcode,@src,0,@dest.rm
\r
1516 AVX_512.store_instruction 16,vex_mp#_W0,evex_f,opcode,@src,0,@dest.rm
\r
1519 err 'invalid combination of operands'
\r
1525 iterate <instr,vex_mp,evex_f,opcode,msize>, vcvttsd2si,VEX_F2_0F,EVEX_AS_VEX,2Ch,8, vcvttss2si,VEX_F3_0F,EVEX_AS_VEX,2Ch,4, \
\r
1526 vcvttsd2usi,VEX_F2_0F,EVEX_REQUIRED,78h,8, vcvttss2usi,VEX_F3_0F,EVEX_REQUIRED,78h,4
\r
1528 macro instr? dest*,src_sae*&
\r
1529 x86.parse_operand @dest,dest
\r
1530 match src=,sae, src_sae
\r
1531 AVX_512.parse_operand @src,src
\r
1532 AVX_512.parse_sae @src,sae
\r
1534 AVX_512.parse_operand @src,src_sae
\r
1536 if @dest.type = 'reg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
1537 if (@dest.size <> 4 & @dest.size <> 8) | (@src.type = 'mem' & @src.size and not msize) | (@src.type = 'mmreg' & @src.size <> 16)
\r
1538 err 'invalid operand size'
\r
1542 err 'instruction requires long mode'
\r
1544 AVX_512.store_instruction 16,vex_mp#_W1,evex_f,opcode,@src,0,@dest.rm
\r
1546 AVX_512.store_instruction 16,vex_mp#_W0,evex_f,opcode,@src,0,@dest.rm
\r
1549 err 'invalid combination of operands'
\r
; VCVTSD2SS: scalar double -> single conversion; supports EVEX embedded
; rounding (er), hence the _er helper. F2 0F 5A, EVEX.W1, 8-byte memory unit.
1555 macro vcvtsd2ss? dest*,src*,src2*&
\r
1556 AVX_512.basic_instruction_er VEX_F2_0F_W0,EVEX_W1,5Ah,8,dest,src,src2
\r
; VCVTSS2SD: scalar single -> double conversion; widening is exact, so only
; suppress-all-exceptions (sae) applies, hence the _sae helper. F3 0F 5A,
; EVEX encoding same as VEX (W0), 4-byte memory unit.
1559 macro vcvtss2sd? dest*,src*,src2*&
\r
1560 AVX_512.basic_instruction_sae VEX_F3_0F_W0,EVEX_AS_VEX,5Ah,4,dest,src,src2
\r
; VCVTSI2SD / VCVTUSI2SD: convert (un)signed integer GPR/memory to scalar
; double. Both share one template; vcvtusi2sd is EVEX-only (opcode 7Bh).
1563 iterate <instr,evex_f,opcode>, vcvtsi2sd,EVEX_AS_VEX,2Ah, vcvtusi2sd,EVEX_REQUIRED,7Bh
\r
; FIX(review): header previously read "macro vcvtsi2sd?", ignoring the
; iterate parameter — vcvtsi2sd got defined twice (second pass overwrote it
; with vcvtusi2sd's encoding) and vcvtusi2sd was never defined. It must use
; the iterate symbol `instr`, as every other group in this file does.
1565 macro instr? dest*,src*,src_er*&
\r
1566 AVX_512.parse_operand @dest,dest
\r
1567 AVX_512.parse_operand @src,src
\r
; Optional trailing ",{er}" selects embedded rounding; only meaningful for a
; 64-bit integer source (unit 8) — the 32-bit->double conversion is exact.
1568 match src2=,er, src_er
\r
1569 AVX_512.parse_operand @src2,src2
\r
1570 AVX_512.parse_er @src2,er,8
\r
1572 AVX_512.parse_operand @src2,src_er
\r
1574 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'reg' | @src2.type = 'mem')
\r
; FIX(review): error text had a stray leading space.
1576 err 'operand size not specified'
\r
; Integer source must be exactly 4 or 8 bytes; xmm operands must be 128-bit.
1577 else if @dest.size <> 16 | @src.size <> 16 | (@src2.size <> 4 & @src2.size <> 8)
\r
1578 err 'invalid operand size'
\r
; 64-bit integer source form is REX.W/EVEX.W1 and only encodable in long mode.
1582 err 'instruction requires long mode'
\r
1584 AVX_512.store_instruction 16,VEX_F2_0F_W1,evex_f,opcode,@src2,0,@dest.rm,@src.rm
\r
1586 AVX_512.store_instruction 16,VEX_F2_0F_W0,evex_f,opcode,@src2,0,@dest.rm,@src.rm
\r
1589 err 'invalid combination of operands'
\r
; VCVTSI2SS / VCVTUSI2SS: convert (un)signed integer GPR/memory to scalar
; single. Both share one template; vcvtusi2ss is EVEX-only (opcode 7Bh).
1595 iterate <instr,evex_f,opcode>, vcvtsi2ss,EVEX_AS_VEX,2Ah, vcvtusi2ss,EVEX_REQUIRED,7Bh
\r
; FIX(review): header previously read "macro vcvtsi2ss?", ignoring the
; iterate parameter — vcvtsi2ss got defined twice (second pass overwrote it
; with vcvtusi2ss's encoding) and vcvtusi2ss was never defined. It must use
; the iterate symbol `instr`, as every other group in this file does.
1597 macro instr? dest*,src*,src_er*&
\r
1598 AVX_512.parse_operand @dest,dest
\r
1599 AVX_512.parse_operand @src,src
\r
; Optional trailing ",{er}": rounding applies for both 32- and 64-bit integer
; sources here (conversion to single is inexact either way), so the unit is
; the parsed operand's own size rather than a fixed 8.
1600 match src2=,er, src_er
\r
1601 AVX_512.parse_operand @src2,src2
\r
1602 AVX_512.parse_er @src2,er,@src2.size
\r
1604 AVX_512.parse_operand @src2,src_er
\r
1606 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'reg' | @src2.type = 'mem')
\r
; FIX(review): error text had a stray leading space.
1608 err 'operand size not specified'
\r
; Integer source must be exactly 4 or 8 bytes; xmm operands must be 128-bit.
1609 else if @dest.size <> 16 | @src.size <> 16 | (@src2.size <> 4 & @src2.size <> 8)
\r
1610 err 'invalid operand size'
\r
; 64-bit integer source form is REX.W/EVEX.W1 and only encodable in long mode.
1614 err 'instruction requires long mode'
\r
1616 AVX_512.store_instruction 16,VEX_F3_0F_W1,evex_f,opcode,@src2,0,@dest.rm,@src.rm
\r
1618 AVX_512.store_instruction 16,VEX_F3_0F_W0,evex_f,opcode,@src2,0,@dest.rm,@src.rm
\r
1621 err 'invalid combination of operands'
\r
1627 iterate <instr,vex_mpw,evex_f,opcode_rm,opcode_mr>, vmovapd,VEX_66_0F_W0,EVEX_W1+EVEX_VL,28h,29h, vmovaps,VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,28h,29h, \
\r
1628 vmovupd,VEX_66_0F_W0,EVEX_W1+EVEX_VL,10h,11h, vmovups,VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,10h,11h, \
\r
1629 vmovdqa32,VEX_66_0F_W0,EVEX_REQUIRED+EVEX_VL,6Fh,7Fh, vmovdqa64,VEX_66_0F_W1,EVEX_REQUIRED+EVEX_VL,6Fh,7Fh, \
\r
1630 vmovdqu32,VEX_F3_0F_W0,EVEX_REQUIRED+EVEX_VL,6Fh,7Fh, vmovdqu64,VEX_F3_0F_W1,EVEX_REQUIRED+EVEX_VL,6Fh,7Fh
\r
1632 macro instr? dest*,src*
\r
1633 AVX_512.parse_operand_k1z @dest,dest
\r
1634 AVX_512.parse_operand @src,src
\r
1635 if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
\r
1636 if @src.size and not @dest.size
\r
1637 err 'operand sizes do not match'
\r
1639 AVX_512.store_instruction @dest.size,vex_mpw,evex_f,opcode_rm,@src,@dest.mask,@dest.rm
\r
1640 else if @dest.type = 'mem' & @src.type = 'mmreg'
\r
1641 if @dest.size and not @src.size
\r
1642 err 'operand sizes do not match'
\r
1644 AVX_512.store_instruction @src.size,vex_mpw,evex_f,opcode_mr,@dest,@dest.mask,@src.rm
\r
1646 err 'invalid combination of operands'
\r
1652 macro vmovd? dest*,src*
\r
1653 AVX_512.parse_operand @dest,dest
\r
1654 AVX_512.parse_operand @src,src
\r
1655 if @dest.type = 'mmreg' & (@src.type = 'reg' | @src.type = 'mem')
\r
1656 if @dest.size <> 16 | @src.size and not 4
\r
1657 err 'invalid operand size'
\r
1660 AVX_512.store_instruction 16,VEX_66_0F_W0,EVEX_AS_VEX,6Eh,@src,0,@dest.rm
\r
1661 else if (@dest.type = 'reg' | @dest.type = 'mem') & @src.type = 'mmreg'
\r
1662 if @dest.size and not 4 | @src.size <> 16
\r
1663 err 'operand sizes do not match'
\r
1666 AVX_512.store_instruction 16,VEX_66_0F_W0,EVEX_AS_VEX,7Eh,@dest,0,@src.rm
\r
1668 err 'invalid combination of operands'
\r
; VMOVQ: 64-bit move between xmm, memory and (in long mode) 64-bit GPRs.
; Four forms: xmm<-xmm/m64 (F3 0F 7E), m64<-xmm (66 0F D6),
; xmm<-r64 (66 0F 6E W1), r64<-xmm (66 0F 7E W1).
1672 macro vmovq? dest*,src*
\r
1673 AVX_512.parse_operand @dest,dest
\r
1674 AVX_512.parse_operand @src,src
\r
1675 if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
\r
; FIX(review): the memory-size test used bitwise "and" as the connective
; ("@src.type = 'mem' and @src.size and not 8"), which compares @src.type
; against a bitwise expression instead of guarding on the operand type.
; The logical connective "&" is required, matching the sibling clauses.
1676 if @dest.size <> 16 | (@src.type = 'mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not 8)
\r
1677 err 'invalid operand size'
\r
1680 AVX_512.store_instruction 16,VEX_F3_0F_W0,EVEX_W1,7Eh,@src,0,@dest.rm
\r
1681 else if @dest.type = 'mem' & @src.type = 'mmreg'
\r
; Store form: memory operand must be 8 bytes (or unsized), source xmm 128-bit.
1682 if @dest.size and not 8 | @src.size <> 16
\r
1683 err 'invalid operand size'
\r
1686 AVX_512.store_instruction 16,VEX_66_0F_W0,EVEX_W1,0D6h,@dest,0,@src.rm
\r
1687 else if @dest.type = 'mmreg' & @src.type = 'reg'
\r
; GPR source must be a 64-bit register — hence the long-mode requirement.
1688 if @dest.size <> 16 | @src.size <> 8
\r
1689 err 'invalid operand size'
\r
1692 err 'instruction requires long mode'
\r
1694 AVX_512.store_instruction 16,VEX_66_0F_W1,EVEX_W1,6Eh,@src,0,@dest.rm
\r
1695 else if @dest.type = 'reg' & @src.type = 'mmreg'
\r
1696 if @dest.size <> 8 | @src.size <> 16
\r
1697 err 'invalid operand size'
\r
1700 err 'instruction requires long mode'
\r
1702 AVX_512.store_instruction 16,VEX_66_0F_W1,EVEX_W1,7Eh,@dest,0,@src.rm
\r
1704 err 'invalid combination of operands'
\r
1708 macro vmovddup? dest*,src*
\r
1709 AVX_512.parse_operand_k1z @dest,dest
\r
1710 AVX_512.parse_operand @src,src
\r
1711 if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
\r
1712 if @src.type = 'mem' & @dest.size = 16
\r
1713 if @src.size and not 8
\r
1714 err 'invalid operand size'
\r
1718 if @src.size and not @dest.size
\r
1719 err 'operand sizes do not match'
\r
1721 @src.memsize = @dest.size
\r
1723 AVX_512.store_instruction @dest.size,VEX_F2_0F_W0,EVEX_W1+EVEX_VL,12h,@src,@dest.mask,@dest.rm
\r
1725 err 'invalid combination of operands'
\r
1729 iterate <instr,opcode>, vmovhlps,12h, vmovlhps,16h
\r
1731 macro instr? dest*,src*,src2*
\r
1732 AVX_512.parse_operand @dest,dest
\r
1733 AVX_512.parse_operand @src,src
\r
1734 AVX_512.parse_operand @src2,src2
\r
1735 if @dest.type = 'mmreg' & @src.type = 'mmreg' & @src2.type = 'mmreg'
\r
1736 if @dest.size <> 16
\r
1737 err 'invalid operand size'
\r
1738 else if @src.size <> @dest.size | @src2.size <> @dest.size
\r
1739 err 'operand sizes do not match'
\r
1741 AVX_512.store_instruction 16,VEX_0F_W0,EVEX_AS_VEX,opcode,@src2,0,@dest.rm,@src.rm
\r
1743 err 'invalid combination of operands'
\r
1749 iterate <instr,vex_mpw,evex_f,opcode>, vmovhpd,VEX_66_0F_W0,EVEX_W1,16h, vmovhps,VEX_0F_W0,EVEX_AS_VEX,16h, vmovlpd,VEX_66_0F_W0,EVEX_W1,12h, vmovlps,VEX_0F_W0,EVEX_AS_VEX,12h
\r
1751 macro instr? dest*,src*,src2
\r
1752 AVX_512.parse_operand @dest,dest
\r
1753 AVX_512.parse_operand @src,src
\r
1755 if @dest.type = 'mem' & @src.type = 'mmreg'
\r
1756 if @dest.size and not 8 | @src.size <> 16
\r
1757 err 'invalid operand size'
\r
1760 AVX_512.store_instruction 16,vex_mpw,evex_f,opcode+1,@dest,0,@src.rm
\r
1762 err 'invalid combination of operands'
\r
1765 AVX_512.parse_operand @src2,src2
\r
1766 if @dest.type = 'mmreg' & @src.type = 'mmreg' & @src2.type = 'mem'
\r
1767 if @dest.size <> 16 | @src.size <> 16 | @src2.size and not 8
\r
1768 err 'invalid operand size'
\r
1771 AVX_512.store_instruction 16,vex_mpw,evex_f,opcode,@src2,0,@dest.rm,@src.rm
\r
1773 err 'invalid combination of operands'
\r
1780 iterate <instr,vex_mpw,evex_f,opcode>, vmovntdq,VEX_66_0F_W0,EVEX_AS_VEX+EVEX_VL,0E7h, vmovntpd,VEX_66_0F_W0,EVEX_W1+EVEX_VL,2Bh, vmovntps,VEX_0F_W0,EVEX_AS_VEX+EVEX_VL,2Bh
\r
1782 macro instr? dest*,src*
\r
1783 AVX_512.parse_operand @dest,dest
\r
1784 AVX_512.parse_operand @src,src
\r
1785 if @dest.type = 'mem' & @src.type = 'mmreg'
\r
1786 if @dest.size and not @src.size
\r
1787 err 'operand sizes do not match'
\r
1789 AVX_512.store_instruction @src.size,vex_mpw,evex_f,opcode,@dest,0,@src.rm
\r
1791 err 'invalid combination of operands'
\r
1797 macro vmovntdqa? dest*,src*
\r
1798 AVX_512.parse_operand @dest,dest
\r
1799 AVX_512.parse_operand @src,src
\r
1800 if @dest.type = 'mmreg' & @src.type = 'mem'
\r
1801 if @src.size and not @dest.size
\r
1802 err 'operand sizes do not match'
\r
1804 AVX_512.store_instruction @dest.size,VEX_66_0F38_W0,EVEX_AS_VEX+EVEX_VL,2Ah,@src,0,@dest.rm
\r
1806 err 'invalid combination of operands'
\r
1810 iterate <instr,vex_mpw,evex_f,msize>, vmovsd,VEX_F2_0F_W0,EVEX_W1,8, vmovss,VEX_F3_0F_W0,EVEX_AS_VEX,4
\r
1812 macro instr? dest*,src*,src2
\r
1813 AVX_512.parse_operand_k1z @dest,dest
\r
1814 AVX_512.parse_operand @src,src
\r
1816 if @dest.type = 'mmreg' & @src.type = 'mem'
\r
1817 if @dest.size <> 16 | @src.size and not msize
\r
1818 err 'invalid operand size'
\r
1820 @src.memsize = msize
\r
1821 AVX_512.store_instruction 16,vex_mpw,evex_f,10h,@src,@dest.mask,@dest.rm
\r
1822 else if @dest.type = 'mem' & @src.type = 'mmreg'
\r
1823 if @dest.size and not msize | @src.size <> 16
\r
1824 err 'invalid operand size'
\r
1826 @dest.memsize = msize
\r
1827 AVX_512.store_instruction 16,vex_mpw,evex_f,11h,@dest,@dest.mask,@src.rm
\r
1829 err 'invalid combination of operands'
\r
1832 AVX_512.parse_operand @src2,src2
\r
1833 if @dest.type = 'mmreg' & @src.type = 'mmreg' & @src2.type = 'mmreg'
\r
1834 if @dest.size <> 16 | @src.size <> 16 | @src2.size <> 16
\r
1835 err 'invalid operand size'
\r
1837 AVX_512.store_instruction 16,vex_mpw,evex_f,10h,@src2,@dest.mask,@dest.rm,@src.rm
\r
1839 err 'invalid combination of operands'
\r
; VMOVSHDUP: duplicate odd-indexed single-precision elements.
; F3 0F 16; EVEX encoding mirrors VEX, all vector lengths (EVEX_VL).
; Trailing 0 = no fixed memory unit (full-vector memory operand).
1846 macro vmovshdup? dest*,src*
\r
1847 AVX_512.single_source_instruction VEX_F3_0F_W0,EVEX_AS_VEX+EVEX_VL,16h,0,dest,src
\r
; VMOVSLDUP: duplicate even-indexed single-precision elements (F3 0F 12).
1850 macro vmovsldup? dest*,src*
\r
1851 AVX_512.single_source_instruction VEX_F3_0F_W0,EVEX_AS_VEX+EVEX_VL,12h,0,dest,src
\r
1854 iterate <instr,unit,evex_f,opcode_rrm,opcode_rri>, vpermilps,4,EVEX_AS_VEX+EVEX_VL,0Ch,4, vpermilpd,8,EVEX_W1+EVEX_VL,0Dh,5
\r
1856 macro instr? dest*,src*,src2*
\r
1857 AVX_512.parse_operand_k1z @dest,dest
\r
1858 AVX_512.parse_operand_bcst @src,src,unit
\r
1859 AVX_512.parse_operand_bcst @src2,src2,unit
\r
1860 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
1861 if @src.size <> @dest.size | @src2.size and not @dest.size
\r
1862 err 'operand sizes do not match'
\r
1864 AVX_512.store_instruction @dest.size,VEX_66_0F38_W0,evex_f,opcode_rrm,@src2,@dest.mask,@dest.rm,@src.rm
\r
1865 else if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg') & @src2.type = 'imm'
\r
1866 if @src2.size and not 1
\r
1867 err 'invalid operand size'
\r
1868 else if @src.size and not @dest.size
\r
1869 err 'operand sizes do not match'
\r
1871 AVX_512.store_instruction @dest.size,VEX_66_0F3A_W0,evex_f,opcode_rri,@src,@dest.mask,@dest.rm,,1,@src2.imm
\r
1873 err 'invalid combination of operands'
\r
1879 iterate <instr,opcode>, vpaddd,0FEh, vpsubd,0FAh, vpunpckhdq,6Ah, vpunpckldq,62h
\r
1881 macro instr? dest*,src*,src2*
\r
1882 AVX_512.basic_instruction_bcst VEX_66_0F_W0,EVEX_AS_VEX+EVEX_VL,opcode,4,dest,src,src2
\r
1887 iterate <instr,opcode>, vpaddq,0D4h, vpmuludq,0F4h, vpsubq,0FBh, vpunpckhqdq,6Dh, vpunpcklqdq,6Ch
\r
1889 macro instr? dest*,src*,src2*
\r
1890 AVX_512.basic_instruction_bcst VEX_66_0F_W0,EVEX_W1+EVEX_VL,opcode,8,dest,src,src2
\r
1895 iterate <instr,opcode>, vpandd,0DBh, vpandnd,0DFh, vpord,0EBh, vpxord,0EFh
\r
1897 macro instr? dest*,src*,src2*
\r
1898 AVX_512.basic_instruction_bcst VEX_66_0F_W0,EVEX_REQUIRED+EVEX_VL,opcode,4,dest,src,src2
\r
1903 iterate <instr,opcode>, vpandq,0DBh, vpandnq,0DFh, vporq,0EBh, vpxorq,0EFh
\r
1905 macro instr? dest*,src*,src2*
\r
1906 AVX_512.basic_instruction_bcst VEX_66_0F_W1,EVEX_REQUIRED+EVEX_VL,opcode,8,dest,src,src2
\r
1911 iterate <instr,opcode>, vpmaxsd,3Dh, vpmaxud,3Fh, vpminsd,39h, vpminud,3Bh, vpmulld,40h
\r
1913 macro instr? dest*,src*,src2*
\r
1914 AVX_512.basic_instruction_bcst VEX_66_0F38_W0,EVEX_AS_VEX+EVEX_VL,opcode,4,dest,src,src2
\r
1919 iterate <instr,opcode>, vpmuldq,28h
\r
1921 macro instr? dest*,src*,src2*
\r
1922 AVX_512.basic_instruction_bcst VEX_66_0F38_W0,EVEX_W1+EVEX_VL,opcode,8,dest,src,src2
\r
1927 iterate <instr,opcode>, vpmuldq,28h, vpmaxsq,3Dh, vpmaxuq,3Fh, vpminsq,39h, vpminuq,3Bh, vpmullq,40h
\r
1929 macro instr? dest*,src*,src2*
\r
1930 AVX_512.basic_instruction_bcst VEX_66_0F38_W1,EVEX_REQUIRED+EVEX_VL,opcode,8,dest,src,src2
\r
1935 iterate <instr,opcode>, vpabsd,1Eh
\r
1937 macro instr? dest*,src*
\r
1938 AVX_512.single_source_instruction_bcst VEX_66_0F38_W0,EVEX_AS_VEX+EVEX_VL,opcode,4,dest,src
\r
1943 iterate <instr,opcode>, vpabsq,1Fh
\r
1945 macro instr? dest*,src*
\r
1946 AVX_512.single_source_instruction_bcst VEX_66_0F38_W1,EVEX_REQUIRED+EVEX_VL,opcode,8,dest,src
\r
1951 iterate <instr,vex_mpw>, vpshufd,VEX_66_0F_W0
\r
1953 macro instr? dest*,src*,aux*
\r
1954 AVX_512.parse_operand_k1z @dest,dest
\r
1955 AVX_512.parse_operand @src,src
\r
1956 x86.parse_operand @aux,aux
\r
1957 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg') & @aux.type = 'imm'
\r
1958 if @aux.size and not 1
\r
1959 err 'invalid operand size'
\r
1960 else if @src.size and not @dest.size
\r
1961 err 'operand sizes do not match'
\r
1963 AVX_512.store_instruction @dest.size,vex_mpw,EVEX_AS_VEX+EVEX_VL,70h,@src,@dest.mask,@dest.rm,,1,@aux.imm
\r
1965 err 'invalid combination of operands'
\r
1971 iterate <instr,opcode>, vpsllvd,47h, vpsrlvd,45h, vpsravd,46h
\r
1973 macro instr? dest*,src*,src2*
\r
1974 AVX_512.basic_instruction_bcst VEX_66_0F38_W0,EVEX_AS_VEX+EVEX_VL,opcode,4,dest,src,src2
\r
1979 iterate <instr,evex_f,opcode>, vpsllvq,EVEX_AS_VEX+EVEX_VL,47h, vpsrlvq,EVEX_AS_VEX+EVEX_VL,45h, vpsravq,EVEX_REQUIRED+EVEX_VL,46h
\r
1981 macro instr? dest*,src*,src2*
\r
1982 AVX_512.basic_instruction_bcst VEX_66_0F38_W1,evex_f,opcode,8,dest,src,src2
\r
1987 iterate <instr,unit,vex_mpw,opcode>, vpermi2d,4,VEX_66_0F38_W0,76h, vpermi2q,8,VEX_66_0F38_W1,76h, \
\r
1988 vpermt2d,4,VEX_66_0F38_W0,7Eh, vpermt2q,8,VEX_66_0F38_W1,7Eh, \
\r
1989 vprorvd,4,VEX_66_0F38_W0,14h, vprorvq,8,VEX_66_0F38_W1,14h, \
\r
1990 vprolvd,4,VEX_66_0F38_W0,15h, vprolvq,8,VEX_66_0F38_W1,15h
\r
1992 macro instr? dest*,src*,src2*
\r
1993 AVX_512.basic_instruction_bcst vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,unit,dest,src,src2
\r
1998 iterate <instr,unit,vex_mpw,postbyte>, vprord,4,VEX_66_0F_W0,0, vprorq,8,VEX_66_0F_W1,0, vprold,4,VEX_66_0F_W0,1, vprolq,8,VEX_66_0F_W1,1
\r
2000 macro instr? dest*,src*,aux*
\r
2001 AVX_512.parse_operand_k1z @dest,dest
\r
2002 AVX_512.parse_operand_bcst @src,src,unit
\r
2003 x86.parse_operand @aux,aux
\r
2004 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg') & @aux.type = 'imm'
\r
2005 if @src.size and not @dest.size | @aux.size and not 1
\r
2006 err 'operand sizes do not match'
\r
2008 AVX_512.store_instruction @dest.size,vex_mpw,EVEX_REQUIRED+EVEX_VL,72h,@src,@dest.mask,postbyte,@dest.rm,1,@aux.imm
\r
2010 err 'invalid combination of operands'
\r
2016 iterate <instr,opcode_rrm,opcode,postbyte>, vpslld,0F2h,72h,6, vpsrad,0E2h,72h,4, vpsrld,0D2h,72h,2
\r
2018 macro instr? dest*,src*,src2*
\r
2019 AVX_512.parse_operand_k1z @dest,dest
\r
2020 AVX_512.parse_operand @src2,src2
\r
2021 if @src2.type = 'imm'
\r
2022 AVX_512.parse_operand_bcst @src,src,4
\r
2024 AVX_512.parse_operand @src,src
\r
2026 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
2027 @src2.memsize = 16
\r
2028 if @src2.size and not @src2.memsize
\r
2029 err 'invalid operand size'
\r
2030 else if @src.size <> @dest.size
\r
2031 err 'operand sizes do not match'
\r
2033 AVX_512.store_instruction @dest.size,VEX_66_0F_W0,EVEX_AS_VEX,opcode_rrm,@src2,@dest.mask,@dest.rm,@src.rm
\r
2034 else if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem') & @src2.type = 'imm'
\r
2035 if @src2.size and not 1
\r
2036 err 'invalid operand size'
\r
2037 else if @src.size <> @dest.size
\r
2038 err 'operand sizes do not match'
\r
2040 if @src.type = 'mem'
\r
2041 AVX_512.store_instruction @dest.size,VEX_66_0F_W0,EVEX_REQUIRED+EVEX_VL,opcode,@src,@dest.mask,postbyte,@dest.rm,1,@src2.imm
\r
2043 AVX_512.store_instruction @dest.size,VEX_66_0F_W0,EVEX_AS_VEX+EVEX_VL,opcode,@src,@dest.mask,postbyte,@dest.rm,1,@src2.imm
\r
2046 err 'invalid combination of operands'
\r
2052 iterate <instr,opcode_rrm,opcode,postbyte>, vpsllq,0F3h,73h,6, vpsraq,0E2h,72h,4, vpsrlq,0D3h,73h,2
\r
2054 macro instr? dest*,src*,src2*
\r
2055 AVX_512.parse_operand_k1z @dest,dest
\r
2056 AVX_512.parse_operand @src2,src2
\r
2057 if @src2.type = 'imm'
\r
2058 AVX_512.parse_operand_bcst @src,src,8
\r
2060 AVX_512.parse_operand @src,src
\r
2062 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
2063 @src2.memsize = 16
\r
2064 if @src2.size and not @src2.memsize
\r
2065 err 'invalid operand size'
\r
2066 else if @src.size <> @dest.size
\r
2067 err 'operand sizes do not match'
\r
2069 if `instr = 'vpsraq'
\r
2070 AVX_512.store_instruction @dest.size,VEX_66_0F_W1,EVEX_REQUIRED+EVEX_VL,opcode_rrm,@src2,@dest.mask,@dest.rm,@src.rm
\r
2072 AVX_512.store_instruction @dest.size,VEX_66_0F_W0,EVEX_W1+EVEX_VL,opcode_rrm,@src2,@dest.mask,@dest.rm,@src.rm
\r
2074 else if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem') & @src2.type = 'imm'
\r
2075 if @src2.size and not 1
\r
2076 err 'invalid operand size'
\r
2077 else if @src.size and not @dest.size
\r
2078 err 'operand sizes do not match'
\r
2080 if @src.type = 'mem' | `instr = 'vpsraq'
\r
2081 AVX_512.store_instruction @dest.size,VEX_66_0F_W1,EVEX_REQUIRED+EVEX_VL,opcode,@src,@dest.mask,postbyte,@dest.rm,1,@src2.imm
\r
2083 AVX_512.store_instruction @dest.size,VEX_66_0F_W0,EVEX_W1+EVEX_VL,opcode,@src,@dest.mask,postbyte,@dest.rm,1,@src2.imm
\r
2086 err 'invalid combination of operands'
\r
; vpcmpeqd/vpcmpgtd — packed dword equality / greater-than compares (0F map,
; opcodes 76h/66h).  Two destination forms are accepted:
;   k-reg{k1} dest  -> AVX-512 form, EVEX encoding required
;   mmreg dest      -> legacy AVX semantics, EVEX explicitly forbidden (VEX emitted)
2092 iterate <instr,opcode>, vpcmpeqd,76h, vpcmpgtd,66h
\r
2094 macro instr? dest*,src*,src2*
\r
; dest may carry an optional {k1} write-mask; src2 may be memory with a
; 4-byte-unit {1toN} broadcast specifier
2095 AVX_512.parse_operand_k1 @dest,dest
\r
2096 AVX_512.parse_operand @src,src
\r
2097 AVX_512.parse_operand_bcst @src2,src2,4
\r
2098 if @dest.type = 'maskreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
; memory operand size (when explicit) must match the vector source size
2099 if @src2.size and not @src.size
\r
2100 err 'operand sizes do not match'
\r
2102 AVX_512.store_instruction @src.size,VEX_66_0F_W0,EVEX_REQUIRED+EVEX_VL,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
2103 else if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
2104 if @src.size <> @dest.size | @src2.size and not @dest.size
\r
2105 err 'operand sizes do not match'
\r
; mmreg destination keeps pre-AVX-512 behaviour: never promote to EVEX
2107 AVX_512.store_instruction @src.size,VEX_66_0F_W0,EVEX_FORBIDDEN,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
2109 err 'invalid combination of operands'
\r
\r
; vpcmpeqq/vpcmpgtq — packed qword compares (0F38 map, opcodes 29h/37h).
; Same dual-form scheme as the dword compares above, but W1 and an 8-byte
; broadcast unit for the qword element size.
2115 iterate <instr,opcode>, vpcmpeqq,29h, vpcmpgtq,37h
\r
2117 macro instr? dest*,src*,src2*
\r
2118 AVX_512.parse_operand_k1 @dest,dest
\r
2119 AVX_512.parse_operand @src,src
\r
; src2 may use {1toN} broadcast with 8-byte element unit
2120 AVX_512.parse_operand_bcst @src2,src2,8
\r
2121 if @dest.type = 'maskreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
2122 if @src2.size and not @src.size
\r
2123 err 'operand sizes do not match'
\r
; k-reg destination: EVEX-only AVX-512 form
2125 AVX_512.store_instruction @src.size,VEX_66_0F38_W1,EVEX_REQUIRED+EVEX_VL,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
2126 else if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
2127 if @src.size <> @dest.size | @src2.size and not @dest.size
\r
2128 err 'operand sizes do not match'
\r
; mmreg destination: legacy VEX encoding (note W0 here, as in the AVX form)
2130 AVX_512.store_instruction @src.size,VEX_66_0F38_W0,EVEX_FORBIDDEN,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
2132 err 'invalid combination of operands'
\r
\r
; vptestnmd/vptestnmq/vptestmd/vptestmq — logical AND test into a mask
; register (opcode 27h in both the F3 and 66 prefixed 0F38 maps).
; These exist only in EVEX form, so the destination must be a k-reg.
2138 iterate <instr,unit,vex_mpw,opcode>, vptestnmd,4,VEX_F3_0F38_W0,27h, vptestnmq,8,VEX_F3_0F38_W1,27h, vptestmd,4,VEX_66_0F38_W0,27h, vptestmq,8,VEX_66_0F38_W1,27h
\r
2140 macro instr? dest*,src*,src2*
\r
2141 AVX_512.parse_operand_k1 @dest,dest
\r
2142 AVX_512.parse_operand @src,src
\r
; broadcast unit is 4 for the dword forms, 8 for the qword forms
2143 AVX_512.parse_operand_bcst @src2,src2,unit
\r
2144 if @dest.type = 'maskreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
2145 if @src2.size and not @src.size
\r
2146 err 'operand sizes do not match'
\r
2148 AVX_512.store_instruction @src.size,vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
2150 err 'invalid combination of operands'
\r
\r
; vpcmpd/vpcmpud/vpcmpq/vpcmpuq — signed/unsigned integer compares with an
; immediate comparison predicate (0F3A map, 1Fh signed / 1Eh unsigned).
; EVEX-only: destination must be a mask register.
2156 iterate <instr,unit,vex_mpw,opcode>, vpcmpd,4,VEX_66_0F3A_W0,1Fh, vpcmpud,4,VEX_66_0F3A_W0,1Eh, vpcmpq,8,VEX_66_0F3A_W1,1Fh, vpcmpuq,8,VEX_66_0F3A_W1,1Eh
\r
; aux is the imm8 predicate operand
2158 macro instr? dest*,src*,src2*,aux*
\r
2159 AVX_512.parse_operand_k1 @dest,dest
\r
2160 AVX_512.parse_operand @src,src
\r
2161 AVX_512.parse_operand_bcst @src2,src2,unit
\r
2162 x86.parse_operand @aux,aux
\r
2163 if @dest.type = 'maskreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'imm'
\r
; the predicate immediate must fit in a single byte
2164 if @src2.size and not @src.size | @aux.size and not 1
\r
2165 err 'operand sizes do not match'
\r
; trailing ",1,@aux.imm" appends the one-byte immediate to the encoding
2167 AVX_512.store_instruction @src.size,vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,@src2,@dest.mask,@dest.rm,@src.rm,1,@aux.imm
\r
2169 err 'invalid combination of operands'
\r
\r
; vpmovsx*/vpmovzx* — sign/zero extending widening moves (0F38 map).
; msize is the number of source bytes consumed per 16 bytes (xmm unit) of
; destination, so the required memory size scales with the destination width.
2175 iterate <instr,opcode,msize>, vpmovsxbd,21h,4, vpmovsxbq,22h,2, vpmovsxwd,23h,8, vpmovsxwq,24h,4, vpmovsxdq,25h,8, \
\r
2176 vpmovzxbd,31h,4, vpmovzxbq,32h,2, vpmovzxwd,33h,8, vpmovzxwq,34h,4, vpmovzxdq,35h,8
\r
2178 macro instr? dest*,src*
\r
2179 AVX_512.parse_operand_k1z @dest,dest
\r
2180 AVX_512.parse_operand @src,src
\r
2181 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
; bytes actually read from the source = msize per xmm-sized slice of dest
2182 @src.memsize = msize * (@dest.size shr 4)
\r
; for a register source, (@src.memsize-1) and not 15 + 16 rounds memsize up
; to the next 16-byte register size; a memory source must match memsize exactly
2183 if (@src.type = 'mmreg' & @src.size <> (@src.memsize-1) and not 15 + 16) | (@src.type = 'mem' & @src.size and not @src.memsize)
\r
2184 err 'invalid operand size'
\r
2186 AVX_512.store_instruction @dest.size,VEX_66_0F38_W0,EVEX_AS_VEX+EVEX_VL,opcode,@src,@dest.mask,@dest.rm
\r
2188 err 'invalid combination of operands'
\r
\r
; vpermq/vpermpd (immediate-control forms, 0F3A map, opcodes 00h/01h) —
; permute qwords/doubles selected by an imm8.  Only 256/512-bit vectors are
; legal, hence the @dest.size < 32 rejection.
2194 iterate <instr,opcode>, vpermq,0, vpermpd,1
\r
2196 macro instr? dest*,src*,aux*
\r
2197 AVX_512.parse_operand_k1z @dest,dest
\r
; 8-byte broadcast unit for the qword elements
2198 AVX_512.parse_operand_bcst @src,src,8
\r
2199 x86.parse_operand @aux,aux
\r
2200 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg') & @aux.type = 'imm'
\r
; no xmm form exists for these instructions; control must be one byte
2201 if @dest.size < 32 | @aux.size and not 1
\r
2202 err 'invalid operand size'
\r
2203 else if @src.size and not @dest.size
\r
2204 err 'operand sizes do not match'
\r
; empty vvvv slot (,,) — the NDS register field is unused in this form
2206 AVX_512.store_instruction @dest.size,VEX_66_0F3A_W1,EVEX_AS_VEX+EVEX_VL,opcode,@src,@dest.mask,@dest.rm,,1,@aux.imm
\r
2208 err 'invalid combination of operands'
\r
\r
; vpermd/vpermps (vector-control forms, 0F38 map, 36h/16h) — full dword
; permute with the control vector in the vvvv-encoded source.  256/512-bit
; vectors only.
2214 iterate <instr,opcode>, vpermd,36h, vpermps,16h
\r
2216 macro instr? dest*,src*,src2*
\r
2217 AVX_512.parse_operand_k1z @dest,dest
\r
2218 AVX_512.parse_operand @src,src
\r
2219 AVX_512.parse_operand_bcst @src2,src2,4
\r
2220 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
; no 128-bit form of these instructions
2221 if @dest.size < 32
\r
2222 err 'invalid operand size'
\r
2223 else if @src.size <> @dest.size | @src2.size and not @dest.size
\r
2224 err 'operand sizes do not match'
\r
2226 AVX_512.store_instruction @dest.size,VEX_66_0F38_W0,EVEX_AS_VEX+EVEX_VL,opcode,@src2,@dest.mask,@dest.rm,@src.rm
\r
2228 err 'invalid combination of operands'
\r
\r
; FMA instruction families crossed with the three operand orders (132/213/231).
; The final opcode is hcode+lcode for packed forms and hcode+lcode+1 for the
; scalar forms, per the 0F38 opcode map (e.g. vfmadd132pd = 90h+8 = 98h,
; vfmaddsub132pd = 90h+6 = 96h).
; FIX: the third pair was a duplicated 'vfmaddsub,8'.  lcode 8 belongs to
; vfmadd; as written the vfmadd* mnemonics were never defined, while the
; second vfmaddsub pass shadowed the first with vfmadd's opcodes.
2234 iterate <instr,lcode>, vfmaddsub,6, vfmsubadd,7, vfmadd,8, vfmsub,0Ah, vfnmadd,0Ch, vfnmsub,0Eh
\r
2236 iterate <order,hcode>, 132,90h, 213,0A0h, 231,0B0h
\r
; packed double: W1, 8-byte broadcast unit, embedded rounding allowed
2238 macro instr#order#pd? dest*,src*,src2*&
\r
2239 AVX_512.basic_instruction_bcst_er VEX_66_0F38_W1,EVEX_AS_VEX+EVEX_VL,hcode+lcode,8,dest,src,src2
\r
; packed single: W0, 4-byte broadcast unit
2242 macro instr#order#ps? dest*,src*,src2*&
\r
2243 AVX_512.basic_instruction_bcst_er VEX_66_0F38_W0,EVEX_AS_VEX+EVEX_VL,hcode+lcode,4,dest,src,src2
\r
; scalar double: odd opcode (hcode+lcode+1), no vector-length variants
2248 macro instr#order#sd? dest*,src*,src2*&
\r
2249 AVX_512.basic_instruction_er VEX_66_0F38_W1,EVEX_AS_VEX,hcode+lcode+1,8,dest,src,src2
\r
; scalar single
2252 macro instr#order#ss? dest*,src*,src2*&
\r
2253 AVX_512.basic_instruction_er VEX_66_0F38_W0,EVEX_AS_VEX,hcode+lcode+1,4,dest,src,src2
\r
\r
; valignd (0F3A/03h) and vpternlogd/vpternlogq (0F3A/25h) — EVEX-only
; three-source instructions with an imm8 control byte and broadcast support.
2262 iterate <instr,unit,vex_mpw,opcode>, valignd,4,VEX_66_0F3A_W0,3, vpternlogd,4,VEX_66_0F3A_W0,25h, vpternlogq,8,VEX_66_0F3A_W1,25h
\r
2264 macro instr? dest*,src*,src2*,aux*&
\r
2265 AVX_512.basic_instruction_bcst_imm8 vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,unit,dest,src,src2,aux
\r
\r
; valignq — qword variant of valignd (same 0F3A/03h opcode, W1, 8-byte unit)
2270 iterate <instr,opcode>, valignq,3
\r
2272 macro instr? dest*,src*,src2*,aux*&
\r
2273 AVX_512.basic_instruction_bcst_imm8 VEX_66_0F3A_W1,EVEX_REQUIRED+EVEX_VL,opcode,8,dest,src,src2,aux
\r
\r
; vblendmps/vpblendmd (W0, 4-byte unit) and vblendmpd/vpblendmq (W1, 8-byte
; unit) — mask-driven blends, EVEX-only (0F38 map, 65h float / 64h integer).
2278 iterate <instr,opcode>, vblendmps,65h, vpblendmd,64h
\r
2280 macro instr? dest*,src*,src2*&
\r
2281 AVX_512.basic_instruction_bcst VEX_66_0F38_W0,EVEX_REQUIRED+EVEX_VL,opcode,4,dest,src,src2
\r
2286 iterate <instr,opcode>, vblendmpd,65h, vpblendmq,64h
\r
2288 macro instr? dest*,src*,src2*&
\r
2289 AVX_512.basic_instruction_bcst VEX_66_0F38_W1,EVEX_REQUIRED+EVEX_VL,opcode,8,dest,src,src2
\r
\r
; vrcp14*/vrsqrt14* — 14-bit-precision reciprocal and reciprocal-sqrt
; approximations (0F38 map, 4Ch/4Dh and 4Eh/4Fh).  Packed forms take one
; source with broadcast; scalar forms are three-operand (src2 is folded into
; the '&'-captured src argument) without vector-length variants.
2294 iterate <instr,unit,vex_mpw,opcode>, vrcp14ps,4,VEX_66_0F38_W0,4Ch, vrcp14pd,8,VEX_66_0F38_W1,4Ch, vrsqrt14ps,4,VEX_66_0F38_W0,4Eh, vrsqrt14pd,8,VEX_66_0F38_W1,4Eh
\r
2296 macro instr? dest*,src*&
\r
2297 AVX_512.single_source_instruction_bcst vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,unit,dest,src
\r
; scalar variants use the next opcode (4Dh/4Fh) and no EVEX_VL
2302 iterate <instr,unit,vex_mpw,opcode>, vrcp14ss,4,VEX_66_0F38_W0,4Dh, vrcp14sd,8,VEX_66_0F38_W1,4Dh, vrsqrt14ss,4,VEX_66_0F38_W0,4Fh, vrsqrt14sd,8,VEX_66_0F38_W1,4Fh
\r
2304 macro instr? dest*,src*&
\r
2305 AVX_512.basic_instruction vex_mpw,EVEX_REQUIRED,opcode,unit,dest,src
\r
\r
; vcompressps/vcompresspd/vpcompressd/vpcompressq — store active elements
; contiguously (0F 8Ah float / 0F38 8Bh integer maps as encoded in vex_mpw).
; Destination may be a register or memory; source register size selects VL.
2310 iterate <instr,vex_mpw,opcode>, vcompressps,VEX_66_0F_W0,8Ah, vcompresspd,VEX_66_0F_W1,8Ah, vpcompressd,VEX_66_0F38_W0,8Bh, vpcompressq,VEX_66_0F38_W1,8Bh
\r
2312 macro instr? dest*,src*
\r
; {z} zeroing-masking is permitted on the destination
2313 AVX_512.parse_operand_k1z @dest,dest
\r
2314 AVX_512.parse_operand @src,src
\r
2315 if (@dest.type = 'mmreg' | @dest.type = 'mem') & @src.type = 'mmreg'
\r
2316 if @dest.size and not @src.size
\r
2317 err 'operand sizes do not match'
\r
; note: reg field carries the source, rm the destination (store direction)
2319 AVX_512.store_instruction @src.size,vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,@dest,@dest.mask,@src.rm
\r
2321 err 'invalid combination of operands'
\r
\r
; vexpandps/vexpandpd/vpexpandd/vpexpandq — inverse of compress: load
; contiguous elements into the active lanes (0F38 map, 88h float / 89h int).
; Unit 0: no broadcast/memsize scaling applies to the single source.
2327 iterate <instr,vex_mpw,opcode>, vexpandps,VEX_66_0F38_W0,88h, vexpandpd,VEX_66_0F38_W1,88h, vpexpandd,VEX_66_0F38_W0,89h, vpexpandq,VEX_66_0F38_W1,89h
\r
2329 macro instr? dest*,src*
\r
2330 AVX_512.single_source_instruction vex_mpw,EVEX_REQUIRED+EVEX_VL,opcode,0,dest,src
\r
\r
; vfixupimm{pd,ps,sd,ss} — special-value fixup with imm8 control and table
; source (0F3A map, packed 54h, scalar 55h = opcode+1).  Packed forms allow
; broadcast and {sae}; scalar forms allow {sae} only.
2335 iterate <instr,opcode>, fixupimm,54h
\r
2337 macro v#instr#pd? dest*,src*,src2*,aux*&
\r
2338 AVX_512.basic_instruction_bcst_sae_imm8 VEX_66_0F3A_W1,EVEX_REQUIRED+EVEX_VL,opcode,8,dest,src,src2,aux
\r
2341 macro v#instr#ps? dest*,src*,src2*,aux*&
\r
2342 AVX_512.basic_instruction_bcst_sae_imm8 VEX_66_0F3A_W0,EVEX_REQUIRED+EVEX_VL,opcode,4,dest,src,src2,aux
\r
2345 macro v#instr#sd? dest*,src*,src2*,aux*&
\r
2346 AVX_512.basic_instruction_sae_imm8 VEX_66_0F3A_W1,EVEX_REQUIRED,opcode+1,8,dest,src,src2,aux
\r
2349 macro v#instr#ss? dest*,src*,src2*,aux*&
\r
2350 AVX_512.basic_instruction_sae_imm8 VEX_66_0F3A_W0,EVEX_REQUIRED,opcode+1,4,dest,src,src2,aux
\r
\r
; vgetexp{pd,ps,sd,ss} — extract exponents (0F38 map, packed 42h, scalar
; 43h = opcode+1).  Packed forms are single-source with broadcast + {sae};
; scalar forms take a second source register and {sae}.
2355 iterate <instr,opcode>, getexp,42h
\r
2357 macro v#instr#pd? dest*,src*&
\r
2358 AVX_512.single_source_instruction_bcst_sae VEX_66_0F38_W1,EVEX_REQUIRED+EVEX_VL,opcode,8,dest,src
\r
2361 macro v#instr#ps? dest*,src*&
\r
2362 AVX_512.single_source_instruction_bcst_sae VEX_66_0F38_W0,EVEX_REQUIRED+EVEX_VL,opcode,4,dest,src
\r
2365 macro v#instr#sd? dest*,src*,src2*&
\r
2366 AVX_512.basic_instruction_sae VEX_66_0F38_W1,EVEX_REQUIRED,opcode+1,8,dest,src,src2
\r
2369 macro v#instr#ss? dest*,src*,src2*&
\r
2370 AVX_512.basic_instruction_sae VEX_66_0F38_W0,EVEX_REQUIRED,opcode+1,4,dest,src,src2
\r
\r
; vgetmant (0F3A 26h/27h) and vrndscale (0F3A 08h..0Bh) — per-form opcodes
; are supplied explicitly since the ps/pd/ss/sd opcodes differ per family.
; All take an imm8 control; packed forms add broadcast, all allow {sae}.
2375 iterate <instr,opcode_ps,opcode_pd,opcode_ss,opcode_sd>, getmant,26h,26h,27h,27h, rndscale,8,9,0Ah,0Bh
\r
2377 macro v#instr#pd? dest*,src*,aux*&
\r
2378 AVX_512.single_source_instruction_bcst_sae_imm8 VEX_66_0F3A_W1,EVEX_REQUIRED+EVEX_VL,opcode_pd,8,dest,src,aux
\r
2381 macro v#instr#ps? dest*,src*,aux*&
\r
2382 AVX_512.single_source_instruction_bcst_sae_imm8 VEX_66_0F3A_W0,EVEX_REQUIRED+EVEX_VL,opcode_ps,4,dest,src,aux
\r
2385 macro v#instr#sd? dest*,src*,src2*,aux*&
\r
2386 AVX_512.basic_instruction_sae_imm8 VEX_66_0F3A_W1,EVEX_REQUIRED,opcode_sd,8,dest,src,src2,aux
\r
2389 macro v#instr#ss? dest*,src*,src2*,aux*&
\r
2390 AVX_512.basic_instruction_sae_imm8 VEX_66_0F3A_W0,EVEX_REQUIRED,opcode_ss,4,dest,src,src2,aux
\r
\r
; vscalefpd/ps (0F38 2Ch, broadcast + embedded rounding) and the scalar
; vscalefsd/ss (2Dh, embedded rounding, no VL) — scale by power of two.
2395 iterate <instr,unit,vex_mpw>, vscalefpd,8,VEX_66_0F38_W1, vscalefps,4,VEX_66_0F38_W0
\r
2397 macro instr? dest*,src*,src2*&
\r
2398 AVX_512.basic_instruction_bcst_er vex_mpw,EVEX_REQUIRED+EVEX_VL,2Ch,unit,dest,src,src2
\r
2403 iterate <instr,unit,vex_mpw>, vscalefsd,8,VEX_66_0F38_W1, vscalefss,4,VEX_66_0F38_W0
\r
2405 macro instr? dest*,src*,src2*&
\r
2406 AVX_512.basic_instruction_er vex_mpw,EVEX_REQUIRED,2Dh,unit,dest,src,src2
\r
\r
; vpmov[u]s?{d,q}{b,w,d} — down-converting (truncating/saturating) moves,
; F3 0F38 map.  'ratio' is source-element-size / destination-element-size,
; so the destination occupies src.size/ratio bytes.
2411 iterate <instr,ratio,opcode>, vpmovusdb,4,11h, vpmovsdb,4,21h, vpmovdb,4,31h, \
\r
2412 vpmovusqb,8,12h, vpmovsqb,8,22h, vpmovqb,8,32h, \
\r
2413 vpmovusdw,2,13h, vpmovsdw,2,23h, vpmovdw,2,33h, \
\r
2414 vpmovusqw,4,14h, vpmovsqw,4,24h, vpmovqw,4,34h, \
\r
2415 vpmovusqd,2,15h, vpmovsqd,2,25h, vpmovqd,2,35h
\r
2417 macro instr? dest*,src*
\r
2418 AVX_512.parse_operand_k1z @dest,dest
\r
2419 AVX_512.parse_operand @src,src
\r
; destination may be a register or memory (store direction)
2420 if (@dest.type = 'mmreg' | @dest.type = 'mem') & @src.type = 'mmreg'
\r
2421 @dest.memsize = @src.size / ratio
\r
; register dest must be memsize rounded up to the next 16-byte register size;
; memory dest must match memsize exactly
2422 if (@dest.type = 'mmreg' & @dest.size <> (@dest.memsize-1) and not 15 + 16) | (@dest.type = 'mem' & @dest.size and not @dest.memsize)
\r
2423 err 'invalid operand size'
\r
2425 AVX_512.store_instruction @src.size,VEX_F3_0F38_W0,EVEX_REQUIRED+EVEX_VL,opcode,@dest,@dest.mask,@src.rm
\r
2427 err 'invalid combination of operands'
\r
\r
; AVX_512.parse_vsib_operand — parse a memory operand whose index register is
; a vector register (VSIB addressing, used by gather/scatter).  Publishes the
; decoded fields (address, scale, index, base, mod, rm, displacement, visize,
; segment_prefix, ...) into the caller-supplied namespace.
; NOTE(review): several flow lines of this calminstruction are not visible in
; this extract; comments below describe only the visible logic.
2433 calminstruction AVX_512.parse_vsib_operand namespace, operand
\r
2435 local size, type, segment_prefix
\r
2436 local displacement, displacement_size, auto_relative
\r
2437 local address, base_registers, index_registers
\r
2438 local mode, mod, rm
\r
2439 local scale, index, base
\r
2442 local i, pre, suf, sym
\r
2444 compute segment_prefix, 0
\r
2447 compute displacement_size, 0
\r
; --- optional operand-size prefix (e.g. "qword [..]") ---
2451 match pre suf, operand
\r
2452 jno no_size_prefix
\r
; transform resolves the size keyword through the x86 namespace
2453 transform pre, x86
\r
2454 jno no_size_prefix
\r
2456 jno no_size_prefix
\r
2457 arrange operand, suf
\r
; --- operand must be a memory reference: [addr] or ptr addr ---
2460 match [address], operand
\r
2461 jyes memory_operand
\r
2462 match =ptr? address, operand
\r
2463 jyes memory_operand
\r
2465 jump invalid_operand
\r
2468 compute type, 'mem'
\r
; --- optional segment override (sreg:address) ---
2470 match segment:address, address
\r
2471 jno segment_prefix_ok
\r
2472 check segment eq 1 elementof segment & 1 metadataof segment relativeto x86.sreg
\r
2473 jno invalid_operand
\r
2474 compute segment, 1 metadataof segment - x86.sreg
\r
2475 check segment >= 4
\r
2476 jyes segment_prefix_386
\r
; legacy es/cs/ss/ds prefixes are 26h/2Eh/36h/3Eh = 26h + sreg shl 3
2477 compute segment_prefix, 26h + segment shl 3
\r
2478 jump segment_prefix_ok
\r
2479 segment_prefix_386:
\r
; fs/gs prefixes are 64h/65h
2480 compute segment_prefix, 64h + segment-4
\r
2481 segment_prefix_ok:
\r
; --- optional address-size prefix inside the brackets ---
2483 match pre suf, address
\r
2484 jno no_address_size_prefix
\r
2485 transform pre, x86
\r
2486 jno no_address_size_prefix
\r
2488 jno no_address_size_prefix
\r
2489 arrange address, suf
\r
; only 32-bit or 64-bit addressing is valid for VSIB
2490 check pre = 4 | pre = 8
\r
2491 jno invalid_address_size
\r
2492 compute mode, pre shl 3
\r
2493 no_address_size_prefix:
\r
2499 compute auto_relative, 0
\r
2502 jyes size_override
\r
2503 compute size, sizeof address
\r
; --- split the address expression into base regs, index regs, displacement ---
2506 compute address, address
\r
2507 compute base_registers, 0
\r
2508 compute index_registers, 0
\r
2510 extract_registers:
\r
2511 check i > elementsof address
\r
2512 jyes registers_extracted
\r
; vector registers (SSE/AVX/AVX-512) go to index_registers, GPRs to base
2513 check i metadataof address relativeto SSE.reg | i metadataof address relativeto AVX.reg | 1 metadataof (i metadataof address) relativeto AVX_512.reg
\r
2515 check i metadataof address relativeto x86.r32 | i metadataof address relativeto x86.r64
\r
2517 compute base_registers, base_registers + i elementof address * i scaleof address
\r
2520 compute index_registers, index_registers + i elementof address * i scaleof address
\r
2523 jump extract_registers
\r
2524 registers_extracted:
\r
2525 compute displacement, address - base_registers - index_registers
\r
2526 compute auto_relative, 0
\r
; exactly one vector index register is required
2528 check elementsof index_registers = 1
\r
2529 jno invalid_address
\r
2530 compute scale, 1 scaleof index_registers
\r
2531 compute index, 0 scaleof (1 metadataof index_registers)
\r
; scale must be 1, 2, 4 or 8
2532 check scale > 2 & scale <> 4 & scale <> 8
\r
2533 jyes invalid_address
\r
; visize = width in bytes of the vector index register
2534 check 1 metadataof index_registers relativeto SSE.reg
\r
2536 check 1 metadataof index_registers relativeto AVX.reg
\r
2538 compute visize, 1 metadataof (1 metadataof index_registers) - AVX_512.reg
\r
2541 compute visize, 32
\r
2544 compute visize, 16
\r
; --- base register: at most one, with scale 1 ---
2548 check elementsof base_registers = 1 & 1 scaleof base_registers = 1
\r
2549 jyes base_and_index
\r
2550 check elementsof base_registers = 0
\r
2551 jno invalid_address
\r
; no base: always a 32-bit displacement field
2553 compute displacement_size, 4
\r
2555 compute mode, x86.mode
\r
2557 jyes export_address
\r
2559 jump export_address
\r
2561 compute base, 0 scaleof (1 metadataof base_registers)
\r
; address size implied by the base register must agree with any explicit mode
2562 check mode & mode <> 0 scaleof (1 metadataof (1 metadataof base_registers)) shl 3
\r
2563 jyes invalid_address
\r
2564 compute mode, 0 scaleof (1 metadataof (1 metadataof base_registers)) shl 3
\r
; --- choose disp0 / disp8 / disp32 encoding ---
2566 setup_displacement:
\r
2567 check displacement relativeto 0
\r
2568 jno displacement_32bit
\r
; disp can be omitted unless rm/base encodings force one (rm=5 / base=5 cases)
2569 check displacement = 0 & rm and 111b <> 5 & (rm <> 4 | base and 111b <> 5)
\r
2570 jyes displacement_empty
\r
2571 check displacement < 80h & displacement >= -80h
\r
2572 jyes displacement_8bit
\r
; values that wrap around the address-space size still fit in disp8
2573 check displacement - 1 shl mode >= -80h & displacement < 1 shl mode
\r
2574 jyes displacement_8bit_wrap
\r
2575 displacement_32bit:
\r
2576 compute displacement_size, 4
\r
2578 jump export_address
\r
2579 displacement_8bit_wrap:
\r
2580 compute displacement, displacement - 1 shl mode
\r
2581 displacement_8bit:
\r
2582 compute displacement_size, 1
\r
2584 jump export_address
\r
2586 compute displacement_size, 4
\r
2588 jump export_address
\r
2589 displacement_empty:
\r
2590 compute displacement_size, 0
\r
; --- publish all decoded fields into the caller's namespace ---
2595 arrange sym, namespace.=address
\r
2596 publish sym, address
\r
2598 arrange sym, namespace.=scale
\r
2599 publish sym, scale
\r
2601 arrange sym, namespace.=index
\r
2602 publish sym, index
\r
2604 arrange sym, namespace.=base
\r
2607 arrange sym, namespace.=auto_relative
\r
2608 publish sym, auto_relative
\r
2610 arrange sym, namespace.=displacement
\r
2611 publish sym, displacement
\r
2613 arrange sym, namespace.=mode
\r
2616 arrange sym, namespace.=mod
\r
2619 arrange sym, namespace.=rm
\r
2622 arrange sym, namespace.=type
\r
2625 arrange sym, namespace.=size
\r
2628 arrange sym, namespace.=visize
\r
2629 publish sym, visize
\r
2631 arrange sym, namespace.=displacement_size
\r
2632 publish sym, displacement_size
\r
2634 arrange sym, namespace.=segment_prefix
\r
2635 publish sym, segment_prefix
\r
2639 arrange sym, namespace.=mask
\r
2642 arrange sym, namespace.=evex_b
\r
2645 arrange sym, namespace.=memsize
\r
; --- error exits ---
2651 asmcmd =err 'invalid operand'
\r
2654 asmcmd =err 'invalid address'
\r
2656 invalid_address_size:
\r
2657 asmcmd =err 'invalid address size'
\r
2660 end calminstruction
\r
\r
; AVX_512.parse_vsib_operand_k1 — like parse_vsib_operand, but also accepts
; an "operand {k1}" write-mask suffix and publishes the mask number.
2662 calminstruction AVX_512.parse_vsib_operand_k1 namespace,operand
\r
2664 local k1, mask, sym
\r
; split off a trailing {k} specifier if present
2666 match operand {k1}, operand
\r
2668 asmcmd =AVX_512.=parse_vsib_operand namespace, operand
\r
2671 asmcmd =AVX_512.=parse_vsib_operand namespace, operand
\r
2672 arrange sym, namespace.=type
\r
; the mask must be a single k register other than k0 (k0 means "no masking")
2673 check k1 eq 1 elementof k1 & 1 metadataof k1 relativeto AVX_512.maskreg & 1 metadataof k1 - AVX_512.maskreg > 0
\r
2675 compute mask, 1 metadataof k1 - AVX_512.maskreg
\r
2676 arrange sym, namespace.=mask
\r
2680 asmcmd =err 'invalid mask'
\r
2682 end calminstruction
\r
\r
; vpgatherdd/vpgatherqd/vgatherdps/vgatherqps — dword-element gathers
; (0F38 map 90h..93h, W0).  'asize' is the VSIB index element size (4 for
; d-indexed, 8 for q-indexed).  Two encodings exist:
;   EVEX: dest{k} with a mandatory k-mask, no third operand
;   VEX:  dest, vsib, mask-vector (aux), legacy AVX2 form
2684 iterate <instr,opcode,asize>, vpgatherdd,90h,4, vpgatherqd,91h,8, vgatherdps,92h,4, vgatherqps,93h,8
\r
; aux is optional: present selects the VEX/AVX2 form, absent the EVEX form
2686 macro instr? dest*,src*,aux
\r
2688 AVX_512.parse_operand_k1 @dest,dest
\r
2689 AVX_512.parse_vsib_operand @src,src
\r
; EVEX gather requires a non-zero write-mask
2690 if @dest.type = 'mmreg' & @dest.mask & @src.type = 'mem'
\r
; element memsize must be 4; dest width and index width must be consistent
2691 if @src.size and not 4 | (@dest.size > 16 & @dest.size * (asize shr 2) > @src.visize) | (@src.visize > 16 & @dest.size * (asize shr 2) < @src.visize)
\r
2692 err 'invalid operand size'
\r
; destination may not equal the index register
2693 else if @dest.rm = @src.index
\r
2694 err 'disallowed combination of registers'
\r
; bit 4 of the index register is passed separately (EVEX.V' encoding input)
2697 AVX_512.store_instruction @src.visize,VEX_66_0F38_W0,EVEX_REQUIRED+EVEX_VL,opcode,@src,@dest.mask,@dest.rm,@src.index and 10000b
\r
2699 err 'invalid combination of operands'
\r
; --- legacy VEX form with an explicit vector mask operand ---
2702 AVX.parse_operand @dest,dest
\r
2703 AVX.parse_vsib_operand @src,src
\r
2704 AVX.parse_operand @aux,aux
\r
2705 if @dest.type = 'mmreg' & @src.type = 'mem' & @aux.type = 'mmreg'
\r
2706 if @src.size and not 4 | (@dest.size > 16 & @dest.size * (asize shr 2) > @src.visize) | (@src.visize > 16 & @dest.size * (asize shr 2) < @src.visize)
\r
2707 err 'invalid operand size'
\r
2708 else if @aux.size <> @dest.size
\r
2709 err 'operand sizes do not match'
\r
; dest, mask and index registers must all be distinct
2710 else if @dest.rm = @aux.rm | @dest.rm = @src.index | @aux.rm = @src.index
\r
2711 err 'disallowed combination of registers'
\r
2713 AVX.store_instruction @src.visize,VEX_66_0F38_W0,opcode,@src,@dest.rm,@aux.rm
\r
2715 err 'invalid combination of operands'
\r
\r
; vpgatherdq/vpgatherqq/vgatherdpd/vgatherqpd — qword-element gathers
; (0F38 map 90h..93h, W1).  Same dual EVEX/VEX scheme as the dword gathers;
; size checks compare against visize*2 because elements are twice the index
; width for the d-indexed forms.
2722 iterate <instr,opcode,asize>, vpgatherdq,90h,4, vpgatherqq,91h,8, vgatherdpd,92h,4, vgatherqpd,93h,8
\r
2724 macro instr? dest*,src*,aux
\r
2726 AVX_512.parse_operand_k1 @dest,dest
\r
2727 AVX_512.parse_vsib_operand @src,src
\r
; EVEX form: mandatory non-zero write-mask, no aux operand
2728 if @dest.type = 'mmreg' & @dest.mask & @src.type = 'mem'
\r
2729 if @src.size and not 8 | (@dest.size > 16 & @dest.size * (asize shr 2) > @src.visize * 2) | (@src.visize > 16 & @dest.size * (asize shr 2) < @src.visize * 2)
\r
2730 err 'invalid operand size'
\r
2731 else if @dest.rm = @src.index
\r
2732 err 'disallowed combination of registers'
\r
2735 AVX_512.store_instruction @dest.size,VEX_66_0F38_W1,EVEX_REQUIRED+EVEX_VL,opcode,@src,@dest.mask,@dest.rm,@src.index and 10000b
\r
2737 err 'invalid combination of operands'
\r
; --- legacy VEX/AVX2 form with an explicit vector mask operand ---
2740 AVX.parse_operand @dest,dest
\r
2741 AVX.parse_vsib_operand @src,src
\r
2742 AVX.parse_operand @aux,aux
\r
2743 if @dest.type = 'mmreg' & @src.type = 'mem' & @aux.type = 'mmreg'
\r
2744 if @src.size and not 8 | (@dest.size > 16 & @dest.size * (asize shr 2) > @src.visize * 2) | (@src.visize > 16 & @dest.size * (asize shr 2) < @src.visize * 2)
\r
2745 err 'invalid operand size'
\r
2746 else if @aux.size <> @dest.size
\r
2747 err 'operand sizes do not match'
\r
2748 else if @dest.rm = @aux.rm | @dest.rm = @src.index | @aux.rm = @src.index
\r
2749 err 'disallowed combination of registers'
\r
2751 AVX.store_instruction @dest.size,VEX_66_0F38_W1,opcode,@src,@dest.rm,@aux.rm
\r
2753 err 'invalid combination of operands'
\r
\r
; vpscatterdd/vpscatterqd/vscatterdps/vscatterqps — dword-element scatters
; (0F38 map 0A0h..0A3h, W0).  EVEX-only: the VSIB destination must carry a
; non-zero {k} write-mask; the source is the vector being stored.
2760 iterate <instr,opcode,asize>, vpscatterdd,0A0h,4, vpscatterqd,0A1h,8, vscatterdps,0A2h,4, vscatterqps,0A3h,8
\r
2762 macro instr? dest*,src*
\r
2763 AVX_512.parse_vsib_operand_k1 @dest,dest
\r
2764 AVX_512.parse_operand @src,src
\r
2765 if @dest.type = 'mem' & @dest.mask & @src.type = 'mmreg'
\r
; element memsize must be 4; vector width vs index width consistency checks
2766 if @dest.size and not 4 | (@src.size > 16 & @src.size * (asize shr 2) > @dest.visize) | (@dest.visize > 16 & @src.size * (asize shr 2) < @dest.visize)
\r
2767 err 'invalid operand size'
\r
; the stored register may not also be the index register
2768 else if @src.rm = @dest.index
\r
2769 err 'disallowed combination of registers'
\r
; bit 4 of the index register is passed separately (EVEX.V' encoding input)
2772 AVX_512.store_instruction @dest.visize,VEX_66_0F38_W0,EVEX_REQUIRED+EVEX_VL,opcode,@dest,@dest.mask,@src.rm,@dest.index and 10000b
\r
2774 err 'invalid combination of operands'
\r
\r
2780 iterate <instr,opcode,asize>, vpscatterdq,0A0h,4, vpscatterqq,0A1h,8, vscatterdpd,0A2h,4, vscatterqpd,0A3h,8
\r
2782 macro instr? dest*,src*
\r
2783 AVX_512.parse_vsib_operand_k1 @dest,dest
\r
2784 AVX_512.parse_operand @src,src
\r
2785 if @dest.type = 'mem' & @dest.mask & @src.type = 'mmreg'
\r
2786 if @dest.size and not 8 | (@src.size > 16 & @src.size * (asize shr 2) > @dest.visize * 2) | (@dest.visize > 16 & @src.size * (asize shr 2) < @dest.visize * 2)
\r
2787 err 'invalid operand size'
\r
2788 else if @src.rm = @dest.index
\r
2789 err 'disallowed combination of registers'
\r
2792 AVX_512.store_instruction @src.size,VEX_66_0F38_W1,EVEX_REQUIRED+EVEX_VL,opcode,@dest,@dest.mask,@src.rm,@dest.index and 10000b
\r
2794 err 'invalid combination of operands'
\r