if ~ defined AVX2

        restore AVX2    ; this ensures that symbol cannot be forward-referenced
        AVX2 = 1

        include 'avx.inc'

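        ; AVX.parse_vsib_operand decodes a memory operand that uses VSIB addressing
        ; (an XMM/YMM index register, as required by the gather instructions) and
        ; publishes its components - type, size, visize, mode, mod, rm, scale, index,
        ; base, displacement, displacement_size, segment_prefix, auto_relative - as
        ; symbols in the namespace given by the first argument, analogously to
        ; AVX.parse_operand. visize is 16 when the index register is an XMM register
        ; and 32 when it is a YMM register.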
        calminstruction AVX.parse_vsib_operand namespace, operand

                local   size, type, segment_prefix
                local   displacement, displacement_size, auto_relative
                local   address, base_registers, index_registers
                local   mode, mod, rm
                local   scale, index, base
                local   visize

                local   i, pre, suf, sym

                compute segment_prefix, 0

                compute size, 0
                compute displacement_size, 0

                transform operand

                match   pre suf, operand
                jno     no_size_prefix
                transform pre, x86
                jno     no_size_prefix
                match   :size, pre
                jno     no_size_prefix
                arrange operand, suf
              no_size_prefix:

                match   [address], operand
                jyes    memory_operand
                match   =ptr? address, operand
                jyes    memory_operand

                jump    invalid_operand

          memory_operand:
                compute type, 'mem'

                match   segment:address, address
                jno     segment_prefix_ok
                check   segment eq 1 elementof segment & 1 metadataof segment relativeto x86.sreg
                jno     invalid_operand
                compute segment, 1 metadataof segment - x86.sreg
                check   segment >= 4
                jyes    segment_prefix_386
                compute segment_prefix, 26h + segment shl 3
                jump    segment_prefix_ok
              segment_prefix_386:
                compute segment_prefix, 64h + segment-4
              segment_prefix_ok:

                compute mode, 0
                compute scale, 0
                compute index, 0
                compute base, 0
                compute auto_relative, 0

                match   pre suf, address
                jno     no_address_size_prefix
                transform pre, x86
                jno     no_address_size_prefix
                match   :pre, pre
                jno     no_address_size_prefix
                arrange address, suf
                check   pre = 4 | pre = 8
                jno     invalid_address_size
                compute mode, pre shl 3         ; an explicit address-size prefix overrides the default
              no_address_size_prefix:

                check   size
                jyes    size_override
                compute size, sizeof address
              size_override:

                compute address, address
                compute base_registers, 0
                compute index_registers, 0
                compute i, 1
              extract_registers:
                check   i > elementsof address
                jyes    registers_extracted
                check   i metadataof address relativeto SSE.reg | i metadataof address relativeto AVX.reg
                jyes    index_term
                check   i metadataof address relativeto x86.r32 | i metadataof address relativeto x86.r64
                jno     next_term
                compute base_registers, base_registers + i elementof address * i scaleof address
                jump    next_term
              index_term:
                compute index_registers, index_registers + i elementof address * i scaleof address
              next_term:
                compute i, i+1
                jump    extract_registers
              registers_extracted:
                compute displacement, address - base_registers - index_registers
                compute auto_relative, 0

                check   elementsof index_registers = 1
                jno     invalid_address
                compute scale, 1 scaleof index_registers
                compute index, 0 scaleof (1 metadataof index_registers)
                check   scale > 2 & scale <> 4 & scale <> 8
                jyes    invalid_address
                check   1 metadataof index_registers relativeto SSE.reg
                jyes    xmm_index
                compute visize, 32
                jump    index_ok
              xmm_index:
                compute visize, 16
              index_ok:

                compute rm, 4
                check   elementsof base_registers = 1 & 1 scaleof base_registers = 1
                jyes    base_and_index
                check   elementsof base_registers = 0
                jno     invalid_address
                compute base, 5
                compute displacement_size, 4
                compute mod, 0
                compute mode, x86.mode
                check   mode > 16
                jyes    export_address
                compute mode, 32
                jump    export_address
              base_and_index:
                compute base, 0 scaleof (1 metadataof base_registers)
                check   mode & mode <> 0 scaleof (1 metadataof (1 metadataof base_registers)) shl 3
                jyes    invalid_address
                compute mode, 0 scaleof (1 metadataof (1 metadataof base_registers)) shl 3

          setup_displacement:
                check   displacement relativeto 0
                jno     displacement_32bit
                check   displacement = 0 & rm and 111b <> 5 & (rm <> 4 | base and 111b <> 5)
                jyes    displacement_empty
                check   displacement < 80h & displacement >= -80h
                jyes    displacement_8bit
                check   displacement - 1 shl mode >= -80h & displacement < 1 shl mode
                jyes    displacement_8bit_wrap
              displacement_32bit:
                compute displacement_size, 4
                compute mod, 2
                jump    export_address
              displacement_8bit_wrap:
                compute displacement, displacement - 1 shl mode
              displacement_8bit:
                compute displacement_size, 1
                compute mod, 1
                jump    export_address
              index_only:
                compute displacement_size, 4
                compute mod, 0
                jump    export_address
              displacement_empty:
                compute displacement_size, 0
                compute mod, 0

          export_address:

                arrange sym, namespace.=address
                publish sym, address

                arrange sym, namespace.=scale
                publish sym, scale

                arrange sym, namespace.=index
                publish sym, index

                arrange sym, namespace.=base
                publish sym, base

                arrange sym, namespace.=auto_relative
                publish sym, auto_relative

                arrange sym, namespace.=displacement
                publish sym, displacement

                arrange sym, namespace.=mode
                publish sym, mode

                arrange sym, namespace.=mod
                publish sym, mod

                arrange sym, namespace.=rm
                publish sym, rm

                arrange sym, namespace.=type
                publish sym, type

                arrange sym, namespace.=size
                publish sym, size

                arrange sym, namespace.=visize
                publish sym, visize

                arrange sym, namespace.=displacement_size
                publish sym, displacement_size

                arrange sym, namespace.=segment_prefix
                publish sym, segment_prefix
                exit

          invalid_operand:
                asmcmd  =err 'invalid operand'
                exit
          invalid_address:
                asmcmd  =err 'invalid address'
                exit
          invalid_address_size:
                asmcmd  =err 'invalid address size'
                exit

        end calminstruction
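
        ; Gather instructions load elements through a VSIB-addressed memory operand
        ; under control of a mask register; the destination, index and mask registers
        ; must all be distinct. Illustrative use (not from the original source):
        ;       vpgatherdd ymm0, [rsi+ymm1*4], ymm2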
        iterate <instr,opcode,asize>, vpgatherdd,90h,4, vpgatherqd,91h,8, vgatherdps,92h,4, vgatherqps,93h,8

                macro instr? dest*,src*,mask*
                        AVX.parse_operand @dest,dest
                        AVX.parse_vsib_operand @src,src
                        AVX.parse_operand @aux,mask
                        if @dest.type = 'mmreg' & @src.type = 'mem' & @aux.type = 'mmreg'
                                if @src.size and not 4 | (@dest.size > 16 & @dest.size * (asize shr 2) > @src.visize) | (@src.visize > 16 & @dest.size * (asize shr 2) < @src.visize)
                                        err 'invalid operand size'
                                else if @aux.size <> @dest.size
                                        err 'operand sizes do not match'
                                else if @dest.rm = @aux.rm | @dest.rm = @src.index | @aux.rm = @src.index
                                        err 'disallowed combination of registers'
                                end if
                                AVX.store_instruction @src.visize,VEX_66_0F38_W0,opcode,@src,@dest.rm,@aux.rm
                        else
                                err 'invalid combination of operands'
                        end if
                end macro

        end iterate

        iterate <instr,opcode,asize>, vpgatherdq,90h,4, vpgatherqq,91h,8, vgatherdpd,92h,4, vgatherqpd,93h,8

                macro instr? dest*,src*,mask*
                        AVX.parse_operand @dest,dest
                        AVX.parse_vsib_operand @src,src
                        AVX.parse_operand @aux,mask
                        if @dest.type = 'mmreg' & @src.type = 'mem' & @aux.type = 'mmreg'
                                if @src.size and not 8 | (@dest.size > 16 & @dest.size * (asize shr 2) > @src.visize * 2) | (@src.visize > 16 & @dest.size * (asize shr 2) < @src.visize * 2)
                                        err 'invalid operand size'
                                else if @aux.size <> @dest.size
                                        err 'operand sizes do not match'
                                else if @dest.rm = @aux.rm | @dest.rm = @src.index | @aux.rm = @src.index
                                        err 'disallowed combination of registers'
                                end if
                                AVX.store_instruction @dest.size,VEX_66_0F38_W1,opcode,@src,@dest.rm,@aux.rm
                        else
                                err 'invalid combination of operands'
                        end if
                end macro

        end iterate
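
        ; AVX2 widens the packed-integer operations to 256-bit vectors; the
        ; definitions below replace the 128-bit-only forms from avx.inc, passing 0
        ; as the size argument so that both XMM and YMM operands are accepted.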
        iterate <instr,opcode>, packsswb,63h, packuswb,67h, packssdw,6Bh, paddb,0FCh, paddw,0FDh, paddd,0FEh, paddq,0D4h, paddsb,0ECh, paddsw,0EDh, paddusb,0DCh, paddusw,0DDh, \
                                pand,0DBh, pandn,0DFh, pavgb,0E0h, pavgw,0E3h, pcmpeqb,74h, pcmpeqw,75h, pcmpeqd,76h, pcmpgtb,64h, pcmpgtw,65h, pcmpgtd,66h, \
                                pmaddwd,0F5h, pmaxsw,0EEh, pmaxub,0DEh, pminsw,0EAh, pminub,0DAh, pmulhuw,0E4h, pmulhw,0E5h, pmullw,0D5h, pmuludq,0F4h, \
                                por,0EBh, psadbw,0F6h, psubb,0F8h, psubw,0F9h, psubd,0FAh, psubq,0FBh, psubsb,0E8h, psubsw,0E9h, psubusb,0D8h, psubusw,0D9h, \
                                punpckhbw,68h, punpckhwd,69h, punpckhdq,6Ah, punpckhqdq,6Dh, punpcklbw,60h, punpcklwd,61h, punpckldq,62h, punpcklqdq,6Ch, pxor,0EFh

                macro v#instr? dest*,src*,src2*
                        AVX.basic_instruction VEX_66_0F_W0,opcode,0,dest,src,src2
                end macro

        end iterate

        iterate <instr,opcode>, packusdw,2Bh, pcmpeqq,29h, pcmpgtq,37h, phaddw,1, phaddd,2, phaddsw,3, phsubw,5, phsubd,6, phsubsw,7, pmaddubsw,4, \
                                pmaxsb,3Ch, pmaxsd,3Dh, pmaxuw,3Eh, pmaxud,3Fh, pminsb,38h, pminsd,39h, pminuw,3Ah, pminud,3Bh, pmulhrsw,0Bh, pmulld,40h, pmuldq,28h, \
                                pshufb,0, psignb,8, psignw,9, psignd,0Ah

                macro v#instr? dest*,src*,src2*
                        AVX.basic_instruction VEX_66_0F38_W0,opcode,0,dest,src,src2
                end macro

        end iterate

        iterate <instr,opcode>, mpsadbw,42h, palignr,0Fh, pblendw,0Eh

                macro v#instr? dest*,src*,src2*,imm*
                        AVX.basic_instruction_imm8 VEX_66_0F3A_W0,opcode,0,dest,src,src2,imm
                end macro

        end iterate

        iterate <instr,opcode>, pabsb,1Ch, pabsw,1Dh, pabsd,1Eh

                macro v#instr? dest*,src*
                        AVX.single_source_instruction VEX_66_0F38_W0,opcode,0,dest,src
                end macro

        end iterate

        iterate <instr,vex_mpw>, pshufd,VEX_66_0F_W0, pshufhw,VEX_F3_0F_W0, pshuflw,VEX_F2_0F_W0

                macro v#instr? dest*,src*,imm*
                        AVX.single_source_instruction_imm8 vex_mpw,70h,0,dest,src,imm
                end macro

        end iterate

        iterate <instr,vex_mpw,opcode>, vpsllvd,VEX_66_0F38_W0,47h, vpsllvq,VEX_66_0F38_W1,47h, vpsrlvd,VEX_66_0F38_W0,45h, vpsrlvq,VEX_66_0F38_W1,45h, vpsravd,VEX_66_0F38_W0,46h

                macro instr? dest*,src*,src2*
                        AVX.basic_instruction vex_mpw,opcode,0,dest,src,src2
                end macro

        end iterate
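
        ; vpblendvb selects bytes from src or src2 according to the top bit of each
        ; byte of a fourth register operand, which the VEX encoding carries in the
        ; high nibble of the trailing immediate byte.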
        macro vpblendvb? dest*,src*,src2*,mask*
                AVX.parse_operand @dest,dest
                AVX.parse_operand @src,src
                AVX.parse_operand @src2,src2
                AVX.parse_operand @aux,mask
                if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'mmreg'
                        if @src.size <> @dest.size | @src2.size and not @dest.size | @aux.size <> @dest.size
                                err 'operand sizes do not match'
                        end if
                        AVX.store_instruction @dest.size,VEX_66_0F3A_W0,4Ch,@src2,@dest.rm,@src.rm,1,(@aux.rm and 1111b) shl 4
                else
                        err 'invalid combination of operands'
                end if
        end macro

        macro vpmovmskb? dest*,src*
                x86.parse_operand @dest,dest
                AVX.parse_operand @src,src
                if @dest.type = 'reg' & @src.type = 'mmreg'
                        if (@dest.size <> 4 & (x86.mode < 64 | @dest.size <> 8))
                                err 'invalid operand size'
                        end if
                        AVX.store_instruction @src.size,VEX_66_0F_W0,0D7h,@src,@dest.rm
                else
                        err 'invalid combination of operands'
                end if
        end macro
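
        ; Packed sign- and zero-extension: a memory source scales with the
        ; destination width (msize bytes per 128 bits of destination), while a
        ; register source is always an XMM register.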
        iterate <instr,opcode,msize>, pmovsxbw,20h,8, pmovsxbd,21h,4, pmovsxbq,22h,2, pmovsxwd,23h,8, pmovsxwq,24h,4, pmovsxdq,25h,8, \
                                      pmovzxbw,30h,8, pmovzxbd,31h,4, pmovzxbq,32h,2, pmovzxwd,33h,8, pmovzxwq,34h,4, pmovzxdq,35h,8

                macro v#instr? dest*,src*
                        AVX.parse_operand @dest,dest
                        AVX.parse_operand @src,src
                        if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
                                if (@src.type = 'mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not (msize * (@dest.size shr 4)))
                                        err 'invalid operand size'
                                end if
                                AVX.store_instruction @dest.size,VEX_66_0F38_W0,opcode,@src,@dest.rm
                        else
                                err 'invalid combination of operands'
                        end if
                end macro

        end iterate
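
        ; vpslldq/vpsrldq shift whole registers left or right by a byte count given
        ; as an immediate; the operation is selected by the postbyte (/7 for vpslldq,
        ; /3 for vpsrldq) in the ModRM reg field.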
        iterate <instr,postbyte>, pslldq,7, psrldq,3

                macro v#instr? dest*,src*,src2*
                        AVX.parse_operand @dest,dest
                        AVX.parse_operand @src,src
                        x86.parse_operand @src2,src2
                        if @dest.type = 'mmreg' & @src.type = 'mmreg' & @src2.type = 'imm'
                                if @src2.size and not 1
                                        err 'invalid operand size'
                                else if @src.size <> @dest.size
                                        err 'operand sizes do not match'
                                end if
                                AVX.store_instruction @dest.size,VEX_66_0F_W0,73h,@src,postbyte,@dest.rm,1,@src2.imm
                        else
                                err 'invalid combination of operands'
                        end if
                end macro

        end iterate
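
        ; The element shifts accept either a shift count in an XMM register or memory
        ; operand (opcode_rrm form) or an immediate count (opcode form, with the
        ; postbyte selecting the operation in the ModRM reg field).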
        iterate <instr,opcode_rrm,opcode,postbyte>, psllw,0F1h,71h,6, pslld,0F2h,72h,6, psllq,0F3h,73h,6, psraw,0E1h,71h,4, psrad,0E2h,72h,4, psrlw,0D1h,71h,2, psrld,0D2h,72h,2, psrlq,0D3h,73h,2

                macro v#instr? dest*,src*,src2*
                        AVX.parse_operand @dest,dest
                        AVX.parse_operand @src,src
                        AVX.parse_operand @src2,src2
                        if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
                                if @src2.size and not 16
                                        err 'invalid operand size'
                                else if @src.size <> @dest.size
                                        err 'operand sizes do not match'
                                end if
                                AVX.store_instruction @dest.size,VEX_66_0F_W0,opcode_rrm,@src2,@dest.rm,@src.rm
                        else if @dest.type = 'mmreg' & @src.type = 'mmreg' & @src2.type = 'imm'
                                if @src2.size and not 1
                                        err 'invalid operand size'
                                else if @src.size <> @dest.size
                                        err 'operand sizes do not match'
                                end if
                                AVX.store_instruction @dest.size,VEX_66_0F_W0,opcode,@src,postbyte,@dest.rm,1,@src2.imm
                        else
                                err 'invalid combination of operands'
                        end if
                end macro

        end iterate

        macro vmovntdqa? dest*,src*
                AVX.parse_operand @dest,dest
                x86.parse_operand @src,src
                if @dest.type = 'mmreg' & @src.type = 'mem'
                        if @src.size and not @dest.size
                                err 'operand sizes do not match'
                        end if
                        AVX.store_instruction @dest.size,VEX_66_0F38_W0,2Ah,@src,@dest.rm
                else
                        err 'invalid combination of operands'
                end if
        end macro
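
        ; vpmaskmovd/q perform masked vector loads and stores: the first operand
        ; pattern (register destination, memory source) is a masked load, the
        ; second (memory destination) a masked store.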
        iterate <instr,w>, vpmaskmovd,0, vpmaskmovq,1

                macro instr? dest*,src*,src2*
                        AVX.parse_operand @dest,dest
                        AVX.parse_operand @src,src
                        AVX.parse_operand @src2,src2
                        if @dest.type = 'mmreg' & @src.type = 'mmreg' & @src2.type = 'mem'
                                if @src.size <> @dest.size | @src2.size and not @dest.size
                                        err 'operand sizes do not match'
                                end if
                                AVX.store_instruction @dest.size,VEX_66_0F38_W#w,8Ch,@src2,@dest.rm,@src.rm
                        else if @dest.type = 'mem' & @src.type = 'mmreg' & @src2.type = 'mmreg'
                                if @src.size <> @src2.size | @dest.size and not @src.size
                                        err 'operand sizes do not match'
                                end if
                                AVX.store_instruction @dest.size,VEX_66_0F38_W#w,8Eh,@dest,@src2.rm,@src.rm
                        else
                                err 'invalid combination of operands'
                        end if
                end macro

        end iterate
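
        ; 128-bit lane operations: vbroadcasti128 duplicates a 16-byte memory operand
        ; into both lanes of a YMM register, vextracti128/vinserti128 move one lane
        ; between a YMM register and an XMM register or memory, and vperm2i128
        ; selects lanes from two sources according to an immediate.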
        macro vbroadcasti128? dest*,src*
                AVX.parse_operand @dest,dest
                AVX.parse_operand @src,src
                if @dest.type = 'mmreg' & @src.type = 'mem'
                        if @dest.size <> 32 | @src.size and not 16
                                err 'invalid operand size'
                        end if
                        AVX.store_instruction 32,VEX_66_0F38_W0,5Ah,@src,@dest.rm
                else
                        err 'invalid combination of operands'
                end if
        end macro

        macro vextracti128? dest*,src*,aux*
                AVX.parse_operand @dest,dest
                AVX.parse_operand @src,src
                x86.parse_operand @aux,aux
                if (@dest.type = 'mmreg' | @dest.type = 'mem') & @src.type = 'mmreg' & @aux.type = 'imm'
                        if @dest.size and not 16 | @src.size <> 32 | @aux.size and not 1
                                err 'invalid operand size'
                        end if
                        AVX.store_instruction 32,VEX_66_0F3A_W0,39h,@dest,@src.rm,,1,@aux.imm
                else
                        err 'invalid combination of operands'
                end if
        end macro

        macro vinserti128? dest*,src*,src2*,aux*
                AVX.parse_operand @dest,dest
                AVX.parse_operand @src,src
                AVX.parse_operand @src2,src2
                x86.parse_operand @aux,aux
                if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mmreg' | @src2.type = 'mem') & @aux.type = 'imm'
                        if @dest.size <> 32 | @src.size <> 32 | @src2.size and not 16 | @aux.size and not 1
                                err 'invalid operand size'
                        end if
                        AVX.store_instruction 32,VEX_66_0F3A_W0,38h,@src2,@dest.rm,@src.rm,1,@aux.imm
                else
                        err 'invalid combination of operands'
                end if
        end macro

        macro vperm2i128? dest*,src*,src2*,imm*
                AVX.basic_instruction_imm8 VEX_66_0F3A_W0,46h,32,dest,src,src2,imm
        end macro
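
        ; Element broadcasts replicate a single element, taken from the low part of
        ; an XMM register or from a memory operand of msize bytes, across every
        ; element of the destination. Illustrative use (not from the original
        ; source):
        ;       vpbroadcastb ymm0, xmm1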
        iterate <instr,opcode,msize>, vbroadcastss,18h,4, vpbroadcastb,78h,1, vpbroadcastw,79h,2, vpbroadcastd,58h,4, vpbroadcastq,59h,8

                macro instr? dest*,src*
                        AVX.parse_operand @dest,dest
                        AVX.parse_operand @src,src
                        if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
                                if (@src.type='mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not msize)
                                        err 'invalid operand size'
                                end if
                                AVX.store_instruction @dest.size,VEX_66_0F38_W0,opcode,@src,@dest.rm
                        else
                                err 'invalid combination of operands'
                        end if
                end macro

        end iterate

        macro vbroadcastsd? dest*,src*
                AVX.parse_operand @dest,dest
                AVX.parse_operand @src,src
                if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
                        if @dest.size <> 32 | (@src.type='mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not 8)
                                err 'invalid operand size'
                        end if
                        AVX.store_instruction 32,VEX_66_0F38_W0,19h,@src,@dest.rm
                else
                        err 'invalid combination of operands'
                end if
        end macro
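
        ; Full cross-lane permutes, restricted to 256-bit operands (the size argument
        ; 32 enforces this): vpermq/vpermpd take an immediate selector, vpermd/vpermps
        ; take per-element selectors from a register.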
        iterate <instr,opcode>, vpermq,0, vpermpd,1

                macro instr? dest*,src*,imm*
                        AVX.single_source_instruction_imm8 VEX_66_0F3A_W1,opcode,32,dest,src,imm
                end macro

        end iterate

        iterate <instr,opcode>, vpermd,36h, vpermps,16h

                macro instr? dest*,src*,src2*
                        AVX.basic_instruction VEX_66_0F38_W0,opcode,32,dest,src,src2
                end macro

        end iterate

end if