; NOTE(review): this whole chunk appears to be a corrupted extraction of the
; fasmg (flat assembler g) AVX2 include.  Every line carries a fused decimal
; line number from the original file, alternating lines contain a stray "\r"
; residue, and the embedded numbering has gaps showing that many original
; lines are missing.  Do not assemble or hand-edit this text; restore it
; from the upstream fasmg x86 package before making changes.
4 restore AVX2 ; this ensures that symbol cannot be forward-referenced
\r
; AVX.parse_vsib_operand: CALM instruction that parses a memory operand using
; VSIB addressing (a vector register as index, e.g. [rax+xmm1*4+disp]) for
; the VEX-encoded gather instructions, and publishes the decoded fields
; (address, scale, index, base, mode, mod, rm, type, size, visize,
; displacement, displacement_size, segment_prefix, auto_relative) as symbols
; under the caller-supplied namespace.
; NOTE(review): many interior lines are missing from this extraction (the
; embedded numbering jumps, e.g. 20->23, 41->46, 110->119, 161->166), so
; several branches, the mod/rm computation, and some publishes are absent
; below.  The surviving lines are left byte-identical; restore from upstream
; before editing.
9 calminstruction AVX.parse_vsib_operand namespace, operand
\r
11 local size, type, segment_prefix
\r
12 local displacement, displacement_size, auto_relative
\r
13 local address, base_registers, index_registers
\r
15 local scale, index, base
\r
18 local i, pre, suf, sym
\r
20 compute segment_prefix, 0
\r
23 compute displacement_size, 0
\r
; Strip a size/"ptr" prefix and the surrounding brackets from the operand.
27 match pre suf, operand
\r
33 arrange operand, suf
\r
36 match [address], operand
\r
38 match =ptr? address, operand
\r
41 jump invalid_operand
\r
; Segment override: if the address is of the form sreg:address, encode the
; segment prefix byte (26h/2Eh/36h/3Eh for ES/CS/SS/DS, 64h/65h for FS/GS).
46 match segment:address, address
\r
47 jno segment_prefix_ok
\r
48 check segment eq 1 elementof segment & 1 metadataof segment relativeto x86.sreg
\r
50 compute segment, 1 metadataof segment - x86.sreg
\r
52 jyes segment_prefix_386
\r
53 compute segment_prefix, 26h + segment shl 3
\r
54 jump segment_prefix_ok
\r
56 compute segment_prefix, 64h + segment-4
\r
; Optional address-size prefix (dword/qword) selects 32- or 64-bit mode.
59 match pre suf, address
\r
60 jno no_address_size_prefix
\r
62 jno no_address_size_prefix
\r
64 jno no_address_size_prefix
\r
65 arrange address, suf
\r
66 check pre = 4 | pre = 8
\r
67 jno invalid_address_size
\r
68 compute mode, pre shl 3
\r
69 no_address_size_prefix:
\r
75 compute auto_relative, 0
\r
79 compute size, sizeof address
\r
; Split the address expression into a base-register sum and an
; index-register (vector) sum; the remainder is the displacement.
82 compute address, address
\r
83 compute base_registers, 0
\r
84 compute index_registers, 0
\r
87 check i > elementsof address
\r
88 jyes registers_extracted
\r
89 check i metadataof address relativeto SSE.reg | i metadataof address relativeto AVX.reg
\r
91 check i metadataof address relativeto x86.r32 | i metadataof address relativeto x86.r64
\r
93 compute base_registers, base_registers + i elementof address * i scaleof address
\r
96 compute index_registers, index_registers + i elementof address * i scaleof address
\r
99 jump extract_registers
\r
100 registers_extracted:
\r
101 compute displacement, address - base_registers - index_registers
\r
102 compute auto_relative, 0
\r
; Exactly one vector index register is required; scale must be 1, 2, 4 or 8.
104 check elementsof index_registers = 1
\r
105 jno invalid_address
\r
106 compute scale, 1 scaleof index_registers
\r
107 compute index, 0 scaleof (1 metadataof index_registers)
\r
108 check scale > 2 & scale <> 4 & scale <> 8
\r
109 jyes invalid_address
\r
110 check 1 metadataof index_registers relativeto SSE.reg
\r
; At most one base register with scale 1 is allowed alongside the index.
119 check elementsof base_registers = 1 & 1 scaleof base_registers = 1
\r
120 jyes base_and_index
\r
121 check elementsof base_registers = 0
\r
122 jno invalid_address
\r
124 compute displacement_size, 4
\r
126 compute mode, x86.mode
\r
128 jyes export_address
\r
130 jump export_address
\r
132 compute base, 0 scaleof (1 metadataof base_registers)
\r
133 check mode & mode <> 0 scaleof (1 metadataof (1 metadataof base_registers)) shl 3
\r
134 jyes invalid_address
\r
135 compute mode, 0 scaleof (1 metadataof (1 metadataof base_registers)) shl 3
\r
; Choose the smallest displacement encoding (0, 1 or 4 bytes), allowing an
; 8-bit form for values that wrap around the current address-size modulus.
137 setup_displacement:
\r
138 check displacement relativeto 0
\r
139 jno displacement_32bit
\r
140 check displacement = 0 & rm and 111b <> 5 & (rm <> 4 | base and 111b <> 5)
\r
141 jyes displacement_empty
\r
142 check displacement < 80h & displacement >= -80h
\r
143 jyes displacement_8bit
\r
144 check displacement - 1 shl mode >= -80h & displacement < 1 shl mode
\r
145 jyes displacement_8bit_wrap
\r
146 displacement_32bit:
\r
147 compute displacement_size, 4
\r
149 jump export_address
\r
150 displacement_8bit_wrap:
\r
151 compute displacement, displacement - 1 shl mode
\r
153 compute displacement_size, 1
\r
155 jump export_address
\r
157 compute displacement_size, 4
\r
159 jump export_address
\r
160 displacement_empty:
\r
161 compute displacement_size, 0
\r
; Publish the decoded operand fields into the caller's namespace.
; NOTE(review): the publishes for scale, index, base, mode, mod, rm, type
; and size are truncated here (arrange present, publish missing).
166 arrange sym, namespace.=address
\r
167 publish sym, address
\r
169 arrange sym, namespace.=scale
\r
172 arrange sym, namespace.=index
\r
175 arrange sym, namespace.=base
\r
178 arrange sym, namespace.=auto_relative
\r
179 publish sym, auto_relative
\r
181 arrange sym, namespace.=displacement
\r
182 publish sym, displacement
\r
184 arrange sym, namespace.=mode
\r
187 arrange sym, namespace.=mod
\r
190 arrange sym, namespace.=rm
\r
193 arrange sym, namespace.=type
\r
196 arrange sym, namespace.=size
\r
199 arrange sym, namespace.=visize
\r
200 publish sym, visize
\r
202 arrange sym, namespace.=displacement_size
\r
203 publish sym, displacement_size
\r
205 arrange sym, namespace.=segment_prefix
\r
206 publish sym, segment_prefix
\r
; Error exits.
210 asmcmd =err 'invalid operand'
\r
213 asmcmd =err 'invalid address'
\r
215 invalid_address_size:
\r
216 asmcmd =err 'invalid address size'
\r
219 end calminstruction
\r
; Dword-element gathers (VEX.66.0F38.W0 90h-93h): vpgatherdd/vpgatherqd/
; vgatherdps/vgatherqps.  dest and mask are vector registers, src is a VSIB
; memory operand; asize is the index-element size used in the size checks.
; NOTE(review): the closing 'end if'/'end macro'/'end iterate' lines are
; missing from this extraction (embedded numbering jumps 237->243).
221 iterate <instr,opcode,asize>, vpgatherdd,90h,4, vpgatherqd,91h,8, vgatherdps,92h,4, vgatherqps,93h,8
\r
223 macro instr? dest*,src*,mask*
\r
224 AVX.parse_operand @dest,dest
\r
225 AVX.parse_vsib_operand @src,src
\r
226 AVX.parse_operand @aux,mask
\r
227 if @dest.type = 'mmreg' & @src.type = 'mem' & @aux.type = 'mmreg'
\r
228 if @src.size and not 4 | (@dest.size > 16 & @dest.size * (asize shr 2) > @src.visize) | (@src.visize > 16 & @dest.size * (asize shr 2) < @src.visize)
\r
229 err 'invalid operand size'
\r
230 else if @aux.size <> @dest.size
\r
231 err 'operand sizes do not match'
\r
; Gather requires dest, mask and the vector index to be distinct registers.
232 else if @dest.rm = @aux.rm | @dest.rm = @src.index | @aux.rm = @src.index
\r
233 err 'disallowed combination of registers'
\r
235 AVX.store_instruction @src.visize,VEX_66_0F38_W0,opcode,@src,@dest.rm,@aux.rm
\r
237 err 'invalid combination of operands'
\r
; Qword-element gathers (VEX.66.0F38.W1 90h-93h): vpgatherdq/vpgatherqq/
; vgatherdpd/vgatherqpd.  Same shape as the dword group but with 8-byte
; elements, hence the '* 2' factors in the vector-index size checks.
; NOTE(review): closing 'end if'/'end macro'/'end iterate' lines are missing
; from this extraction (embedded numbering jumps 259->265).
243 iterate <instr,opcode,asize>, vpgatherdq,90h,4, vpgatherqq,91h,8, vgatherdpd,92h,4, vgatherqpd,93h,8
\r
245 macro instr? dest*,src*,mask*
\r
246 AVX.parse_operand @dest,dest
\r
247 AVX.parse_vsib_operand @src,src
\r
248 AVX.parse_operand @aux,mask
\r
249 if @dest.type = 'mmreg' & @src.type = 'mem' & @aux.type = 'mmreg'
\r
250 if @src.size and not 8 | (@dest.size > 16 & @dest.size * (asize shr 2) > @src.visize * 2) | (@src.visize > 16 & @dest.size * (asize shr 2) < @src.visize * 2)
\r
251 err 'invalid operand size'
\r
252 else if @aux.size <> @dest.size
\r
253 err 'operand sizes do not match'
\r
; Gather requires dest, mask and the vector index to be distinct registers.
254 else if @dest.rm = @aux.rm | @dest.rm = @src.index | @aux.rm = @src.index
\r
255 err 'disallowed combination of registers'
\r
257 AVX.store_instruction @dest.size,VEX_66_0F38_W1,opcode,@src,@dest.rm,@aux.rm
\r
259 err 'invalid combination of operands'
\r
; Three-operand packed-integer instructions from the legacy 0F map,
; VEX-encoded as v<instr> via the generic AVX.basic_instruction helper
; (VEX.66.0F.W0 + per-instruction opcode byte).
; NOTE(review): 'end macro'/'end iterate' lines are missing from this
; extraction (embedded numbering jumps 272->277).
265 iterate <instr,opcode>, packsswb,63h, packuswb,67h, packssdw,6Bh, paddb,0FCh, paddw,0FDh, paddd,0FEh, paddq,0D4h, paddsb,0ECh, paddsw,0EDh, paddusb,0DCh, paddusw,0DDh, \
\r
266 pand,0DBh, pandn,0DFh, pavgb,0E0h, pavgw,0E3h, pcmpeqb,74h, pcmpeqw,75h, pcmpeqd,76h, pcmpgtb,64h, pcmpgtw,65h, pcmpgtd,66h, \
\r
267 pmaddwd,0F5h, pmaxsw,0EEh, pmaxub,0DEh, pminsw,0EAh, pminub,0DAh, pmulhuw,0E4h, pmulhw,0E5h, pmullw,0D5h, pmuludq,0F4h, \
\r
268 por,0EBh, psadbw,0F6h, psubb,0F8h, psubw,0F9h, psubd,0FAh, psubq,0FBh, psubsb,0E8h, psubsw,0E9h, psubusb,0D8h, psubusw,0D9h, \
\r
269 punpckhbw,68h, punpckhwd,69h, punpckhdq,6Ah, punpckhqdq,6Dh, punpcklbw,60h, punpcklwd,61h, punpckldq,62h, punpcklqdq,6Ch, pxor,0EFh
\r
271 macro v#instr? dest*,src*,src2*
\r
272 AVX.basic_instruction VEX_66_0F_W0,opcode,0,dest,src,src2
\r
; Three-operand packed-integer instructions from the 0F38 map, VEX-encoded
; as v<instr> (VEX.66.0F38.W0 + per-instruction opcode byte).
; NOTE(review): 'end macro'/'end iterate' lines are missing from this
; extraction (embedded numbering jumps 282->287).
277 iterate <instr,opcode>, packusdw,2Bh, pcmpeqq,29h, pcmpgtq,37h, phaddw,1, phaddd,2, phaddsw,3, phsubw,5, phsubd,6, phsubsw,7, pmaddubsw,4, \
\r
278 pmaxsb,3Ch, pmaxsd,3Dh, pmaxuw,3Eh, pmaxud,3Fh, pminsb,38h, pminsd,39h, pminuw,3Ah, pminud,3Bh, pmulhrsw,0Bh, pmulld,40h, pmuldq,28h, \
\r
279 pshufb,0, psignb,8, psignw,9, psignd,0Ah
\r
281 macro v#instr? dest*,src*,src2*
\r
282 AVX.basic_instruction VEX_66_0F38_W0,opcode,0,dest,src,src2
\r
; vmpsadbw/vpalignr: three-operand 0F3A-map instructions that take a trailing
; imm8 (VEX.66.0F3A.W0), via the imm8 variant of the basic helper.
; NOTE(review): 'end macro'/'end iterate' lines are missing from this
; extraction (embedded numbering jumps 290->295).
287 iterate <instr,opcode>, mpsadbw,42h, palignr,0Fh
\r
289 macro v#instr? dest*,src*,src2*,imm*
\r
290 AVX.basic_instruction_imm8 VEX_66_0F3A_W0,opcode,0,dest,src,src2,imm
\r
; Two-operand (single-source) 0F38-map instructions: vpabsb/vpabsw/vpabsd,
; plus pblendw as listed here, encoded via AVX.single_source_instruction.
; NOTE(review): pblendw normally takes an imm8; its presence in this
; two-operand group may be an artifact of the lossy extraction -- verify
; against the upstream include.  'end macro'/'end iterate' lines missing.
295 iterate <instr,opcode>, pabsb,1Ch, pabsw,1Dh, pabsd,1Eh, pblendw,0Eh
\r
297 macro v#instr? dest*,src*
\r
298 AVX.single_source_instruction VEX_66_0F38_W0,opcode,0,dest,src
\r
; vpshufd/vpshufhw/vpshuflw: single-source shuffles sharing opcode 70h but
; distinguished by the VEX prefix group (66 / F3 / F2), with an imm8 control.
; NOTE(review): 'end macro'/'end iterate' lines are missing from this
; extraction (embedded numbering jumps 306->311).
303 iterate <instr,vex_mpw>, pshufd,VEX_66_0F_W0, pshufhw,VEX_F3_0F_W0, pshuflw,VEX_F2_0F_W0
\r
305 macro v#instr? dest*,src*,imm*
\r
306 AVX.single_source_instruction_imm8 vex_mpw,70h,0,dest,src,imm
\r
; AVX2 variable per-element shifts: vpsllvd/vpsllvq/vpsrlvd/vpsrlvq/vpsravd,
; three-operand 0F38-map instructions with W0/W1 selecting element width.
; NOTE(review): 'end macro'/'end iterate' lines are missing from this
; extraction (embedded numbering jumps 314->319).
311 iterate <instr,vex_mpw,opcode>, vpsllvd,VEX_66_0F38_W0,47h, vpsllvq,VEX_66_0F38_W1,47h, vpsrlvd,VEX_66_0F38_W0,45h, vpsrlvq,VEX_66_0F38_W1,45h, vpsravd,VEX_66_0F38_W0,46h
\r
313 macro instr? dest*,src*,src2*
\r
314 AVX.basic_instruction vex_mpw,opcode,0,dest,src,src2
\r
; vpblendvb dest,src,src2,mask: byte blend selected by the mask register,
; encoded as VEX.66.0F3A.W0 4Ch with the mask register number placed in the
; high nibble of the trailing imm8.
; NOTE(review): the 'end if'/'end macro' closing lines are missing from this
; extraction (embedded numbering jumps 330->334).
319 macro vpblendvb? dest*,src*,src2*,mask*
\r
320 AVX.parse_operand @dest,dest
\r
321 AVX.parse_operand @src,src
\r
322 AVX.parse_operand @src2,src2
\r
323 AVX.parse_operand @aux,mask
\r
324 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg') & @aux.type = 'mmreg'
\r
325 if @src.size <> @dest.size | @src2.size and not @dest.size | @aux.size <> @dest.size
\r
326 err 'operand sizes do not match'
\r
; mask register encoded in imm8[7:4].
328 AVX.store_instruction @dest.size,VEX_66_0F3A_W0,4Ch,@src2,@dest.rm,@src.rm,1,(@aux.rm and 1111b) shl 4
\r
330 err 'invalid combination of operands'
\r
; vpmovmskb dest,src: move byte sign-mask from a vector register into a
; 32-bit (or, in long mode, 64-bit) general-purpose register
; (VEX.66.0F.W0 D7h).
; NOTE(review): the 'end if'/'end macro' closing lines are missing from this
; extraction (embedded numbering jumps 343->347).
334 macro vpmovmskb? dest*,src*
\r
335 x86.parse_operand @dest,dest
\r
336 AVX.parse_operand @src,src
\r
337 if @dest.type = 'reg' & @src.type = 'mmreg'
\r
; dest must be 32-bit, or 64-bit only when assembling for 64-bit mode.
338 if (@dest.size <> 4 & (x86.mode < 64 | @dest.size <> 8))
\r
339 err 'invalid operand size'
\r
341 AVX.store_instruction @src.size,VEX_66_0F_W0,0D7h,@src,@dest.rm
\r
343 err 'invalid combination of operands'
\r
; Sign/zero extension moves vpmovsx*/vpmovzx* (VEX.66.0F38.W0 20h-25h /
; 30h-35h).  msize is the memory-source size for a 128-bit destination; it
; scales with the destination width (msize * (dest.size shr 4)).
; NOTE(review): 'end if'/'end macro'/'end iterate' closing lines are missing
; from this extraction (embedded numbering jumps 359->365).
347 iterate <instr,opcode,msize>, pmovsxbw,20h,8, pmovsxbd,21h,4, pmovsxbq,22h,2, pmovsxwd,23h,8, pmovsxwq,24h,4, pmovsxdq,25h,8, \
\r
348 pmovzxbw,30h,8, pmovzxbd,31h,4, pmovzxbq,32h,2, pmovzxwd,33h,8, pmovzxwq,34h,4, pmovzxdq,35h,8
\r
350 macro v#instr? dest*,src*
\r
351 AVX.parse_operand @dest,dest
\r
352 AVX.parse_operand @src,src
\r
353 if @dest.type = 'mmreg' & (@src.type = 'mem' | @src.type = 'mmreg')
\r
354 if (@src.type = 'mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not (msize * (@dest.size shr 4)))
\r
355 err 'invalid operand size'
\r
357 AVX.store_instruction @dest.size,VEX_66_0F38_W0,opcode,@src,@dest.rm
\r
359 err 'invalid combination of operands'
\r
; Whole-register byte shifts vpslldq/vpsrldq: immediate-only forms encoded
; as VEX.66.0F.W0 73h with the operation selected by the ModRM reg field
; (postbyte 7 = left, 3 = right).
; NOTE(review): 'end if'/'end macro'/'end iterate' closing lines are missing
; from this extraction (embedded numbering jumps 377->379->385).  Also note
; the macro header here lacks the '?' marker present on the other macros --
; possibly another extraction artifact; verify against upstream.
365 iterate <instr,postbyte>, pslldq,7, psrldq,3
\r
367 macro v#instr dest*,src*,src2*
\r
368 AVX.parse_operand @dest,dest
\r
369 AVX.parse_operand @src,src
\r
370 x86.parse_operand @src2,src2
\r
371 if @dest.type = 'mmreg' & @src.type = 'mmreg' & @src2.type = 'imm'
\r
372 if @src2.size and not 1
\r
373 err 'invalid operand size'
\r
374 else if @src.size <> @dest.size
\r
375 err 'operand sizes do not match'
\r
377 AVX.store_instruction @dest.size,VEX_66_0F_W0,73h,@src,postbyte,@dest.rm,1,@src2.imm
\r
379 err 'invalid combination of operands'
\r
; Packed shifts vpsllw/d/q, vpsraw/d, vpsrlw/d/q.  Two encodings per
; instruction: reg,reg,xmm-count (opcode_rrm) and reg,reg,imm8 (opcode with
; the operation in the ModRM reg field, 'postbyte').
; NOTE(review): 'end if'/'end macro'/'end iterate' closing lines are missing
; from this extraction (embedded numbering jumps 404->406->412).
385 iterate <instr,opcode_rrm,opcode,postbyte>, psllw,0F1h,71h,6, pslld,0F2h,72h,6, psllq,0F3h,73h,6, psraw,0E1h,71h,4, psrad,0E2h,72h,4, psrlw,0D1h,71h,2, psrld,0D2h,72h,2, psrlq,0D3h,73h,2
\r
387 macro v#instr? dest*,src*,src2*
\r
388 AVX.parse_operand @dest,dest
\r
389 AVX.parse_operand @src,src
\r
390 AVX.parse_operand @src2,src2
\r
; Form 1: shift count in an XMM register or 128-bit memory operand.
391 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mem' | @src2.type = 'mmreg')
\r
392 if @src2.size and not 16
\r
393 err 'invalid operand size'
\r
394 else if @src.size <> @dest.size
\r
395 err 'operand sizes do not match'
\r
397 AVX.store_instruction @dest.size,VEX_66_0F_W0,opcode_rrm,@src2,@dest.rm,@src.rm
\r
; Form 2: immediate shift count.
398 else if @dest.type = 'mmreg' & @src.type = 'mmreg' & @src2.type = 'imm'
\r
399 if @src2.size and not 1
\r
400 err 'invalid operand size'
\r
401 else if @src.size <> @dest.size
\r
402 err 'operand sizes do not match'
\r
404 AVX.store_instruction @dest.size,VEX_66_0F_W0,opcode,@src,postbyte,@dest.rm,1,@src2.imm
\r
406 err 'invalid combination of operands'
\r
; vmovntdqa dest,src: non-temporal load from memory into a vector register
; (VEX.66.0F38.W0 2Ah).  Memory-source only.
; NOTE(review): 'end if'/'end macro' closing lines are missing from this
; extraction (embedded numbering jumps 421->425).
412 macro vmovntdqa? dest*,src*
\r
413 AVX.parse_operand @dest,dest
\r
414 x86.parse_operand @src,src
\r
415 if @dest.type = 'mmreg' & @src.type = 'mem'
\r
416 if @src.size and not @dest.size
\r
417 err 'operand sizes do not match'
\r
419 AVX.store_instruction @dest.size,VEX_66_0F38_W0,2Ah,@src,@dest.rm
\r
421 err 'invalid combination of operands'
\r
; Conditional (masked) moves vpmaskmovd/vpmaskmovq (VEX.66.0F38.W0/W1):
; opcode 8Ch for the load form (reg,reg,mem) and 8Eh for the store form
; (mem,reg,reg); w selects dword vs qword elements.
; NOTE(review): 'end if'/'end macro'/'end iterate' closing lines are missing
; from this extraction (embedded numbering jumps 442->448).
425 iterate <instr,w>, vpmaskmovd,0, vpmaskmovq,1
\r
427 macro instr? dest*,src*,src2*
\r
428 AVX.parse_operand @dest,dest
\r
429 AVX.parse_operand @src,src
\r
430 AVX.parse_operand @src2,src2
\r
; Load form: dest <- masked load from src2.
431 if @dest.type = 'mmreg' & @src.type = 'mmreg' & @src2.type = 'mem'
\r
432 if @src.size <> @dest.size | @src2.size and not @dest.size
\r
433 err 'operand sizes do not match'
\r
435 AVX.store_instruction @dest.size,VEX_66_0F38_W#w,8Ch,@src2,@dest.rm,@src.rm
\r
; Store form: masked store of src2 to dest.
436 else if @dest.type = 'mem' & @src.type = 'mmreg' & @src2.type = 'mmreg'
\r
437 if @src.size <> @src2.size | @dest.size and not @src.size
\r
438 err 'operand sizes do not match'
\r
440 AVX.store_instruction @dest.size,VEX_66_0F38_W#w,8Eh,@dest,@src2.rm,@src.rm
\r
442 err 'invalid combination of operands'
\r
; vbroadcasti128 dest,src: broadcast a 128-bit memory operand into both
; lanes of a 256-bit register (VEX.66.0F38.W0 5Ah).  Memory-source only.
; NOTE(review): 'end if'/'end macro' closing lines are missing from this
; extraction (embedded numbering jumps 457->461).
448 macro vbroadcasti128? dest*,src*
\r
449 AVX.parse_operand @dest,dest
\r
450 AVX.parse_operand @src,src
\r
451 if @dest.type = 'mmreg' & @src.type = 'mem'
\r
452 if @dest.size <> 32 | @src.size and not 16
\r
453 err 'invalid operand size'
\r
455 AVX.store_instruction 32,VEX_66_0F38_W0,5Ah,@src,@dest.rm
\r
457 err 'invalid combination of operands'
\r
; vextracti128 dest,src,imm: extract the 128-bit lane selected by imm from a
; 256-bit register into an XMM register or memory (VEX.66.0F3A.W0 39h).
; NOTE(review): 'end if'/'end macro' closing lines are missing from this
; extraction (embedded numbering jumps 471->475).
461 macro vextracti128? dest*,src*,aux*
\r
462 AVX.parse_operand @dest,dest
\r
463 AVX.parse_operand @src,src
\r
464 x86.parse_operand @aux,aux
\r
465 if (@dest.type = 'mmreg' | @dest.type = 'mem') & @src.type = 'mmreg' & @aux.type = 'imm'
\r
466 if @dest.size and not 16 | @src.size <> 32 | @aux.size and not 1
\r
467 err 'invalid operand size'
\r
469 AVX.store_instruction 32,VEX_66_0F3A_W0,39h,@dest,@src.rm,,1,@aux.imm
\r
471 err 'invalid combination of operands'
\r
; vinserti128 dest,src,src2,imm: insert a 128-bit value (register or memory)
; into the lane of a 256-bit register selected by imm (VEX.66.0F3A.W0 38h).
; NOTE(review): 'end if'/'end macro' closing lines are missing from this
; extraction (embedded numbering jumps 486->490).
475 macro vinserti128? dest*,src*,src2*,aux*
\r
476 AVX.parse_operand @dest,dest
\r
477 AVX.parse_operand @src,src
\r
478 AVX.parse_operand @src2,src2
\r
479 x86.parse_operand @aux,aux
\r
480 if @dest.type = 'mmreg' & @src.type = 'mmreg' & (@src2.type = 'mmreg' | @src2.type = 'mem') & @aux.type = 'imm'
\r
481 if @dest.size <> 32 | @src.size <> 32 | @src2.size and not 16 | @aux.size and not 1
\r
482 err 'invalid operand size'
\r
484 AVX.store_instruction 32,VEX_66_0F3A_W0,38h,@src2,@dest.rm,@src.rm,1,@aux.imm
\r
486 err 'invalid combination of operands'
\r
; vperm2i128 dest,src,src2,imm: permute 128-bit lanes from two 256-bit
; sources under imm8 control (VEX.66.0F3A.W0 46h), via the generic helper
; with a forced 32-byte operand size.
; NOTE(review): the 'end macro' line is missing from this extraction.
490 macro vperm2i128? dest*,src*,src2*,imm*
\r
491 AVX.basic_instruction_imm8 VEX_66_0F3A_W0,46h,32,dest,src,src2,imm
\r
; Element broadcasts vbroadcastss/vpbroadcastb/w/d/q (VEX.66.0F38.W0):
; source is either an XMM register (must be 16 bytes) or a memory operand of
; exactly the element size msize.
; NOTE(review): 'end if'/'end macro'/'end iterate' closing lines are missing
; from this extraction (embedded numbering jumps 505->511).
494 iterate <instr,opcode,msize>, vbroadcastss,18h,4, vpbroadcastb,78h,1, vpbroadcastw,79h,2, vpbroadcastd,58h,4, vpbroadcastq,59h,8
\r
496 macro instr? dest*,src*
\r
497 AVX.parse_operand @dest,dest
\r
498 AVX.parse_operand @src,src
\r
499 if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
\r
500 if (@src.type='mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not msize)
\r
501 err 'invalid operand size'
\r
503 AVX.store_instruction @dest.size,VEX_66_0F38_W0,opcode,@src,@dest.rm
\r
505 err 'invalid combination of operands'
\r
; vbroadcastsd dest,src: broadcast a double into all qword lanes of a
; 256-bit register (VEX.66.0F38.W0 19h); register source must be XMM,
; memory source must be 8 bytes; destination must be 32 bytes.
; NOTE(review): 'end if'/'end macro' closing lines are missing from this
; extraction (embedded numbering jumps 520->524).
511 macro vbroadcastsd? dest*,src*
\r
512 AVX.parse_operand @dest,dest
\r
513 AVX.parse_operand @src,src
\r
514 if @dest.type = 'mmreg' & (@src.type = 'mmreg' | @src.type = 'mem')
\r
515 if @dest.size <> 32 | (@src.type='mmreg' & @src.size <> 16) | (@src.type = 'mem' & @src.size and not 8)
\r
516 err 'invalid operand size'
\r
518 AVX.store_instruction 32,VEX_66_0F38_W0,19h,@src,@dest.rm
\r
520 err 'invalid combination of operands'
\r
; vpermq/vpermpd dest,src,imm: qword permute under imm8 control
; (VEX.66.0F3A.W1), forced 32-byte operand size.
; NOTE(review): the opcode values listed here (0 and 1) look wrong for
; these instructions (upstream uses 00h/01h of map 0F3A as... verify --
; likely another casualty of the lossy extraction).  'end macro'/'end
; iterate' closing lines are also missing.
524 iterate <instr,opcode>, vpermq,0, vpermpd,1
\r
526 macro instr? dest*,src*,imm*
\r
527 AVX.single_source_instruction_imm8 VEX_66_0F3A_W1,opcode,32,dest,src,imm
\r
532 iterate <instr,opcode>, vpermd,36h, vpermps,16h
\r
534 macro instr? dest*,src*,src2*
\r
535 AVX.basic_instruction VEX_66_0F38_W0,opcode,32,dest,src,src2
\r