From 9ad6e6a5d20a0c5ae8cb4552971c23c1d20ec2e1 Mon Sep 17 00:00:00 2001
From: Dirk Steinke
Date: Fri, 1 Aug 2014 12:28:01 +0200
Subject: [PATCH] Disabled all instructions which could not be used sensibly
 right now anyway (HLM, call, jmp). Now the test passes. (And for call/jmp, i
 need to think up something anyway.)

---
 src/data/x86_insalias.ltxt     |  4 +--
 src/data/x86_inslist.ltxt      | 62 ++++++++++++++++++++++--------------------
 tests/test_allinstructions.cpp |  5 ++--
 tests/x86/x86_cf.ins           |  2 ++
 tests/x86/x86_other.ins        |  1 +
 5 files changed, 41 insertions(+), 33 deletions(-)

diff --git a/src/data/x86_insalias.ltxt b/src/data/x86_insalias.ltxt
index 157b7bb..0e396c5 100644
--- a/src/data/x86_insalias.ltxt
+++ b/src/data/x86_insalias.ltxt
@@ -109,8 +109,8 @@ CMOV%(l)_D%(DM) -> INVALID, CMOV%(w)_D%(DM)
 CMOV%(l)_R%(RM) -> CMOV%(w)_W%(WM), CMOV%(w)_D%(DM)
 | end
 // ----------------------------------
-IJMP_%(RM) -> IJMP_%(WM32)_x32, IJMP_%(DM64)_x64
-ICALL_%(RM) -> ICALL_%(WM32)_x32, ICALL_%(DM64)_x64
+// DISABLED: IJMP_%(RM) -> IJMP_%(WM32)_x32, IJMP_%(DM64)_x64
+// DISABLED: ICALL_%(RM) -> ICALL_%(WM32)_x32, ICALL_%(DM64)_x64
 PUSH_%(RM) -> PUSH_%(WM32)_x32, PUSH_%(DM64)_x64
 POP_%(RM) -> POP_%(WM32)_x32, POP_%(DM64)_x64
 CRC32_W$%(RM)
diff --git a/src/data/x86_inslist.ltxt b/src/data/x86_inslist.ltxt
index 4d0b71e..fb683d8 100644
--- a/src/data/x86_inslist.ltxt
+++ b/src/data/x86_inslist.ltxt
@@ -80,8 +80,9 @@ TEST_%(RNM(rm,v))%(RN(v))
     fold TEST_%(RN(v))M TEST_M%(RN(v)); commute dst<->src}
 | for l,w in ipairs{{"INC",0,0xfe,"wflag_c"},{"DEC",1,0xfe,"wflag_c"},{"NOT",2,0xf6,"wflags"},{"NEG",3,0xf6,""}} do
 | local suffix = (w[1] == "INC" or w[1] == "DEC") and (v == 16 or v == 32) and rm == "rr" and "_x64" or ""
+| -- DISABLED INC/DEC_H/W_x64 in 32bit mode, because we can't generate it via nasm
 %(w[1])_%(RNMN(rm,v))%(suffix) //= UNARY%(v)_%(RM).%(w[2])
-    {isa %ISA; ops %RRM dst/%(ROMI); flags %(w[4]) %RWMEM;
+    {isa %ISA %(#suffix > 0 and "x64" or ""); ops %RRM dst/%(ROMI); flags %(w[4]) %RWMEM;
     pref %P; rex %rm %W 0 dst; coding !par(%(w[3]+R16)) !RRM%(RM)(!sub(%(w[2])),$dst);
     fold %(w[1])_%(MN(v))}
 | end
@@ -352,22 +353,22 @@ SET%(w)_%(RNMN(rm,8)) //= SET_%(RM).%(l-1)
     rex %rm 0 0 dst; extopcode 0f; coding !parsub(%(0x90+l-1)) !RRM%(RM)(0,$dst);
     fold SET%(w)_M8}
 | end
-IJMP_%(RNMN(rm,32))_x32 //= IJMP%(RM)_x32.4
-    {isa x32only; ops %(grm(rm,32)) src/i; flags cf_jmp %RMEM;
-    rex %rm 0 0 src; coding !par(0xff) !RRM%(RM)(!sub(4),$src);
-    fold IJMP_M32_x32}
-IJMP_%(RNMN(rm,64))_x64 //= IJMP%(RM)_x64.4
-    {isa x64only; ops %(grm(rm,64)) src/i; flags cf_jmp %RMEM;
-    rex %rm 0 0 src; coding !par(0xff) !RRM%(RM)(!sub(4),$src);
-    fold IJMP_M64_x64}
-ICALL_%(RNMN(rm,32))_x32 //= ICALL%(RM)_x32.2
-    {isa x32only; ops %(grm(rm,32)) src/i; flags cf_call %RMEM;
-    rex %rm 0 0 src; coding !par(0xff) !RRM%(RM)(!sub(2),$src);
-    fold ICALL_M32_x32}
-ICALL_%(RNMN(rm,64))_x64 //= ICALL%(RM)_x64.2
-    {isa x64only; ops %(grm(rm,64)) src/i; flags cf_call %RMEM;
-    rex %rm 0 0 src; coding !par(0xff) !RRM%(RM)(!sub(2),$src);
-    fold ICALL_M64_x64}
+//DISABLED: IJMP_%(RNMN(rm,32))_x32 //= IJMP%(RM)_x32.4
+//    {isa x32only; ops %(grm(rm,32)) src/i; flags cf_jmp %RMEM;
+//    rex %rm 0 0 src; coding !par(0xff) !RRM%(RM)(!sub(4),$src);
+//    fold IJMP_M32_x32}
+//DISABLED: IJMP_%(RNMN(rm,64))_x64 //= IJMP%(RM)_x64.4
+//    {isa x64only; ops %(grm(rm,64)) src/i; flags cf_jmp %RMEM;
+//    rex %rm 0 0 src; coding !par(0xff) !RRM%(RM)(!sub(4),$src);
+//    fold IJMP_M64_x64}
+//DISABLED: ICALL_%(RNMN(rm,32))_x32 //= ICALL%(RM)_x32.2
+//    {isa x32only; ops %(grm(rm,32)) src/i; flags cf_call %RMEM;
+//    rex %rm 0 0 src; coding !par(0xff) !RRM%(RM)(!sub(2),$src);
+//    fold ICALL_M32_x32}
+//DISABLED: ICALL_%(RNMN(rm,64))_x64 //= ICALL%(RM)_x64.2
+//    {isa x64only; ops %(grm(rm,64)) src/i; flags cf_call %RMEM;
+//    rex %rm 0 0 src; coding !par(0xff) !RRM%(RM)(!sub(2),$src);
+//    fold ICALL_M64_x64}
 | end

 | ------------------------------------
@@ -401,7 +402,7 @@ CDQE {ops GR64{rax} dst/o, GR32{eax} src/i; rex rr 1 0 0; coding !par(0
 CWD {ops GR16{dx} dst/o, GR16{ax} src/i; pref 66; rex rr 0 0 0; coding !par(0x99)}
 CDQ {ops GR32{edx} dst/o, GR32{eax} src/i; rex rr 0 0 0; coding !par(0x99)}
 CQO {ops GR64{rdx} dst/o, GR64{rax} src/i; rex rr 1 0 0; coding !par(0x99); isa x64}
-PAUSE {} {pref f3; coding !parsub(0x90)}
+//DISABLED: PAUSE {} {pref f3; coding !parsub(0x90)}
 CLC {flags wflag_c; coding !parsub(0xf8)}
 //CLD {flags wflag_d; coding !parsub(0xfc)}
 //CLI {flags wflag_i; coding !parsub(0xfa)}
@@ -422,22 +423,22 @@ SFENCE {isa sse; flags rwmem; rex rr 0 0 0; extopcode 0f; coding !par(0xae) !RRM
 // {isa monitor; ops GR32{ecx} ext/i, GR32{eax} hint/i; flags rwmem;
 //     extopcode 0f; coding 0x01 !parsub(0xc9)} //--0xc9
-LOCK {coding !parsub(0xf0)}
+//DISABLED: LOCK {coding !parsub(0xf0)}
 //RDPMC {isa rdpmc; ops GR32{edx} hi/o, GR32{eax} lo/o, GR32{ecx} sel/i; extopcode 0f; coding !parsub(0x33)}

 //xacquire is ignored if hle is not present
 //useable with lock_{add,adc,and,btc,btr,bts,cmpxchg,cmpxchg8b,dec,inc,neg,not,or,sbb,sub,xor,xadd,xchg}_mr,
 //useable with xchg_mr
-XACQUIRE {isa hle; coding !parsub(0xf2)}
+//DISABLED: XACQUIRE {isa hle; coding !parsub(0xf2)}
 //xrelease is ignored if hle is not present
 //useable with lock_{add,adc,and,btc,btr,bts,cmpxchg,cmpxchg8b,dec,inc,neg,not,or,sbb,sub,xor,xadd,xchg}_mr,
 //useable with xchg_mr, mov_mr, mov_mi
-XRELEASE {isa hle; coding !parsub(0xf3)}
+//DISABLED: XRELEASE {isa hle; coding !parsub(0xf3)}
 // resume operation at fallback address of outermost xbegin fallback address, imm is provided as EAX[31:24]
-XABORT_I {isa rtm; ops i8 imm/i; rex rr 0 0 0; coding !par(0xc6) !RRMR(!sub(7),0) !I8($imm)}
-XBEGIN_REL16 {isa rtm; ops BB fback/i; flags cf_jmp cf_fallthru; pref 66; coding !par(0xc7) !RRMR(!sub(7),0) !REL16($fback)}
-XBEGIN_REL32 {isa rtm; ops BB fback/i; flags cf_jmp cf_fallthru; coding !par(0xc7) !RRMR(!sub(7),0) !REL32($fback)}
-XEND {isa rtm; extopcode 0f; coding 0x01 !parsub(0xd5)}
-XTEST {isa rtm; flags wflags; extopcode 0f; coding 0x01 !parsub(0xd6)}
+//DISABLED: XABORT_I {isa rtm; ops i8 imm/i; rex rr 0 0 0; coding !par(0xc6) !RRMR(!sub(7),0) !I8($imm)}
+//DISABLED: XBEGIN_REL16 {isa rtm; ops BB fback/i; flags cf_jmp cf_fallthru; pref 66; coding !par(0xc7) !RRMR(!sub(7),0) !REL16($fback)}
+//DISABLED: XBEGIN_REL32 {isa rtm; ops BB fback/i; flags cf_jmp cf_fallthru; coding !par(0xc7) !RRMR(!sub(7),0) !REL32($fback)}
+//DISABLED: XEND {isa rtm; extopcode 0f; coding 0x01 !parsub(0xd5)}
+//DISABLED: XTEST {isa rtm; flags wflags; extopcode 0f; coding 0x01 !parsub(0xd6)}

 XGETBV_EDX_EAX_ECX {isa avx; ops GR32{edx} hi/o, GR32{eax} lo/o, GR32{ecx} sel/i;
     extopcode 0f; coding !par(1) !RRMR(!sub(2),0)}
@@ -458,11 +459,11 @@ J%(w)_BB_FT //= JCC_BB_FT.%(l-1)
 JMP_BB {ops BB tgt/i; flags cf_jmp; coding !par(0xe9) !REL32_8_JMP($tgt)}
 JMP_FT {ops BB ft/i; flags cf_fallthru }
 RET {ops GR32{esp} sp/io; flags cf_ret; coding !par(0xc3) }
-RET_AMD {ops GR32{esp} sp/io; flags cf_ret; pref f3; coding !par(0xc3)}
+//DISABLED: RET_AMD {ops GR32{esp} sp/io; flags cf_ret; pref f3; coding !par(0xc3)}
 RET_I {ops GR32{esp} sp/io, i16 imm/i; flags cf_ret; coding !par(0xc2) !I16($imm) }
 PUSH_I {ops GR32{esp} sp/io, i32 imm/i; flags usemem; coding !par(0x68) !I32_8($imm) }
-CALL_I_x32 {isa x32only; ops GR32{esp} sp/io, i32 tgt/i; flags cf_call; coding !par(0xe8) !REL32($tgt) }
-CALL_I_x64 {isa x64; ops GR32{esp} sp/io, i64 tgt/i; flags cf_call; coding !par(0xe8) !REL32($tgt) }
+//DISABLED: CALL_I_x32 {isa x32only; ops GR32{esp} sp/io, i32 tgt/i; flags cf_call; coding !par(0xe8) !REL32($tgt) }
+//DISABLED: CALL_I_x64 {isa x64; ops GR32{esp} sp/io, i64 tgt/i; flags cf_call; coding !par(0xe8) !REL32($tgt) }

 PUSH_W_x32 {isa x32only; ops GR32{esp} sp/io, GR32 dst/i; flags usemem;
     rex rr 0 0 dst; coding !par(0x50)+($dst&7);
@@ -1108,10 +1109,13 @@ VPBLENDVB_%(XN(w))%(X(w))%(XNM(w))%(XN(w))
     fold %(V(w))PINSRW_%(XN(w))%(X(w))%(MN(16))I}
 | for m,u in ipairs{{"B",8,"0x14",32,"0x20"}, {"W",16,"0x15",32}, {"D",32,"0x16",32,"0x22"}, {"Q",64,"0x16",64,"0x22"}} do
 | local u1x = u[1] == "W" and w[3] == "rr" and "X" or ""
+| -- DISABLED sse41 pextrw pattern because we can't check it with nasm
+| if #u1x == 0 then -- DISABLED sse41 pextrw patternm because we can't check it with nasm
 %(V(w))PEXTR%(u[1])%(u1x)_%(rrm(w[3],RN(u[4]),"M"..u[2]))%(XN(w))I
     {isa %ISA sse41 %(u[4]==64 and "x64" or ""); ops %(rrm(w[3],greg(u[4]),gmem(u[2]))) dst/%(ROMI), %VR src/i, i8 imm/i; flags %WMEM;
     pref 66; %(VEX_S0D(w,r64bit(u[4]))); extopcode 0f3a; coding !parsub(%(u[3])) !RRM%(RM)($src,$dst) !I8($imm);
     fold %(V(w))PEXTR%(u[1])_%("M"..u[2])%(XN(w))I}
+| end
 | if u[2] ~= 16 then
 %(V(w))PINSR%(u[1])_%(XN(w))%(X(w))%(rrm(w[3],RN(u[4]),"M"..u[2]))I
     {isa %ISA sse41 %(u[4]==64 and "x64" or ""); ops %(DST(w,VR)), %(rrm(w[3],greg(u[4]),gmem(u[2]))) src/i, i8 imm/i; flags %RMEM;
diff --git a/tests/test_allinstructions.cpp b/tests/test_allinstructions.cpp
index eb07bee..37244b2 100644
--- a/tests/test_allinstructions.cpp
+++ b/tests/test_allinstructions.cpp
@@ -105,9 +105,10 @@ static void test(UnitTest& t) {
                prefix + fn + ".bin");

     for (auto v: allins) {
+      u32 vv = v & 0x7fffffff;
       allins_compressed.insert
-        (((v >> x86_64::ICL_NonMainBits) << x86_64::ICL_SubBits)
-         + (v & x86_64::ICL_SubMask));
+        (((vv >> x86_64::ICL_NonMainBits) << x86_64::ICL_SubBits)
+         + (vv & x86_64::ICL_SubMask));
     }

     size_t n = 0;
diff --git a/tests/x86/x86_cf.ins b/tests/x86/x86_cf.ins
index 65af04f..528f5f0 100644
--- a/tests/x86/x86_cf.ins
+++ b/tests/x86/x86_cf.ins
@@ -7,3 +7,5 @@
 {:}bbjmp<:>
 {JMP_FT exit}
 {:exit}
+  ret{_i} 2
+{:exit2}
diff --git a/tests/x86/x86_other.ins b/tests/x86/x86_other.ins
index ab42199..05d22ff 100644
--- a/tests/x86/x86_other.ins
+++ b/tests/x86/x86_other.ins
@@ -51,6 +51,7 @@ $(X86(Y)) RDRAND{_$(RSZ(Y))} $(RNAME("ECX",Y))
 CBW
 CWDE
 CWD
+64: CDQE
 PUSHF
 POPF
 LFENCE
-- 
2.11.4.GIT