2 * Copyright (C) 2024 Mikulas Patocka
4 * This file is part of Ajla.
6 * Ajla is free software: you can redistribute it and/or modify it under the
7 * terms of the GNU General Public License as published by the Free Software
8 * Foundation, either version 3 of the License, or (at your option) any later
11 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
12 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
13 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License along with
16 * Ajla. If not, see <https://www.gnu.org/licenses/>.
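/* Encoding constants for the x86/x86-64 code generator.  The names below use
   the 16-bit mnemonic (e.g. _R16_RM16); the same opcode bytes also encode the
   32-bit and 64-bit forms, selected at emission time by the 0x66 operand-size
   prefix or the REX.W bit. */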
19 #define X86_ALU_RM8_R8 0x00
20 #define X86_ALU_RM16_R16 0x01
21 #define X86_ALU_R8_RM8 0x02
22 #define X86_ALU_R16_RM16 0x03
23 #define X86_ALU_AL_IMM8 0x04
24 #define X86_ALU_AX_IMM16 0x05
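/* REX prefix bits: B extends ModRM.rm / SIB.base, X extends SIB.index,
   R extends ModRM.reg, W selects 64-bit operand size. */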
31 #define X86_REX_B 0x01
32 #define X86_REX_X 0x02
33 #define X86_REX_R 0x04
34 #define X86_REX_W 0x08
35 #define X86_INC_R16 0x40
36 #define X86_DEC_R16 0x48
37 #define X86_PUSH_R16 0x50
38 #define X86_POP_R16 0x58
39 #define X86_MOVSXD 0x63
42 #define X86_OP_SIZE 0x66
43 #define X86_PUSH_IMM16 0x68
44 #define X86_IMUL_R16_RM16_IMM16 0x69
45 #define X86_PUSH_IMM8 0x6a
46 #define X86_IMUL_R16_RM16_IMM8 0x6b
47 #define X86_JCC_8 0x70
48 #define X86_ALU_RM8_IMM8 0x80
49 #define X86_ALU_RM16_IMM16 0x81
50 #define X86_ALU_RM16_IMM8 0x83
51 #define X86_TEST_RM8_R8 0x84
52 #define X86_TEST_RM16_R16 0x85
53 #define X86_MOV_RM8_R8 0x88
54 #define X86_MOV_RM16_R16 0x89
55 #define X86_MOV_R8_RM8 0x8a
56 #define X86_MOV_R16_RM16 0x8b
57 #define X86_LEA_R16_RM16 0x8d
60 #define X86_MOV_AL_M16 0xa0
61 #define X86_MOV_AX_M16 0xa1
62 #define X86_MOV_M16_AL 0xa2
63 #define X86_MOV_M16_AX 0xa3
64 #define X86_MOVSB 0xa4
65 #define X86_TEST_AL_IMM8 0xa8
66 #define X86_TEST_AX_IMM16 0xa9
67 #define X86_STOSB 0xaa
68 #define X86_MOV_R16_IMM16 0xb8
69 #define X86_ROT_RM8_IMM8 0xc0
70 #define X86_ROT_RM16_IMM8 0xc1
71 #define X86_RET_IMM16 0xc2
73 #define X86_VEX_3 0xc4
74 #define X86_VEX_2 0xc5
75 #define X86_MOV_RM8_IMM8 0xc6
76 #define X86_MOV_RM16_IMM16 0xc7
77 #define X86_MOV_R16_IMM16_REG 0x0
79 #define X86_8F_POP 0x0
80 #define X86_ROT_RM8_1 0xd0
81 #define X86_ROT_RM16_1 0xd1
82 #define X86_ROT_RM8_CL 0xd2
83 #define X86_ROT_RM16_CL 0xd3
84 #define X86_JMP_16 0xe9
85 #define X86_JMP_8 0xeb
86 #define X86_REPNE 0xf2
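/* ModRM.reg opcode extensions ("/digit") selecting the operation within the
   0xf6/0xf7, 0xfe and 0xff opcode groups. */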
89 #define X86_F6_TEST_RM8_IMM8 0x0
90 #define X86_F6_NOT_RM8 0x2
91 #define X86_F6_NEG_RM8 0x3
92 #define X86_F6_MUL_RM8 0x4
93 #define X86_F6_IMUL_RM8 0x5
94 #define X86_F6_DIV_RM8 0x6
95 #define X86_F6_IDIV_RM8 0x7
97 #define X86_F7_TEST_RM16_IMM16 0x0
98 #define X86_F7_NOT_RM16 0x2
99 #define X86_F7_NEG_RM16 0x3
100 #define X86_F7_MUL_RM16 0x4
101 #define X86_F7_IMUL_RM16 0x5
102 #define X86_F7_DIV_RM16 0x6
103 #define X86_F7_IDIV_RM16 0x7
105 #define X86_FE_INC_RM8 0x0
106 #define X86_FE_DEC_RM8 0x1
108 #define X86_FF_INC_RM16 0x0
109 #define X86_FF_DEC_RM16 0x1
110 #define X86_FF_CALL_INDIRECT 0x2
111 #define X86_FF_JMP_INDIRECT 0x4
112 #define X86_FF_PUSH 0x6
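/* Two-byte opcodes reached through the 0x0f escape byte, plus the
   0x0f 0x38 and 0x0f 0x3a three-byte maps further below. */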
114 #define X86_0F_MOVSS_X128_M32 0x10
115 #define X86_0F_MOVSS_M32_X128 0x11
116 #define X86_0F_MOVAPS_X128_M128 0x28
117 #define X86_0F_MOVAPS_M128_X128 0x29
118 #define X86_0F_CVTSI2SS_X128_RM32 0x2a
119 #define X86_0F_CVTTSS2SI_X128_RM32 0x2c
120 #define X86_0F_UCOMISS_X128_RM32 0x2e
121 #define X86_0F_38 0x38
122 #define X86_0F_3A 0x3a
123 #define X86_0F_CMOVCC_R16_RM16 0x40
124 #define X86_0F_SQRTPS_X128_M32 0x51
125 #define X86_0F_ANDPS_X128_M128 0x54
126 #define X86_0F_ANDNPS_X128_M128 0x55
127 #define X86_0F_ORPS_X128_M128 0x56
128 #define X86_0F_XORPS_X128_M128 0x57
129 #define X86_0F_ADDPS_X128_M32 0x58
130 #define X86_0F_MULPS_X128_M32 0x59
131 #define X86_0F_SUBPS_X128_M32 0x5c
132 #define X86_0F_DIVPS_X128_M32 0x5e
133 #define X86_0F_JCC_16 0x80
134 #define X86_0F_SETCC_RM8 0x90
135 #define X86_0F_BT_RM16_R16 0xa3
136 #define X86_0F_BTS_RM16_R16 0xab
137 #define X86_0F_BTR_RM16_R16 0xb3
138 #define X86_0F_BTX_RM16_IMM8 0xba
139 #define X86_0F_BTX_BT_RM16_IMM8 0x4
140 #define X86_0F_BTX_BTS_RM16_IMM8 0x5
141 #define X86_0F_BTX_BTR_RM16_IMM8 0x6
142 #define X86_0F_BTX_BTC_RM16_IMM8 0x7
143 #define X86_0F_BSWAP 0xc8
144 #define X86_0F_BTC_RM16_R16 0xbb
145 #define X86_0F_IMUL_R16_RM16 0xaf
146 #define X86_0F_MOVZX_R16_RM8 0xb6
147 #define X86_0F_MOVZX_R16_RM16 0xb7
148 #define X86_0F_POPCNT_R16_RM16 0xb8
149 #define X86_0F_BSF_R16_RM16 0xbc
150 #define X86_0F_BSR_R16_RM16 0xbd
151 #define X86_0F_MOVSX_R16_RM8 0xbe
152 #define X86_0F_MOVSX_R16_RM16 0xbf
153 #define X86_0F_PINSRW_X128_RM16_IMM8 0xc4
155 #define X86_0F_38_CVTPH2PS_X128_RM64 0x13
156 #define X86_0F_38_ROTX 0xf7
158 #define X86_0F_3A_ROUNDSS_X128_M32 0x0a
159 #define X86_0F_3A_ROUNDSD_X128_M64 0x0b
160 #define X86_0F_3A_PEXTRW_RM16_X128_IMM8 0x15
161 #define X86_0F_3A_CVTPS2PH_RM64_X128 0x1d
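/* x87 opcodes.  Each *_X constant is the ModRM.reg field ("/digit") that
   selects the operation within the escape byte defined just above it. */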
163 #define X87_FLD_RM32 0xd9
164 #define X87_FLD_RM32_X 0x0
165 #define X87_FLDCW 0xd9
166 #define X87_FLDCW_X 0x5
167 #define X87_FILD_M32 0xdb
168 #define X87_FILD_M32_X 0x0
169 #define X87_FISTTP_M32 0xdb
170 #define X87_FISTTP_M32_X 0x1
171 #define X87_FISTP_M32 0xdb
172 #define X87_FISTP_M32_X 0x3
173 #define X87_FLD_M80 0xdb
174 #define X87_FLD_M80_X 0x5
175 #define X87_FLD_M64 0xdd
176 #define X87_FLD_M64_X 0x0
177 #define X87_FSTP_M32 0xd9
178 #define X87_FSTP_M32_X 0x3
179 #define X87_FSTP_M80 0xdb
180 #define X87_FSTP_M80_X 0x7
181 #define X87_FSTP_RM64 0xdd
182 #define X87_FSTP_RM64_X 0x3
183 #define X87_FALU_ST_RM32 0xd8
184 #define X87_FCHS 0xd9
185 #define X87_FCHS_2 0xe0
186 #define X87_FSQRT 0xd9
187 #define X87_FSQRT_2 0xfa
188 #define X87_FRNDINT 0xd9
189 #define X87_FRNDINT_2 0xfc
190 #define X87_FALU_ST_M64 0xdc
191 #define X87_FALU_STi_ST 0xdc
192 #define X87_FISTTP_M64 0xdd
193 #define X87_FISTTP_M64_X 0x1
194 #define X87_FALUP_STi_ST0 0xde
195 #define X87_ALU_ADD 0x0
196 #define X87_ALU_MUL 0x1
197 #define X87_ALU_FCOM 0x2
198 #define X87_ALU_FCOMP 0x3
199 #define X87_ALU_SUBR 0x4
200 #define X87_ALU_SUB 0x5
201 #define X87_ALU_DIVR 0x6
202 #define X87_ALU_DIV 0x7
203 #define X87_FCOMPP 0xde
204 #define X87_FCOMPP_2 0xd9
205 #define X87_FILD_M16 0xdf
206 #define X87_FILD_M16_X 0x0
207 #define X87_FISTTP_M16 0xdf
208 #define X87_FISTTP_M16_X 0x1
209 #define X87_FISTP_M16 0xdf
210 #define X87_FISTP_M16_X 0x3
211 #define X87_FILD_M64 0xdf
212 #define X87_FILD_M64_X 0x5
213 #define X87_FISTP_M64 0xdf
214 #define X87_FISTP_M64_X 0x7
215 #define X87_FNSTSW 0xdf
216 #define X87_FNSTSW_2 0xe0
217 #define X87_FCOMIP 0xdf
218 #define X87_FCOMIP_2 0xf0
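/* Mandatory SSE prefix selector: emitted literally (none, 0x66, 0xf3, 0xf2)
   for legacy SSE encodings, or packed into the VEX.pp field when a VEX
   prefix is generated. */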
220 #define SSE_PREFIX_NONE 0
221 #define SSE_PREFIX_66 1
222 #define SSE_PREFIX_F3 2
223 #define SSE_PREFIX_F2 3
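/* Opcode map selector: PREFIX_NONE is the one-byte map; the others pick the
   0x0f, 0x0f 0x38 and 0x0f 0x3a escapes (the VEX.mmmmm field when
   VEX-encoded). */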
225 #define PREFIX_NONE 0
227 #define PREFIX_0F_38 2
228 #define PREFIX_0F_3A 3
231 #define cgen_rex(rex) internal(file_line, "cgen_rex: attempting to generate rex in 32-bit mode: %02x", rex)
233 #define cgen_rex(rex) cgen_one(rex)
236 #define force_vex 0x10000
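/* Core emitter: writes the prefixes (legacy SSE or VEX, chosen from
   sse_prefix and the CPU features, plus operand-size and REX as needed),
   then the opcode and the ModRM byte, with a SIB byte and displacement when
   arg describes a memory operand.  reg goes into the ModRM.reg field,
   either a real register (when reg_is_reg) or an opcode extension. */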
238 static bool attr_w cgen_rm_insn(struct codegen_context *ctx, int32_t sse_prefix, uint8_t prefix, uint8_t opcode, unsigned size, bool reg_is_reg, uint8_t reg, uint8_t *arg)
240 uint8_t rex, mod, rm;
242 int64_t imm = 0; /* avoid warning */
247 if (unlikely(R_IS_XMM(reg)))
249 if (unlikely(R_IS_XMM(arg[0]))) {
250 arg_reg = arg[0] - R_XMM0;
253 if (unlikely(!R_IS_GPR(reg)))
254 internal(file_line, "cgen_rm_insn: invalid register %02x", reg);
258 if (size == OP_SIZE_8)
269 uint8_t *imm_ptr = arg + arg_size(arg[0]) - 8;
270 imm = get_imm(imm_ptr);
271 if (unlikely(!imm_is_32bit(imm)))
272 internal(file_line, "cgen_rm_insn: immediate out of range: %"PRIxMAX"", (uintmax_t)imm);
273 if (arg[0] == ARG_ADDRESS_0) {
283 } else if (imm >= -0x80 && imm <= 0x7f) {
288 if ((arg[1] & 7) == 0x5 && addr_size == 0)
292 else if (addr_size == 1)
296 if (arg[0] == ARG_ADDRESS_1) {
297 if (reg_is_segment(arg[1])) {
298 static const uint8_t segments[6] = { X86_ES, X86_CS, X86_SS, X86_DS, X86_FS, X86_GS };
299 cgen_one(segments[arg[1] - R_ES]);
304 if ((arg[1] & 7) == 0x4) {
312 if (arg[0] >= ARG_ADDRESS_1_2 && arg[0] <= ARG_ADDRESS_1_8) {
313 if (unlikely(arg[1] == R_SP))
314 internal(file_line, "cgen_rm_insn: attempting to scale SP");
320 sib = ((arg[0] - ARG_ADDRESS_1) << 6) | ((arg[1] & 7) << 3) | 0x5;
323 if (arg[0] >= ARG_ADDRESS_2 && arg[0] <= ARG_ADDRESS_2_8) {
324 if (unlikely(arg[2] == R_SP))
325 internal(file_line, "cgen_rm_insn: attempting to scale SP");
331 sib = ((arg[0] - ARG_ADDRESS_2) << 6) | ((arg[2] & 7) << 3) | (arg[1] & 7);
334 internal(file_line, "cgen_rm_insn: invalid argument %02x", arg[0]);
338 if (unlikely(sse_prefix >= 0)) {
339 if (likely(cpu_test_feature(CPU_FEATURE_avx)) || (sse_prefix & force_vex)) {
340 if ((rex & (X86_REX_X | X86_REX_B | X86_REX_W)) == 0 && prefix == PREFIX_0F) {
342 cgen_one((~rex & X86_REX_R) << 5 | (~(sse_prefix >> 8) & 0xf) << 3 | (sse_prefix & 3));
345 cgen_one((~rex & (X86_REX_R | X86_REX_X | X86_REX_B)) << 5 | prefix);
346 cgen_one((rex & X86_REX_W) << 4 | (~(sse_prefix >> 8) & 0xf) << 3 | (sse_prefix & 3));
350 switch (sse_prefix & 3) {
351 case SSE_PREFIX_66: cgen_one(X86_OP_SIZE); break;
352 case SSE_PREFIX_F3: cgen_one(X86_REPE); break;
353 case SSE_PREFIX_F2: cgen_one(X86_REPNE); break;
356 if (size == OP_SIZE_2)
357 cgen_one(X86_OP_SIZE);
359 need_rex = rex != X86_REX;
360 need_rex |= size == OP_SIZE_1 && ((reg_is_reg && !reg_is_fp(reg) && reg >= 4) || (mod == 0xc0 && !reg_is_fp(arg[0]) && arg[0] >= 4));
361 if (prefix == PREFIX_0F && (opcode == X86_0F_MOVZX_R16_RM8 || opcode == X86_0F_MOVSX_R16_RM8)) {
362 need_rex |= mod == 0xc0 && arg[0] >= 4;
382 internal(file_line, "cgen_rm_insn: invalid prefix %u", prefix);
386 cgen_one(mod | ((reg & 7) << 3) | (rm & 7));
400 static bool attr_w cgen_sse_insn(struct codegen_context *ctx, unsigned sse_prefix, unsigned sse_op_map, uint8_t opcode, bool wide, uint8_t reg, uint8_t reg2, uint8_t *arg)
402 g(cgen_rm_insn(ctx, sse_prefix + (reg2 << 8), sse_op_map, opcode, !wide ? OP_SIZE_4 : OP_SIZE_8, true, reg, arg));
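/* PUSH of a register, an immediate or a memory operand; registers use the
   short 0x50+r form, immediates use the sign-extended 8-bit form when they
   fit and the 32-bit form otherwise. */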
406 static bool attr_w cgen_push(struct codegen_context *ctx)
408 uint8_t *arg1 = ctx->code_position;
409 ctx->code_position += arg_size(*arg1);
410 if (likely(R_IS_GPR(arg1[0]))) {
412 cgen_rex(X86_REX | X86_REX_B);
413 cgen_one(X86_PUSH_R16 + (arg1[0] & 7));
416 if (arg1[0] == ARG_IMM) {
418 imm = get_imm(&arg1[1]);
419 if (unlikely(!imm_is_32bit(imm)))
420 internal(file_line, "cgen_push: immediate out of range: %"PRIxMAX"", (uintmax_t)imm);
421 if (imm >= -0x80 && imm <= 0x7f) {
422 cgen_one(X86_PUSH_IMM8);
426 cgen_one(X86_PUSH_IMM16);
431 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_FF, OP_SIZE_4, false, X86_FF_PUSH, arg1));
435 static bool attr_w cgen_pop(struct codegen_context *ctx)
437 uint8_t *arg1 = ctx->code_position;
438 ctx->code_position += arg_size(*arg1);
439 if (likely(R_IS_GPR(arg1[0]))) {
441 cgen_rex(X86_REX | X86_REX_B);
442 cgen_one(X86_POP_R16 + (arg1[0] & 7));
445 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_8F, OP_SIZE_4, false, X86_8F_POP, arg1));
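/* MOV in all the forms the backend needs: immediate loads, the short
   accumulator<->absolute-address forms, SSE loads and stores (MOVSS/MOVSD,
   MOVAPS, PINSRW/PEXTRW for 16-bit halves) and plain register/memory moves,
   with zero extension for sub-32-bit register destinations. */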
449 static bool attr_w cgen_mov(struct codegen_context *ctx, unsigned size)
451 uint8_t *arg1 = ctx->code_position;
452 uint8_t *arg2 = arg1 + arg_size(*arg1);
453 ctx->code_position = arg2 + arg_size(*arg2);
454 if (arg2[0] == ARG_IMM) {
457 imm = get_imm(&arg2[1]);
458 if (R_IS_GPR(arg1[0])) {
462 if (imm >= 0 && imm < 0x100000000LL)
467 cgen_one(X86_MOV_R16_IMM16 + (arg1[0] & 7));
471 if (imm >= ~(int64_t)0x7fffffff && imm < 0) {
476 cgen_one(X86_MOV_R16_IMM16 + (arg1[0] & 7));
480 if (size < OP_SIZE_4) {
481 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_MOV_RM8_IMM8 : X86_MOV_RM16_IMM16, size, false, X86_MOV_R16_IMM16_REG, arg1));
482 if (size == OP_SIZE_1)
489 if (unlikely(!imm_is_32bit(imm)))
490 internal(file_line, "cgen_mov: immediate out of range: %"PRIxMAX"", (uintmax_t)imm);
491 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_MOV_RM16_IMM16, maximum(size, OP_SIZE_4), false, X86_MOV_R16_IMM16_REG, arg1));
496 if (arg1[0] == R_AX && size >= OP_SIZE_4 && arg2[0] == ARG_ADDRESS_0) {
498 imm = get_imm(&arg2[1]);
499 if (size == OP_SIZE_8)
500 cgen_rex(X86_REX | X86_REX_W);
501 cgen_one(X86_MOV_AX_M16);
505 if (arg1[0] == ARG_ADDRESS_0 && arg2[0] == R_AX) {
506 uint8_t code = size == OP_SIZE_1 ? X86_MOV_M16_AL : X86_MOV_M16_AX;
508 imm = get_imm(&arg1[1]);
509 if (size == OP_SIZE_2)
510 cgen_one(X86_OP_SIZE);
511 if (size == OP_SIZE_8)
512 cgen_rex(X86_REX | X86_REX_W);
517 if (R_IS_XMM(arg1[0]) && ARG_IS_ADDRESS(arg2[0])) {
518 if (size == OP_SIZE_2) {
519 g(cgen_sse_insn(ctx, SSE_PREFIX_66, PREFIX_0F, X86_0F_PINSRW_X128_RM16_IMM8, false, arg1[0], R_XMM7, arg2));
523 if (size == OP_SIZE_16) {
524 g(cgen_sse_insn(ctx, SSE_PREFIX_NONE, PREFIX_0F, X86_0F_MOVAPS_X128_M128, false, arg1[0], 0, arg2));
527 g(cgen_sse_insn(ctx, size == OP_SIZE_4 ? SSE_PREFIX_F3 : SSE_PREFIX_F2, PREFIX_0F, X86_0F_MOVSS_X128_M32, false, arg1[0], 0, arg2));
530 if (ARG_IS_ADDRESS(arg1[0]) && R_IS_XMM(arg2[0])) {
531 if (size == OP_SIZE_2) {
532 g(cgen_sse_insn(ctx, SSE_PREFIX_66, PREFIX_0F_3A, X86_0F_3A_PEXTRW_RM16_X128_IMM8, false, arg2[0], 0, arg1));
536 if (size == OP_SIZE_16) {
537 g(cgen_sse_insn(ctx, SSE_PREFIX_NONE, PREFIX_0F, X86_0F_MOVAPS_M128_X128, false, arg2[0], 0, arg1));
540 g(cgen_sse_insn(ctx, size == OP_SIZE_4 ? SSE_PREFIX_F3 : SSE_PREFIX_F2, PREFIX_0F, X86_0F_MOVSS_M32_X128, false, arg2[0], 0, arg1));
543 if (R_IS_XMM(arg1[0]) && R_IS_XMM(arg2[0])) {
544 g(cgen_sse_insn(ctx, SSE_PREFIX_NONE, PREFIX_0F, X86_0F_MOVAPS_X128_M128, false, arg1[0], 0, arg2));
547 if (!R_IS_GPR(arg1[0]) && unlikely(!R_IS_GPR(arg2[0]))) {
548 /*debug("%s", da(ctx->fn,function)->function_name);*/
549 internal(file_line, "cgen_mov: two addresses not supported");
551 if (!R_IS_GPR(arg1[0])) {
552 uint8_t code = size == OP_SIZE_1 ? X86_MOV_RM8_R8 : X86_MOV_RM16_R16;
553 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, code, size, true, arg2[0], arg1));
555 } else if (size >= OP_SIZE_4) {
556 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_MOV_R16_RM16, size, true, arg1[0], arg2));
559 uint8_t code = size == OP_SIZE_1 ? X86_0F_MOVZX_R16_RM8 : X86_0F_MOVZX_R16_RM16;
560 g(cgen_rm_insn(ctx, -1, PREFIX_0F, code, OP_SIZE_4, true, arg1[0], arg2));
565 static bool attr_w cgen_movsx(struct codegen_context *ctx, unsigned size)
567 uint8_t *arg1, *arg2;
568 if (unlikely(size == OP_SIZE_NATIVE)) {
569 g(cgen_mov(ctx, size));
572 arg1 = ctx->code_position;
573 arg2 = arg1 + arg_size(*arg1);
574 ctx->code_position = arg2 + arg_size(*arg2);
575 if (size <= OP_SIZE_2) {
576 g(cgen_rm_insn(ctx, -1, PREFIX_0F, size == OP_SIZE_1 ? X86_0F_MOVSX_R16_RM8 : X86_0F_MOVSX_R16_RM16, OP_SIZE_NATIVE, true, arg1[0], arg2));
578 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_MOVSXD, OP_SIZE_NATIVE, true, arg1[0], arg2));
583 static bool attr_w cgen_lea(struct codegen_context *ctx, unsigned size)
586 uint8_t *arg1, *arg2, *arg3;
588 arg1 = ctx->code_position;
589 arg2 = arg1 + arg_size(*arg1);
590 arg3 = arg2 + arg_size(*arg2);
591 ctx->code_position = arg3 + arg_size(*arg3);
593 if (arg3[0] == ARG_IMM) {
594 if (arg2[0] == ARG_SHIFTED_REGISTER) {
595 if (unlikely((arg2[1] & ARG_SHIFT_MODE) != ARG_SHIFT_LSL) ||
596 unlikely((arg2[1] & ARG_SHIFT_AMOUNT) > 3))
598 addr[0] = ARG_ADDRESS_1 + (arg2[1] & ARG_SHIFT_AMOUNT);
600 memcpy(&addr[2], &arg3[1], 8);
602 addr[0] = ARG_ADDRESS_1;
604 memcpy(&addr[2], &arg3[1], 8);
606 } else if (R_IS_GPR(arg3[0])) {
607 addr[0] = ARG_ADDRESS_2;
610 memset(&addr[3], 0, 8);
611 } else if (arg3[0] == ARG_SHIFTED_REGISTER) {
612 if (unlikely((arg3[1] & ARG_SHIFT_MODE) != ARG_SHIFT_LSL) ||
613 unlikely((arg3[1] & ARG_SHIFT_AMOUNT) > 3))
615 addr[0] = ARG_ADDRESS_2 + (arg3[1] & ARG_SHIFT_AMOUNT);
618 memset(&addr[3], 0, 8);
621 internal(file_line, "cgen_lea: invalid argument %02x, %02x, %02x", arg1[0], arg2[0], arg3[0]);
623 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_LEA_R16_RM16, size, true, arg1[0], addr));
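/* Two-operand ALU operation; the destination and the first source must name
   the same operand (three-operand mode is rejected).  IMUL and immediate
   operands get their special encodings. */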
627 static bool attr_w cgen_alu(struct codegen_context *ctx, unsigned size, unsigned alu)
629 uint8_t *arg1, *arg2, *arg3;
631 arg1 = ctx->code_position;
632 arg2 = arg1 + arg_size(*arg1);
633 arg3 = arg2 + arg_size(*arg2);
634 ctx->code_position = arg3 + arg_size(*arg3);
635 if (unlikely(arg_size(*arg1) != arg_size(*arg2)))
636 internal(file_line, "cgen_alu: three-operand mode not supported");
637 if (unlikely(memcmp(arg1, arg2, arg_size(*arg1))))
638 internal(file_line, "cgen_alu: three-operand mode not supported");
640 arg1 = ctx->code_position;
642 arg3 = arg2 + arg_size(*arg2);
643 ctx->code_position = arg3 + arg_size(*arg3);
646 if (unlikely(alu == ALU_MUL)) {
647 if (unlikely(arg3[0] == ARG_IMM)) {
650 imm = get_imm(&arg3[1]);
651 if (unlikely(!imm_is_32bit(imm)))
652 internal(file_line, "cgen_alu: immediate out of range: %"PRIxMAX"", (uintmax_t)imm);
653 code = imm_is_8bit(imm) ? X86_IMUL_R16_RM16_IMM8 : X86_IMUL_R16_RM16_IMM16;
654 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, code, size, true, arg1[0], arg2));
655 if (code == X86_IMUL_R16_RM16_IMM8) {
657 } else if (size == OP_SIZE_2) {
664 if (unlikely(size == OP_SIZE_1)) {
665 if (unlikely(arg1[0] != R_AX))
666 internal(file_line, "cgen_alu: imul with unsupported register");
667 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_F6, size, false, X86_F6_IMUL_RM8, arg3));
670 if (unlikely(!R_IS_GPR(arg1[0])))
671 internal(file_line, "cgen_alu: invalid multiply args");
672 g(cgen_rm_insn(ctx, -1, PREFIX_0F, X86_0F_IMUL_R16_RM16, size, true, arg1[0], arg3));
677 if (arg3[0] == ARG_IMM) {
681 imm = get_imm(&arg3[1]);
682 if (unlikely(!imm_is_32bit(imm)))
683 internal(file_line, "cgen_alu: immediate out of range: %"PRIxMAX"", (uintmax_t)imm);
685 if (arg1[0] == R_AX) {
686 if (imm_is_8bit(imm) && size >= OP_SIZE_4)
689 code = size == OP_SIZE_1 ? X86_ALU_AL_IMM8 : X86_ALU_AX_IMM16;
690 if (size == OP_SIZE_2)
691 cgen_one(X86_OP_SIZE);
692 if (size == OP_SIZE_8)
693 cgen_rex(X86_REX | X86_REX_W);
698 bit8 = imm_is_8bit(imm);
699 code = size == OP_SIZE_1 ? X86_ALU_RM8_IMM8 : bit8 ? X86_ALU_RM16_IMM8 : X86_ALU_RM16_IMM16;
700 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, code, size, false, alu, arg1));
702 if (bit8 || size == OP_SIZE_1) {
704 } else if (size == OP_SIZE_2) {
712 if (R_IS_XMM(arg1[0]) && size == OP_SIZE_16) {
715 case ALU_AND: code = X86_0F_ANDPS_X128_M128; break;
716 case ALU_ANDN: code = X86_0F_ANDNPS_X128_M128; break;
717 case ALU_OR: code = X86_0F_ORPS_X128_M128; break;
718 case ALU_XOR: code = X86_0F_XORPS_X128_M128; break;
719 default: internal(file_line, "invalid sse alu: %u", alu);
721 g(cgen_sse_insn(ctx, SSE_PREFIX_NONE, PREFIX_0F, code, false, arg1[0], arg2[0], arg3));
725 if (!R_IS_GPR(arg1[0]) && unlikely(!R_IS_GPR(arg3[0])))
726 internal(file_line, "cgen_alu: two addresses not supported");
728 if (!R_IS_GPR(arg1[0])) {
729 uint8_t code = size == OP_SIZE_1 ? X86_ALU_RM8_R8 : X86_ALU_RM16_R16;
731 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, code, size, true, arg3[0], arg1));
734 uint8_t code = size == OP_SIZE_1 ? X86_ALU_R8_RM8 : X86_ALU_R16_RM16;
736 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, code, size, true, arg1[0], arg3));
741 static bool attr_w cgen_alu1(struct codegen_context *ctx, unsigned size, unsigned alu, unsigned writes_flags)
744 uint8_t *arg1 = ctx->code_position;
745 uint8_t *arg2 = arg1 + arg_size(*arg1);
746 ctx->code_position = arg2 + arg_size(*arg2);
747 if (alu == ALU1_NOT || alu == ALU1_NEG || alu == ALU1_INC || alu == ALU1_DEC || alu == ALU1_BSWAP) {
748 if (unlikely(arg1[0] != arg2[0]))
749 internal(file_line, "cgen_alu1: arguments mismatch: %x, %x", arg1[0], arg2[0]);
753 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_F6 : X86_F7, size, false, X86_F6_NOT_RM8, arg1));
756 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_F6 : X86_F7, size, false, X86_F6_NEG_RM8, arg1));
759 if (writes_flags & 2) {
760 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_ALU_RM8_IMM8 : X86_ALU_RM16_IMM8, size, false, ALU_ADD, arg1));
765 if (R_IS_GPR(arg1[0]) && size >= OP_SIZE_2) {
766 if (size == OP_SIZE_2)
767 cgen_one(X86_OP_SIZE);
768 cgen_one(X86_INC_R16 + arg1[0]);
772 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_FE : X86_FF, size, false, X86_FE_INC_RM8, arg1));
775 if (writes_flags & 2) {
776 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_ALU_RM8_IMM8 : X86_ALU_RM16_IMM8, size, false, ALU_SUB, arg1));
781 if (R_IS_GPR(arg1[0]) && size >= OP_SIZE_2) {
782 if (size == OP_SIZE_2)
783 cgen_one(X86_OP_SIZE);
784 cgen_one(X86_DEC_R16 + arg1[0]);
788 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_FE : X86_FF, size, false, X86_FE_DEC_RM8, arg1));
791 if (unlikely(size <= OP_SIZE_2))
792 internal(file_line, "cgen_alu1: bytes or words not supported with this operation");
794 if (!R_IS_GPR(arg1[0]))
795 internal(file_line, "cgen_alu1: bswap needs a register");
798 if (size == OP_SIZE_8)
803 cgen_one(X86_0F_BSWAP + (arg1[0] & 7));
809 if (unlikely(size == OP_SIZE_1))
810 internal(file_line, "cgen_alu1: bytes not supported with this operation");
811 if (alu == ALU1_POPCNT || alu == ALU1_LZCNT)
813 g(cgen_rm_insn(ctx, -1, PREFIX_0F, alu == ALU1_BSF ? X86_0F_BSF_R16_RM16 : alu == ALU1_BSR || alu == ALU1_LZCNT ? X86_0F_BSR_R16_RM16 : X86_0F_POPCNT_R16_RM16, size, true, arg1[0], arg2));
816 internal(file_line, "cgen_alu1: invalid operation %u", alu);
821 static bool attr_w cgen_test(struct codegen_context *ctx, unsigned size)
823 uint8_t *arg1, *arg2;
824 arg1 = ctx->code_position;
825 arg2 = arg1 + arg_size(*arg1);
826 ctx->code_position = arg2 + arg_size(*arg2);
828 if (arg2[0] == ARG_IMM) {
830 imm = get_imm(&arg2[1]);
831 if (arg1[0] == R_AX) {
832 if (size == OP_SIZE_1) {
833 cgen_one(X86_TEST_AL_IMM8);
834 } else if (size == OP_SIZE_2) {
835 cgen_one(X86_OP_SIZE);
836 cgen_one(X86_TEST_AX_IMM16);
837 } else if (size == OP_SIZE_4) {
838 cgen_one(X86_TEST_AX_IMM16);
840 if (unlikely(!imm_is_32bit(imm)))
841 internal(file_line, "cgen_test: immediate out of range: %"PRIxMAX"", (uintmax_t)imm);
842 cgen_rex(X86_REX | X86_REX_W);
843 cgen_one(X86_TEST_AX_IMM16);
846 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_F6 : X86_F7, size, false, X86_F6_TEST_RM8_IMM8, arg1));
848 if (size == OP_SIZE_1) {
850 } else if (size == OP_SIZE_2) {
852 } else if (size == OP_SIZE_4) {
855 if (unlikely(!imm_is_32bit(imm)))
856 internal(file_line, "cgen_test: immediate out of range: %"PRIxMAX"", (uintmax_t)imm);
862 if (!R_IS_GPR(arg1[0]) && unlikely(!R_IS_GPR(arg2[0])))
863 internal(file_line, "cgen_test: two addresses not supported");
865 if (!R_IS_GPR(arg1[0])) {
866 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_TEST_RM8_R8 : X86_TEST_RM16_R16, size, true, arg2[0], arg1));
868 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_TEST_RM8_R8 : X86_TEST_RM16_R16, size, true, arg1[0], arg2));
873 static bool attr_w cgen_lea3(struct codegen_context *ctx, unsigned size, unsigned shift)
876 uint8_t *arg1, *arg2, *arg3, *arg4;
878 arg1 = ctx->code_position;
879 arg2 = arg1 + arg_size(*arg1);
880 arg3 = arg2 + arg_size(*arg2);
881 arg4 = arg3 + arg_size(*arg3);
882 ctx->code_position = arg4 + arg_size(*arg4);
884 if (unlikely(!R_IS_GPR(arg1[0])) || unlikely(!R_IS_GPR(arg2[0])) || unlikely(!R_IS_GPR(arg3[0])) || unlikely(arg4[0] != ARG_IMM))
885 internal(file_line, "cgen_lea3: invalid arguments");
887 addr[0] = ARG_ADDRESS_2 + shift;
890 memcpy(&addr[3], &arg4[1], 8);
892 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_LEA_R16_RM16, size, true, arg1[0], addr));
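/* Shifts and rotates.  With BMI2 available and no flags needed, variable
   shifts use the VEX-encoded SHLX/SARX/SHRX (opcode 0x0f 0x38 0xf7, the
   0x66/0xf3/0xf2 prefix selecting the operation); otherwise the classic
   0xc0/0xc1 (by immediate), 0xd0/0xd1 (by 1) and 0xd2/0xd3 (by CL)
   encodings are used. */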
897 static bool attr_w cgen_rot(struct codegen_context *ctx, unsigned size, uint8_t rot, unsigned writes_flags)
899 uint8_t *arg1 = ctx->code_position;
900 uint8_t *arg2 = arg1 + arg_size(*arg1);
901 uint8_t *arg3 = arg2 + arg_size(*arg2);
902 ctx->code_position = arg3 + arg_size(*arg3);
904 if (cpu_test_feature(CPU_FEATURE_bmi2) && size >= OP_SIZE_4 && arg3[0] != ARG_IMM && !writes_flags && (rot == ROT_SHL || rot == ROT_SHR || rot == ROT_SAR)) {
907 case ROT_SHL: sse_prefix = SSE_PREFIX_66; break;
908 case ROT_SAR: sse_prefix = SSE_PREFIX_F3; break;
909 case ROT_SHR: sse_prefix = SSE_PREFIX_F2; break;
910 default: internal(file_line, "cgen_rot: invalid rotation %x", rot);
912 g(cgen_sse_insn(ctx, sse_prefix + force_vex, PREFIX_0F_38, X86_0F_38_ROTX, size == OP_SIZE_8, arg1[0], arg3[0], arg2));
916 if (arg1[0] != arg2[0])
917 internal(file_line, "cgen_rot: invalid arguments: %x, %02x, %02x, %02x", rot, arg1[0], arg2[0], arg3[0]);
919 if (arg3[0] == R_CX) {
920 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_ROT_RM8_CL : X86_ROT_RM16_CL, size, false, rot, arg1));
921 } else if (likely(arg3[0] == ARG_IMM)) {
923 imm = get_imm(&arg3[1]);
925 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_ROT_RM8_1 : X86_ROT_RM16_1, size, false, rot, arg1));
927 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_ROT_RM8_IMM8 : X86_ROT_RM16_IMM8, size, false, rot, arg1));
931 internal(file_line, "cgen_rot: invalid argument %02x", arg3[0]);
936 static bool attr_w cgen_btxt(struct codegen_context *ctx, unsigned size, uint8_t bt, uint8_t *arg1, uint8_t *arg2)
938 if (arg2[0] == ARG_IMM) {
939 g(cgen_rm_insn(ctx, -1, PREFIX_0F, X86_0F_BTX_RM16_IMM8, size, false, X86_0F_BTX_BT_RM16_IMM8 + bt, arg1));
942 g(cgen_rm_insn(ctx, -1, PREFIX_0F, X86_0F_BT_RM16_R16 + bt * 8, size, true, arg2[0], arg1));
947 static bool attr_w cgen_bt(struct codegen_context *ctx, unsigned size)
949 uint8_t *arg1 = ctx->code_position;
950 uint8_t *arg2 = arg1 + arg_size(*arg1);
951 ctx->code_position = arg2 + arg_size(*arg2);
953 return cgen_btxt(ctx, size, BTX_BT, arg1, arg2);
956 static bool attr_w cgen_btx(struct codegen_context *ctx, unsigned size, uint8_t bt)
958 uint8_t *arg1 = ctx->code_position;
959 uint8_t *arg2 = arg1 + arg_size(*arg1);
960 uint8_t *arg3 = arg2 + arg_size(*arg2);
961 ctx->code_position = arg3 + arg_size(*arg3);
963 if (arg1[0] != arg2[0])
964 internal(file_line, "cgen_btx: invalid arguments");
966 return cgen_btxt(ctx, size, bt, arg1, arg3);
969 static bool attr_w cgen_mul_l(struct codegen_context *ctx, unsigned size, bool sgn)
971 uint8_t *arg1, *arg2, *arg3, *arg4;
973 arg1 = ctx->code_position;
974 arg2 = arg1 + arg_size(*arg1);
975 arg3 = arg2 + arg_size(*arg2);
976 arg4 = arg3 + arg_size(*arg3);
977 ctx->code_position = arg4 + arg_size(*arg4);
978 reg_up = size == OP_SIZE_1 ? R_AX : R_DX;
979 if (unlikely(arg1[0] != R_AX) || unlikely(arg2[0] != reg_up) || unlikely(arg3[0] != R_AX) || unlikely(arg4[0] == ARG_IMM))
980 internal(file_line, "cgen_mul_l: invalid mul arguments");
982 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_F6 : X86_F7, size, false, !sgn ? X86_F6_MUL_RM8 : X86_F6_IMUL_RM8, arg4));
986 static bool attr_w cgen_div_l(struct codegen_context *ctx, unsigned size, bool sgn)
988 uint8_t *arg1, *arg2, *arg3, *arg4, *arg5;
990 arg1 = ctx->code_position;
991 arg2 = arg1 + arg_size(*arg1);
992 arg3 = arg2 + arg_size(*arg2);
993 arg4 = arg3 + arg_size(*arg3);
994 arg5 = arg4 + arg_size(*arg4);
995 ctx->code_position = arg5 + arg_size(*arg5);
996 reg_up = size == OP_SIZE_1 ? R_AX : R_DX;
997 if (unlikely(arg1[0] != R_AX) || unlikely(arg2[0] != reg_up) || unlikely(arg3[0] != R_AX) || unlikely(arg4[0] != reg_up) || unlikely(arg5[0] == ARG_IMM))
998 internal(file_line, "cgen_div_l: invalid div arguments");
1000 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, size == OP_SIZE_1 ? X86_F6 : X86_F7, size, false, !sgn ? X86_F6_DIV_RM8 : X86_F6_IDIV_RM8, arg5));
1004 static bool attr_w cgen_cmov(struct codegen_context *ctx, unsigned size, unsigned cond)
1006 uint8_t *arg1 = ctx->code_position;
1007 uint8_t *arg2 = arg1 + arg_size(*arg1);
1008 uint8_t *arg3 = arg2 + arg_size(*arg2);
1009 ctx->code_position = arg3 + arg_size(*arg3);
1010 if (unlikely(arg1[0] != arg2[0]))
1011 internal(file_line, "cgen_cmov: invalid arguments");
1012 g(cgen_rm_insn(ctx, -1, PREFIX_0F, X86_0F_CMOVCC_R16_RM16 + cond, size, true, arg1[0], arg3));
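/* Block copy: the operands must follow the string-instruction convention
   (destination addressed through DI, source through SI, count in CX, zero
   displacements); anything else is rejected, otherwise a movs string
   instruction is emitted. */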
1016 static bool attr_w cgen_memcpy(struct codegen_context *ctx)
1018 int64_t disp_dest, disp_src;
1019 uint8_t *arg1, *arg2, *arg3;
1020 arg1 = ctx->code_position;
1021 arg2 = arg1 + arg_size(*arg1);
1022 arg3 = arg2 + arg_size(*arg2);
1023 ctx->code_position = arg3 + arg_size(*arg3);
1024 if (unlikely(arg1[0] != ARG_ADDRESS_1_POST_I) || unlikely(arg2[0] != ARG_ADDRESS_1_POST_I) || unlikely(arg3[0] != R_CX))
1026 if (unlikely(arg1[1] != R_DI) || unlikely(arg2[1] != R_SI))
1028 disp_dest = get_imm(&arg1[2]);
1029 disp_src = get_imm(&arg2[2]);
1030 if (unlikely(disp_dest != 0) || unlikely(disp_src != 0))
1034 cgen_one(X86_MOVSB);
1038 internal(file_line, "cgen_memcpy: invalid arguments %02x, %02x, %02x", *arg1, *arg2, *arg3);
1042 static bool attr_w cgen_memset(struct codegen_context *ctx)
1045 uint8_t *arg1, *arg2, *arg3;
1046 arg1 = ctx->code_position;
1047 arg2 = arg1 + arg_size(*arg1);
1048 arg3 = arg2 + arg_size(*arg2);
1049 ctx->code_position = arg3 + arg_size(*arg3);
1050 if (unlikely(arg1[0] != ARG_ADDRESS_1_POST_I) || unlikely(arg2[0] != R_CX) || unlikely(arg3[0] != R_AX))
1052 if (unlikely(arg1[1] != R_DI))
1054 disp_dest = get_imm(&arg1[2]);
1055 if (unlikely(disp_dest != 0))
1059 cgen_one(X86_STOSB);
1063 internal(file_line, "cgen_memset: invalid arguments %02x, %02x, %02x", *arg1, *arg2, *arg3);
1067 static bool attr_w cgen_sse_cmp(struct codegen_context *ctx, unsigned size)
1069 uint8_t *arg1 = ctx->code_position;
1070 uint8_t *arg2 = arg1 + arg_size(*arg1);
1071 ctx->code_position = arg2 + arg_size(*arg2);
1072 g(cgen_sse_insn(ctx, size == OP_SIZE_4 ? SSE_PREFIX_NONE : SSE_PREFIX_66, PREFIX_0F, X86_0F_UCOMISS_X128_RM32, false, arg1[0], 0, arg2));
1076 static bool attr_w cgen_sse_alu(struct codegen_context *ctx, unsigned size, unsigned alu)
1079 uint8_t *arg1 = ctx->code_position;
1080 uint8_t *arg2 = arg1 + arg_size(*arg1);
1081 uint8_t *arg3 = arg2 + arg_size(*arg2);
1082 ctx->code_position = arg3 + arg_size(*arg3);
1084 case FP_ALU_ADD: opcode = X86_0F_ADDPS_X128_M32; break;
1085 case FP_ALU_SUB: opcode = X86_0F_SUBPS_X128_M32; break;
1086 case FP_ALU_MUL: opcode = X86_0F_MULPS_X128_M32; break;
1087 case FP_ALU_DIV: opcode = X86_0F_DIVPS_X128_M32; break;
1088 default: internal(file_line, "cgen_sse_alu: invalid alu %u", alu);
1090 g(cgen_sse_insn(ctx, size == OP_SIZE_4 ? SSE_PREFIX_F3 : SSE_PREFIX_F2, PREFIX_0F, opcode, false, arg1[0], arg2[0], arg3));
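/* Scalar SSE one-operand operations: SQRTSS/SQRTSD and, for the rounding
   modes, ROUNDSS/ROUNDSD, whose rounding-control immediate byte is appended
   after the instruction. */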
1094 static bool attr_w cgen_sse_alu1(struct codegen_context *ctx, unsigned size, unsigned alu)
1097 unsigned sse_pfx, sse_op_map;
1098 uint8_t *arg1 = ctx->code_position;
1099 uint8_t *arg2 = arg1 + arg_size(*arg1);
1100 ctx->code_position = arg2 + arg_size(*arg2);
1102 case FP_ALU1_SQRT: if (size == OP_SIZE_4) {
1103 sse_pfx = SSE_PREFIX_F3;
1104 } else if (size == OP_SIZE_8) {
1105 sse_pfx = SSE_PREFIX_F2;
1109 sse_op_map = PREFIX_0F;
1110 opcode = X86_0F_SQRTPS_X128_M32;
1115 case FP_ALU1_TRUNC: sse_pfx = SSE_PREFIX_66;
1116 sse_op_map = PREFIX_0F_3A;
1117 if (size == OP_SIZE_4) {
1118 opcode = X86_0F_3A_ROUNDSS_X128_M32;
1119 } else if (size == OP_SIZE_8) {
1120 opcode = X86_0F_3A_ROUNDSD_X128_M64;
1126 default: internal(file_line, "cgen_sse_alu1: invalid alu %u, %u", alu, size);
1128 g(cgen_sse_insn(ctx, sse_pfx, sse_op_map, opcode, false, arg1[0], arg1[0], arg2));
1129 if (OP_IS_ROUND(alu))
1130 cgen_one(alu - FP_ALU1_ROUND);
1134 static bool attr_w cgen_sse_from_int(struct codegen_context *ctx, unsigned int_op_size, unsigned fp_op_size)
1136 uint8_t *arg1 = ctx->code_position;
1137 uint8_t *arg2 = arg1 + arg_size(*arg1);
1138 ctx->code_position = arg2 + arg_size(*arg2);
1139 g(cgen_sse_insn(ctx, fp_op_size == OP_SIZE_4 ? SSE_PREFIX_F3 : SSE_PREFIX_F2, PREFIX_0F, X86_0F_CVTSI2SS_X128_RM32, int_op_size == OP_SIZE_8, arg1[0], R_XMM7, arg2));
1143 static bool attr_w cgen_sse_to_int(struct codegen_context *ctx, unsigned int_op_size, unsigned fp_op_size)
1145 uint8_t *arg1 = ctx->code_position;
1146 uint8_t *arg2 = arg1 + arg_size(*arg1);
1147 ctx->code_position = arg2 + arg_size(*arg2);
1148 g(cgen_sse_insn(ctx, fp_op_size == OP_SIZE_4 ? SSE_PREFIX_F3 : SSE_PREFIX_F2, PREFIX_0F, X86_0F_CVTTSS2SI_X128_RM32, int_op_size == OP_SIZE_8, arg1[0], 0, arg2));
1152 static bool attr_w cgen_sse_cvt(struct codegen_context *ctx, unsigned from_op_size, unsigned to_op_size)
1154 uint8_t *arg1 = ctx->code_position;
1155 uint8_t *arg2 = arg1 + arg_size(*arg1);
1156 ctx->code_position = arg2 + arg_size(*arg2);
1157 if (from_op_size == OP_SIZE_2 && to_op_size == OP_SIZE_4) {
1158 g(cgen_sse_insn(ctx, SSE_PREFIX_66, PREFIX_0F_38, X86_0F_38_CVTPH2PS_X128_RM64, false, arg1[0], 0, arg2));
1160 } else if (from_op_size == OP_SIZE_4 && to_op_size == OP_SIZE_2) {
1161 g(cgen_sse_insn(ctx, SSE_PREFIX_66, PREFIX_0F_3A, X86_0F_3A_CVTPS2PH_RM64_X128, false, arg2[0], 0, arg1));
1165 internal(file_line, "cgen_sse_cvt: unsupported arguments %u, %u", from_op_size, to_op_size);
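/* x87 emitters: FLD/FILD, FSTP/FISTP/FISTTP and arithmetic on the
   floating-point register stack. */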
1169 static bool attr_w cgen_x87_fld(struct codegen_context *ctx, unsigned size)
1172 uint8_t *arg1 = ctx->code_position;
1173 ctx->code_position = arg1 + arg_size(*arg1);
1174 if (arg1[0] >= R_ST0 && arg1[0] <= R_ST7)
1178 c1 = X87_FLD_RM32; c2 = X87_FLD_RM32_X; break;
1180 c1 = X87_FLD_M64; c2 = X87_FLD_M64_X; break;
1182 c1 = X87_FLD_M80; c2 = X87_FLD_M80_X; break;
1184 internal(file_line, "cgen_x87_fld: invalid size %u", size);
1186 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, c1, OP_SIZE_4, false, c2, arg1));
1190 static bool attr_w cgen_x87_fild(struct codegen_context *ctx, unsigned size)
1193 uint8_t *arg1 = ctx->code_position;
1194 ctx->code_position = arg1 + arg_size(*arg1);
1197 c1 = X87_FILD_M16; c2 = X87_FILD_M16_X; break;
1199 c1 = X87_FILD_M32; c2 = X87_FILD_M32_X; break;
1201 c1 = X87_FILD_M64; c2 = X87_FILD_M64_X; break;
1203 internal(file_line, "cgen_x87_fild: invalid size %u", size);
1205 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, c1, OP_SIZE_4, false, c2, arg1));
1209 static bool attr_w cgen_x87_fstp(struct codegen_context *ctx, unsigned size)
1212 uint8_t *arg1 = ctx->code_position;
1213 ctx->code_position = arg1 + arg_size(*arg1);
1214 if (arg1[0] >= R_ST0 && arg1[0] <= R_ST7)
1218 c1 = X87_FSTP_M32; c2 = X87_FSTP_M32_X; break;
1220 c1 = X87_FSTP_RM64; c2 = X87_FSTP_RM64_X; break;
1222 c1 = X87_FSTP_M80; c2 = X87_FSTP_M80_X; break;
1224 internal(file_line, "cgen_x87_fstp: invalid size %u", size);
1226 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, c1, OP_SIZE_4, false, c2, arg1));
1230 static bool attr_w cgen_x87_fistp(struct codegen_context *ctx, unsigned size)
1233 uint8_t *arg1 = ctx->code_position;
1234 ctx->code_position = arg1 + arg_size(*arg1);
1237 c1 = X87_FISTP_M16; c2 = X87_FISTP_M16_X; break;
1239 c1 = X87_FISTP_M32; c2 = X87_FISTP_M32_X; break;
1241 c1 = X87_FISTP_M64; c2 = X87_FISTP_M64_X; break;
1243 internal(file_line, "cgen_x87_fistp: invalid size %u", size);
1245 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, c1, OP_SIZE_4, false, c2, arg1));
1249 static bool attr_w cgen_x87_fisttp(struct codegen_context *ctx, unsigned size)
1252 uint8_t *arg1 = ctx->code_position;
1253 ctx->code_position = arg1 + arg_size(*arg1);
1256 c1 = X87_FISTTP_M16; c2 = X87_FISTTP_M16_X; break;
1258 c1 = X87_FISTTP_M32; c2 = X87_FISTTP_M32_X; break;
1260 c1 = X87_FISTTP_M64; c2 = X87_FISTTP_M64_X; break;
1262 internal(file_line, "cgen_x87_fisttp: invalid size %u", size);
1264 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, c1, OP_SIZE_4, false, c2, arg1));
1268 static bool attr_w cgen_x87_fcomp(struct codegen_context *ctx, unsigned size)
1271 uint8_t *arg1 = ctx->code_position;
1272 ctx->code_position = arg1 + arg_size(*arg1);
1273 if (arg1[0] < ARG_REGS_MAX) {
1274 c1 = X87_FALU_ST_RM32;
1275 } else switch (size) {
1277 c1 = X87_FALU_ST_RM32; break;
1279 c1 = X87_FALU_ST_M64; break;
1281 internal(file_line, "cgen_x87_fcomp: invalid size %u", size);
1284 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, c1, OP_SIZE_4, false, c2, arg1));
1288 static bool attr_w cgen_x87_alu(struct codegen_context *ctx, unsigned size, unsigned aux)
1291 uint8_t *arg1 = ctx->code_position;
1292 ctx->code_position = arg1 + arg_size(*arg1);
1293 if (arg1[0] < ARG_REGS_MAX) {
1294 c1 = X87_FALU_ST_RM32;
1295 } else switch (size) {
1297 c1 = X87_FALU_ST_RM32; break;
1299 c1 = X87_FALU_ST_M64; break;
1301 internal(file_line, "cgen_x87_alu: invalid size %u", size);
1305 c2 = X87_ALU_ADD; break;
1307 c2 = X87_ALU_SUB; break;
1309 c2 = X87_ALU_MUL; break;
1311 c2 = X87_ALU_DIV; break;
1313 internal(file_line, "cgen_x87_alu: invalid operation %u", aux);
1315 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, c1, OP_SIZE_4, false, c2, arg1));
1319 static bool attr_w cgen_x87_alup(struct codegen_context *ctx, unsigned aux)
1322 uint8_t *arg1 = ctx->code_position;
1323 ctx->code_position = arg1 + arg_size(*arg1);
1326 c2 = X87_ALU_ADD; break;
1328 c2 = X87_ALU_SUB; break;
1330 c2 = X87_ALU_MUL; break;
1332 c2 = X87_ALU_DIV; break;
1334 internal(file_line, "cgen_x87_alup: invalid operation %u", aux);
1336 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X87_FALUP_STi_ST0, OP_SIZE_4, false, c2, arg1));
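/* Patch a jump displacement once the target label's position is known; the
   relocation length selects an 8-bit or 32-bit field, relative to the end
   of the displacement. */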
1340 static bool attr_w resolve_relocation(struct codegen_context *ctx, struct relocation *reloc)
1342 int64_t offs = (int64_t)ctx->label_to_pos[reloc->label_id] - (int64_t)(reloc->position + (reloc->length == JMP_SHORT ? 1 : 4));
1343 switch (reloc->length) {
1346 if (!imm_is_8bit(offs))
1349 memcpy(ctx->mcode + reloc->position, &i8, 1);
1354 if (!imm_is_32bit(offs))
1357 memcpy(ctx->mcode + reloc->position, &i32, 4);
1361 internal(file_line, "resolve_relocation: invalid relocation length %u", reloc->length);
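/* Top-level dispatcher: decodes one IR instruction word and calls the
   appropriate emitter above. */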
1367 static bool attr_w cgen_insn(struct codegen_context *ctx, uint32_t insn)
1371 /*debug("insn: %08x", insn);*/
1372 switch (insn_opcode(insn)) {
1383 imm16 = cget_two(ctx);
1384 cgen_one(X86_RET_IMM16);
1393 case INSN_CALL_INDIRECT:
1394 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_FF, OP_SIZE_4, false, X86_FF_CALL_INDIRECT, ctx->code_position));
1395 ctx->code_position += arg_size(*ctx->code_position);
1398 g(cgen_mov(ctx, insn_op_size(insn)));
1401 g(cgen_movsx(ctx, insn_op_size(insn)));
1404 g(cgen_alu(ctx, insn_op_size(insn), 7));
1407 g(cgen_test(ctx, insn_op_size(insn)));
1410 case INSN_ALU_FLAGS:
1411 if (unlikely(insn_op_size(insn) < OP_SIZE_4))
1413 if (!insn_writes_flags(insn) && insn_op_size(insn) <= OP_SIZE_8) {
1414 if (unlikely(insn_aux(insn) != ALU_ADD))
1416 g(cgen_lea(ctx, insn_op_size(insn)));
1419 g(cgen_alu(ctx, insn_op_size(insn), insn_aux(insn)));
1421 case INSN_ALU_PARTIAL:
1422 case INSN_ALU_FLAGS_PARTIAL:
1423 if (unlikely(insn_op_size(insn) >= OP_SIZE_4))
1425 g(cgen_alu(ctx, insn_op_size(insn), insn_aux(insn)));
1428 case INSN_ALU1_FLAGS:
1429 if (unlikely(insn_op_size(insn) < OP_SIZE_4))
1431 g(cgen_alu1(ctx, insn_op_size(insn), insn_aux(insn), insn_writes_flags(insn)));
1433 case INSN_ALU1_PARTIAL:
1434 case INSN_ALU1_FLAGS_PARTIAL:
1435 if (unlikely(insn_op_size(insn) >= OP_SIZE_4))
1437 g(cgen_alu1(ctx, insn_op_size(insn), insn_aux(insn), insn_writes_flags(insn)));
1440 if (unlikely(insn_op_size(insn) < OP_SIZE_4))
1442 g(cgen_lea3(ctx, insn_op_size(insn), insn_aux(insn)));
1445 if (unlikely(insn_op_size(insn) < OP_SIZE_4))
1447 g(cgen_rot(ctx, insn_op_size(insn), insn_aux(insn), insn_writes_flags(insn)));
1449 case INSN_ROT_PARTIAL:
1450 if (unlikely(insn_op_size(insn) >= OP_SIZE_4))
1452 g(cgen_rot(ctx, insn_op_size(insn), insn_aux(insn), insn_writes_flags(insn)));
1455 if (unlikely(insn_op_size(insn) == OP_SIZE_1) || unlikely(!insn_writes_flags(insn)))
1457 g(cgen_bt(ctx, insn_op_size(insn)));
1460 if (unlikely(insn_op_size(insn) == OP_SIZE_1) || unlikely(!insn_writes_flags(insn)))
1462 g(cgen_btx(ctx, insn_op_size(insn), insn_aux(insn)));
1465 g(cgen_mul_l(ctx, insn_op_size(insn), insn_aux(insn)));
1468 g(cgen_div_l(ctx, insn_op_size(insn), insn_aux(insn)));
1471 if (unlikely(insn_op_size(insn) <= OP_SIZE_2))
1473 if (insn_op_size(insn) == OP_SIZE_8)
1474 cgen_rex(X86_REX | X86_REX_W);
1475 if (unlikely(cget_one(ctx) != R_AX))
1477 if (unlikely(cget_one(ctx) != R_AX))
1481 case INSN_CBW_PARTIAL:
1482 if (unlikely(insn_op_size(insn) != OP_SIZE_2))
1484 if (unlikely(cget_one(ctx) != R_AX))
1486 if (unlikely(cget_one(ctx) != R_AX))
1488 cgen_one(X86_OP_SIZE);
1492 if (unlikely(insn_op_size(insn) <= OP_SIZE_2))
1494 if (unlikely(cget_one(ctx) != R_DX))
1496 if (unlikely(cget_one(ctx) != R_AX))
1498 if (insn_op_size(insn) == OP_SIZE_8)
1499 cgen_rex(X86_REX | X86_REX_W);
1502 case INSN_CWD_PARTIAL:
1503 if (unlikely(insn_op_size(insn) != OP_SIZE_2))
1505 if (unlikely(cget_one(ctx) != R_DX))
1507 if (unlikely(cget_one(ctx) != R_AX))
1509 if (unlikely(cget_one(ctx) != R_DX))
1511 cgen_one(X86_OP_SIZE);
1515 if (unlikely(insn_op_size(insn) != OP_SIZE_1))
1517 g(cgen_rm_insn(ctx, -1, PREFIX_0F, X86_0F_SETCC_RM8 + (insn_aux(insn) & 0xf), OP_SIZE_1, false, 0, ctx->code_position));
1518 ctx->code_position += arg_size(*ctx->code_position);
1520 case INSN_SET_COND_PARTIAL:
1521 if (unlikely(insn_op_size(insn) != OP_SIZE_1))
1523 g(cgen_rm_insn(ctx, -1, PREFIX_0F, X86_0F_SETCC_RM8 + (insn_aux(insn) & 0xf), OP_SIZE_1, false, 0, ctx->code_position));
1524 ctx->code_position += arg_size(*ctx->code_position);
1525 ctx->code_position += arg_size(*ctx->code_position);
1529 if (unlikely(insn_op_size(insn) == OP_SIZE_1))
1531 g(cgen_cmov(ctx, insn_op_size(insn), insn_aux(insn)));
1534 g(cgen_memcpy(ctx));
1537 g(cgen_memset(ctx));
1540 g(cgen_sse_cmp(ctx, insn_op_size(insn)));
1543 g(cgen_sse_alu(ctx, insn_op_size(insn), insn_aux(insn)));
1546 g(cgen_sse_alu1(ctx, insn_op_size(insn), insn_aux(insn)));
1548 case INSN_FP_FROM_INT32:
1549 case INSN_FP_FROM_INT64:
1550 g(cgen_sse_from_int(ctx, insn_opcode(insn) == INSN_FP_FROM_INT32 ? OP_SIZE_4 : OP_SIZE_8, insn_op_size(insn)));
1552 case INSN_FP_TO_INT32:
1553 case INSN_FP_TO_INT64:
1554 g(cgen_sse_to_int(ctx, insn_opcode(insn) == INSN_FP_TO_INT32 ? OP_SIZE_4 : OP_SIZE_8, insn_op_size(insn)));
1557 g(cgen_sse_cvt(ctx, insn_op_size(insn), insn_aux(insn)));
1560 g(cgen_x87_fld(ctx, insn_op_size(insn)));
1563 g(cgen_x87_fild(ctx, insn_op_size(insn)));
1566 g(cgen_x87_fstp(ctx, insn_op_size(insn)));
1568 case INSN_X87_FISTP:
1569 g(cgen_x87_fistp(ctx, insn_op_size(insn)));
1571 case INSN_X87_FISTTP:
1572 g(cgen_x87_fisttp(ctx, insn_op_size(insn)));
1574 case INSN_X87_FCOMP:
1575 g(cgen_x87_fcomp(ctx, insn_op_size(insn)));
1577 case INSN_X87_FCOMPP:
1578 cgen_one(X87_FCOMPP);
1579 cgen_one(X87_FCOMPP_2);
1581 case INSN_X87_FCOMIP:
1582 imm8 = cget_one(ctx);
1583 cgen_one(X87_FCOMIP);
1584 cgen_one(X87_FCOMIP_2 + (imm8 & 7));
1587 g(cgen_x87_alu(ctx, insn_op_size(insn), insn_aux(insn)));
1590 g(cgen_x87_alup(ctx, insn_aux(insn)));
1594 cgen_one(X87_FCHS_2);
1596 case INSN_X87_FSQRT:
1597 cgen_one(X87_FSQRT);
1598 cgen_one(X87_FSQRT_2);
1600 case INSN_X87_FRNDINT:
1601 cgen_one(X87_FRNDINT);
1602 cgen_one(X87_FRNDINT_2);
1604 case INSN_X87_FNSTSW:
1605 if (unlikely(cget_one(ctx) != R_AX))
1607 if (unlikely(cget_one(ctx) != R_AX))
1609 cgen_one(X87_FNSTSW);
1610 cgen_one(X87_FNSTSW_2);
1612 case INSN_X87_FLDCW:
1613 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X87_FLDCW, OP_SIZE_4, false, X87_FLDCW_X, ctx->code_position));
1614 ctx->code_position += arg_size(*ctx->code_position);
1617 if (insn_jump_size(insn) == JMP_SHORT || insn_jump_size(insn) == JMP_SHORTEST) {
1618 cgen_one(X86_JMP_8);
1619 g(add_relocation(ctx, JMP_SHORT, 0, NULL));
1621 } else if (likely(insn_jump_size(insn) == JMP_LONG)) {
1622 cgen_one(X86_JMP_16);
1623 g(add_relocation(ctx, JMP_LONG, 0, NULL));
1630 if (insn_jump_size(insn) == JMP_SHORT || insn_jump_size(insn) == JMP_SHORTEST) {
1631 cgen_one(X86_JCC_8 + (insn_aux(insn) & 0xf));
1632 g(add_relocation(ctx, JMP_SHORT, 0, NULL));
1634 } else if (likely(insn_jump_size(insn) == JMP_LONG)) {
1636 cgen_one(X86_0F_JCC_16 + (insn_aux(insn) & 0xf));
1637 g(add_relocation(ctx, JMP_LONG, 0, NULL));
1643 case INSN_JMP_INDIRECT:
1644 g(cgen_rm_insn(ctx, -1, PREFIX_NONE, X86_FF, OP_SIZE_4, false, X86_FF_JMP_INDIRECT, ctx->code_position));
1645 ctx->code_position += arg_size(*ctx->code_position);
1649 internal(file_line, "cgen_insn: invalid insn %08lx", (unsigned long)insn);