AVX-512: Add Pseudo-ops for CMP instructions
[nasm/avx512.git] / assemble.c
1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2013 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * assemble.c code generation for the Netwide Assembler
37 * the actual codes (C syntax, i.e. octal):
38 * \0 - terminates the code. (Unless it's a literal of course.)
39 * \1..\4 - that many literal bytes follow in the code stream
40 * \5 - add 4 to the primary operand number (b, low octdigit)
41 * \6 - add 4 to the secondary operand number (a, middle octdigit)
42 * \7 - add 4 to both the primary and the secondary operand number
43 * \10..\13 - a literal byte follows in the code stream, to be added
44 * to the register value of operand 0..3
45 * \20..\23 - a byte immediate operand, from operand 0..3
46 * \24..\27 - a zero-extended byte immediate operand, from operand 0..3
47 * \30..\33 - a word immediate operand, from operand 0..3
48 * \34..\37 - select between \3[0-3] and \4[0-3] depending on 16/32 bit
49 * assembly mode or the operand-size override on the operand
50 * \40..\43 - a long immediate operand, from operand 0..3
51 * \44..\47 - select between \3[0-3], \4[0-3] and \5[4-7]
52 * depending on the address size of the instruction.
53 * \50..\53 - a byte relative operand, from operand 0..3
54 * \54..\57 - a qword immediate operand, from operand 0..3
55 * \60..\63 - a word relative operand, from operand 0..3
56 * \64..\67 - select between \6[0-3] and \7[0-3] depending on 16/32 bit
57 * assembly mode or the operand-size override on the operand
58 * \70..\73 - a long relative operand, from operand 0..3
59 * \74..\77 - a word constant, from the _segment_ part of operand 0..3
60 * \1ab - a ModRM, calculated on EA in operand a, with the spare
61 * field the register value of operand b.
62 * \172\ab - the register number from operand a in bits 7..4, with
63 * the 4-bit immediate from operand b in bits 3..0.
64 * \173\xab - the register number from operand a in bits 7..4, with
65 * the value b in bits 3..0.
66 * \174..\177 - the register number from operand 0..3 in bits 7..4, and
67 * an arbitrary value in bits 3..0 (assembled as zero.)
68 * \2ab - a ModRM, calculated on EA in operand a, with the spare
69 * field equal to digit b.
71 * \240..\243 - this instruction uses EVEX rather than REX or VEX/XOP, with the
72 * V field taken from operand 0..3.
73 * \250 - this instruction uses EVEX rather than REX or VEX/XOP, with the
74 * V field set to 1111b.
75 * EVEX prefixes are followed by the sequence:
76 * \cm\wlp\tup where cm is:
77 * cc 000 0mm
78 * c = 2 for EVEX and m is the legacy escape (0f, 0f38, 0f3a)
79 * and wlp is:
80 * 00 wwl lpp
81 * [l0] ll = 0 (.128, .lz)
82 * [l1] ll = 1 (.256)
83 * [l2] ll = 2 (.512)
84 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
86 * [w0] ww = 0 for W = 0
87 * [w1] ww = 1 for W = 1
88 * [wig] ww = 2 for W don't care (always assembled as 0)
89 * [ww] ww = 3 for W used as REX.W
91 * [p0] pp = 0 for no prefix
92 * [66] pp = 1 for legacy prefix 66
93 * [f3] pp = 2
94 * [f2] pp = 3
96 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
97 * (compressed displacement encoding)
99 * \254..\257 - a signed 32-bit operand to be extended to 64 bits.
100 * \260..\263 - this instruction uses VEX/XOP rather than REX, with the
101 * V field taken from operand 0..3.
102 * \270 - this instruction uses VEX/XOP rather than REX, with the
103 * V field set to 1111b.
105 * VEX/XOP prefixes are followed by the sequence:
106 * \tmm\wlp where mm is the M field; and wlp is:
107 * 00 wwl lpp
108 * [l0] ll = 0 for L = 0 (.128, .lz)
109 * [l1] ll = 1 for L = 1 (.256)
110 * [lig] ll = 2 for L don't care (always assembled as 0)
112 * [w0] ww = 0 for W = 0
113 * [w1] ww = 1 for W = 1
114 * [wig] ww = 2 for W don't care (always assembled as 0)
115 * [ww] ww = 3 for W used as REX.W
117 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
119 * \271 - instruction takes XRELEASE (F3) with or without lock
120 * \272 - instruction takes XACQUIRE/XRELEASE with or without lock
121 * \273 - instruction takes XACQUIRE/XRELEASE with lock only
122 * \274..\277 - a byte immediate operand, from operand 0..3, sign-extended
123 * to the operand size (if o16/o32/o64 present) or the bit size
124 * \310 - indicates fixed 16-bit address size, i.e. optional 0x67.
125 * \311 - indicates fixed 32-bit address size, i.e. optional 0x67.
126 * \312 - (disassembler only) invalid with non-default address size.
127 * \313 - indicates fixed 64-bit address size, 0x67 invalid.
128 * \314 - (disassembler only) invalid with REX.B
129 * \315 - (disassembler only) invalid with REX.X
130 * \316 - (disassembler only) invalid with REX.R
131 * \317 - (disassembler only) invalid with REX.W
132 * \320 - indicates fixed 16-bit operand size, i.e. optional 0x66.
133 * \321 - indicates fixed 32-bit operand size, i.e. optional 0x66.
134 * \322 - indicates that this instruction is only valid when the
135 * operand size is the default (instruction to disassembler,
136 * generates no code in the assembler)
137 * \323 - indicates fixed 64-bit operand size, REX on extensions only.
138 * \324 - indicates 64-bit operand size requiring REX prefix.
139 * \325 - instruction which always uses spl/bpl/sil/dil
140 * \326 - instruction not valid with 0xF3 REP prefix. Hint for
141 * disassembler only; for SSE instructions.
142 * \330 - a literal byte follows in the code stream, to be added
143 * to the condition code value of the instruction.
144 * \331 - instruction not valid with REP prefix. Hint for
145 * disassembler only; for SSE instructions.
146 * \332 - REP prefix (0xF2 byte) used as opcode extension.
147 * \333 - REP prefix (0xF3 byte) used as opcode extension.
148 * \334 - LOCK prefix used as REX.R (used in non-64-bit mode)
149 * \335 - disassemble a rep (0xF3 byte) prefix as repe not rep.
150 * \336 - force a REP(E) prefix (0xF3) even if not specified.
151 * \337 - force a REPNE prefix (0xF2) even if not specified.
152 * \336-\337 are still listed as prefixes in the disassembler.
153 * \340 - reserve <operand 0> bytes of uninitialized storage.
154 * Operand 0 had better be a segmentless constant.
155 * \341 - this instruction needs a WAIT "prefix"
156 * \360 - no SSE prefix (== \364\331)
157 * \361 - 66 SSE prefix (== \366\331)
158 * \364 - operand-size prefix (0x66) not permitted
159 * \365 - address-size prefix (0x67) not permitted
160 * \366 - operand-size prefix (0x66) used as opcode extension
161 * \367 - address-size prefix (0x67) used as opcode extension
162 * \370,\371 - match only if operand 0 meets byte jump criteria.
163 * 370 is used for Jcc, 371 is used for JMP.
164 * \373 - assemble 0x03 if bits==16, 0x05 if bits==32;
165 * used for conditional jump over longer jump
166 * \374 - this instruction takes an XMM VSIB memory EA
167 * \375 - this instruction takes a YMM VSIB memory EA
168 * \376 - this instruction takes a ZMM VSIB memory EA
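/*
 * Illustrative decoding (a hypothetical template, not copied from
 * insns.dat): a code string of "\1\x01\101" would be processed as
 *   \1 0x01  - one literal byte follows; emit the opcode byte 0x01
 *   \101     - \1ab with a=0, b=1: emit a ModRM byte computed from the
 *              effective address in operand 0, with the spare (reg)
 *              field taken from the register in operand 1
 * which is the general shape of an "op r/m, reg" encoding.  Similarly,
 * "\321\2\x0F\x31" would mean: fixed 32-bit operand size (a 0x66 prefix
 * is produced only if bits == 16), then emit the two literal bytes 0F 31.
 */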
171 #include "compiler.h"
173 #include <stdio.h>
174 #include <string.h>
175 #include <inttypes.h>
177 #include "nasm.h"
178 #include "nasmlib.h"
179 #include "assemble.h"
180 #include "insns.h"
181 #include "tables.h"
183 enum match_result {
185 * Matching errors. These should be sorted so that more specific
186 * errors come later in the sequence.
188 MERR_INVALOP,
189 MERR_OPSIZEMISSING,
190 MERR_OPSIZEMISMATCH,
191 MERR_BADCPU,
192 MERR_BADMODE,
193 MERR_BADHLE,
194 MERR_ENCMISMATCH,
196 * Matching success; the conditional ones first
198 MOK_JUMP, /* Matching OK but needs jmp_match() */
199 MOK_GOOD /* Matching unconditionally OK */
202 typedef struct {
203 enum ea_type type; /* what kind of EA is this? */
204 int sib_present; /* is a SIB byte necessary? */
205 int bytes; /* # of bytes of offset needed */
206 int size; /* lazy - this is sib+bytes+1 */
207 uint8_t modrm, sib, rex, rip; /* the bytes themselves */
208 int8_t disp8; /* compressed displacement for EVEX */
209 } ea;
211 #define GEN_SIB(scale, index, base) \
212 (((scale) << 6) | ((index) << 3) | ((base)))
214 #define GEN_MODRM(mod, reg, rm) \
215 (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
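/*
 * For instance, GEN_MODRM(3, 2, 1) gives 0xD1 (mod = 11b, reg = 010b,
 * rm = 001b), the register-direct ModRM used by "add ecx, edx" (01 D1),
 * and GEN_SIB(2, 1, 0) gives 0x88, the SIB byte for [rax + rcx*4].
 */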
217 static iflags_t cpu; /* cpu level received from nasm.c */
218 static efunc errfunc;
219 static struct ofmt *outfmt;
220 static ListGen *list;
222 static int64_t calcsize(int32_t, int64_t, int, insn *,
223 const struct itemplate *);
224 static void gencode(int32_t segment, int64_t offset, int bits,
225 insn * ins, const struct itemplate *temp,
226 int64_t insn_end);
227 static enum match_result find_match(const struct itemplate **tempp,
228 insn *instruction,
229 int32_t segment, int64_t offset, int bits);
230 static enum match_result matches(const struct itemplate *, insn *, int bits);
231 static opflags_t regflag(const operand *);
232 static int32_t regval(const operand *);
233 static int rexflags(int, opflags_t, int);
234 static int op_rexflags(const operand *, int);
235 static int op_evexflags(const operand *, int, uint8_t);
236 static void add_asp(insn *, int);
238 static enum ea_type process_ea(operand *, ea *, int, int, opflags_t, insn *);
240 static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
242 return ins->prefixes[pos] == prefix;
245 static void assert_no_prefix(insn * ins, enum prefix_pos pos)
247 if (ins->prefixes[pos])
248 errfunc(ERR_NONFATAL, "invalid %s prefix",
249 prefix_name(ins->prefixes[pos]));
252 static const char *size_name(int size)
254 switch (size) {
255 case 1:
256 return "byte";
257 case 2:
258 return "word";
259 case 4:
260 return "dword";
261 case 8:
262 return "qword";
263 case 10:
264 return "tword";
265 case 16:
266 return "oword";
267 case 32:
268 return "yword";
269 case 64:
270 return "zword";
271 default:
272 return "???";
276 static void warn_overflow(int pass, int size)
278 errfunc(ERR_WARNING | pass | ERR_WARN_NOV,
279 "%s data exceeds bounds", size_name(size));
282 static void warn_overflow_const(int64_t data, int size)
284 if (overflow_general(data, size))
285 warn_overflow(ERR_PASS1, size);
288 static void warn_overflow_opd(const struct operand *o, int size)
290 if (o->wrt == NO_SEG && o->segment == NO_SEG) {
291 if (overflow_general(o->offset, size))
292 warn_overflow(ERR_PASS2, size);
297 * This routine wraps the real output format's output routine,
298 * in order to pass a copy of the data off to the listing file
299 * generator at the same time.
301 static void out(int64_t offset, int32_t segto, const void *data,
302 enum out_type type, uint64_t size,
303 int32_t segment, int32_t wrt)
305 static int32_t lineno = 0; /* static!!! */
306 static char *lnfname = NULL;
307 uint8_t p[8];
309 if (type == OUT_ADDRESS && segment == NO_SEG && wrt == NO_SEG) {
311 * This is a non-relocated address, and we're going to
312 * convert it into RAWDATA format.
314 uint8_t *q = p;
316 if (size > 8) {
317 errfunc(ERR_PANIC, "OUT_ADDRESS with size > 8");
318 return;
321 WRITEADDR(q, *(int64_t *)data, size);
322 data = p;
323 type = OUT_RAWDATA;
326 list->output(offset, data, type, size);
329 * This call to src_get determines when we call the
330 * debug-format-specific "linenum" function.
331 * It updates lineno and lnfname to the current values,
332 * returning 0 if "same as last time", -2 if lnfname
333 * changed, and the amount by which lineno changed,
334 * if it did. Thus, these variables must be static.
337 if (src_get(&lineno, &lnfname))
338 outfmt->current_dfmt->linenum(lnfname, lineno, segto);
340 outfmt->output(segto, data, type, size, segment, wrt);
343 static void out_imm8(int64_t offset, int32_t segment, struct operand *opx)
345 if (opx->segment != NO_SEG) {
346 uint64_t data = opx->offset;
347 out(offset, segment, &data, OUT_ADDRESS, 1, opx->segment, opx->wrt);
348 } else {
349 uint8_t byte = opx->offset;
350 out(offset, segment, &byte, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
354 static bool jmp_match(int32_t segment, int64_t offset, int bits,
355 insn * ins, const struct itemplate *temp)
357 int64_t isize;
358 const uint8_t *code = temp->code;
359 uint8_t c = code[0];
361 if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
362 return false;
363 if (!optimizing)
364 return false;
365 if (optimizing < 0 && c == 0371)
366 return false;
368 isize = calcsize(segment, offset, bits, ins, temp);
370 if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
371 /* Be optimistic in pass 1 */
372 return true;
374 if (ins->oprs[0].segment != segment)
375 return false;
377 isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
378 return (isize >= -128 && isize <= 127); /* is it byte size? */
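/*
 * Example: if the short-form template assembles to 2 bytes and the target
 * label lies 30 bytes past the start of the jump, the delta computed above
 * is 30 - 2 = 28, which fits in a signed byte, so the short form can be
 * used; a target 200 bytes ahead would not fit, and this template is
 * rejected in favour of the near form.
 */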
381 int64_t assemble(int32_t segment, int64_t offset, int bits, iflags_t cp,
382 insn * instruction, struct ofmt *output, efunc error,
383 ListGen * listgen)
385 const struct itemplate *temp;
386 int j;
387 enum match_result m;
388 int64_t insn_end;
389 int32_t itimes;
390 int64_t start = offset;
391 int64_t wsize; /* size for DB etc. */
393 errfunc = error; /* to pass to other functions */
394 cpu = cp;
395 outfmt = output; /* likewise */
396 list = listgen; /* and again */
398 wsize = idata_bytes(instruction->opcode);
399 if (wsize == -1)
400 return 0;
402 if (wsize) {
403 extop *e;
404 int32_t t = instruction->times;
405 if (t < 0)
406 errfunc(ERR_PANIC,
407 "instruction->times < 0 (%ld) in assemble()", t);
409 while (t--) { /* repeat TIMES times */
410 list_for_each(e, instruction->eops) {
411 if (e->type == EOT_DB_NUMBER) {
412 if (wsize > 8) {
413 errfunc(ERR_NONFATAL,
414 "integer supplied to a DT, DO or DY"
415 " instruction");
416 } else {
417 out(offset, segment, &e->offset,
418 OUT_ADDRESS, wsize, e->segment, e->wrt);
419 offset += wsize;
421 } else if (e->type == EOT_DB_STRING ||
422 e->type == EOT_DB_STRING_FREE) {
423 int align;
425 out(offset, segment, e->stringval,
426 OUT_RAWDATA, e->stringlen, NO_SEG, NO_SEG);
427 align = e->stringlen % wsize;
429 if (align) {
430 align = wsize - align;
431 out(offset, segment, zero_buffer,
432 OUT_RAWDATA, align, NO_SEG, NO_SEG);
434 offset += e->stringlen + align;
437 if (t > 0 && t == instruction->times - 1) {
439 * Dummy call to list->output to give the offset to the
440 * listing module.
442 list->output(offset, NULL, OUT_RAWDATA, 0);
443 list->uplevel(LIST_TIMES);
446 if (instruction->times > 1)
447 list->downlevel(LIST_TIMES);
448 return offset - start;
451 if (instruction->opcode == I_INCBIN) {
452 const char *fname = instruction->eops->stringval;
453 FILE *fp;
455 fp = fopen(fname, "rb");
456 if (!fp) {
457 error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
458 fname);
459 } else if (fseek(fp, 0L, SEEK_END) < 0) {
460 error(ERR_NONFATAL, "`incbin': unable to seek on file `%s'",
461 fname);
462 fclose(fp);
463 } else {
464 static char buf[4096];
465 size_t t = instruction->times;
466 size_t base = 0;
467 size_t len;
469 len = ftell(fp);
470 if (instruction->eops->next) {
471 base = instruction->eops->next->offset;
472 len -= base;
473 if (instruction->eops->next->next &&
474 len > (size_t)instruction->eops->next->next->offset)
475 len = (size_t)instruction->eops->next->next->offset;
478 * Dummy call to list->output to give the offset to the
479 * listing module.
481 list->output(offset, NULL, OUT_RAWDATA, 0);
482 list->uplevel(LIST_INCBIN);
483 while (t--) {
484 size_t l;
486 fseek(fp, base, SEEK_SET);
487 l = len;
488 while (l > 0) {
489 int32_t m;
490 m = fread(buf, 1, l > sizeof(buf) ? sizeof(buf) : l, fp);
491 if (!m) {
493 * This shouldn't happen unless the file
494 * actually changes while we are reading
495 * it.
497 error(ERR_NONFATAL,
498 "`incbin': unexpected EOF while"
499 " reading file `%s'", fname);
500 t = 0; /* Try to exit cleanly */
501 break;
503 out(offset, segment, buf, OUT_RAWDATA, m,
504 NO_SEG, NO_SEG);
505 l -= m;
508 list->downlevel(LIST_INCBIN);
509 if (instruction->times > 1) {
511 * Dummy call to list->output to give the offset to the
512 * listing module.
514 list->output(offset, NULL, OUT_RAWDATA, 0);
515 list->uplevel(LIST_TIMES);
516 list->downlevel(LIST_TIMES);
518 fclose(fp);
519 return instruction->times * len;
521 return 0; /* if we're here, there's an error */
524 /* Check to see if we need an address-size prefix */
525 add_asp(instruction, bits);
527 m = find_match(&temp, instruction, segment, offset, bits);
529 if (m == MOK_GOOD) {
530 /* Matches! */
531 int64_t insn_size = calcsize(segment, offset, bits, instruction, temp);
532 itimes = instruction->times;
533 if (insn_size < 0) /* shouldn't be, on pass two */
534 error(ERR_PANIC, "errors made it through from pass one");
535 else
536 while (itimes--) {
537 for (j = 0; j < MAXPREFIX; j++) {
538 uint8_t c = 0;
539 switch (instruction->prefixes[j]) {
540 case P_WAIT:
541 c = 0x9B;
542 break;
543 case P_LOCK:
544 c = 0xF0;
545 break;
546 case P_REPNE:
547 case P_REPNZ:
548 case P_XACQUIRE:
549 c = 0xF2;
550 break;
551 case P_REPE:
552 case P_REPZ:
553 case P_REP:
554 case P_XRELEASE:
555 c = 0xF3;
556 break;
557 case R_CS:
558 if (bits == 64) {
559 error(ERR_WARNING | ERR_PASS2,
560 "cs segment base generated, but will be ignored in 64-bit mode");
562 c = 0x2E;
563 break;
564 case R_DS:
565 if (bits == 64) {
566 error(ERR_WARNING | ERR_PASS2,
567 "ds segment base generated, but will be ignored in 64-bit mode");
569 c = 0x3E;
570 break;
571 case R_ES:
572 if (bits == 64) {
573 error(ERR_WARNING | ERR_PASS2,
574 "es segment base generated, but will be ignored in 64-bit mode");
576 c = 0x26;
577 break;
578 case R_FS:
579 c = 0x64;
580 break;
581 case R_GS:
582 c = 0x65;
583 break;
584 case R_SS:
585 if (bits == 64) {
586 error(ERR_WARNING | ERR_PASS2,
587 "ss segment base generated, but will be ignored in 64-bit mode");
589 c = 0x36;
590 break;
591 case R_SEGR6:
592 case R_SEGR7:
593 error(ERR_NONFATAL,
594 "segr6 and segr7 cannot be used as prefixes");
595 break;
596 case P_A16:
597 if (bits == 64) {
598 error(ERR_NONFATAL,
599 "16-bit addressing is not supported "
600 "in 64-bit mode");
601 } else if (bits != 16)
602 c = 0x67;
603 break;
604 case P_A32:
605 if (bits != 32)
606 c = 0x67;
607 break;
608 case P_A64:
609 if (bits != 64) {
610 error(ERR_NONFATAL,
611 "64-bit addressing is only supported "
612 "in 64-bit mode");
614 break;
615 case P_ASP:
616 c = 0x67;
617 break;
618 case P_O16:
619 if (bits != 16)
620 c = 0x66;
621 break;
622 case P_O32:
623 if (bits == 16)
624 c = 0x66;
625 break;
626 case P_O64:
627 /* REX.W */
628 break;
629 case P_OSP:
630 c = 0x66;
631 break;
632 case P_none:
633 break;
634 default:
635 error(ERR_PANIC, "invalid instruction prefix");
637 if (c != 0) {
638 out(offset, segment, &c, OUT_RAWDATA, 1,
639 NO_SEG, NO_SEG);
640 offset++;
643 insn_end = offset + insn_size;
644 gencode(segment, offset, bits, instruction,
645 temp, insn_end);
646 offset += insn_size;
647 if (itimes > 0 && itimes == instruction->times - 1) {
649 * Dummy call to list->output to give the offset to the
650 * listing module.
652 list->output(offset, NULL, OUT_RAWDATA, 0);
653 list->uplevel(LIST_TIMES);
656 if (instruction->times > 1)
657 list->downlevel(LIST_TIMES);
658 return offset - start;
659 } else {
660 /* No match */
661 switch (m) {
662 case MERR_OPSIZEMISSING:
663 error(ERR_NONFATAL, "operation size not specified");
664 break;
665 case MERR_OPSIZEMISMATCH:
666 error(ERR_NONFATAL, "mismatch in operand sizes");
667 break;
668 case MERR_BADCPU:
669 error(ERR_NONFATAL, "no instruction for this cpu level");
670 break;
671 case MERR_BADMODE:
672 error(ERR_NONFATAL, "instruction not supported in %d-bit mode",
673 bits);
674 break;
675 default:
676 error(ERR_NONFATAL,
677 "invalid combination of opcode and operands");
678 break;
681 return 0;
684 int64_t insn_size(int32_t segment, int64_t offset, int bits, iflags_t cp,
685 insn * instruction, efunc error)
687 const struct itemplate *temp;
688 enum match_result m;
690 errfunc = error; /* to pass to other functions */
691 cpu = cp;
693 if (instruction->opcode == I_none)
694 return 0;
696 if (instruction->opcode == I_DB || instruction->opcode == I_DW ||
697 instruction->opcode == I_DD || instruction->opcode == I_DQ ||
698 instruction->opcode == I_DT || instruction->opcode == I_DO ||
699 instruction->opcode == I_DY) {
700 extop *e;
701 int32_t isize, osize, wsize;
703 isize = 0;
704 wsize = idata_bytes(instruction->opcode);
706 list_for_each(e, instruction->eops) {
707 int32_t align;
709 osize = 0;
710 if (e->type == EOT_DB_NUMBER) {
711 osize = 1;
712 warn_overflow_const(e->offset, wsize);
713 } else if (e->type == EOT_DB_STRING ||
714 e->type == EOT_DB_STRING_FREE)
715 osize = e->stringlen;
717 align = (-osize) % wsize;
718 if (align < 0)
719 align += wsize;
720 isize += osize + align;
722 return isize * instruction->times;
725 if (instruction->opcode == I_INCBIN) {
726 const char *fname = instruction->eops->stringval;
727 FILE *fp;
728 int64_t val = 0;
729 size_t len;
731 fp = fopen(fname, "rb");
732 if (!fp)
733 error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
734 fname);
735 else if (fseek(fp, 0L, SEEK_END) < 0)
736 error(ERR_NONFATAL, "`incbin': unable to seek on file `%s'",
737 fname);
738 else {
739 len = ftell(fp);
740 if (instruction->eops->next) {
741 len -= instruction->eops->next->offset;
742 if (instruction->eops->next->next &&
743 len > (size_t)instruction->eops->next->next->offset) {
744 len = (size_t)instruction->eops->next->next->offset;
747 val = instruction->times * len;
749 if (fp)
750 fclose(fp);
751 return val;
754 /* Check to see if we need an address-size prefix */
755 add_asp(instruction, bits);
757 m = find_match(&temp, instruction, segment, offset, bits);
758 if (m == MOK_GOOD) {
759 /* we've matched an instruction. */
760 int64_t isize;
761 int j;
763 isize = calcsize(segment, offset, bits, instruction, temp);
764 if (isize < 0)
765 return -1;
766 for (j = 0; j < MAXPREFIX; j++) {
767 switch (instruction->prefixes[j]) {
768 case P_A16:
769 if (bits != 16)
770 isize++;
771 break;
772 case P_A32:
773 if (bits != 32)
774 isize++;
775 break;
776 case P_O16:
777 if (bits != 16)
778 isize++;
779 break;
780 case P_O32:
781 if (bits == 16)
782 isize++;
783 break;
784 case P_A64:
785 case P_O64:
786 case P_none:
787 break;
788 default:
789 isize++;
790 break;
793 return isize * instruction->times;
794 } else {
795 return -1; /* didn't match any instruction */
799 static void bad_hle_warn(const insn * ins, uint8_t hleok)
801 enum prefixes rep_pfx = ins->prefixes[PPS_REP];
802 enum whatwarn { w_none, w_lock, w_inval } ww;
803 static const enum whatwarn warn[2][4] =
805 { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
806 { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
808 unsigned int n;
810 n = (unsigned int)rep_pfx - P_XACQUIRE;
811 if (n > 1)
812 return; /* Not XACQUIRE/XRELEASE */
814 ww = warn[n][hleok];
815 if (!is_class(MEMORY, ins->oprs[0].type))
816 ww = w_inval; /* HLE requires operand 0 to be memory */
818 switch (ww) {
819 case w_none:
820 break;
822 case w_lock:
823 if (ins->prefixes[PPS_LOCK] != P_LOCK) {
824 errfunc(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
825 "%s with this instruction requires lock",
826 prefix_name(rep_pfx));
828 break;
830 case w_inval:
831 errfunc(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
832 "%s invalid with this instruction",
833 prefix_name(rep_pfx));
834 break;
838 /* Common construct */
839 #define case3(x) case (x): case (x)+1: case (x)+2
840 #define case4(x) case3(x): case (x)+3
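/* e.g. case4(010) covers the four byte codes 010, 011, 012 and 013 (octal) */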
842 static int64_t calcsize(int32_t segment, int64_t offset, int bits,
843 insn * ins, const struct itemplate *temp)
845 const uint8_t *codes = temp->code;
846 int64_t length = 0;
847 uint8_t c;
848 int rex_mask = ~0;
849 int op1, op2;
850 struct operand *opx;
851 uint8_t opex = 0;
852 enum ea_type eat;
853 uint8_t hleok = 0;
854 bool lockcheck = true;
856 ins->rex = 0; /* Ensure REX is reset */
857 eat = EA_SCALAR; /* Expect a scalar EA */
858 memset(ins->evex_p, 0, 3); /* Ensure EVEX is reset */
860 if (ins->prefixes[PPS_OSIZE] == P_O64)
861 ins->rex |= REX_W;
863 (void)segment; /* Don't warn that this parameter is unused */
864 (void)offset; /* Don't warn that this parameter is unused */
866 while (*codes) {
867 c = *codes++;
868 op1 = (c & 3) + ((opex & 1) << 2);
869 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
870 opx = &ins->oprs[op1];
871 opex = 0; /* For the next iteration */
873 switch (c) {
874 case4(01):
875 codes += c, length += c;
876 break;
878 case3(05):
879 opex = c;
880 break;
882 case4(010):
883 ins->rex |=
884 op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
885 codes++, length++;
886 break;
888 case4(020):
889 case4(024):
890 length++;
891 break;
893 case4(030):
894 length += 2;
895 break;
897 case4(034):
898 if (opx->type & (BITS16 | BITS32 | BITS64))
899 length += (opx->type & BITS16) ? 2 : 4;
900 else
901 length += (bits == 16) ? 2 : 4;
902 break;
904 case4(040):
905 length += 4;
906 break;
908 case4(044):
909 length += ins->addr_size >> 3;
910 break;
912 case4(050):
913 length++;
914 break;
916 case4(054):
917 length += 8; /* MOV reg64/imm */
918 break;
920 case4(060):
921 length += 2;
922 break;
924 case4(064):
925 if (opx->type & (BITS16 | BITS32 | BITS64))
926 length += (opx->type & BITS16) ? 2 : 4;
927 else
928 length += (bits == 16) ? 2 : 4;
929 break;
931 case4(070):
932 length += 4;
933 break;
935 case4(074):
936 length += 2;
937 break;
939 case 0172:
940 case 0173:
941 codes++;
942 length++;
943 break;
945 case4(0174):
946 length++;
947 break;
949 case4(0240):
950 ins->rex |= REX_EV;
951 ins->vexreg = regval(opx);
952 ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
953 ins->vex_cm = *codes++;
954 ins->vex_wlp = *codes++;
955 ins->evex_tuple = (*codes++ - 0300);
956 break;
958 case 0250:
959 ins->rex |= REX_EV;
960 ins->vexreg = 0;
961 ins->vex_cm = *codes++;
962 ins->vex_wlp = *codes++;
963 ins->evex_tuple = (*codes++ - 0300);
964 break;
966 case4(0254):
967 length += 4;
968 break;
970 case4(0260):
971 ins->rex |= REX_V;
972 ins->vexreg = regval(opx);
973 ins->vex_cm = *codes++;
974 ins->vex_wlp = *codes++;
975 break;
977 case 0270:
978 ins->rex |= REX_V;
979 ins->vexreg = 0;
980 ins->vex_cm = *codes++;
981 ins->vex_wlp = *codes++;
982 break;
984 case3(0271):
985 hleok = c & 3;
986 break;
988 case4(0274):
989 length++;
990 break;
992 case4(0300):
993 break;
995 case 0310:
996 if (bits == 64)
997 return -1;
998 length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
999 break;
1001 case 0311:
1002 length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
1003 break;
1005 case 0312:
1006 break;
1008 case 0313:
1009 if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
1010 has_prefix(ins, PPS_ASIZE, P_A32))
1011 return -1;
1012 break;
1014 case4(0314):
1015 break;
1017 case 0320:
1019 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1020 if (pfx == P_O16)
1021 break;
1022 if (pfx != P_none)
1023 errfunc(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1024 else
1025 ins->prefixes[PPS_OSIZE] = P_O16;
1026 break;
1029 case 0321:
1031 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1032 if (pfx == P_O32)
1033 break;
1034 if (pfx != P_none)
1035 errfunc(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1036 else
1037 ins->prefixes[PPS_OSIZE] = P_O32;
1038 break;
1041 case 0322:
1042 break;
1044 case 0323:
1045 rex_mask &= ~REX_W;
1046 break;
1048 case 0324:
1049 ins->rex |= REX_W;
1050 break;
1052 case 0325:
1053 ins->rex |= REX_NH;
1054 break;
1056 case 0326:
1057 break;
1059 case 0330:
1060 codes++, length++;
1061 break;
1063 case 0331:
1064 break;
1066 case 0332:
1067 case 0333:
1068 length++;
1069 break;
1071 case 0334:
1072 ins->rex |= REX_L;
1073 break;
1075 case 0335:
1076 break;
1078 case 0336:
1079 if (!ins->prefixes[PPS_REP])
1080 ins->prefixes[PPS_REP] = P_REP;
1081 break;
1083 case 0337:
1084 if (!ins->prefixes[PPS_REP])
1085 ins->prefixes[PPS_REP] = P_REPNE;
1086 break;
1088 case 0340:
1089 if (ins->oprs[0].segment != NO_SEG)
1090 errfunc(ERR_NONFATAL, "attempt to reserve non-constant"
1091 " quantity of BSS space");
1092 else
1093 length += ins->oprs[0].offset;
1094 break;
1096 case 0341:
1097 if (!ins->prefixes[PPS_WAIT])
1098 ins->prefixes[PPS_WAIT] = P_WAIT;
1099 break;
1101 case 0360:
1102 break;
1104 case 0361:
1105 length++;
1106 break;
1108 case 0364:
1109 case 0365:
1110 break;
1112 case 0366:
1113 case 0367:
1114 length++;
1115 break;
1117 case3(0370):
1118 break;
1120 case 0373:
1121 length++;
1122 break;
1124 case 0374:
1125 eat = EA_XMMVSIB;
1126 break;
1128 case 0375:
1129 eat = EA_YMMVSIB;
1130 break;
1132 case 0376:
1133 eat = EA_ZMMVSIB;
1134 break;
1136 case4(0100):
1137 case4(0110):
1138 case4(0120):
1139 case4(0130):
1140 case4(0200):
1141 case4(0204):
1142 case4(0210):
1143 case4(0214):
1144 case4(0220):
1145 case4(0224):
1146 case4(0230):
1147 case4(0234):
1149 ea ea_data;
1150 int rfield;
1151 opflags_t rflags;
1152 struct operand *opy = &ins->oprs[op2];
1153 struct operand *op_er_sae;
1155 ea_data.rex = 0; /* Ensure ea.REX is initially 0 */
1157 if (c <= 0177) {
1158 /* pick rfield from operand b (opx) */
1159 rflags = regflag(opx);
1160 rfield = nasm_regvals[opx->basereg];
1161 } else {
1162 rflags = 0;
1163 rfield = c & 7;
1166 /* EVEX.b1 : evex_brerop contains the operand position */
1167 op_er_sae = (ins->evex_brerop >= 0 ?
1168 &ins->oprs[ins->evex_brerop] : NULL);
1170 if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
1171 /* set EVEX.b */
1172 ins->evex_p[2] |= EVEX_P2B;
1173 if (op_er_sae->decoflags & ER) {
1174 /* set EVEX.RC (rounding control) */
1175 ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
1176 & EVEX_P2RC;
1178 } else {
1179 /* set EVEX.L'L (vector length) */
1180 ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
1181 if (opy->decoflags & BRDCAST_MASK) {
1182 /* set EVEX.b */
1183 ins->evex_p[2] |= EVEX_P2B;
1187 if (process_ea(opy, &ea_data, bits,
1188 rfield, rflags, ins) != eat) {
1189 errfunc(ERR_NONFATAL, "invalid effective address");
1190 return -1;
1191 } else {
1192 ins->rex |= ea_data.rex;
1193 length += ea_data.size;
1196 break;
1198 default:
1199 errfunc(ERR_PANIC, "internal instruction table corrupt"
1200 ": instruction code \\%o (0x%02X) given", c, c);
1201 break;
1205 ins->rex &= rex_mask;
1207 if (ins->rex & REX_NH) {
1208 if (ins->rex & REX_H) {
1209 errfunc(ERR_NONFATAL, "instruction cannot use high registers");
1210 return -1;
1212 ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
1215 if (ins->rex & (REX_V | REX_EV)) {
1216 int bad32 = REX_R|REX_W|REX_X|REX_B;
1218 if (ins->rex & REX_H) {
1219 errfunc(ERR_NONFATAL, "cannot use high register in AVX instruction");
1220 return -1;
1222 switch (ins->vex_wlp & 060) {
1223 case 000:
1224 case 040:
1225 ins->rex &= ~REX_W;
1226 break;
1227 case 020:
1228 ins->rex |= REX_W;
1229 bad32 &= ~REX_W;
1230 break;
1231 case 060:
1232 /* Follow REX_W */
1233 break;
1236 if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
1237 errfunc(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1238 return -1;
1239 } else if (!(ins->rex & REX_EV) &&
1240 ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
1241 errfunc(ERR_NONFATAL, "invalid high-16 register in non-AVX-512");
1242 return -1;
1244 if (ins->rex & REX_EV)
1245 length += 4;
1246 else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)))
1247 length += 3;
1248 else
1249 length += 2;
1250 } else if (ins->rex & REX_REAL) {
1251 if (ins->rex & REX_H) {
1252 errfunc(ERR_NONFATAL, "cannot use high register in rex instruction");
1253 return -1;
1254 } else if (bits == 64) {
1255 length++;
1256 } else if ((ins->rex & REX_L) &&
1257 !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
1258 cpu >= IF_X86_64) {
1259 /* LOCK-as-REX.R */
1260 assert_no_prefix(ins, PPS_LOCK);
1261 lockcheck = false; /* Already errored, no need for warning */
1262 length++;
1263 } else {
1264 errfunc(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1265 return -1;
1269 if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
1270 (!(temp->flags & IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
1271 errfunc(ERR_WARNING | ERR_WARN_LOCK | ERR_PASS2 ,
1272 "instruction is not lockable");
1275 bad_hle_warn(ins, hleok);
1277 return length;
1280 static inline unsigned int emit_rex(insn *ins, int32_t segment, int64_t offset, int bits)
1282 if (bits == 64) {
1283 if ((ins->rex & REX_REAL) && !(ins->rex & (REX_V | REX_EV))) {
1284 ins->rex = (ins->rex & REX_REAL) | REX_P;
1285 out(offset, segment, &ins->rex, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1286 ins->rex = 0;
1287 return 1;
1291 return 0;
1294 static void gencode(int32_t segment, int64_t offset, int bits,
1295 insn * ins, const struct itemplate *temp,
1296 int64_t insn_end)
1298 uint8_t c;
1299 uint8_t bytes[4];
1300 int64_t size;
1301 int64_t data;
1302 int op1, op2;
1303 struct operand *opx;
1304 const uint8_t *codes = temp->code;
1305 uint8_t opex = 0;
1306 enum ea_type eat = EA_SCALAR;
1308 while (*codes) {
1309 c = *codes++;
1310 op1 = (c & 3) + ((opex & 1) << 2);
1311 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
1312 opx = &ins->oprs[op1];
1313 opex = 0; /* For the next iteration */
1315 switch (c) {
1316 case 01:
1317 case 02:
1318 case 03:
1319 case 04:
1320 offset += emit_rex(ins, segment, offset, bits);
1321 out(offset, segment, codes, OUT_RAWDATA, c, NO_SEG, NO_SEG);
1322 codes += c;
1323 offset += c;
1324 break;
1326 case 05:
1327 case 06:
1328 case 07:
1329 opex = c;
1330 break;
1332 case4(010):
1333 offset += emit_rex(ins, segment, offset, bits);
1334 bytes[0] = *codes++ + (regval(opx) & 7);
1335 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1336 offset += 1;
1337 break;
1339 case4(020):
1340 if (opx->offset < -256 || opx->offset > 255) {
1341 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1342 "byte value exceeds bounds");
1344 out_imm8(offset, segment, opx);
1345 offset += 1;
1346 break;
1348 case4(024):
1349 if (opx->offset < 0 || opx->offset > 255)
1350 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1351 "unsigned byte value exceeds bounds");
1352 out_imm8(offset, segment, opx);
1353 offset += 1;
1354 break;
1356 case4(030):
1357 warn_overflow_opd(opx, 2);
1358 data = opx->offset;
1359 out(offset, segment, &data, OUT_ADDRESS, 2,
1360 opx->segment, opx->wrt);
1361 offset += 2;
1362 break;
1364 case4(034):
1365 if (opx->type & (BITS16 | BITS32))
1366 size = (opx->type & BITS16) ? 2 : 4;
1367 else
1368 size = (bits == 16) ? 2 : 4;
1369 warn_overflow_opd(opx, size);
1370 data = opx->offset;
1371 out(offset, segment, &data, OUT_ADDRESS, size,
1372 opx->segment, opx->wrt);
1373 offset += size;
1374 break;
1376 case4(040):
1377 warn_overflow_opd(opx, 4);
1378 data = opx->offset;
1379 out(offset, segment, &data, OUT_ADDRESS, 4,
1380 opx->segment, opx->wrt);
1381 offset += 4;
1382 break;
1384 case4(044):
1385 data = opx->offset;
1386 size = ins->addr_size >> 3;
1387 warn_overflow_opd(opx, size);
1388 out(offset, segment, &data, OUT_ADDRESS, size,
1389 opx->segment, opx->wrt);
1390 offset += size;
1391 break;
1393 case4(050):
1394 if (opx->segment != segment) {
1395 data = opx->offset;
1396 out(offset, segment, &data,
1397 OUT_REL1ADR, insn_end - offset,
1398 opx->segment, opx->wrt);
1399 } else {
1400 data = opx->offset - insn_end;
1401 if (data > 127 || data < -128)
1402 errfunc(ERR_NONFATAL, "short jump is out of range");
1403 out(offset, segment, &data,
1404 OUT_ADDRESS, 1, NO_SEG, NO_SEG);
1406 offset += 1;
1407 break;
1409 case4(054):
1410 data = (int64_t)opx->offset;
1411 out(offset, segment, &data, OUT_ADDRESS, 8,
1412 opx->segment, opx->wrt);
1413 offset += 8;
1414 break;
1416 case4(060):
1417 if (opx->segment != segment) {
1418 data = opx->offset;
1419 out(offset, segment, &data,
1420 OUT_REL2ADR, insn_end - offset,
1421 opx->segment, opx->wrt);
1422 } else {
1423 data = opx->offset - insn_end;
1424 out(offset, segment, &data,
1425 OUT_ADDRESS, 2, NO_SEG, NO_SEG);
1427 offset += 2;
1428 break;
1430 case4(064):
1431 if (opx->type & (BITS16 | BITS32 | BITS64))
1432 size = (opx->type & BITS16) ? 2 : 4;
1433 else
1434 size = (bits == 16) ? 2 : 4;
1435 if (opx->segment != segment) {
1436 data = opx->offset;
1437 out(offset, segment, &data,
1438 size == 2 ? OUT_REL2ADR : OUT_REL4ADR,
1439 insn_end - offset, opx->segment, opx->wrt);
1440 } else {
1441 data = opx->offset - insn_end;
1442 out(offset, segment, &data,
1443 OUT_ADDRESS, size, NO_SEG, NO_SEG);
1445 offset += size;
1446 break;
1448 case4(070):
1449 if (opx->segment != segment) {
1450 data = opx->offset;
1451 out(offset, segment, &data,
1452 OUT_REL4ADR, insn_end - offset,
1453 opx->segment, opx->wrt);
1454 } else {
1455 data = opx->offset - insn_end;
1456 out(offset, segment, &data,
1457 OUT_ADDRESS, 4, NO_SEG, NO_SEG);
1459 offset += 4;
1460 break;
1462 case4(074):
1463 if (opx->segment == NO_SEG)
1464 errfunc(ERR_NONFATAL, "value referenced by FAR is not"
1465 " relocatable");
1466 data = 0;
1467 out(offset, segment, &data, OUT_ADDRESS, 2,
1468 outfmt->segbase(1 + opx->segment),
1469 opx->wrt);
1470 offset += 2;
1471 break;
1473 case 0172:
1474 c = *codes++;
1475 opx = &ins->oprs[c >> 3];
1476 bytes[0] = nasm_regvals[opx->basereg] << 4;
1477 opx = &ins->oprs[c & 7];
1478 if (opx->segment != NO_SEG || opx->wrt != NO_SEG) {
1479 errfunc(ERR_NONFATAL,
1480 "non-absolute expression not permitted as argument %d",
1481 c & 7);
1482 } else {
1483 if (opx->offset & ~15) {
1484 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1485 "four-bit argument exceeds bounds");
1487 bytes[0] |= opx->offset & 15;
1489 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1490 offset++;
1491 break;
1493 case 0173:
1494 c = *codes++;
1495 opx = &ins->oprs[c >> 4];
1496 bytes[0] = nasm_regvals[opx->basereg] << 4;
1497 bytes[0] |= c & 15;
1498 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1499 offset++;
1500 break;
1502 case4(0174):
1503 bytes[0] = nasm_regvals[opx->basereg] << 4;
1504 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1505 offset++;
1506 break;
1508 case4(0254):
1509 data = opx->offset;
1510 if (opx->wrt == NO_SEG && opx->segment == NO_SEG &&
1511 (int32_t)data != (int64_t)data) {
1512 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1513 "signed dword immediate exceeds bounds");
1515 out(offset, segment, &data, OUT_ADDRESS, 4,
1516 opx->segment, opx->wrt);
1517 offset += 4;
1518 break;
1520 case4(0240):
1521 case 0250:
1522 codes += 3;
1523 ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
1524 EVEX_P2Z | EVEX_P2AAA, 2);
1525 ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
1526 bytes[0] = 0x62;
1527 /* EVEX.X can be set by either REX or EVEX for different reasons */
1528 bytes[1] = (~(((ins->rex & 7) << 5) |
1529 (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) & 0xf0) |
1530 (ins->vex_cm & 3);
1531 bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
1532 ((~ins->vexreg & 15) << 3) |
1533 (1 << 2) | (ins->vex_wlp & 3);
1534 bytes[3] = ins->evex_p[2];
1535 out(offset, segment, &bytes, OUT_RAWDATA, 4, NO_SEG, NO_SEG);
1536 offset += 4;
1537 break;
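/*
 * Worked example (illustrative): for something like
 * "vaddps zmm1, zmm2, zmm3" the four bytes built above come out as
 *   0x62       - fixed EVEX escape byte
 *   P0 = 0xF1  - ~R/~X/~B/~R' all 1 (no extended registers), mm = 01 (0F map)
 *   P1 = 0x6C  - W = 0, ~vvvv = ~0010b = 1101b, fixed 1 bit, pp = 00
 *   P2 = 0x48  - z = 0, L'L = 10b (512-bit), b = 0, ~V' = 1, aaa = 000
 * followed by the opcode and ModRM bytes (58 CB).
 */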
1539 case4(0260):
1540 case 0270:
1541 codes += 2;
1542 if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B))) {
1543 bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
1544 bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
1545 bytes[2] = ((ins->rex & REX_W) << (7-3)) |
1546 ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
1547 out(offset, segment, &bytes, OUT_RAWDATA, 3, NO_SEG, NO_SEG);
1548 offset += 3;
1549 } else {
1550 bytes[0] = 0xc5;
1551 bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
1552 ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
1553 out(offset, segment, &bytes, OUT_RAWDATA, 2, NO_SEG, NO_SEG);
1554 offset += 2;
1556 break;
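/*
 * Illustration: with vex_cm == 1 (the 0F map) and none of W/X/B set, the
 * short two-byte form is used; e.g. "vaddps xmm1, xmm2, xmm3" should come
 * out as C5 E8 58 CB, where E8 = ~R (1), ~vvvv (1101b), L = 0, pp = 00.
 * Anything needing the 0F38/0F3A maps, XOP, or the W/X/B bits falls back
 * to the three-byte C4/8F form above.
 */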
1558 case 0271:
1559 case 0272:
1560 case 0273:
1561 break;
1563 case4(0274):
1565 uint64_t uv, um;
1566 int s;
1568 if (ins->rex & REX_W)
1569 s = 64;
1570 else if (ins->prefixes[PPS_OSIZE] == P_O16)
1571 s = 16;
1572 else if (ins->prefixes[PPS_OSIZE] == P_O32)
1573 s = 32;
1574 else
1575 s = bits;
1577 um = (uint64_t)2 << (s-1);
1578 uv = opx->offset;
1580 if (uv > 127 && uv < (uint64_t)-128 &&
1581 (uv < um-128 || uv > um-1)) {
1582 /* If this wasn't explicitly byte-sized, warn as though we
1583 * had fallen through to the imm16/32/64 case.
1585 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1586 "%s value exceeds bounds",
1587 (opx->type & BITS8) ? "signed byte" :
1588 s == 16 ? "word" :
1589 s == 32 ? "dword" :
1590 "signed dword");
1592 if (opx->segment != NO_SEG) {
1593 data = uv;
1594 out(offset, segment, &data, OUT_ADDRESS, 1,
1595 opx->segment, opx->wrt);
1596 } else {
1597 bytes[0] = uv;
1598 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG,
1599 NO_SEG);
1601 offset += 1;
1602 break;
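/*
 * Illustration of the bounds check above: with s == 32, um is 2^32, and
 * the values that escape the warning are 0..127, 0xFFFFFF80..0xFFFFFFFF
 * (negatives already truncated to 32 bits), and 64-bit values >=
 * (uint64_t)-128 -- exactly those an imm8 can reproduce by sign
 * extension.  A value such as 128 cannot, so it draws the warning.
 */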
1605 case4(0300):
1606 break;
1608 case 0310:
1609 if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16)) {
1610 *bytes = 0x67;
1611 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1612 offset += 1;
1613 } else
1614 offset += 0;
1615 break;
1617 case 0311:
1618 if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32)) {
1619 *bytes = 0x67;
1620 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1621 offset += 1;
1622 } else
1623 offset += 0;
1624 break;
1626 case 0312:
1627 break;
1629 case 0313:
1630 ins->rex = 0;
1631 break;
1633 case4(0314):
1634 break;
1636 case 0320:
1637 case 0321:
1638 break;
1640 case 0322:
1641 case 0323:
1642 break;
1644 case 0324:
1645 ins->rex |= REX_W;
1646 break;
1648 case 0325:
1649 break;
1651 case 0326:
1652 break;
1654 case 0330:
1655 *bytes = *codes++ ^ get_cond_opcode(ins->condition);
1656 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1657 offset += 1;
1658 break;
1660 case 0331:
1661 break;
1663 case 0332:
1664 case 0333:
1665 *bytes = c - 0332 + 0xF2;
1666 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1667 offset += 1;
1668 break;
1670 case 0334:
1671 if (ins->rex & REX_R) {
1672 *bytes = 0xF0;
1673 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1674 offset += 1;
1676 ins->rex &= ~(REX_L|REX_R);
1677 break;
1679 case 0335:
1680 break;
1682 case 0336:
1683 case 0337:
1684 break;
1686 case 0340:
1687 if (ins->oprs[0].segment != NO_SEG)
1688 errfunc(ERR_PANIC, "non-constant BSS size in pass two");
1689 else {
1690 int64_t size = ins->oprs[0].offset;
1691 if (size > 0)
1692 out(offset, segment, NULL,
1693 OUT_RESERVE, size, NO_SEG, NO_SEG);
1694 offset += size;
1696 break;
1698 case 0341:
1699 break;
1701 case 0360:
1702 break;
1704 case 0361:
1705 bytes[0] = 0x66;
1706 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1707 offset += 1;
1708 break;
1710 case 0364:
1711 case 0365:
1712 break;
1714 case 0366:
1715 case 0367:
1716 *bytes = c - 0366 + 0x66;
1717 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1718 offset += 1;
1719 break;
1721 case 0370:
1722 case 0371:
1723 break;
1725 case 0373:
1726 *bytes = bits == 16 ? 3 : 5;
1727 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1728 offset += 1;
1729 break;
1731 case 0374:
1732 eat = EA_XMMVSIB;
1733 break;
1735 case 0375:
1736 eat = EA_YMMVSIB;
1737 break;
1739 case 0376:
1740 eat = EA_ZMMVSIB;
1741 break;
1743 case4(0100):
1744 case4(0110):
1745 case4(0120):
1746 case4(0130):
1747 case4(0200):
1748 case4(0204):
1749 case4(0210):
1750 case4(0214):
1751 case4(0220):
1752 case4(0224):
1753 case4(0230):
1754 case4(0234):
1756 ea ea_data;
1757 int rfield;
1758 opflags_t rflags;
1759 uint8_t *p;
1760 int32_t s;
1761 struct operand *opy = &ins->oprs[op2];
1763 if (c <= 0177) {
1764 /* pick rfield from operand b (opx) */
1765 rflags = regflag(opx);
1766 rfield = nasm_regvals[opx->basereg];
1767 } else {
1768 /* rfield is constant */
1769 rflags = 0;
1770 rfield = c & 7;
1773 if (process_ea(opy, &ea_data, bits,
1774 rfield, rflags, ins) != eat)
1775 errfunc(ERR_NONFATAL, "invalid effective address");
1777 p = bytes;
1778 *p++ = ea_data.modrm;
1779 if (ea_data.sib_present)
1780 *p++ = ea_data.sib;
1782 s = p - bytes;
1783 out(offset, segment, bytes, OUT_RAWDATA, s, NO_SEG, NO_SEG);
1786 * Make sure the address gets the right offset in case
1787 * the line breaks in the .lst file (BR 1197827)
1789 offset += s;
1790 s = 0;
1792 switch (ea_data.bytes) {
1793 case 0:
1794 break;
1795 case 1:
1796 case 2:
1797 case 4:
1798 case 8:
1799 /* use compressed displacement, if available */
1800 data = ea_data.disp8 ? ea_data.disp8 : opy->offset;
1801 s += ea_data.bytes;
1802 if (ea_data.rip) {
1803 if (opy->segment == segment) {
1804 data -= insn_end;
1805 if (overflow_signed(data, ea_data.bytes))
1806 warn_overflow(ERR_PASS2, ea_data.bytes);
1807 out(offset, segment, &data, OUT_ADDRESS,
1808 ea_data.bytes, NO_SEG, NO_SEG);
1809 } else {
1810 /* overflow check in output/linker? */
1811 out(offset, segment, &data, OUT_REL4ADR,
1812 insn_end - offset, opy->segment, opy->wrt);
1814 } else {
1815 if (overflow_general(data, ins->addr_size >> 3) ||
1816 signed_bits(data, ins->addr_size) !=
1817 signed_bits(data, ea_data.bytes * 8))
1818 warn_overflow(ERR_PASS2, ea_data.bytes);
1820 out(offset, segment, &data, OUT_ADDRESS,
1821 ea_data.bytes, opy->segment, opy->wrt);
1823 break;
1824 default:
1825 /* Impossible! */
1826 errfunc(ERR_PANIC,
1827 "Invalid amount of bytes (%d) for offset?!",
1828 ea_data.bytes);
1829 break;
1831 offset += s;
1833 break;
1835 default:
1836 errfunc(ERR_PANIC, "internal instruction table corrupt"
1837 ": instruction code \\%o (0x%02X) given", c, c);
1838 break;
1843 static opflags_t regflag(const operand * o)
1845 if (!is_register(o->basereg))
1846 errfunc(ERR_PANIC, "invalid operand passed to regflag()");
1847 return nasm_reg_flags[o->basereg];
1850 static int32_t regval(const operand * o)
1852 if (!is_register(o->basereg))
1853 errfunc(ERR_PANIC, "invalid operand passed to regval()");
1854 return nasm_regvals[o->basereg];
1857 static int op_rexflags(const operand * o, int mask)
1859 opflags_t flags;
1860 int val;
1862 if (!is_register(o->basereg))
1863 errfunc(ERR_PANIC, "invalid operand passed to op_rexflags()");
1865 flags = nasm_reg_flags[o->basereg];
1866 val = nasm_regvals[o->basereg];
1868 return rexflags(val, flags, mask);
1871 static int rexflags(int val, opflags_t flags, int mask)
1873 int rex = 0;
1875 if (val >= 8)
1876 rex |= REX_B|REX_X|REX_R;
1877 if (flags & BITS64)
1878 rex |= REX_W;
1879 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
1880 rex |= REX_H;
1881 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
1882 rex |= REX_P;
1884 return rex & mask;
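/*
 * Example: for r9 used as an r/m operand, val = 9 >= 8 sets REX_B/REX_X/REX_R
 * and BITS64 sets REX_W; masked with REX_B|REX_P|REX_W|REX_H this leaves
 * REX_B|REX_W, which emit_rex() turns into the 0x49 prefix seen in
 * "add r9, rax" (49 01 C1).
 */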
1887 static int evexflags(int val, decoflags_t deco,
1888 int mask, uint8_t byte)
1890 int evex = 0;
1892 switch(byte) {
1893 case 0:
1894 if (val >= 16)
1895 evex |= (EVEX_P0RP | EVEX_P0X);
1896 break;
1897 case 2:
1898 if (val >= 16)
1899 evex |= EVEX_P2VP;
1900 if (deco & Z)
1901 evex |= EVEX_P2Z;
1902 if (deco & OPMASK_MASK)
1903 evex |= deco & EVEX_P2AAA;
1904 break;
1906 return evex & mask;
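/*
 * Illustration: a register numbered 16 or above (zmm16..zmm31) needs the
 * extra EVEX extension bits -- byte 0 supplies EVEX_P0RP and EVEX_P0X
 * (the caller masks off whichever applies), byte 2 supplies EVEX_P2VP for
 * the vvvv register; an opmask/zeroing decoration such as {k3}{z} on the
 * destination contributes EVEX_P2Z and aaa = 011b in byte 2.
 */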
1909 static int op_evexflags(const operand * o, int mask, uint8_t byte)
1911 int val;
1913 if (!is_register(o->basereg))
1914 errfunc(ERR_PANIC, "invalid operand passed to op_evexflags()");
1916 val = nasm_regvals[o->basereg];
1918 return evexflags(val, o->decoflags, mask, byte);
1921 static enum match_result find_match(const struct itemplate **tempp,
1922 insn *instruction,
1923 int32_t segment, int64_t offset, int bits)
1925 const struct itemplate *temp;
1926 enum match_result m, merr;
1927 opflags_t xsizeflags[MAX_OPERANDS];
1928 bool opsizemissing = false;
1929 int8_t broadcast = instruction->evex_brerop;
1930 int i;
1932 /* broadcasting uses a different data element size */
1933 for (i = 0; i < instruction->operands; i++)
1934 if (i == broadcast)
1935 xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
1936 else
1937 xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;
1939 merr = MERR_INVALOP;
1941 for (temp = nasm_instructions[instruction->opcode];
1942 temp->opcode != I_none; temp++) {
1943 m = matches(temp, instruction, bits);
1944 if (m == MOK_JUMP) {
1945 if (jmp_match(segment, offset, bits, instruction, temp))
1946 m = MOK_GOOD;
1947 else
1948 m = MERR_INVALOP;
1949 } else if (m == MERR_OPSIZEMISSING &&
1950 (temp->flags & IF_SMASK) != IF_SX) {
1952 * Missing operand size and a candidate for fuzzy matching...
1954 for (i = 0; i < temp->operands; i++)
1955 if (i == broadcast)
1956 xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
1957 else
1958 xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
1959 opsizemissing = true;
1961 if (m > merr)
1962 merr = m;
1963 if (merr == MOK_GOOD)
1964 goto done;
1967 /* No match, but see if we can get a fuzzy operand size match... */
1968 if (!opsizemissing)
1969 goto done;
1971 for (i = 0; i < instruction->operands; i++) {
1973 * We ignore extrinsic operand sizes on registers, so we should
1974 * never try to fuzzy-match on them. This also resolves the case
1975 * when we have e.g. "xmmrm128" in two different positions.
1977 if (is_class(REGISTER, instruction->oprs[i].type))
1978 continue;
1980 /* This tests if xsizeflags[i] has more than one bit set */
1981 if ((xsizeflags[i] & (xsizeflags[i]-1)))
1982 goto done; /* No luck */
1984 if (i == broadcast)
1985 instruction->oprs[i].decoflags |= xsizeflags[i];
1986 else
1987 instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
1990 /* Try matching again... */
1991 for (temp = nasm_instructions[instruction->opcode];
1992 temp->opcode != I_none; temp++) {
1993 m = matches(temp, instruction, bits);
1994 if (m == MOK_JUMP) {
1995 if (jmp_match(segment, offset, bits, instruction, temp))
1996 m = MOK_GOOD;
1997 else
1998 m = MERR_INVALOP;
2000 if (m > merr)
2001 merr = m;
2002 if (merr == MOK_GOOD)
2003 goto done;
2006 done:
2007 *tempp = temp;
2008 return merr;
2011 static enum match_result matches(const struct itemplate *itemp,
2012 insn *instruction, int bits)
2014 opflags_t size[MAX_OPERANDS], asize;
2015 bool opsizemissing = false;
2016 int i, oprs;
2019 * Check the opcode
2021 if (itemp->opcode != instruction->opcode)
2022 return MERR_INVALOP;
2025 * Count the operands
2027 if (itemp->operands != instruction->operands)
2028 return MERR_INVALOP;
2031 * Is it legal?
2033 if (!(optimizing > 0) && (itemp->flags & IF_OPT))
2034 return MERR_INVALOP;
2037 * Check that no spurious colons or TOs are present
2039 for (i = 0; i < itemp->operands; i++)
2040 if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
2041 return MERR_INVALOP;
2044 * Process size flags
2046 switch (itemp->flags & IF_SMASK) {
2047 case IF_SB:
2048 asize = BITS8;
2049 break;
2050 case IF_SW:
2051 asize = BITS16;
2052 break;
2053 case IF_SD:
2054 asize = BITS32;
2055 break;
2056 case IF_SQ:
2057 asize = BITS64;
2058 break;
2059 case IF_SO:
2060 asize = BITS128;
2061 break;
2062 case IF_SY:
2063 asize = BITS256;
2064 break;
2065 case IF_SZ:
2066 asize = BITS512;
2067 break;
2068 case IF_SIZE:
2069 switch (bits) {
2070 case 16:
2071 asize = BITS16;
2072 break;
2073 case 32:
2074 asize = BITS32;
2075 break;
2076 case 64:
2077 asize = BITS64;
2078 break;
2079 default:
2080 asize = 0;
2081 break;
2083 break;
2084 default:
2085 asize = 0;
2086 break;
2089 if (itemp->flags & IF_ARMASK) {
2090 /* S- flags only apply to a specific operand */
2091 i = ((itemp->flags & IF_ARMASK) >> IF_ARSHFT) - 1;
2092 memset(size, 0, sizeof size);
2093 size[i] = asize;
2094 } else {
2095 /* S- flags apply to all operands */
2096 for (i = 0; i < MAX_OPERANDS; i++)
2097 size[i] = asize;
2101 * Check that the operand flags all match up;
2102 * it's a bit tricky, so let's be verbose:
2104 * 1) Find the size of each operand. If the instruction
2105 * doesn't specify one, try to guess it either
2106 * from the template (IF_S* flags) or from the code bits.
2109 * 2) If a template operand does not match the instruction, OR
2110 * the template specifies an operand size AND that size differs
2111 * from the one the instruction carries (perhaps obtained
2112 * from the code bits), then:
2113 * a) Check that only the sizes differ and that all other
2114 * characteristics match.
2115 * b) If the operand is a register, just mark it as "size
2116 * missing"; this turns on the fuzzy operand-size
2117 * matching logic handled by the caller (find_match()).
2120 for (i = 0; i < itemp->operands; i++) {
2121 opflags_t type = instruction->oprs[i].type;
2122 decoflags_t deco = instruction->oprs[i].decoflags;
2123 if (!(type & SIZE_MASK))
2124 type |= size[i];
2126 if ((itemp->opd[i] & ~type & ~SIZE_MASK) ||
2127 (itemp->deco[i] & deco) != deco) {
2128 return MERR_INVALOP;
2129 } else if ((itemp->opd[i] & SIZE_MASK) &&
2130 (itemp->opd[i] & SIZE_MASK) != (type & SIZE_MASK)) {
2131 if (type & SIZE_MASK) {
2133 * when broadcasting, the element size depends on
2134 * the instruction type. decorator flag should match.
2136 #define MATCH_BRSZ(bits) (((type & SIZE_MASK) == BITS##bits) && \
2137 ((itemp->deco[i] & BRSIZE_MASK) == BR_BITS##bits))
2138 if (!((deco & BRDCAST_MASK) &&
2139 (MATCH_BRSZ(32) || MATCH_BRSZ(64)))) {
2140 return MERR_INVALOP;
2142 } else if (!is_class(REGISTER, type)) {
2144 * Note: we don't honor extrinsic operand sizes for registers,
2145 * so "missing operand size" for a register should be
2146 * considered a wildcard match rather than an error.
2148 opsizemissing = true;
2150 } else if (nasm_regvals[instruction->oprs[i].basereg] >= 16 &&
2151 (itemp->flags & IF_INSMASK) != IF_AVX512) {
2152 return MERR_ENCMISMATCH;
2156 if (opsizemissing)
2157 return MERR_OPSIZEMISSING;
2160 * Check operand sizes
2162 if (itemp->flags & (IF_SM | IF_SM2)) {
2163 oprs = (itemp->flags & IF_SM2 ? 2 : itemp->operands);
2164 for (i = 0; i < oprs; i++) {
2165 asize = itemp->opd[i] & SIZE_MASK;
2166 if (asize) {
2167 for (i = 0; i < oprs; i++)
2168 size[i] = asize;
2169 break;
2172 } else {
2173 oprs = itemp->operands;
2176 for (i = 0; i < itemp->operands; i++) {
2177 if (!(itemp->opd[i] & SIZE_MASK) &&
2178 (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
2179 return MERR_OPSIZEMISMATCH;
2183 * Check template is okay at the set cpu level
2185 if (((itemp->flags & IF_PLEVEL) > cpu))
2186 return MERR_BADCPU;
2189 * Verify the appropriate long mode flag.
2191 if ((itemp->flags & (bits == 64 ? IF_NOLONG : IF_LONG)))
2192 return MERR_BADMODE;
2195 * If we have a HLE prefix, look for the NOHLE flag
2197 if ((itemp->flags & IF_NOHLE) &&
2198 (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
2199 has_prefix(instruction, PPS_REP, P_XRELEASE)))
2200 return MERR_BADHLE;
2203 * Check if special handling needed for Jumps
2205 if ((itemp->code[0] & ~1) == 0370)
2206 return MOK_JUMP;
2208 return MOK_GOOD;
2212 * Check if offset is a multiple of N with corresponding tuple type
2213 * if Disp8*N is available, compressed displacement is stored in compdisp
2215 static bool is_disp8n(operand *input, insn *ins, int8_t *compdisp)
2217 const uint8_t fv_n[2][2][VLMAX] = {{{16, 32, 64}, {4, 4, 4}},
2218 {{16, 32, 64}, {8, 8, 8}}};
2219 const uint8_t hv_n[2][VLMAX] = {{8, 16, 32}, {4, 4, 4}};
2220 const uint8_t dup_n[VLMAX] = {8, 32, 64};
2222 bool evex_b = input->decoflags & BRDCAST_MASK;
2223 enum ttypes tuple = ins->evex_tuple;
2224 /* vex_wlp composed as [wwllpp] */
2225 enum vectlens vectlen = (ins->vex_wlp & 0x0c) >> 2;
2226 /* wig(=2) is treated as w0(=0) */
2227 bool evex_w = (ins->vex_wlp & 0x10) >> 4;
2228 int32_t off = input->offset;
2229 uint8_t n = 0;
2230 int32_t disp8;
2232 switch(tuple) {
2233 case FV:
2234 n = fv_n[evex_w][evex_b][vectlen];
2235 break;
2236 case HV:
2237 n = hv_n[evex_b][vectlen];
2238 break;
2240 case FVM:
2241 /* 16, 32, 64 for VL 128, 256, 512 respectively */
2242 n = 1 << (vectlen + 4);
2243 break;
2244 case T1S8: /* N = 1 */
2245 case T1S16: /* N = 2 */
2246 n = tuple - T1S8 + 1;
2247 break;
2248 case T1S:
2249 /* N = 4 for 32bit, 8 for 64bit */
2250 n = evex_w ? 8 : 4;
2251 break;
2252 case T1F32:
2253 case T1F64:
2254 /* N = 4 for 32bit, 8 for 64bit */
2255 n = (tuple == T1F32 ? 4 : 8);
2256 break;
2257 case T2:
2258 case T4:
2259 case T8:
2260 if (vectlen + 7 <= (evex_w + 5) + (tuple - T2 + 1))
2261 n = 0;
2262 else
2263 n = 1 << (tuple - T2 + evex_w + 3);
2264 break;
2265 case HVM:
2266 case QVM:
2267 case OVM:
2268 n = 1 << (OVM - tuple + vectlen + 1);
2269 break;
2270 case M128:
2271 n = 16;
2272 break;
2273 case DUP:
2274 n = dup_n[vectlen];
2275 break;
2277 default:
2278 break;
2279 }
2281 if (n && !(off & (n - 1))) {
2282 disp8 = off / n;
2283 /* if it fits in Disp8 */
2284 if (disp8 >= -128 && disp8 <= 127) {
2285 *compdisp = disp8;
2286 return true;
2287 }
2288 }
2290 *compdisp = 0;
2291 return false;
2292 }
2294 /*
2295 * Check if ModR/M.mod should/can be 01.
2296 * - EAF_BYTEOFFS is set
2297 * - offset can fit in a byte when EVEX is not used
2298 * - offset can be compressed when EVEX is used
2299 */
2300 #define IS_MOD_01() (input->eaflags & EAF_BYTEOFFS || \
2301 (o >= -128 && o <= 127 && \
2302 seg == NO_SEG && !forw_ref && \
2303 !(input->eaflags & EAF_WORDOFFS) && \
2304 !(ins->rex & REX_EV)) || \
2305 (ins->rex & REX_EV && \
2306 is_disp8n(input, ins, &output->disp8)))
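/*
 * E.g. with an EVEX-encoded [rax+0x100] and a 64-byte tuple, the raw
 * offset does not fit in a byte, but the compressed value 0x100/64 = 4
 * does, so mod=01 is still chosen and the value lands in output->disp8.
 */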
2308 static enum ea_type process_ea(operand *input, ea *output, int bits,
2309 int rfield, opflags_t rflags, insn *ins)
2310 {
2311 bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
2312 int addrbits = ins->addr_size;
2314 output->type = EA_SCALAR;
2315 output->rip = false;
2317 /* REX flags for the rfield operand */
2318 output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
2319 /* EVEX.R' flag for the REG operand */
2320 ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);
2322 if (is_class(REGISTER, input->type)) {
2323 /*
2324 * It's a direct register.
2325 */
2326 if (!is_register(input->basereg))
2327 goto err;
2329 if (!is_reg_class(REG_EA, input->basereg))
2330 goto err;
2332 /* broadcasting is not available with a direct register operand. */
2333 if (input->decoflags & BRDCAST_MASK) {
2334 nasm_error(ERR_NONFATAL, "Broadcasting not allowed from a register");
2335 goto err;
2336 }
2338 output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
2339 ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
2340 output->sib_present = false; /* no SIB necessary */
2341 output->bytes = 0; /* no offset necessary either */
2342 output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
2343 } else {
2344 /*
2345 * It's a memory reference.
2346 */
2348 /* Embedded rounding or SAE is not available with a mem ref operand. */
2349 if (input->decoflags & (ER | SAE)) {
2350 nasm_error(ERR_NONFATAL,
2351 "Embedded rounding is available only with reg-reg op.");
2352 return -1;
2353 }
2355 if (input->basereg == -1 &&
2356 (input->indexreg == -1 || input->scale == 0)) {
2357 /*
2358 * It's a pure offset.
2359 */
2360 if (bits == 64 && ((input->type & IP_REL) == IP_REL) &&
2361 input->segment == NO_SEG) {
2362 nasm_error(ERR_WARNING | ERR_PASS1, "absolute address cannot be RIP-relative");
2363 input->type &= ~IP_REL;
2364 input->type |= MEMORY;
2365 }
2367 if (input->eaflags & EAF_BYTEOFFS ||
2368 (input->eaflags & EAF_WORDOFFS &&
2369 input->disp_size != (addrbits != 16 ? 32 : 16))) {
2370 nasm_error(ERR_WARNING | ERR_PASS1, "displacement size ignored on absolute address");
2371 }
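/*
 * Roughly: in 64-bit mode a non-RIP-relative absolute address has to
 * go through a SIB byte with no base and no index (base=5, index=4,
 * mod=00); otherwise a plain disp16/disp32 form is used, which becomes
 * RIP-relative when assembling in 64-bit mode.
 */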
2373 if (bits == 64 && (~input->type & IP_REL)) {
2374 output->sib_present = true;
2375 output->sib = GEN_SIB(0, 4, 5);
2376 output->bytes = 4;
2377 output->modrm = GEN_MODRM(0, rfield, 4);
2378 output->rip = false;
2379 } else {
2380 output->sib_present = false;
2381 output->bytes = (addrbits != 16 ? 4 : 2);
2382 output->modrm = GEN_MODRM(0, rfield, (addrbits != 16 ? 5 : 6));
2383 output->rip = bits == 64;
2384 }
2385 } else {
2386 /*
2387 * It's an indirection.
2388 */
2389 int i = input->indexreg, b = input->basereg, s = input->scale;
2390 int32_t seg = input->segment;
2391 int hb = input->hintbase, ht = input->hinttype;
2392 int t, it, bt; /* register numbers */
2393 opflags_t x, ix, bx; /* register flags */
2395 if (s == 0)
2396 i = -1; /* make this easy, at least */
2398 if (is_register(i)) {
2399 it = nasm_regvals[i];
2400 ix = nasm_reg_flags[i];
2401 } else {
2402 it = -1;
2403 ix = 0;
2404 }
2406 if (is_register(b)) {
2407 bt = nasm_regvals[b];
2408 bx = nasm_reg_flags[b];
2409 } else {
2410 bt = -1;
2411 bx = 0;
2412 }
2414 /* if either one is a vector register... */
2415 if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
2416 opflags_t sok = BITS32 | BITS64;
2417 int32_t o = input->offset;
2418 int mod, scale, index, base;
2420 /*
2421 * For a vector SIB, one has to be a vector and the other,
2422 * if present, a GPR. The vector must be the index operand.
2423 */
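/*
 * E.g. "vgatherdps zmm0{k1}, [rax+zmm1*4]": zmm1 must end up as the
 * index register in the SIB byte while rax stays the base, which is
 * what the swap below arranges when the operands arrive reversed.
 */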
2424 if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
2425 if (s == 0)
2426 s = 1;
2427 else if (s != 1)
2428 goto err;
2430 t = bt, bt = it, it = t;
2431 x = bx, bx = ix, ix = x;
2432 }
2434 if (bt != -1) {
2435 if (REG_GPR & ~bx)
2436 goto err;
2437 if (!(REG64 & ~bx) || !(REG32 & ~bx))
2438 sok &= bx;
2439 else
2440 goto err;
2441 }
2443 /*
2444 * While we're here, ensure the user didn't specify
2445 * WORD or QWORD
2446 */
2447 if (input->disp_size == 16 || input->disp_size == 64)
2448 goto err;
2450 if (addrbits == 16 ||
2451 (addrbits == 32 && !(sok & BITS32)) ||
2452 (addrbits == 64 && !(sok & BITS64)))
2453 goto err;
2455 output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
2456 : ((ix & YMMREG & ~REG_EA)
2457 ? EA_YMMVSIB : EA_XMMVSIB));
2459 output->rex |= rexflags(it, ix, REX_X);
2460 output->rex |= rexflags(bt, bx, REX_B);
2461 ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);
2463 index = it & 7; /* it is known to be != -1 */
2465 switch (s) {
2466 case 1:
2467 scale = 0;
2468 break;
2469 case 2:
2470 scale = 1;
2471 break;
2472 case 4:
2473 scale = 2;
2474 break;
2475 case 8:
2476 scale = 3;
2477 break;
2478 default: /* then what the smeg is it? */
2479 goto err; /* panic */
2480 }
2482 if (bt == -1) {
2483 base = 5;
2484 mod = 0;
2485 } else {
2486 base = (bt & 7);
2487 if (base != REG_NUM_EBP && o == 0 &&
2488 seg == NO_SEG && !forw_ref &&
2489 !(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2490 mod = 0;
2491 else if (IS_MOD_01())
2492 mod = 1;
2493 else
2494 mod = 2;
2495 }
2497 output->sib_present = true;
2498 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2499 output->modrm = GEN_MODRM(mod, rfield, 4);
2500 output->sib = GEN_SIB(scale, index, base);
2501 } else if ((ix|bx) & (BITS32|BITS64)) {
2502 /*
2503 * it must be a 32/64-bit memory reference. First we have
2504 * to check that all registers involved are type E/Rxx.
2505 */
2506 opflags_t sok = BITS32 | BITS64;
2507 int32_t o = input->offset;
2509 if (it != -1) {
2510 if (!(REG64 & ~ix) || !(REG32 & ~ix))
2511 sok &= ix;
2512 else
2513 goto err;
2514 }
2516 if (bt != -1) {
2517 if (REG_GPR & ~bx)
2518 goto err; /* Invalid register */
2519 if (~sok & bx & SIZE_MASK)
2520 goto err; /* Invalid size */
2521 sok &= bx;
2522 }
2524 /*
2525 * While we're here, ensure the user didn't specify
2526 * WORD or QWORD
2527 */
2528 if (input->disp_size == 16 || input->disp_size == 64)
2529 goto err;
2531 if (addrbits == 16 ||
2532 (addrbits == 32 && !(sok & BITS32)) ||
2533 (addrbits == 64 && !(sok & BITS64)))
2534 goto err;
2536 /* now reorganize base/index */
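/*
 * E.g. an operand written as [eax*3] (no base register) cannot be
 * encoded directly, so it is rewritten below as [eax+eax*2]; the same
 * trick covers scales of 5 and 9.
 */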
2537 if (s == 1 && bt != it && bt != -1 && it != -1 &&
2538 ((hb == b && ht == EAH_NOTBASE) ||
2539 (hb == i && ht == EAH_MAKEBASE))) {
2540 /* swap if hints say so */
2541 t = bt, bt = it, it = t;
2542 x = bx, bx = ix, ix = x;
2543 }
2544 if (bt == it) /* convert EAX+2*EAX to 3*EAX */
2545 bt = -1, bx = 0, s++;
2546 if (bt == -1 && s == 1 && !(hb == it && ht == EAH_NOTBASE)) {
2547 /* make single reg base, unless hint */
2548 bt = it, bx = ix, it = -1, ix = 0;
2549 }
2550 if (((s == 2 && it != REG_NUM_ESP && !(input->eaflags & EAF_TIMESTWO)) ||
2551 s == 3 || s == 5 || s == 9) && bt == -1)
2552 bt = it, bx = ix, s--; /* convert 3*EAX to EAX+2*EAX */
2553 if (it == -1 && (bt & 7) != REG_NUM_ESP &&
2554 (input->eaflags & EAF_TIMESTWO))
2555 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2556 /* convert [NOSPLIT EAX] to sib format with 0x0 displacement */
2557 if (s == 1 && it == REG_NUM_ESP) {
2558 /* swap ESP into base if scale is 1 */
2559 t = it, it = bt, bt = t;
2560 x = ix, ix = bx, bx = x;
2561 }
2562 if (it == REG_NUM_ESP ||
2563 (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
2564 goto err; /* wrong, for various reasons */
2566 output->rex |= rexflags(it, ix, REX_X);
2567 output->rex |= rexflags(bt, bx, REX_B);
2569 if (it == -1 && (bt & 7) != REG_NUM_ESP) {
2570 /* no SIB needed */
2571 int mod, rm;
2573 if (bt == -1) {
2574 rm = 5;
2575 mod = 0;
2576 } else {
2577 rm = (bt & 7);
2578 if (rm != REG_NUM_EBP && o == 0 &&
2579 seg == NO_SEG && !forw_ref &&
2580 !(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2581 mod = 0;
2582 else if (IS_MOD_01())
2583 mod = 1;
2584 else
2585 mod = 2;
2586 }
2588 output->sib_present = false;
2589 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2590 output->modrm = GEN_MODRM(mod, rfield, rm);
2591 } else {
2592 /* we need a SIB */
2593 int mod, scale, index, base;
2595 if (it == -1)
2596 index = 4, s = 1;
2597 else
2598 index = (it & 7);
2600 switch (s) {
2601 case 1:
2602 scale = 0;
2603 break;
2604 case 2:
2605 scale = 1;
2606 break;
2607 case 4:
2608 scale = 2;
2609 break;
2610 case 8:
2611 scale = 3;
2612 break;
2613 default: /* then what the smeg is it? */
2614 goto err; /* panic */
2615 }
2617 if (bt == -1) {
2618 base = 5;
2619 mod = 0;
2620 } else {
2621 base = (bt & 7);
2622 if (base != REG_NUM_EBP && o == 0 &&
2623 seg == NO_SEG && !forw_ref &&
2624 !(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2625 mod = 0;
2626 else if (IS_MOD_01())
2627 mod = 1;
2628 else
2629 mod = 2;
2630 }
2632 output->sib_present = true;
2633 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2634 output->modrm = GEN_MODRM(mod, rfield, 4);
2635 output->sib = GEN_SIB(scale, index, base);
2636 }
2637 } else { /* it's 16-bit */
2638 int mod, rm;
2639 int16_t o = input->offset;
2641 /* check for 64-bit long mode */
2642 if (addrbits == 64)
2643 goto err;
2645 /* check all registers are BX, BP, SI or DI */
2646 if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
2647 (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
2648 goto err;
2650 /* ensure the user didn't specify DWORD/QWORD */
2651 if (input->disp_size == 32 || input->disp_size == 64)
2652 goto err;
2654 if (s != 1 && i != -1)
2655 goto err; /* no can do, in 16-bit EA */
2656 if (b == -1 && i != -1) {
2657 int tmp = b;
2658 b = i;
2659 i = tmp;
2660 } /* swap */
2661 if ((b == R_SI || b == R_DI) && i != -1) {
2662 int tmp = b;
2663 b = i;
2664 i = tmp;
2665 }
2666 /* have BX/BP as base, SI/DI index */
2667 if (b == i)
2668 goto err; /* shouldn't ever happen, in theory */
2669 if (i != -1 && b != -1 &&
2670 (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
2671 goto err; /* invalid combinations */
2672 if (b == -1) /* pure offset: handled above */
2673 goto err; /* so if it gets to here, panic! */
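/*
 * The 16-bit ModR/M r/m values chosen below follow the usual table:
 * 0=[BX+SI] 1=[BX+DI] 2=[BP+SI] 3=[BP+DI] 4=[SI] 5=[DI] 6=[BP] 7=[BX].
 */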
2675 rm = -1;
2676 if (i != -1)
2677 switch (i * 256 + b) {
2678 case R_SI * 256 + R_BX:
2679 rm = 0;
2680 break;
2681 case R_DI * 256 + R_BX:
2682 rm = 1;
2683 break;
2684 case R_SI * 256 + R_BP:
2685 rm = 2;
2686 break;
2687 case R_DI * 256 + R_BP:
2688 rm = 3;
2689 break;
2690 } else
2691 switch (b) {
2692 case R_SI:
2693 rm = 4;
2694 break;
2695 case R_DI:
2696 rm = 5;
2697 break;
2698 case R_BP:
2699 rm = 6;
2700 break;
2701 case R_BX:
2702 rm = 7;
2703 break;
2704 }
2705 if (rm == -1) /* can't happen, in theory */
2706 goto err; /* so panic if it does */
2708 if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
2709 !(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2710 mod = 0;
2711 else if (IS_MOD_01())
2712 mod = 1;
2713 else
2714 mod = 2;
2716 output->sib_present = false; /* no SIB - it's 16-bit */
2717 output->bytes = mod; /* bytes of offset needed */
2718 output->modrm = GEN_MODRM(mod, rfield, rm);
2719 }
2720 }
2721 }
2723 output->size = 1 + output->sib_present + output->bytes;
2724 return output->type;
2726 err:
2727 return output->type = EA_INVALID;
2728 }
2730 static void add_asp(insn *ins, int addrbits)
2731 {
2732 int j, valid;
2733 int defdisp;
2735 valid = (addrbits == 64) ? 64|32 : 32|16;
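/*
 * "valid" is a bitmask of address sizes that could still encode this
 * instruction: 64-bit mode can use 64- or 32-bit addressing, while the
 * 16/32-bit modes can use 32- or 16-bit addressing.
 */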
2737 switch (ins->prefixes[PPS_ASIZE]) {
2738 case P_A16:
2739 valid &= 16;
2740 break;
2741 case P_A32:
2742 valid &= 32;
2743 break;
2744 case P_A64:
2745 valid &= 64;
2746 break;
2747 case P_ASP:
2748 valid &= (addrbits == 32) ? 16 : 32;
2749 break;
2750 default:
2751 break;
2752 }
2754 for (j = 0; j < ins->operands; j++) {
2755 if (is_class(MEMORY, ins->oprs[j].type)) {
2756 opflags_t i, b;
2758 /* Verify as Register */
2759 if (!is_register(ins->oprs[j].indexreg))
2760 i = 0;
2761 else
2762 i = nasm_reg_flags[ins->oprs[j].indexreg];
2764 /* Verify as Register */
2765 if (!is_register(ins->oprs[j].basereg))
2766 b = 0;
2767 else
2768 b = nasm_reg_flags[ins->oprs[j].basereg];
2770 if (ins->oprs[j].scale == 0)
2771 i = 0;
2773 if (!i && !b) {
2774 int ds = ins->oprs[j].disp_size;
2775 if ((addrbits != 64 && ds > 8) ||
2776 (addrbits == 64 && ds == 16))
2777 valid &= ds;
2778 } else {
2779 if (!(REG16 & ~b))
2780 valid &= 16;
2781 if (!(REG32 & ~b))
2782 valid &= 32;
2783 if (!(REG64 & ~b))
2784 valid &= 64;
2786 if (!(REG16 & ~i))
2787 valid &= 16;
2788 if (!(REG32 & ~i))
2789 valid &= 32;
2790 if (!(REG64 & ~i))
2791 valid &= 64;
2792 }
2793 }
2794 }
2796 if (valid & addrbits) {
2797 ins->addr_size = addrbits;
2798 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
2799 /* Add an address size prefix */
2800 ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;
2801 ins->addr_size = (addrbits == 32) ? 16 : 32;
2802 } else {
2803 /* Impossible... */
2804 errfunc(ERR_NONFATAL, "impossible combination of address sizes");
2805 ins->addr_size = addrbits; /* Error recovery */
2806 }
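/*
 * E.g. in BITS 32, "mov ax, [bx+si]" leaves only 16 in "valid", so the
 * 0x67 address-size prefix (P_A16) is added above and addr_size becomes 16.
 */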
2808 defdisp = ins->addr_size == 16 ? 16 : 32;
2810 for (j = 0; j < ins->operands; j++) {
2811 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
2812 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
2813 /*
2814 * mem_offs sizes must match the address size; if not,
2815 * strip the MEM_OFFS bit and match only EA instructions
2816 */
2817 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);