lib/Target/X86/AsmParser/X86AsmParser.cpp
1 //===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
9 #include "InstPrinter/X86IntelInstPrinter.h"
10 #include "MCTargetDesc/X86BaseInfo.h"
11 #include "MCTargetDesc/X86MCExpr.h"
12 #include "MCTargetDesc/X86TargetStreamer.h"
13 #include "X86AsmInstrumentation.h"
14 #include "X86AsmParserCommon.h"
15 #include "X86Operand.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallString.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/MC/MCContext.h"
22 #include "llvm/MC/MCExpr.h"
23 #include "llvm/MC/MCInst.h"
24 #include "llvm/MC/MCInstrInfo.h"
25 #include "llvm/MC/MCParser/MCAsmLexer.h"
26 #include "llvm/MC/MCParser/MCAsmParser.h"
27 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
28 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
29 #include "llvm/MC/MCRegisterInfo.h"
30 #include "llvm/MC/MCSection.h"
31 #include "llvm/MC/MCStreamer.h"
32 #include "llvm/MC/MCSubtargetInfo.h"
33 #include "llvm/MC/MCSymbol.h"
34 #include "llvm/Support/SourceMgr.h"
35 #include "llvm/Support/TargetRegistry.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include <algorithm>
38 #include <memory>
40 using namespace llvm;
42 static bool checkScale(unsigned Scale, StringRef &ErrMsg) {
43 if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
44 ErrMsg = "scale factor in address must be 1, 2, 4 or 8";
45 return true;
47 return false;
50 namespace {
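// Operator precedence table, indexed by InfixCalculatorTok. A higher value
// binds tighter; InfixCalculator::pushOperator() consults it when converting
// the infix expression to postfix form (shunting-yard style). For example,
// IC_MULTIPLY (5) outranks IC_PLUS (4), so in "2+3*4" the multiply is applied
// first.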
52 static const char OpPrecedence[] = {
53 0, // IC_OR
54 1, // IC_XOR
55 2, // IC_AND
56 3, // IC_LSHIFT
57 3, // IC_RSHIFT
58 4, // IC_PLUS
59 4, // IC_MINUS
60 5, // IC_MULTIPLY
61 5, // IC_DIVIDE
62 5, // IC_MOD
63 6, // IC_NOT
64 7, // IC_NEG
65 8, // IC_RPAREN
66 9, // IC_LPAREN
67 0, // IC_IMM
68 0 // IC_REGISTER
71 class X86AsmParser : public MCTargetAsmParser {
72 ParseInstructionInfo *InstInfo;
73 std::unique_ptr<X86AsmInstrumentation> Instrumentation;
74 bool Code16GCC;
76 private:
77 SMLoc consumeToken() {
78 MCAsmParser &Parser = getParser();
79 SMLoc Result = Parser.getTok().getLoc();
80 Parser.Lex();
81 return Result;
84 X86TargetStreamer &getTargetStreamer() {
85 assert(getParser().getStreamer().getTargetStreamer() &&
86 "do not have a target streamer");
87 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
88 return static_cast<X86TargetStreamer &>(TS);
91 unsigned MatchInstruction(const OperandVector &Operands, MCInst &Inst,
92 uint64_t &ErrorInfo, bool matchingInlineAsm,
93 unsigned VariantID = 0) {
94 // In Code16GCC mode, match as 32-bit.
95 if (Code16GCC)
96 SwitchMode(X86::Mode32Bit);
97 unsigned rv = MatchInstructionImpl(Operands, Inst, ErrorInfo,
98 matchingInlineAsm, VariantID);
99 if (Code16GCC)
100 SwitchMode(X86::Mode16Bit);
101 return rv;
104 enum InfixCalculatorTok {
105 IC_OR = 0,
106 IC_XOR,
107 IC_AND,
108 IC_LSHIFT,
109 IC_RSHIFT,
110 IC_PLUS,
111 IC_MINUS,
112 IC_MULTIPLY,
113 IC_DIVIDE,
114 IC_MOD,
115 IC_NOT,
116 IC_NEG,
117 IC_RPAREN,
118 IC_LPAREN,
119 IC_IMM,
120 IC_REGISTER
123 enum IntelOperatorKind {
124 IOK_INVALID = 0,
125 IOK_LENGTH,
126 IOK_SIZE,
127 IOK_TYPE,
128 IOK_OFFSET
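// Evaluates the arithmetic part of an Intel-syntax expression. Operands and
// operators are first converted to postfix form (using OpPrecedence above),
// then execute() folds the postfix stack into a single value. For example,
// "2+3*4" becomes the postfix sequence 2 3 4 * + and execute() returns 14.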
131 class InfixCalculator {
132 typedef std::pair< InfixCalculatorTok, int64_t > ICToken;
133 SmallVector<InfixCalculatorTok, 4> InfixOperatorStack;
134 SmallVector<ICToken, 4> PostfixStack;
136 bool isUnaryOperator(const InfixCalculatorTok Op) {
137 return Op == IC_NEG || Op == IC_NOT;
140 public:
141 int64_t popOperand() {
142 assert (!PostfixStack.empty() && "Popped an empty stack!");
143 ICToken Op = PostfixStack.pop_back_val();
144 if (!(Op.first == IC_IMM || Op.first == IC_REGISTER))
145 return -1; // The invalid Scale value will be caught later by checkScale
146 return Op.second;
148 void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) {
149 assert ((Op == IC_IMM || Op == IC_REGISTER) &&
150 "Unexpected operand!");
151 PostfixStack.push_back(std::make_pair(Op, Val));
154 void popOperator() { InfixOperatorStack.pop_back(); }
155 void pushOperator(InfixCalculatorTok Op) {
156 // Push the new operator if the stack is empty.
157 if (InfixOperatorStack.empty()) {
158 InfixOperatorStack.push_back(Op);
159 return;
162 // Push the new operator if it has a higher precedence than the operator
163 // on the top of the stack or the operator on the top of the stack is a
164 // left parenthesis.
165 unsigned Idx = InfixOperatorStack.size() - 1;
166 InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
167 if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {
168 InfixOperatorStack.push_back(Op);
169 return;
172 // The operator on the top of the stack has higher precedence than the
173 // new operator.
174 unsigned ParenCount = 0;
175 while (1) {
176 // Nothing to process.
177 if (InfixOperatorStack.empty())
178 break;
180 Idx = InfixOperatorStack.size() - 1;
181 StackOp = InfixOperatorStack[Idx];
182 if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount))
183 break;
185 // If we have an even parenthesis count and we see a left parenthesis,
186 // then stop processing.
187 if (!ParenCount && StackOp == IC_LPAREN)
188 break;
190 if (StackOp == IC_RPAREN) {
191 ++ParenCount;
192 InfixOperatorStack.pop_back();
193 } else if (StackOp == IC_LPAREN) {
194 --ParenCount;
195 InfixOperatorStack.pop_back();
196 } else {
197 InfixOperatorStack.pop_back();
198 PostfixStack.push_back(std::make_pair(StackOp, 0));
201 // Push the new operator.
202 InfixOperatorStack.push_back(Op);
205 int64_t execute() {
206 // Push any remaining operators onto the postfix stack.
207 while (!InfixOperatorStack.empty()) {
208 InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val();
209 if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
210 PostfixStack.push_back(std::make_pair(StackOp, 0));
213 if (PostfixStack.empty())
214 return 0;
216 SmallVector<ICToken, 16> OperandStack;
217 for (unsigned i = 0, e = PostfixStack.size(); i != e; ++i) {
218 ICToken Op = PostfixStack[i];
219 if (Op.first == IC_IMM || Op.first == IC_REGISTER) {
220 OperandStack.push_back(Op);
221 } else if (isUnaryOperator(Op.first)) {
222 assert (OperandStack.size() > 0 && "Too few operands.");
223 ICToken Operand = OperandStack.pop_back_val();
224 assert (Operand.first == IC_IMM &&
225 "Unary operation with a register!");
226 switch (Op.first) {
227 default:
228 report_fatal_error("Unexpected operator!");
229 break;
230 case IC_NEG:
231 OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
232 break;
233 case IC_NOT:
234 OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
235 break;
237 } else {
238 assert (OperandStack.size() > 1 && "Too few operands.");
239 int64_t Val;
240 ICToken Op2 = OperandStack.pop_back_val();
241 ICToken Op1 = OperandStack.pop_back_val();
242 switch (Op.first) {
243 default:
244 report_fatal_error("Unexpected operator!");
245 break;
246 case IC_PLUS:
247 Val = Op1.second + Op2.second;
248 OperandStack.push_back(std::make_pair(IC_IMM, Val));
249 break;
250 case IC_MINUS:
251 Val = Op1.second - Op2.second;
252 OperandStack.push_back(std::make_pair(IC_IMM, Val));
253 break;
254 case IC_MULTIPLY:
255 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
256 "Multiply operation with an immediate and a register!");
257 Val = Op1.second * Op2.second;
258 OperandStack.push_back(std::make_pair(IC_IMM, Val));
259 break;
260 case IC_DIVIDE:
261 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
262 "Divide operation with an immediate and a register!");
263 assert (Op2.second != 0 && "Division by zero!");
264 Val = Op1.second / Op2.second;
265 OperandStack.push_back(std::make_pair(IC_IMM, Val));
266 break;
267 case IC_MOD:
268 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
269 "Modulo operation with an immediate and a register!");
270 Val = Op1.second % Op2.second;
271 OperandStack.push_back(std::make_pair(IC_IMM, Val));
272 break;
273 case IC_OR:
274 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
275 "Or operation with an immediate and a register!");
276 Val = Op1.second | Op2.second;
277 OperandStack.push_back(std::make_pair(IC_IMM, Val));
278 break;
279 case IC_XOR:
280 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
281 "Xor operation with an immediate and a register!");
282 Val = Op1.second ^ Op2.second;
283 OperandStack.push_back(std::make_pair(IC_IMM, Val));
284 break;
285 case IC_AND:
286 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
287 "And operation with an immediate and a register!");
288 Val = Op1.second & Op2.second;
289 OperandStack.push_back(std::make_pair(IC_IMM, Val));
290 break;
291 case IC_LSHIFT:
292 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
293 "Left shift operation with an immediate and a register!");
294 Val = Op1.second << Op2.second;
295 OperandStack.push_back(std::make_pair(IC_IMM, Val));
296 break;
297 case IC_RSHIFT:
298 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
299 "Right shift operation with an immediate and a register!");
300 Val = Op1.second >> Op2.second;
301 OperandStack.push_back(std::make_pair(IC_IMM, Val));
302 break;
306 assert (OperandStack.size() == 1 && "Expected a single result.");
307 return OperandStack.pop_back_val().second;
311 enum IntelExprState {
312 IES_INIT,
313 IES_OR,
314 IES_XOR,
315 IES_AND,
316 IES_LSHIFT,
317 IES_RSHIFT,
318 IES_PLUS,
319 IES_MINUS,
320 IES_NOT,
321 IES_MULTIPLY,
322 IES_DIVIDE,
323 IES_MOD,
324 IES_LBRAC,
325 IES_RBRAC,
326 IES_LPAREN,
327 IES_RPAREN,
328 IES_REGISTER,
329 IES_INTEGER,
330 IES_IDENTIFIER,
331 IES_ERROR
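// State machine that parses an Intel-syntax memory or immediate expression
// one token at a time (onPlus, onRegister, onInteger, ...). It records the
// base register, index register, scale and symbol, and defers immediate
// arithmetic to the embedded InfixCalculator. For example, "[eax + ebx*4 + 16]"
// ends up with BaseReg = EAX, IndexReg = EBX, Scale = 4 and a displacement
// of 16.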
334 class IntelExprStateMachine {
335 IntelExprState State, PrevState;
336 unsigned BaseReg, IndexReg, TmpReg, Scale;
337 int64_t Imm;
338 const MCExpr *Sym;
339 StringRef SymName;
340 InfixCalculator IC;
341 InlineAsmIdentifierInfo Info;
342 short BracCount;
343 bool MemExpr;
345 public:
346 IntelExprStateMachine()
347 : State(IES_INIT), PrevState(IES_ERROR), BaseReg(0), IndexReg(0),
348 TmpReg(0), Scale(0), Imm(0), Sym(nullptr), BracCount(0),
349 MemExpr(false) {}
351 void addImm(int64_t imm) { Imm += imm; }
352 short getBracCount() { return BracCount; }
353 bool isMemExpr() { return MemExpr; }
354 unsigned getBaseReg() { return BaseReg; }
355 unsigned getIndexReg() { return IndexReg; }
356 unsigned getScale() { return Scale; }
357 const MCExpr *getSym() { return Sym; }
358 StringRef getSymName() { return SymName; }
359 int64_t getImm() { return Imm + IC.execute(); }
360 bool isValidEndState() {
361 return State == IES_RBRAC || State == IES_INTEGER;
363 bool hadError() { return State == IES_ERROR; }
364 InlineAsmIdentifierInfo &getIdentifierInfo() { return Info; }
366 void onOr() {
367 IntelExprState CurrState = State;
368 switch (State) {
369 default:
370 State = IES_ERROR;
371 break;
372 case IES_INTEGER:
373 case IES_RPAREN:
374 case IES_REGISTER:
375 State = IES_OR;
376 IC.pushOperator(IC_OR);
377 break;
379 PrevState = CurrState;
381 void onXor() {
382 IntelExprState CurrState = State;
383 switch (State) {
384 default:
385 State = IES_ERROR;
386 break;
387 case IES_INTEGER:
388 case IES_RPAREN:
389 case IES_REGISTER:
390 State = IES_XOR;
391 IC.pushOperator(IC_XOR);
392 break;
394 PrevState = CurrState;
396 void onAnd() {
397 IntelExprState CurrState = State;
398 switch (State) {
399 default:
400 State = IES_ERROR;
401 break;
402 case IES_INTEGER:
403 case IES_RPAREN:
404 case IES_REGISTER:
405 State = IES_AND;
406 IC.pushOperator(IC_AND);
407 break;
409 PrevState = CurrState;
411 void onLShift() {
412 IntelExprState CurrState = State;
413 switch (State) {
414 default:
415 State = IES_ERROR;
416 break;
417 case IES_INTEGER:
418 case IES_RPAREN:
419 case IES_REGISTER:
420 State = IES_LSHIFT;
421 IC.pushOperator(IC_LSHIFT);
422 break;
424 PrevState = CurrState;
426 void onRShift() {
427 IntelExprState CurrState = State;
428 switch (State) {
429 default:
430 State = IES_ERROR;
431 break;
432 case IES_INTEGER:
433 case IES_RPAREN:
434 case IES_REGISTER:
435 State = IES_RSHIFT;
436 IC.pushOperator(IC_RSHIFT);
437 break;
439 PrevState = CurrState;
441 bool onPlus(StringRef &ErrMsg) {
442 IntelExprState CurrState = State;
443 switch (State) {
444 default:
445 State = IES_ERROR;
446 break;
447 case IES_INTEGER:
448 case IES_RPAREN:
449 case IES_REGISTER:
450 State = IES_PLUS;
451 IC.pushOperator(IC_PLUS);
452 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
453 // If we already have a BaseReg, then assume this is the IndexReg with
454 // no explicit scale.
455 if (!BaseReg) {
456 BaseReg = TmpReg;
457 } else {
458 if (IndexReg) {
459 ErrMsg = "BaseReg/IndexReg already set!";
460 return true;
462 IndexReg = TmpReg;
463 Scale = 0;
466 break;
468 PrevState = CurrState;
469 return false;
471 bool onMinus(StringRef &ErrMsg) {
472 IntelExprState CurrState = State;
473 switch (State) {
474 default:
475 State = IES_ERROR;
476 break;
477 case IES_OR:
478 case IES_XOR:
479 case IES_AND:
480 case IES_LSHIFT:
481 case IES_RSHIFT:
482 case IES_PLUS:
483 case IES_NOT:
484 case IES_MULTIPLY:
485 case IES_DIVIDE:
486 case IES_MOD:
487 case IES_LPAREN:
488 case IES_RPAREN:
489 case IES_LBRAC:
490 case IES_RBRAC:
491 case IES_INTEGER:
492 case IES_REGISTER:
493 case IES_INIT:
494 State = IES_MINUS;
495 // Push a minus operator if it is not a unary negation.
496 if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
497 CurrState == IES_INTEGER || CurrState == IES_RBRAC)
498 IC.pushOperator(IC_MINUS);
499 else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
500 // A negation operator applied to Scale is illegal.
501 ErrMsg = "Scale can't be negative";
502 return true;
503 } else
504 IC.pushOperator(IC_NEG);
505 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
506 // If we already have a BaseReg, then assume this is the IndexReg with
507 // no explicit scale.
508 if (!BaseReg) {
509 BaseReg = TmpReg;
510 } else {
511 if (IndexReg) {
512 ErrMsg = "BaseReg/IndexReg already set!";
513 return true;
515 IndexReg = TmpReg;
516 Scale = 0;
519 break;
521 PrevState = CurrState;
522 return false;
524 void onNot() {
525 IntelExprState CurrState = State;
526 switch (State) {
527 default:
528 State = IES_ERROR;
529 break;
530 case IES_OR:
531 case IES_XOR:
532 case IES_AND:
533 case IES_LSHIFT:
534 case IES_RSHIFT:
535 case IES_PLUS:
536 case IES_MINUS:
537 case IES_NOT:
538 case IES_MULTIPLY:
539 case IES_DIVIDE:
540 case IES_MOD:
541 case IES_LPAREN:
542 case IES_LBRAC:
543 case IES_INIT:
544 State = IES_NOT;
545 IC.pushOperator(IC_NOT);
546 break;
548 PrevState = CurrState;
551 bool onRegister(unsigned Reg, StringRef &ErrMsg) {
552 IntelExprState CurrState = State;
553 switch (State) {
554 default:
555 State = IES_ERROR;
556 break;
557 case IES_PLUS:
558 case IES_LPAREN:
559 case IES_LBRAC:
560 State = IES_REGISTER;
561 TmpReg = Reg;
562 IC.pushOperand(IC_REGISTER);
563 break;
564 case IES_MULTIPLY:
565 // Index Register - Scale * Register
566 if (PrevState == IES_INTEGER) {
567 if (IndexReg) {
568 ErrMsg = "BaseReg/IndexReg already set!";
569 return true;
571 State = IES_REGISTER;
572 IndexReg = Reg;
573 // Get the scale and replace the 'Scale * Register' with '0'.
574 Scale = IC.popOperand();
575 if (checkScale(Scale, ErrMsg))
576 return true;
577 IC.pushOperand(IC_IMM);
578 IC.popOperator();
579 } else {
580 State = IES_ERROR;
582 break;
584 PrevState = CurrState;
585 return false;
587 bool onIdentifierExpr(const MCExpr *SymRef, StringRef SymRefName,
588 const InlineAsmIdentifierInfo &IDInfo,
589 bool ParsingInlineAsm, StringRef &ErrMsg) {
590 // InlineAsm: Treat an enum value as an integer
591 if (ParsingInlineAsm)
592 if (IDInfo.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
593 return onInteger(IDInfo.Enum.EnumVal, ErrMsg);
594 // Treat a symbolic constant like an integer
595 if (auto *CE = dyn_cast<MCConstantExpr>(SymRef))
596 return onInteger(CE->getValue(), ErrMsg);
597 PrevState = State;
598 bool HasSymbol = Sym != nullptr;
599 switch (State) {
600 default:
601 State = IES_ERROR;
602 break;
603 case IES_PLUS:
604 case IES_MINUS:
605 case IES_NOT:
606 case IES_INIT:
607 case IES_LBRAC:
608 MemExpr = true;
609 State = IES_INTEGER;
610 Sym = SymRef;
611 SymName = SymRefName;
612 IC.pushOperand(IC_IMM);
613 if (ParsingInlineAsm)
614 Info = IDInfo;
615 break;
617 if (HasSymbol)
618 ErrMsg = "cannot use more than one symbol in memory operand";
619 return HasSymbol;
621 bool onInteger(int64_t TmpInt, StringRef &ErrMsg) {
622 IntelExprState CurrState = State;
623 switch (State) {
624 default:
625 State = IES_ERROR;
626 break;
627 case IES_PLUS:
628 case IES_MINUS:
629 case IES_NOT:
630 case IES_OR:
631 case IES_XOR:
632 case IES_AND:
633 case IES_LSHIFT:
634 case IES_RSHIFT:
635 case IES_DIVIDE:
636 case IES_MOD:
637 case IES_MULTIPLY:
638 case IES_LPAREN:
639 case IES_INIT:
640 case IES_LBRAC:
641 State = IES_INTEGER;
642 if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
643 // Index Register - Register * Scale
644 if (IndexReg) {
645 ErrMsg = "BaseReg/IndexReg already set!";
646 return true;
648 IndexReg = TmpReg;
649 Scale = TmpInt;
650 if (checkScale(Scale, ErrMsg))
651 return true;
652 // Get the scale and replace the 'Register * Scale' with '0'.
653 IC.popOperator();
654 } else {
655 IC.pushOperand(IC_IMM, TmpInt);
657 break;
659 PrevState = CurrState;
660 return false;
662 void onStar() {
663 PrevState = State;
664 switch (State) {
665 default:
666 State = IES_ERROR;
667 break;
668 case IES_INTEGER:
669 case IES_REGISTER:
670 case IES_RPAREN:
671 State = IES_MULTIPLY;
672 IC.pushOperator(IC_MULTIPLY);
673 break;
676 void onDivide() {
677 PrevState = State;
678 switch (State) {
679 default:
680 State = IES_ERROR;
681 break;
682 case IES_INTEGER:
683 case IES_RPAREN:
684 State = IES_DIVIDE;
685 IC.pushOperator(IC_DIVIDE);
686 break;
689 void onMod() {
690 PrevState = State;
691 switch (State) {
692 default:
693 State = IES_ERROR;
694 break;
695 case IES_INTEGER:
696 case IES_RPAREN:
697 State = IES_MOD;
698 IC.pushOperator(IC_MOD);
699 break;
702 bool onLBrac() {
703 if (BracCount)
704 return true;
705 PrevState = State;
706 switch (State) {
707 default:
708 State = IES_ERROR;
709 break;
710 case IES_RBRAC:
711 case IES_INTEGER:
712 case IES_RPAREN:
713 State = IES_PLUS;
714 IC.pushOperator(IC_PLUS);
715 break;
716 case IES_INIT:
717 assert(!BracCount && "BracCount should be zero at the start of parsing");
718 State = IES_LBRAC;
719 break;
721 MemExpr = true;
722 BracCount++;
723 return false;
725 bool onRBrac() {
726 IntelExprState CurrState = State;
727 switch (State) {
728 default:
729 State = IES_ERROR;
730 break;
731 case IES_INTEGER:
732 case IES_REGISTER:
733 case IES_RPAREN:
734 if (BracCount-- != 1)
735 return true;
736 State = IES_RBRAC;
737 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
738 // If we already have a BaseReg, then assume this is the IndexReg with
739 // no explicit scale.
740 if (!BaseReg) {
741 BaseReg = TmpReg;
742 } else {
743 assert (!IndexReg && "BaseReg/IndexReg already set!");
744 IndexReg = TmpReg;
745 Scale = 0;
748 break;
750 PrevState = CurrState;
751 return false;
753 void onLParen() {
754 IntelExprState CurrState = State;
755 switch (State) {
756 default:
757 State = IES_ERROR;
758 break;
759 case IES_PLUS:
760 case IES_MINUS:
761 case IES_NOT:
762 case IES_OR:
763 case IES_XOR:
764 case IES_AND:
765 case IES_LSHIFT:
766 case IES_RSHIFT:
767 case IES_MULTIPLY:
768 case IES_DIVIDE:
769 case IES_MOD:
770 case IES_LPAREN:
771 case IES_INIT:
772 case IES_LBRAC:
773 State = IES_LPAREN;
774 IC.pushOperator(IC_LPAREN);
775 break;
777 PrevState = CurrState;
779 void onRParen() {
780 PrevState = State;
781 switch (State) {
782 default:
783 State = IES_ERROR;
784 break;
785 case IES_INTEGER:
786 case IES_REGISTER:
787 case IES_RPAREN:
788 State = IES_RPAREN;
789 IC.pushOperator(IC_RPAREN);
790 break;
795 bool Error(SMLoc L, const Twine &Msg, SMRange Range = None,
796 bool MatchingInlineAsm = false) {
797 MCAsmParser &Parser = getParser();
798 if (MatchingInlineAsm) {
799 if (!getLexer().isAtStartOfStatement())
800 Parser.eatToEndOfStatement();
801 return false;
803 return Parser.Error(L, Msg, Range);
806 std::nullptr_t ErrorOperand(SMLoc Loc, StringRef Msg, SMRange R = SMRange()) {
807 Error(Loc, Msg, R);
808 return nullptr;
811 std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
812 std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
813 bool IsSIReg(unsigned Reg);
814 unsigned GetSIDIForRegClass(unsigned RegClassID, unsigned Reg, bool IsSIReg);
815 void
816 AddDefaultSrcDestOperands(OperandVector &Operands,
817 std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
818 std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
819 bool VerifyAndAdjustOperands(OperandVector &OrigOperands,
820 OperandVector &FinalOperands);
821 std::unique_ptr<X86Operand> ParseOperand();
822 std::unique_ptr<X86Operand> ParseATTOperand();
823 std::unique_ptr<X86Operand> ParseIntelOperand();
824 std::unique_ptr<X86Operand> ParseIntelOffsetOfOperator();
825 bool ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End);
826 unsigned IdentifyIntelInlineAsmOperator(StringRef Name);
827 unsigned ParseIntelInlineAsmOperator(unsigned OpKind);
828 std::unique_ptr<X86Operand> ParseRoundingModeOp(SMLoc Start);
829 bool ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM);
830 void RewriteIntelExpression(IntelExprStateMachine &SM, SMLoc Start,
831 SMLoc End);
832 bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
833 bool ParseIntelInlineAsmIdentifier(const MCExpr *&Val, StringRef &Identifier,
834 InlineAsmIdentifierInfo &Info,
835 bool IsUnevaluatedOperand, SMLoc &End);
837 std::unique_ptr<X86Operand> ParseMemOperand(unsigned SegReg,
838 const MCExpr *&Disp,
839 const SMLoc &StartLoc,
840 SMLoc &EndLoc);
842 bool ParseIntelMemoryOperandSize(unsigned &Size);
843 std::unique_ptr<X86Operand>
844 CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg,
845 unsigned IndexReg, unsigned Scale, SMLoc Start,
846 SMLoc End, unsigned Size, StringRef Identifier,
847 const InlineAsmIdentifierInfo &Info);
849 bool parseDirectiveEven(SMLoc L);
850 bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
852 /// CodeView FPO data directives.
853 bool parseDirectiveFPOProc(SMLoc L);
854 bool parseDirectiveFPOSetFrame(SMLoc L);
855 bool parseDirectiveFPOPushReg(SMLoc L);
856 bool parseDirectiveFPOStackAlloc(SMLoc L);
857 bool parseDirectiveFPOStackAlign(SMLoc L);
858 bool parseDirectiveFPOEndPrologue(SMLoc L);
859 bool parseDirectiveFPOEndProc(SMLoc L);
860 bool parseDirectiveFPOData(SMLoc L);
862 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
863 bool processInstruction(MCInst &Inst, const OperandVector &Ops);
865 /// Wrapper around MCStreamer::EmitInstruction(). Possibly adds
866 /// instrumentation around Inst.
867 void EmitInstruction(MCInst &Inst, OperandVector &Operands, MCStreamer &Out);
869 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
870 OperandVector &Operands, MCStreamer &Out,
871 uint64_t &ErrorInfo,
872 bool MatchingInlineAsm) override;
874 void MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op, OperandVector &Operands,
875 MCStreamer &Out, bool MatchingInlineAsm);
877 bool ErrorMissingFeature(SMLoc IDLoc, uint64_t ErrorInfo,
878 bool MatchingInlineAsm);
880 bool MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
881 OperandVector &Operands, MCStreamer &Out,
882 uint64_t &ErrorInfo,
883 bool MatchingInlineAsm);
885 bool MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
886 OperandVector &Operands, MCStreamer &Out,
887 uint64_t &ErrorInfo,
888 bool MatchingInlineAsm);
890 bool OmitRegisterFromClobberLists(unsigned RegNo) override;
892 /// Parses AVX512-specific operand primitives: masked registers ({%k<NUM>}, {z})
893 /// and memory broadcasting ({1to<NUM>}), updating the Operands vector if required.
894 /// Returns false if no parsing errors occurred, true otherwise.
895 bool HandleAVX512Operand(OperandVector &Operands,
896 const MCParsedAsmOperand &Op);
898 bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
900 bool is64BitMode() const {
901 // FIXME: Can tablegen auto-generate this?
902 return getSTI().getFeatureBits()[X86::Mode64Bit];
904 bool is32BitMode() const {
905 // FIXME: Can tablegen auto-generate this?
906 return getSTI().getFeatureBits()[X86::Mode32Bit];
908 bool is16BitMode() const {
909 // FIXME: Can tablegen auto-generate this?
910 return getSTI().getFeatureBits()[X86::Mode16Bit];
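// Switch the subtarget to the given mode by toggling the feature bits:
// OldMode.flip(mode) contains exactly the bits that need to change (the old
// mode bit and the new one, or nothing if they are the same), so a single
// ToggleFeature() call performs the switch before the available features are
// recomputed.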
912 void SwitchMode(unsigned mode) {
913 MCSubtargetInfo &STI = copySTI();
914 FeatureBitset AllModes({X86::Mode64Bit, X86::Mode32Bit, X86::Mode16Bit});
915 FeatureBitset OldMode = STI.getFeatureBits() & AllModes;
916 uint64_t FB = ComputeAvailableFeatures(
917 STI.ToggleFeature(OldMode.flip(mode)));
918 setAvailableFeatures(FB);
920 assert(FeatureBitset({mode}) == (STI.getFeatureBits() & AllModes));
923 unsigned getPointerWidth() {
924 if (is16BitMode()) return 16;
925 if (is32BitMode()) return 32;
926 if (is64BitMode()) return 64;
927 llvm_unreachable("invalid mode");
930 bool isParsingIntelSyntax() {
931 return getParser().getAssemblerDialect();
934 /// @name Auto-generated Matcher Functions
935 /// {
937 #define GET_ASSEMBLER_HEADER
938 #include "X86GenAsmMatcher.inc"
940 /// }
942 public:
944 X86AsmParser(const MCSubtargetInfo &sti, MCAsmParser &Parser,
945 const MCInstrInfo &mii, const MCTargetOptions &Options)
946 : MCTargetAsmParser(Options, sti, mii), InstInfo(nullptr),
947 Code16GCC(false) {
949 Parser.addAliasForDirective(".word", ".2byte");
951 // Initialize the set of available features.
952 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
953 Instrumentation.reset(
954 CreateX86AsmInstrumentation(Options, Parser.getContext(), STI));
957 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
959 void SetFrameRegister(unsigned RegNo) override;
961 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
963 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
964 SMLoc NameLoc, OperandVector &Operands) override;
966 bool ParseDirective(AsmToken DirectiveID) override;
968 } // end anonymous namespace
970 /// @name Auto-generated Match Functions
971 /// {
973 static unsigned MatchRegisterName(StringRef Name);
975 /// }
977 static bool CheckBaseRegAndIndexRegAndScale(unsigned BaseReg, unsigned IndexReg,
978 unsigned Scale, bool Is64BitMode,
979 StringRef &ErrMsg) {
980 // If we have both a base register and an index register make sure they are
981 // both 64-bit or 32-bit registers.
982 // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
984 if (BaseReg != 0 &&
985 !(BaseReg == X86::RIP || BaseReg == X86::EIP ||
986 X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) ||
987 X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) ||
988 X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg))) {
989 ErrMsg = "invalid base+index expression";
990 return true;
993 if (IndexReg != 0 &&
994 !(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
995 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
996 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
997 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
998 X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
999 X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
1000 X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg))) {
1001 ErrMsg = "invalid base+index expression";
1002 return true;
1005 if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg != 0) ||
1006 IndexReg == X86::EIP || IndexReg == X86::RIP ||
1007 IndexReg == X86::ESP || IndexReg == X86::RSP) {
1008 ErrMsg = "invalid base+index expression";
1009 return true;
1012 // Check for use of invalid 16-bit registers. Only BX/BP/SI/DI are allowed,
1013 // and then only in non-64-bit modes.
1014 if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
1015 (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
1016 BaseReg != X86::SI && BaseReg != X86::DI))) {
1017 ErrMsg = "invalid 16-bit base register";
1018 return true;
1021 if (BaseReg == 0 &&
1022 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
1023 ErrMsg = "16-bit memory operand may not include only index register";
1024 return true;
1027 if (BaseReg != 0 && IndexReg != 0) {
1028 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
1029 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1030 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
1031 IndexReg == X86::EIZ)) {
1032 ErrMsg = "base register is 64-bit, but index register is not";
1033 return true;
1035 if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
1036 (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
1037 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
1038 IndexReg == X86::RIZ)) {
1039 ErrMsg = "base register is 32-bit, but index register is not";
1040 return true;
1042 if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
1043 if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
1044 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
1045 ErrMsg = "base register is 16-bit, but index register is not";
1046 return true;
1048 if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
1049 (IndexReg != X86::SI && IndexReg != X86::DI)) {
1050 ErrMsg = "invalid 16-bit base/index register combination";
1051 return true;
1056 // RIP/EIP-relative addressing is only supported in 64-bit mode.
1057 if (!Is64BitMode && BaseReg != 0 &&
1058 (BaseReg == X86::RIP || BaseReg == X86::EIP)) {
1059 ErrMsg = "IP-relative addressing requires 64-bit mode";
1060 return true;
1063 return checkScale(Scale, ErrMsg);
1066 bool X86AsmParser::ParseRegister(unsigned &RegNo,
1067 SMLoc &StartLoc, SMLoc &EndLoc) {
1068 MCAsmParser &Parser = getParser();
1069 RegNo = 0;
1070 const AsmToken &PercentTok = Parser.getTok();
1071 StartLoc = PercentTok.getLoc();
1073 // If we encounter a %, ignore it. This code handles registers with and
1074 // without the prefix; unprefixed registers can occur in CFI directives.
1075 if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent))
1076 Parser.Lex(); // Eat percent token.
1078 const AsmToken &Tok = Parser.getTok();
1079 EndLoc = Tok.getEndLoc();
1081 if (Tok.isNot(AsmToken::Identifier)) {
1082 if (isParsingIntelSyntax()) return true;
1083 return Error(StartLoc, "invalid register name",
1084 SMRange(StartLoc, EndLoc));
1087 RegNo = MatchRegisterName(Tok.getString());
1089 // If the match failed, try the register name as lowercase.
1090 if (RegNo == 0)
1091 RegNo = MatchRegisterName(Tok.getString().lower());
1093 // The "flags" register cannot be referenced directly.
1094 // Treat it as an identifier instead.
1095 if (isParsingInlineAsm() && isParsingIntelSyntax() && RegNo == X86::EFLAGS)
1096 RegNo = 0;
1098 if (!is64BitMode()) {
1099 // FIXME: This should be done using Requires<Not64BitMode> and
1100 // Requires<In64BitMode> so "eiz" usage in 64-bit instructions can be also
1101 // checked.
1102 // FIXME: Check AH, CH, DH, BH cannot be used in an instruction requiring a
1103 // REX prefix.
1104 if (RegNo == X86::RIZ || RegNo == X86::RIP ||
1105 X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
1106 X86II::isX86_64NonExtLowByteReg(RegNo) ||
1107 X86II::isX86_64ExtendedReg(RegNo)) {
1108 StringRef RegName = Tok.getString();
1109 Parser.Lex(); // Eat register name.
1110 return Error(StartLoc,
1111 "register %" + RegName + " is only available in 64-bit mode",
1112 SMRange(StartLoc, EndLoc));
1116 // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
1117 if (RegNo == X86::ST0) {
1118 Parser.Lex(); // Eat 'st'
1120 // Check to see if we have '(4)' after %st.
1121 if (getLexer().isNot(AsmToken::LParen))
1122 return false;
1123 // Lex the paren.
1124 getParser().Lex();
1126 const AsmToken &IntTok = Parser.getTok();
1127 if (IntTok.isNot(AsmToken::Integer))
1128 return Error(IntTok.getLoc(), "expected stack index");
1129 switch (IntTok.getIntVal()) {
1130 case 0: RegNo = X86::ST0; break;
1131 case 1: RegNo = X86::ST1; break;
1132 case 2: RegNo = X86::ST2; break;
1133 case 3: RegNo = X86::ST3; break;
1134 case 4: RegNo = X86::ST4; break;
1135 case 5: RegNo = X86::ST5; break;
1136 case 6: RegNo = X86::ST6; break;
1137 case 7: RegNo = X86::ST7; break;
1138 default: return Error(IntTok.getLoc(), "invalid stack index");
1141 if (getParser().Lex().isNot(AsmToken::RParen))
1142 return Error(Parser.getTok().getLoc(), "expected ')'");
1144 EndLoc = Parser.getTok().getEndLoc();
1145 Parser.Lex(); // Eat ')'
1146 return false;
1149 EndLoc = Parser.getTok().getEndLoc();
1151 // If this is "db[0-15]", match it as an alias
1152 // for dr[0-15].
1153 if (RegNo == 0 && Tok.getString().startswith("db")) {
1154 if (Tok.getString().size() == 3) {
1155 switch (Tok.getString()[2]) {
1156 case '0': RegNo = X86::DR0; break;
1157 case '1': RegNo = X86::DR1; break;
1158 case '2': RegNo = X86::DR2; break;
1159 case '3': RegNo = X86::DR3; break;
1160 case '4': RegNo = X86::DR4; break;
1161 case '5': RegNo = X86::DR5; break;
1162 case '6': RegNo = X86::DR6; break;
1163 case '7': RegNo = X86::DR7; break;
1164 case '8': RegNo = X86::DR8; break;
1165 case '9': RegNo = X86::DR9; break;
1167 } else if (Tok.getString().size() == 4 && Tok.getString()[2] == '1') {
1168 switch (Tok.getString()[3]) {
1169 case '0': RegNo = X86::DR10; break;
1170 case '1': RegNo = X86::DR11; break;
1171 case '2': RegNo = X86::DR12; break;
1172 case '3': RegNo = X86::DR13; break;
1173 case '4': RegNo = X86::DR14; break;
1174 case '5': RegNo = X86::DR15; break;
1178 if (RegNo != 0) {
1179 EndLoc = Parser.getTok().getEndLoc();
1180 Parser.Lex(); // Eat it.
1181 return false;
1185 if (RegNo == 0) {
1186 if (isParsingIntelSyntax()) return true;
1187 return Error(StartLoc, "invalid register name",
1188 SMRange(StartLoc, EndLoc));
1191 Parser.Lex(); // Eat identifier token.
1192 return false;
1195 void X86AsmParser::SetFrameRegister(unsigned RegNo) {
1196 Instrumentation->SetInitialFrameRegister(RegNo);
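// Build the implicit (R|E)SI-based memory operand used by string
// instructions; the register width follows the current mode (RSI in 64-bit
// mode, ESI in 32-bit or Code16GCC mode, SI otherwise).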
1199 std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
1200 bool Parse32 = is32BitMode() || Code16GCC;
1201 unsigned Basereg = is64BitMode() ? X86::RSI : (Parse32 ? X86::ESI : X86::SI);
1202 const MCExpr *Disp = MCConstantExpr::create(0, getContext());
1203 return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
1204 /*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
1205 Loc, Loc, 0);
1208 std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
1209 bool Parse32 = is32BitMode() || Code16GCC;
1210 unsigned Basereg = is64BitMode() ? X86::RDI : (Parse32 ? X86::EDI : X86::DI);
1211 const MCExpr *Disp = MCConstantExpr::create(0, getContext());
1212 return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
1213 /*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
1214 Loc, Loc, 0);
1217 bool X86AsmParser::IsSIReg(unsigned Reg) {
1218 switch (Reg) {
1219 default: llvm_unreachable("Only (R|E)SI and (R|E)DI are expected!");
1220 case X86::RSI:
1221 case X86::ESI:
1222 case X86::SI:
1223 return true;
1224 case X86::RDI:
1225 case X86::EDI:
1226 case X86::DI:
1227 return false;
1231 unsigned X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, unsigned Reg,
1232 bool IsSIReg) {
1233 switch (RegClassID) {
1234 default: llvm_unreachable("Unexpected register class");
1235 case X86::GR64RegClassID:
1236 return IsSIReg ? X86::RSI : X86::RDI;
1237 case X86::GR32RegClassID:
1238 return IsSIReg ? X86::ESI : X86::EDI;
1239 case X86::GR16RegClassID:
1240 return IsSIReg ? X86::SI : X86::DI;
1244 void X86AsmParser::AddDefaultSrcDestOperands(
1245 OperandVector& Operands, std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
1246 std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
1247 if (isParsingIntelSyntax()) {
1248 Operands.push_back(std::move(Dst));
1249 Operands.push_back(std::move(Src));
1251 else {
1252 Operands.push_back(std::move(Src));
1253 Operands.push_back(std::move(Dst));
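// Compare the operands the user wrote (OrigOperands) against the default
// SI/DI-based operands the instruction actually uses (FinalOperands). An
// explicit memory operand only determines the operand size; if its base
// register differs from the implied one a warning is emitted, and the final
// operands replace the originals.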
1257 bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
1258 OperandVector &FinalOperands) {
1260 if (OrigOperands.size() > 1) {
1261 // Check that sizes match; OrigOperands also contains the instruction name.
1262 assert(OrigOperands.size() == FinalOperands.size() + 1 &&
1263 "Operand size mismatch");
1265 SmallVector<std::pair<SMLoc, std::string>, 2> Warnings;
1266 // Verify types match
1267 int RegClassID = -1;
1268 for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
1269 X86Operand &OrigOp = static_cast<X86Operand &>(*OrigOperands[i + 1]);
1270 X86Operand &FinalOp = static_cast<X86Operand &>(*FinalOperands[i]);
1272 if (FinalOp.isReg() &&
1273 (!OrigOp.isReg() || FinalOp.getReg() != OrigOp.getReg()))
1274 // Return false and let a normal complaint about bogus operands happen
1275 return false;
1277 if (FinalOp.isMem()) {
1279 if (!OrigOp.isMem())
1280 // Return false and let a normal complaint about bogus operands happen
1281 return false;
1283 unsigned OrigReg = OrigOp.Mem.BaseReg;
1284 unsigned FinalReg = FinalOp.Mem.BaseReg;
1286 // If we've already encountered a register class, make sure all register
1287 // bases are of the same register class
1288 if (RegClassID != -1 &&
1289 !X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
1290 return Error(OrigOp.getStartLoc(),
1291 "mismatching source and destination index registers");
1294 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
1295 RegClassID = X86::GR64RegClassID;
1296 else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
1297 RegClassID = X86::GR32RegClassID;
1298 else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
1299 RegClassID = X86::GR16RegClassID;
1300 else
1301 // Unexpected register class type
1302 // Return false and let a normal complaint about bogus operands happen
1303 return false;
1305 bool IsSI = IsSIReg(FinalReg);
1306 FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);
1308 if (FinalReg != OrigReg) {
1309 std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
1310 Warnings.push_back(std::make_pair(
1311 OrigOp.getStartLoc(),
1312 "memory operand is only for determining the size, " + RegName +
1313 " will be used for the location"));
1316 FinalOp.Mem.Size = OrigOp.Mem.Size;
1317 FinalOp.Mem.SegReg = OrigOp.Mem.SegReg;
1318 FinalOp.Mem.BaseReg = FinalReg;
1322 // Produce warnings only if all the operands passed the adjustment, to prevent
1323 // legal cases like "movsd (%rax), %xmm0" from mistakenly producing warnings.
1324 for (auto &WarningMsg : Warnings) {
1325 Warning(WarningMsg.first, WarningMsg.second);
1328 // Remove old operands
1329 for (unsigned int i = 0; i < FinalOperands.size(); ++i)
1330 OrigOperands.pop_back();
1332 // OrigOperands.append(FinalOperands.begin(), FinalOperands.end());
1333 for (unsigned int i = 0; i < FinalOperands.size(); ++i)
1334 OrigOperands.push_back(std::move(FinalOperands[i]));
1336 return false;
1339 std::unique_ptr<X86Operand> X86AsmParser::ParseOperand() {
1340 if (isParsingIntelSyntax())
1341 return ParseIntelOperand();
1342 return ParseATTOperand();
1345 std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
1346 unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg,
1347 unsigned Scale, SMLoc Start, SMLoc End, unsigned Size, StringRef Identifier,
1348 const InlineAsmIdentifierInfo &Info) {
1349 // If we found a decl other than a VarDecl, then assume it is a FuncDecl or
1350 // some other label reference.
1351 if (Info.isKind(InlineAsmIdentifierInfo::IK_Label)) {
1352 // Insert an explicit size if the user didn't have one.
1353 if (!Size) {
1354 Size = getPointerWidth();
1355 InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
1356 /*Len=*/0, Size);
1358 // Create an absolute memory reference in order to match against
1359 // instructions taking a PC relative operand.
1360 return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size,
1361 Identifier, Info.Label.Decl);
1363 // We either have a direct symbol reference, or an offset from a symbol. The
1364 // parser always puts the symbol on the LHS, so look there for size
1365 // calculation purposes.
1366 unsigned FrontendSize = 0;
1367 void *Decl = nullptr;
1368 bool IsGlobalLV = false;
1369 if (Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
1370 // Size is in terms of bits in this context.
1371 FrontendSize = Info.Var.Type * 8;
1372 Decl = Info.Var.Decl;
1373 IsGlobalLV = Info.Var.IsGlobalLV;
1375 // It is common for MS InlineAsm to use a global variable together with one or
1376 // two registers in a memory expression, even though it is not accessible via rip/eip.
1377 if (IsGlobalLV && (BaseReg || IndexReg)) {
1378 return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End);
1379 // Otherwise, we set the base register to a non-zero value
1380 // if we don't know the actual value at this time. This is necessary to
1381 // get the matching correct in some cases.
1382 } else {
1383 BaseReg = BaseReg ? BaseReg : 1;
1384 return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
1385 IndexReg, Scale, Start, End, Size, Identifier,
1386 Decl, FrontendSize);
1390 // Some binary bitwise operators have named synonyms.
1391 // Query a candidate string for being such a named operator
1392 // and, if so, invoke the appropriate handler.
1393 bool X86AsmParser::ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM) {
1394 // A named operator should be all lowercase or all uppercase, not a mix.
1395 if (Name.compare(Name.lower()) && Name.compare(Name.upper()))
1396 return false;
1397 if (Name.equals_lower("not"))
1398 SM.onNot();
1399 else if (Name.equals_lower("or"))
1400 SM.onOr();
1401 else if (Name.equals_lower("shl"))
1402 SM.onLShift();
1403 else if (Name.equals_lower("shr"))
1404 SM.onRShift();
1405 else if (Name.equals_lower("xor"))
1406 SM.onXor();
1407 else if (Name.equals_lower("and"))
1408 SM.onAnd();
1409 else if (Name.equals_lower("mod"))
1410 SM.onMod();
1411 else
1412 return false;
1413 return true;
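// Feed tokens to the IntelExprStateMachine until the expression is complete:
// registers, integers, identifiers and operators each map to the
// corresponding SM.on*() callback, and parsing stops at end-of-statement or
// at any token that leaves the machine in a valid end state.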
1416 bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
1417 MCAsmParser &Parser = getParser();
1418 const AsmToken &Tok = Parser.getTok();
1419 StringRef ErrMsg;
1421 AsmToken::TokenKind PrevTK = AsmToken::Error;
1422 bool Done = false;
1423 while (!Done) {
1424 bool UpdateLocLex = true;
1425 AsmToken::TokenKind TK = getLexer().getKind();
1427 switch (TK) {
1428 default:
1429 if ((Done = SM.isValidEndState()))
1430 break;
1431 return Error(Tok.getLoc(), "unknown token in expression");
1432 case AsmToken::EndOfStatement:
1433 Done = true;
1434 break;
1435 case AsmToken::Real:
1436 // DotOperator: [ebx].0
1437 UpdateLocLex = false;
1438 if (ParseIntelDotOperator(SM, End))
1439 return true;
1440 break;
1441 case AsmToken::At:
1442 case AsmToken::String:
1443 case AsmToken::Identifier: {
1444 SMLoc IdentLoc = Tok.getLoc();
1445 StringRef Identifier = Tok.getString();
1446 UpdateLocLex = false;
1447 // Register
1448 unsigned Reg;
1449 if (Tok.is(AsmToken::Identifier) && !ParseRegister(Reg, IdentLoc, End)) {
1450 if (SM.onRegister(Reg, ErrMsg))
1451 return Error(Tok.getLoc(), ErrMsg);
1452 break;
1454 // Operator synonymous ("not", "or" etc.)
1455 if ((UpdateLocLex = ParseIntelNamedOperator(Identifier, SM)))
1456 break;
1457 // Symbol reference, when parsing assembly content
1458 InlineAsmIdentifierInfo Info;
1459 const MCExpr *Val;
1460 if (!isParsingInlineAsm()) {
1461 if (getParser().parsePrimaryExpr(Val, End)) {
1462 return Error(Tok.getLoc(), "Unexpected identifier!");
1463 } else if (SM.onIdentifierExpr(Val, Identifier, Info, false, ErrMsg)) {
1464 return Error(IdentLoc, ErrMsg);
1465 } else
1466 break;
1468 // MS InlineAsm operators (TYPE/LENGTH/SIZE)
1469 if (unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
1470 if (OpKind == IOK_OFFSET)
1471 return Error(IdentLoc, "Dealing with the OFFSET operator as part of "
1472 "a compound immediate expression is not yet supported");
1473 if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
1474 if (SM.onInteger(Val, ErrMsg))
1475 return Error(IdentLoc, ErrMsg);
1476 } else
1477 return true;
1478 break;
1480 // MS Dot Operator expression
1481 if (Identifier.count('.') && PrevTK == AsmToken::RBrac) {
1482 if (ParseIntelDotOperator(SM, End))
1483 return true;
1484 break;
1486 // MS InlineAsm identifier
1487 // Call parseIdentifier() to combine @ with the identifier behind it.
1488 if (TK == AsmToken::At && Parser.parseIdentifier(Identifier))
1489 return Error(IdentLoc, "expected identifier");
1490 if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info, false, End))
1491 return true;
1492 else if (SM.onIdentifierExpr(Val, Identifier, Info, true, ErrMsg))
1493 return Error(IdentLoc, ErrMsg);
1494 break;
1496 case AsmToken::Integer: {
1497 // Look for 'b' or 'f' following an Integer as a directional label
1498 SMLoc Loc = getTok().getLoc();
1499 int64_t IntVal = getTok().getIntVal();
1500 End = consumeToken();
1501 UpdateLocLex = false;
1502 if (getLexer().getKind() == AsmToken::Identifier) {
1503 StringRef IDVal = getTok().getString();
1504 if (IDVal == "f" || IDVal == "b") {
1505 MCSymbol *Sym =
1506 getContext().getDirectionalLocalSymbol(IntVal, IDVal == "b");
1507 MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
1508 const MCExpr *Val =
1509 MCSymbolRefExpr::create(Sym, Variant, getContext());
1510 if (IDVal == "b" && Sym->isUndefined())
1511 return Error(Loc, "invalid reference to undefined symbol");
1512 StringRef Identifier = Sym->getName();
1513 InlineAsmIdentifierInfo Info;
1514 if (SM.onIdentifierExpr(Val, Identifier, Info,
1515 isParsingInlineAsm(), ErrMsg))
1516 return Error(Loc, ErrMsg);
1517 End = consumeToken();
1518 } else {
1519 if (SM.onInteger(IntVal, ErrMsg))
1520 return Error(Loc, ErrMsg);
1522 } else {
1523 if (SM.onInteger(IntVal, ErrMsg))
1524 return Error(Loc, ErrMsg);
1526 break;
1528 case AsmToken::Plus:
1529 if (SM.onPlus(ErrMsg))
1530 return Error(getTok().getLoc(), ErrMsg);
1531 break;
1532 case AsmToken::Minus:
1533 if (SM.onMinus(ErrMsg))
1534 return Error(getTok().getLoc(), ErrMsg);
1535 break;
1536 case AsmToken::Tilde: SM.onNot(); break;
1537 case AsmToken::Star: SM.onStar(); break;
1538 case AsmToken::Slash: SM.onDivide(); break;
1539 case AsmToken::Percent: SM.onMod(); break;
1540 case AsmToken::Pipe: SM.onOr(); break;
1541 case AsmToken::Caret: SM.onXor(); break;
1542 case AsmToken::Amp: SM.onAnd(); break;
1543 case AsmToken::LessLess:
1544 SM.onLShift(); break;
1545 case AsmToken::GreaterGreater:
1546 SM.onRShift(); break;
1547 case AsmToken::LBrac:
1548 if (SM.onLBrac())
1549 return Error(Tok.getLoc(), "unexpected bracket encountered");
1550 break;
1551 case AsmToken::RBrac:
1552 if (SM.onRBrac())
1553 return Error(Tok.getLoc(), "unexpected bracket encountered");
1554 break;
1555 case AsmToken::LParen: SM.onLParen(); break;
1556 case AsmToken::RParen: SM.onRParen(); break;
1558 if (SM.hadError())
1559 return Error(Tok.getLoc(), "unknown token in expression");
1561 if (!Done && UpdateLocLex)
1562 End = consumeToken();
1564 PrevTK = TK;
1566 return false;
1569 void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
1570 SMLoc Start, SMLoc End) {
1571 SMLoc Loc = Start;
1572 unsigned ExprLen = End.getPointer() - Start.getPointer();
1573 // Skip everything before a symbol displacement (if we have one)
1574 if (SM.getSym()) {
1575 StringRef SymName = SM.getSymName();
1576 if (unsigned Len = SymName.data() - Start.getPointer())
1577 InstInfo->AsmRewrites->emplace_back(AOK_Skip, Start, Len);
1578 Loc = SMLoc::getFromPointer(SymName.data() + SymName.size());
1579 ExprLen = End.getPointer() - (SymName.data() + SymName.size());
1580 // If we have only a symbol then there's no need for a complex rewrite;
1581 // simply skip everything after it.
1582 if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
1583 if (ExprLen)
1584 InstInfo->AsmRewrites->emplace_back(AOK_Skip, Loc, ExprLen);
1585 return;
1588 // Build an Intel Expression rewrite
1589 StringRef BaseRegStr;
1590 StringRef IndexRegStr;
1591 if (SM.getBaseReg())
1592 BaseRegStr = X86IntelInstPrinter::getRegisterName(SM.getBaseReg());
1593 if (SM.getIndexReg())
1594 IndexRegStr = X86IntelInstPrinter::getRegisterName(SM.getIndexReg());
1595 // Emit it
1596 IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), SM.getImm(), SM.isMemExpr());
1597 InstInfo->AsmRewrites->emplace_back(Loc, ExprLen, Expr);
1600 // Inline assembly may use variable names with namespace alias qualifiers.
1601 bool X86AsmParser::ParseIntelInlineAsmIdentifier(const MCExpr *&Val,
1602 StringRef &Identifier,
1603 InlineAsmIdentifierInfo &Info,
1604 bool IsUnevaluatedOperand,
1605 SMLoc &End) {
1606 MCAsmParser &Parser = getParser();
1607 assert(isParsingInlineAsm() && "Expected to be parsing inline assembly.");
1608 Val = nullptr;
1610 StringRef LineBuf(Identifier.data());
1611 SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand);
1613 const AsmToken &Tok = Parser.getTok();
1614 SMLoc Loc = Tok.getLoc();
1616 // Advance the token stream until the end of the current token is
1617 // after the end of what the frontend claimed.
1618 const char *EndPtr = Tok.getLoc().getPointer() + LineBuf.size();
1619 do {
1620 End = Tok.getEndLoc();
1621 getLexer().Lex();
1622 } while (End.getPointer() < EndPtr);
1623 Identifier = LineBuf;
1625 // The frontend should end parsing on an assembler token boundary, unless it
1626 // failed parsing.
1627 assert((End.getPointer() == EndPtr ||
1628 Info.isKind(InlineAsmIdentifierInfo::IK_Invalid)) &&
1629 "frontend claimed part of a token?");
1631 // If the identifier lookup was unsuccessful, assume that we are dealing with
1632 // a label.
1633 if (Info.isKind(InlineAsmIdentifierInfo::IK_Invalid)) {
1634 StringRef InternalName =
1635 SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
1636 Loc, false);
1637 assert(InternalName.size() && "We should have an internal name here.");
1638 // Push a rewrite for replacing the identifier name with the internal name.
1639 InstInfo->AsmRewrites->emplace_back(AOK_Label, Loc, Identifier.size(),
1640 InternalName);
1641 } else if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
1642 return false;
1643 // Create the symbol reference.
1644 MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
1645 MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
1646 Val = MCSymbolRefExpr::create(Sym, Variant, getParser().getContext());
1647 return false;
1650 // ParseRoundingModeOp - Parse an AVX-512 rounding mode operand.
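// e.g. "{rn-sae}", "{rd-sae}", "{ru-sae}" and "{rz-sae}" select a static
// rounding mode, while "{sae}" only suppresses exceptions.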
1651 std::unique_ptr<X86Operand>
1652 X86AsmParser::ParseRoundingModeOp(SMLoc Start) {
1653 MCAsmParser &Parser = getParser();
1654 const AsmToken &Tok = Parser.getTok();
1655 // Eat "{" and mark the current place.
1656 const SMLoc consumedToken = consumeToken();
1657 if (Tok.getIdentifier().startswith("r")){
1658 int rndMode = StringSwitch<int>(Tok.getIdentifier())
1659 .Case("rn", X86::STATIC_ROUNDING::TO_NEAREST_INT)
1660 .Case("rd", X86::STATIC_ROUNDING::TO_NEG_INF)
1661 .Case("ru", X86::STATIC_ROUNDING::TO_POS_INF)
1662 .Case("rz", X86::STATIC_ROUNDING::TO_ZERO)
1663 .Default(-1);
1664 if (-1 == rndMode)
1665 return ErrorOperand(Tok.getLoc(), "Invalid rounding mode.");
1666 Parser.Lex(); // Eat "r*" of r*-sae
1667 if (!getLexer().is(AsmToken::Minus))
1668 return ErrorOperand(Tok.getLoc(), "Expected - at this point");
1669 Parser.Lex(); // Eat "-"
1670 Parser.Lex(); // Eat the sae
1671 if (!getLexer().is(AsmToken::RCurly))
1672 return ErrorOperand(Tok.getLoc(), "Expected } at this point");
1673 SMLoc End = Tok.getEndLoc();
1674 Parser.Lex(); // Eat "}"
1675 const MCExpr *RndModeOp =
1676 MCConstantExpr::create(rndMode, Parser.getContext());
1677 return X86Operand::CreateImm(RndModeOp, Start, End);
1679 if(Tok.getIdentifier().equals("sae")){
1680 Parser.Lex(); // Eat the sae
1681 if (!getLexer().is(AsmToken::RCurly))
1682 return ErrorOperand(Tok.getLoc(), "Expected } at this point");
1683 Parser.Lex(); // Eat "}"
1684 return X86Operand::CreateToken("{sae}", consumedToken);
1686 return ErrorOperand(Tok.getLoc(), "unknown token in expression");
1689 /// Parse the '.' operator.
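// e.g. ".4" (lexed as a real) adds a literal displacement of 4, while in MS
// inline asm a member reference such as ".Foo.Bar" is resolved to a constant
// offset through the frontend's LookupInlineAsmField().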
1690 bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End) {
1691 const AsmToken &Tok = getTok();
1692 unsigned Offset;
1694 // Drop the optional '.'.
1695 StringRef DotDispStr = Tok.getString();
1696 if (DotDispStr.startswith("."))
1697 DotDispStr = DotDispStr.drop_front(1);
1699 // .Imm gets lexed as a real.
1700 if (Tok.is(AsmToken::Real)) {
1701 APInt DotDisp;
1702 DotDispStr.getAsInteger(10, DotDisp);
1703 Offset = DotDisp.getZExtValue();
1704 } else if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) {
1705 std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
1706 if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second,
1707 Offset))
1708 return Error(Tok.getLoc(), "Unable to lookup field reference!");
1709 } else
1710 return Error(Tok.getLoc(), "Unexpected token type!");
1712 // Eat the DotExpression and update End
1713 End = SMLoc::getFromPointer(DotDispStr.data());
1714 const char *DotExprEndLoc = DotDispStr.data() + DotDispStr.size();
1715 while (Tok.getLoc().getPointer() < DotExprEndLoc)
1716 Lex();
1717 SM.addImm(Offset);
1718 return false;
1721 /// Parse the 'offset' operator. This operator is used to specify the
1722 /// location rather than the content of a variable.
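/// e.g. "mov eax, offset Foo" loads the address of Foo rather than its value;
/// the operand is rewritten to a plain register operand with address-of
/// semantics for the inline-asm matcher.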
1723 std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOffsetOfOperator() {
1724 MCAsmParser &Parser = getParser();
1725 const AsmToken &Tok = Parser.getTok();
1726 SMLoc OffsetOfLoc = Tok.getLoc();
1727 Parser.Lex(); // Eat offset.
1729 const MCExpr *Val;
1730 InlineAsmIdentifierInfo Info;
1731 SMLoc Start = Tok.getLoc(), End;
1732 StringRef Identifier = Tok.getString();
1733 if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
1734 /*Unevaluated=*/false, End))
1735 return nullptr;
1737 void *Decl = nullptr;
1738 // FIXME: MS evaluates "offset <Constant>" to the underlying integral
1739 if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
1740 return ErrorOperand(Start, "offset operator cannot yet handle constants");
1741 else if (Info.isKind(InlineAsmIdentifierInfo::IK_Var))
1742 Decl = Info.Var.Decl;
1743 // Don't emit the offset operator.
1744 InstInfo->AsmRewrites->emplace_back(AOK_Skip, OffsetOfLoc, 7);
1746 // The offset operator will have an 'r' constraint, thus we need to create a
1747 // register operand to ensure proper matching. Just pick a GPR based on
1748 // the size of a pointer.
1749 bool Parse32 = is32BitMode() || Code16GCC;
1750 unsigned RegNo = is64BitMode() ? X86::RBX : (Parse32 ? X86::EBX : X86::BX);
1752 return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true,
1753 OffsetOfLoc, Identifier, Decl);
1756 // Query a candidate string for being an Intel assembly operator
1757 // Report back its kind, or IOK_INVALID if it is not recognized as a known one.
1758 unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(StringRef Name) {
1759 return StringSwitch<unsigned>(Name)
1760 .Cases("TYPE","type",IOK_TYPE)
1761 .Cases("SIZE","size",IOK_SIZE)
1762 .Cases("LENGTH","length",IOK_LENGTH)
1763 .Cases("OFFSET","offset",IOK_OFFSET)
1764 .Default(IOK_INVALID);
1767 /// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators. The LENGTH operator
1768 /// returns the number of elements in an array. It returns the value 1 for
1769 /// non-array variables. The SIZE operator returns the size of a C or C++
1770 /// variable. A variable's size is the product of its LENGTH and TYPE. The
1771 /// TYPE operator returns the size of a C or C++ type or variable. If the
1772 /// variable is an array, TYPE returns the size of a single element.
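/// For example, assuming a 4-byte int, "int arr[8]" gives LENGTH arr == 8,
/// TYPE arr == 4 and SIZE arr == 32.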
1773 unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
1774 MCAsmParser &Parser = getParser();
1775 const AsmToken &Tok = Parser.getTok();
1776 Parser.Lex(); // Eat operator.
1778 const MCExpr *Val = nullptr;
1779 InlineAsmIdentifierInfo Info;
1780 SMLoc Start = Tok.getLoc(), End;
1781 StringRef Identifier = Tok.getString();
1782 if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
1783 /*Unevaluated=*/true, End))
1784 return 0;
1786 if (!Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
1787 Error(Start, "unable to lookup expression");
1788 return 0;
1791 unsigned CVal = 0;
1792 switch(OpKind) {
1793 default: llvm_unreachable("Unexpected operand kind!");
1794 case IOK_LENGTH: CVal = Info.Var.Length; break;
1795 case IOK_SIZE: CVal = Info.Var.Size; break;
1796 case IOK_TYPE: CVal = Info.Var.Type; break;
1799 return CVal;
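// Parse an optional Intel operand-size qualifier such as "dword ptr"; Size is
// set to the operand width in bits, or to 0 if no qualifier is present.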
1802 bool X86AsmParser::ParseIntelMemoryOperandSize(unsigned &Size) {
1803 Size = StringSwitch<unsigned>(getTok().getString())
1804 .Cases("BYTE", "byte", 8)
1805 .Cases("WORD", "word", 16)
1806 .Cases("DWORD", "dword", 32)
1807 .Cases("FLOAT", "float", 32)
1808 .Cases("LONG", "long", 32)
1809 .Cases("FWORD", "fword", 48)
1810 .Cases("DOUBLE", "double", 64)
1811 .Cases("QWORD", "qword", 64)
1812 .Cases("MMWORD","mmword", 64)
1813 .Cases("XWORD", "xword", 80)
1814 .Cases("TBYTE", "tbyte", 80)
1815 .Cases("XMMWORD", "xmmword", 128)
1816 .Cases("YMMWORD", "ymmword", 256)
1817 .Cases("ZMMWORD", "zmmword", 512)
1818 .Default(0);
1819 if (Size) {
1820 const AsmToken &Tok = Lex(); // Eat operand size (e.g., byte, word).
1821 if (!(Tok.getString().equals("PTR") || Tok.getString().equals("ptr")))
1822 return Error(Tok.getLoc(), "Expected 'PTR' or 'ptr' token!");
1823 Lex(); // Eat ptr.
1825 return false;
1828 std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperand() {
1829 MCAsmParser &Parser = getParser();
1830 const AsmToken &Tok = Parser.getTok();
1831 SMLoc Start, End;
1833 // FIXME: Offset operator
1834 // Should be handled as part of an immediate expression, like other operators.
1835 // Currently, it is only supported as a stand-alone operand
1836 if (isParsingInlineAsm())
1837 if (IdentifyIntelInlineAsmOperator(Tok.getString()) == IOK_OFFSET)
1838 return ParseIntelOffsetOfOperator();
1840 // Parse optional Size directive.
1841 unsigned Size;
1842 if (ParseIntelMemoryOperandSize(Size))
1843 return nullptr;
1844 bool PtrInOperand = bool(Size);
1846 Start = Tok.getLoc();
1848 // Rounding mode operand.
1849 if (getLexer().is(AsmToken::LCurly))
1850 return ParseRoundingModeOp(Start);
1852 // Register operand.
1853 unsigned RegNo = 0;
1854 if (Tok.is(AsmToken::Identifier) && !ParseRegister(RegNo, Start, End)) {
1855 if (RegNo == X86::RIP)
1856 return ErrorOperand(Start, "rip can only be used as a base register");
1857 // A Register followed by ':' is considered a segment override
1858 if (Tok.isNot(AsmToken::Colon))
1859 return !PtrInOperand ? X86Operand::CreateReg(RegNo, Start, End) :
1860 ErrorOperand(Start, "expected memory operand after 'ptr', "
1861 "found register operand instead");
1862 // An alleged segment override. check if we have a valid segment register
1863 if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo))
1864 return ErrorOperand(Start, "invalid segment register");
1865 // Eat ':' and update Start location
1866 Start = Lex().getLoc();
1869 // Immediates and Memory
1870 IntelExprStateMachine SM;
1871 if (ParseIntelExpression(SM, End))
1872 return nullptr;
1874 if (isParsingInlineAsm())
1875 RewriteIntelExpression(SM, Start, Tok.getLoc());
1877 int64_t Imm = SM.getImm();
1878 const MCExpr *Disp = SM.getSym();
1879 const MCExpr *ImmDisp = MCConstantExpr::create(Imm, getContext());
1880 if (Disp && Imm)
1881 Disp = MCBinaryExpr::createAdd(Disp, ImmDisp, getContext());
1882 if (!Disp)
1883 Disp = ImmDisp;
1885 // RegNo != 0 specifies a valid segment register,
1886 // and we are parsing a segment override
1887 if (!SM.isMemExpr() && !RegNo)
1888 return X86Operand::CreateImm(Disp, Start, End);
1890 StringRef ErrMsg;
1891 unsigned BaseReg = SM.getBaseReg();
1892 unsigned IndexReg = SM.getIndexReg();
1893 unsigned Scale = SM.getScale();
1895 if (Scale == 0 && BaseReg != X86::ESP && BaseReg != X86::RSP &&
1896 (IndexReg == X86::ESP || IndexReg == X86::RSP))
1897 std::swap(BaseReg, IndexReg);
1899 // If BaseReg is a vector register and IndexReg is not, swap them unless
1900 // Scale was specified, in which case it would be an error.
1901 if (Scale == 0 &&
1902 !(X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
1903 X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
1904 X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg)) &&
1905 (X86MCRegisterClasses[X86::VR128XRegClassID].contains(BaseReg) ||
1906 X86MCRegisterClasses[X86::VR256XRegClassID].contains(BaseReg) ||
1907 X86MCRegisterClasses[X86::VR512RegClassID].contains(BaseReg)))
1908 std::swap(BaseReg, IndexReg);
1910 if (Scale != 0 &&
1911 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg))
1912 return ErrorOperand(Start, "16-bit addresses cannot have a scale");
1914 // If there was no explicit scale specified, change it to 1.
1915 if (Scale == 0)
1916 Scale = 1;
1918 // If this is a 16-bit addressing mode with the base and index in the wrong
1919 // order, swap them so CheckBaseRegAndIndexRegAndScale doesn't fail. It is
1920 // shared with AT&T syntax, where the order matters.
1921 if ((BaseReg == X86::SI || BaseReg == X86::DI) &&
1922 (IndexReg == X86::BX || IndexReg == X86::BP))
1923 std::swap(BaseReg, IndexReg);
1925 if ((BaseReg || IndexReg) &&
1926 CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
1927 ErrMsg))
1928 return ErrorOperand(Start, ErrMsg);
1929 if (isParsingInlineAsm())
1930 return CreateMemForInlineAsm(RegNo, Disp, BaseReg, IndexReg,
1931 Scale, Start, End, Size, SM.getSymName(),
1932 SM.getIdentifierInfo());
1933 if (!(BaseReg || IndexReg || RegNo))
1934 return X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size);
1935 return X86Operand::CreateMem(getPointerWidth(), RegNo, Disp,
1936 BaseReg, IndexReg, Scale, Start, End, Size);
1939 std::unique_ptr<X86Operand> X86AsmParser::ParseATTOperand() {
1940 MCAsmParser &Parser = getParser();
1941 switch (getLexer().getKind()) {
1942 case AsmToken::Dollar: {
1943 // $42 or $ID -> immediate.
1944 SMLoc Start = Parser.getTok().getLoc(), End;
1945 Parser.Lex();
1946 const MCExpr *Val;
1947 // This is an immediate, so we should not parse a register. Do a precheck
1948 // for '%' to supersede intra-register parse errors.
1949 SMLoc L = Parser.getTok().getLoc();
1950 if (check(getLexer().is(AsmToken::Percent), L,
1951 "expected immediate expression") ||
1952 getParser().parseExpression(Val, End) ||
1953 check(isa<X86MCExpr>(Val), L, "expected immediate expression"))
1954 return nullptr;
1955 return X86Operand::CreateImm(Val, Start, End);
1957 case AsmToken::LCurly: {
1958 SMLoc Start = Parser.getTok().getLoc();
1959 return ParseRoundingModeOp(Start);
1961 default: {
1962 // This is a memory operand or a register. We have some parsing complications
1963 // as a '(' may be part of an immediate expression or the addressing mode
1964 // block. This is complicated by the fact that an assembler-level variable
1965 // may refer either to a register or an immediate expression.
1967 SMLoc Loc = Parser.getTok().getLoc(), EndLoc;
1968 const MCExpr *Expr = nullptr;
1969 unsigned Reg = 0;
1970 if (getLexer().isNot(AsmToken::LParen)) {
1971 // No '(' so this is either a displacement expression or a register.
1972 if (Parser.parseExpression(Expr, EndLoc))
1973 return nullptr;
1974 if (auto *RE = dyn_cast<X86MCExpr>(Expr)) {
1975 // Segment Register. Reset Expr and copy value to register.
1976 Expr = nullptr;
1977 Reg = RE->getRegNo();
1979 // Sanity check register.
1980 if (Reg == X86::EIZ || Reg == X86::RIZ)
1981 return ErrorOperand(
1982 Loc, "%eiz and %riz can only be used as index registers",
1983 SMRange(Loc, EndLoc));
1984 if (Reg == X86::RIP)
1985 return ErrorOperand(Loc, "%rip can only be used as a base register",
1986 SMRange(Loc, EndLoc));
1987 // Return registers that are not segment prefixes immediately.
1988 if (!Parser.parseOptionalToken(AsmToken::Colon))
1989 return X86Operand::CreateReg(Reg, Loc, EndLoc);
1990 if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg))
1991 return ErrorOperand(Loc, "invalid segment register");
1994 // This is a Memory operand.
1995 return ParseMemOperand(Reg, Expr, Loc, EndLoc);
2000 // Returns true on failure, false otherwise.
2001 // If no {z} mark was found, the parser doesn't advance.
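// For illustration: the trailing "{z}" in "vmovaps %zmm1, %zmm2 {%k1}{z}"
// requests zero-masking of the elements not selected by %k1.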
2002 bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
2003 const SMLoc &StartLoc) {
2004 MCAsmParser &Parser = getParser();
2005 // Assuming we are just past the '{' mark, query the next token. If it is
2006 // not the {z} mark we are searching for, return false, as no parsing error
2007 // was encountered
2008 if (!(getLexer().is(AsmToken::Identifier) &&
2009 (getLexer().getTok().getIdentifier() == "z")))
2010 return false;
2011 Parser.Lex(); // Eat z
2012 // Query and eat the '}' mark
2013 if (!getLexer().is(AsmToken::RCurly))
2014 return Error(getLexer().getLoc(), "Expected } at this point");
2015 Parser.Lex(); // Eat '}'
2016 // Assign Z the {z} mark operand
2017 Z = X86Operand::CreateToken("{z}", StartLoc);
2018 return false;
2021 // Returns true on failure, false otherwise.
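// Parses the trailing AVX512 decorations of an operand: a memory broadcast
// ({1to<NUM>}), an op-mask register ({%k<NUM>}), and/or zero-masking ({z}).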
2022 bool X86AsmParser::HandleAVX512Operand(OperandVector &Operands,
2023 const MCParsedAsmOperand &Op) {
2024 MCAsmParser &Parser = getParser();
2025 if (getLexer().is(AsmToken::LCurly)) {
2026 // Eat "{" and mark the current place.
2027 const SMLoc consumedToken = consumeToken();
2028 // Distinguish {1to<NUM>} from {%k<NUM>}.
2029 if(getLexer().is(AsmToken::Integer)) {
2030 // Parse memory broadcasting ({1to<NUM>}).
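// For illustration: "vaddpd (%rax){1to8}, %zmm1, %zmm2" broadcasts the
// single double-precision element at (%rax) to all eight lanes.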
2031 if (getLexer().getTok().getIntVal() != 1)
2032 return TokError("Expected 1to<NUM> at this point");
2033 Parser.Lex(); // Eat "1" of 1to8
2034 if (!getLexer().is(AsmToken::Identifier) ||
2035 !getLexer().getTok().getIdentifier().startswith("to"))
2036 return TokError("Expected 1to<NUM> at this point");
2037 // Recognize only reasonable suffixes.
2038 const char *BroadcastPrimitive =
2039 StringSwitch<const char*>(getLexer().getTok().getIdentifier())
2040 .Case("to2", "{1to2}")
2041 .Case("to4", "{1to4}")
2042 .Case("to8", "{1to8}")
2043 .Case("to16", "{1to16}")
2044 .Default(nullptr);
2045 if (!BroadcastPrimitive)
2046 return TokError("Invalid memory broadcast primitive.");
2047 Parser.Lex(); // Eat "toN" of 1toN
2048 if (!getLexer().is(AsmToken::RCurly))
2049 return TokError("Expected } at this point");
2050 Parser.Lex(); // Eat "}"
2051 Operands.push_back(X86Operand::CreateToken(BroadcastPrimitive,
2052 consumedToken));
2053 // No AVX512-specific primitives can appear
2054 // after memory broadcasting, so return.
2055 return false;
2056 } else {
2057 // Parse either {k}{z}, {z}{k}, {k} or {z}
2058 // The last one has no meaning, but GCC accepts it
2059 // Currently, we're just past a '{' mark
2060 std::unique_ptr<X86Operand> Z;
2061 if (ParseZ(Z, consumedToken))
2062 return true;
2063 // Reaching here means that parsing of the alleged '{z}' mark yielded
2064 // no errors.
2065 // Query whether further parsing of a {%k<NUM>} mark is needed
2066 if (!Z || getLexer().is(AsmToken::LCurly)) {
2067 SMLoc StartLoc = Z ? consumeToken() : consumedToken;
2068 // Parse an op-mask register mark ({%k<NUM>}), which is now to be
2069 // expected
2070 unsigned RegNo;
2071 SMLoc RegLoc;
2072 if (!ParseRegister(RegNo, RegLoc, StartLoc) &&
2073 X86MCRegisterClasses[X86::VK1RegClassID].contains(RegNo)) {
2074 if (RegNo == X86::K0)
2075 return Error(RegLoc, "Register k0 can't be used as write mask");
2076 if (!getLexer().is(AsmToken::RCurly))
2077 return Error(getLexer().getLoc(), "Expected } at this point");
2078 Operands.push_back(X86Operand::CreateToken("{", StartLoc));
2079 Operands.push_back(
2080 X86Operand::CreateReg(RegNo, StartLoc, StartLoc));
2081 Operands.push_back(X86Operand::CreateToken("}", consumeToken()));
2082 } else
2083 return Error(getLexer().getLoc(),
2084 "Expected an op-mask register at this point");
2085 // A {%k<NUM>} mark was found; check for a {z} mark
2086 if (getLexer().is(AsmToken::LCurly) && !Z) {
2087 // If we've found a parsing error, or found no (expected) {z} mark,
2088 // report an error
2089 if (ParseZ(Z, consumeToken()) || !Z)
2090 return Error(getLexer().getLoc(),
2091 "Expected a {z} mark at this point");
2094 // '{z}' on its own is meaningless, hence should be ignored.
2095 // On the contrary, if it is accompanied by a K register,
2096 // allow it.
2097 if (Z)
2098 Operands.push_back(std::move(Z));
2102 return false;
2105 /// ParseMemOperand: 'seg : disp(basereg, indexreg, scale)'. The '%ds:' prefix
2106 /// has already been parsed if present. disp may be provided as well.
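/// For illustration: "%gs:8(%rbx,%rcx,4)" yields SegReg == %gs (parsed by the
/// caller), Disp == 8, BaseReg == %rbx, IndexReg == %rcx and Scale == 4.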
2107 std::unique_ptr<X86Operand> X86AsmParser::ParseMemOperand(unsigned SegReg,
2108 const MCExpr *&Disp,
2109 const SMLoc &StartLoc,
2110 SMLoc &EndLoc) {
2111 MCAsmParser &Parser = getParser();
2112 SMLoc Loc;
2113 // Based on the initial values passed in, we are in one of these cases
2114 // (with current position (*)):
2116 // 1. seg : * disp (base-index-scale-expr)
2117 // 2. seg : *(disp) (base-index-scale-expr)
2118 // 3. seg : *(base-index-scale-expr)
2119 // 4. disp *(base-index-scale-expr)
2120 // 5. *(disp) (base-index-scale-expr)
2121 // 6. *(base-index-scale-expr)
2122 // 7. disp *
2123 // 8. *(disp)
2125 // If we do not have a displacement yet, check if we're in cases 4 or 6 by
2126 // checking if the first object after the parenthesis is a register (or an
2127 // identifier referring to a register) and parse the displacement or default
2128 // to 0 as appropriate.
2129 auto isAtMemOperand = [this]() {
2130 if (this->getLexer().isNot(AsmToken::LParen))
2131 return false;
2132 AsmToken Buf[2];
2133 StringRef Id;
2134 auto TokCount = this->getLexer().peekTokens(Buf, true);
2135 if (TokCount == 0)
2136 return false;
2137 switch (Buf[0].getKind()) {
2138 case AsmToken::Percent:
2139 case AsmToken::Comma:
2140 return true;
2141 // The cases below effectively peek at an identifier.
2142 case AsmToken::At:
2143 case AsmToken::Dollar:
2144 if ((TokCount > 1) &&
2145 (Buf[1].is(AsmToken::Identifier) || Buf[1].is(AsmToken::String)) &&
2146 (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
2147 Id = StringRef(Buf[0].getLoc().getPointer(),
2148 Buf[1].getIdentifier().size() + 1);
2149 break;
2150 case AsmToken::Identifier:
2151 case AsmToken::String:
2152 Id = Buf[0].getIdentifier();
2153 break;
2154 default:
2155 return false;
2157 // We have an ID. Check if it is bound to a register.
2158 if (!Id.empty()) {
2159 MCSymbol *Sym = this->getContext().getOrCreateSymbol(Id);
2160 if (Sym->isVariable()) {
2161 auto V = Sym->getVariableValue(/*SetUsed*/ false);
2162 return isa<X86MCExpr>(V);
2165 return false;
2168 if (!Disp) {
2169 // Parse immediate if we're not at a mem operand yet.
2170 if (!isAtMemOperand()) {
2171 if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(Disp, EndLoc))
2172 return nullptr;
2173 assert(!isa<X86MCExpr>(Disp) && "Expected non-register here.");
2174 } else {
2175 // Disp is implicitly zero if we haven't parsed it yet.
2176 Disp = MCConstantExpr::create(0, Parser.getContext());
2180 // We are now either at the end of the operand or at the '(' at the start of a
2181 // base-index-scale-expr.
2183 if (!parseOptionalToken(AsmToken::LParen)) {
2184 if (SegReg == 0)
2185 return X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc);
2186 return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1,
2187 StartLoc, EndLoc);
2190 // If we reached here, then eat the '(' and Process
2191 // the rest of the memory operand.
2192 unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
2193 SMLoc BaseLoc = getLexer().getLoc();
2194 const MCExpr *E;
2195 StringRef ErrMsg;
2197 // Parse BaseReg if one is provided.
2198 if (getLexer().isNot(AsmToken::Comma) && getLexer().isNot(AsmToken::RParen)) {
2199 if (Parser.parseExpression(E, EndLoc) ||
2200 check(!isa<X86MCExpr>(E), BaseLoc, "expected register here"))
2201 return nullptr;
2203 // Sanity check register.
2204 BaseReg = cast<X86MCExpr>(E)->getRegNo();
2205 if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
2206 return ErrorOperand(BaseLoc,
2207 "eiz and riz can only be used as index registers",
2208 SMRange(BaseLoc, EndLoc));
2211 if (parseOptionalToken(AsmToken::Comma)) {
2212 // Following the comma we should have either an index register, or a scale
2213 // value. We don't support the latter form, but we want to parse it
2214 // correctly.
2216 // Even though it would be completely consistent to support syntax like
2217 // "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
2218 if (getLexer().isNot(AsmToken::RParen)) {
2219 if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(E, EndLoc))
2220 return nullptr;
2222 if (!isa<X86MCExpr>(E)) {
2223 // We've parsed an unexpected Scale Value instead of an index
2224 // register. Interpret it as an absolute.
2225 int64_t ScaleVal;
2226 if (!E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
2227 return ErrorOperand(Loc, "expected absolute expression");
2228 if (ScaleVal != 1)
2229 Warning(Loc, "scale factor without index register is ignored");
2230 Scale = 1;
2231 } else { // IndexReg Found.
2232 IndexReg = cast<X86MCExpr>(E)->getRegNo();
2234 if (BaseReg == X86::RIP)
2235 return ErrorOperand(
2236 Loc, "%rip as base register can not have an index register");
2237 if (IndexReg == X86::RIP)
2238 return ErrorOperand(Loc, "%rip is not allowed as an index register");
2240 if (parseOptionalToken(AsmToken::Comma)) {
2241 // Parse the scale amount:
2242 // ::= ',' [scale-expression]
2244 // A scale amount without an index is ignored.
2245 if (getLexer().isNot(AsmToken::RParen)) {
2246 int64_t ScaleVal;
2247 if (Parser.parseTokenLoc(Loc) ||
2248 Parser.parseAbsoluteExpression(ScaleVal))
2249 return ErrorOperand(Loc, "expected scale expression");
2250 Scale = (unsigned)ScaleVal;
2251 // Validate the scale amount.
2252 if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
2253 Scale != 1)
2254 return ErrorOperand(Loc,
2255 "scale factor in 16-bit address must be 1");
2256 if (checkScale(Scale, ErrMsg))
2257 return ErrorOperand(Loc, ErrMsg);
2264 // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
2265 if (parseToken(AsmToken::RParen, "unexpected token in memory operand"))
2266 return nullptr;
2268 // This is to support the otherwise illegal operand (%dx) found in various
2269 // unofficial manual examples (e.g. "out[s]?[bwl]? %al, (%dx)"), which must
2270 // now be supported. Mark such DX variants separately; fix them only in special cases.
2271 if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 &&
2272 isa<MCConstantExpr>(Disp) && cast<MCConstantExpr>(Disp)->getValue() == 0)
2273 return X86Operand::CreateDXReg(BaseLoc, BaseLoc);
2275 if (CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
2276 ErrMsg))
2277 return ErrorOperand(BaseLoc, ErrMsg);
2279 if (SegReg || BaseReg || IndexReg)
2280 return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
2281 IndexReg, Scale, StartLoc, EndLoc);
2282 return X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc);
2285 // Parse either a standard primary expression or a register.
2286 bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
2287 MCAsmParser &Parser = getParser();
2288 // See if this is a register first.
2289 if (getTok().is(AsmToken::Percent) ||
2290 (isParsingIntelSyntax() && getTok().is(AsmToken::Identifier) &&
2291 MatchRegisterName(Parser.getTok().getString()))) {
2292 SMLoc StartLoc = Parser.getTok().getLoc();
2293 unsigned RegNo;
2294 if (ParseRegister(RegNo, StartLoc, EndLoc))
2295 return true;
2296 Res = X86MCExpr::create(RegNo, Parser.getContext());
2297 return false;
2299 return Parser.parsePrimaryExpr(Res, EndLoc);
2302 bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
2303 SMLoc NameLoc, OperandVector &Operands) {
2304 MCAsmParser &Parser = getParser();
2305 InstInfo = &Info;
2306 StringRef PatchedName = Name;
2308 if ((Name.equals("jmp") || Name.equals("jc") || Name.equals("jz")) &&
2309 isParsingIntelSyntax() && isParsingInlineAsm()) {
2310 StringRef NextTok = Parser.getTok().getString();
2311 if (NextTok == "short") {
2312 SMLoc NameEndLoc =
2313 NameLoc.getFromPointer(NameLoc.getPointer() + Name.size());
2314 // Eat the short keyword
2315 Parser.Lex();
2316 // MS ignores the short keyword; it determines the jmp type based
2317 // on the distance to the label
2318 InstInfo->AsmRewrites->emplace_back(AOK_Skip, NameEndLoc,
2319 NextTok.size() + 1);
2323 // FIXME: Hack to recognize setneb as setne.
2324 if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
2325 PatchedName != "setb" && PatchedName != "setnb")
2326 PatchedName = PatchedName.substr(0, Name.size()-1);
2328 // FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
2329 if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
2330 (PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
2331 PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
2332 bool IsVCMP = PatchedName[0] == 'v';
2333 unsigned CCIdx = IsVCMP ? 4 : 3;
2334 unsigned ComparisonCode = StringSwitch<unsigned>(
2335 PatchedName.slice(CCIdx, PatchedName.size() - 2))
2336 .Case("eq", 0x00)
2337 .Case("eq_oq", 0x00)
2338 .Case("lt", 0x01)
2339 .Case("lt_os", 0x01)
2340 .Case("le", 0x02)
2341 .Case("le_os", 0x02)
2342 .Case("unord", 0x03)
2343 .Case("unord_q", 0x03)
2344 .Case("neq", 0x04)
2345 .Case("neq_uq", 0x04)
2346 .Case("nlt", 0x05)
2347 .Case("nlt_us", 0x05)
2348 .Case("nle", 0x06)
2349 .Case("nle_us", 0x06)
2350 .Case("ord", 0x07)
2351 .Case("ord_q", 0x07)
2352 /* AVX only from here */
2353 .Case("eq_uq", 0x08)
2354 .Case("nge", 0x09)
2355 .Case("nge_us", 0x09)
2356 .Case("ngt", 0x0A)
2357 .Case("ngt_us", 0x0A)
2358 .Case("false", 0x0B)
2359 .Case("false_oq", 0x0B)
2360 .Case("neq_oq", 0x0C)
2361 .Case("ge", 0x0D)
2362 .Case("ge_os", 0x0D)
2363 .Case("gt", 0x0E)
2364 .Case("gt_os", 0x0E)
2365 .Case("true", 0x0F)
2366 .Case("true_uq", 0x0F)
2367 .Case("eq_os", 0x10)
2368 .Case("lt_oq", 0x11)
2369 .Case("le_oq", 0x12)
2370 .Case("unord_s", 0x13)
2371 .Case("neq_us", 0x14)
2372 .Case("nlt_uq", 0x15)
2373 .Case("nle_uq", 0x16)
2374 .Case("ord_s", 0x17)
2375 .Case("eq_us", 0x18)
2376 .Case("nge_uq", 0x19)
2377 .Case("ngt_uq", 0x1A)
2378 .Case("false_os", 0x1B)
2379 .Case("neq_os", 0x1C)
2380 .Case("ge_oq", 0x1D)
2381 .Case("gt_oq", 0x1E)
2382 .Case("true_us", 0x1F)
2383 .Default(~0U);
2384 if (ComparisonCode != ~0U && (IsVCMP || ComparisonCode < 8)) {
2386 Operands.push_back(X86Operand::CreateToken(PatchedName.slice(0, CCIdx),
2387 NameLoc));
2389 const MCExpr *ImmOp = MCConstantExpr::create(ComparisonCode,
2390 getParser().getContext());
2391 Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
2393 PatchedName = PatchedName.substr(PatchedName.size() - 2);
2397 // FIXME: Hack to recognize vpcmp<comparison code>{ub,uw,ud,uq,b,w,d,q}.
2398 if (PatchedName.startswith("vpcmp") &&
2399 (PatchedName.endswith("b") || PatchedName.endswith("w") ||
2400 PatchedName.endswith("d") || PatchedName.endswith("q"))) {
2401 unsigned CCIdx = PatchedName.drop_back().back() == 'u' ? 2 : 1;
2402 unsigned ComparisonCode = StringSwitch<unsigned>(
2403 PatchedName.slice(5, PatchedName.size() - CCIdx))
2404 .Case("eq", 0x0) // Only allowed on unsigned. Checked below.
2405 .Case("lt", 0x1)
2406 .Case("le", 0x2)
2407 //.Case("false", 0x3) // Not a documented alias.
2408 .Case("neq", 0x4)
2409 .Case("nlt", 0x5)
2410 .Case("nle", 0x6)
2411 //.Case("true", 0x7) // Not a documented alias.
2412 .Default(~0U);
2413 if (ComparisonCode != ~0U && (ComparisonCode != 0 || CCIdx == 2)) {
2414 Operands.push_back(X86Operand::CreateToken("vpcmp", NameLoc));
2416 const MCExpr *ImmOp = MCConstantExpr::create(ComparisonCode,
2417 getParser().getContext());
2418 Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
2420 PatchedName = PatchedName.substr(PatchedName.size() - CCIdx);
2424 // FIXME: Hack to recognize vpcom<comparison code>{ub,uw,ud,uq,b,w,d,q}.
2425 if (PatchedName.startswith("vpcom") &&
2426 (PatchedName.endswith("b") || PatchedName.endswith("w") ||
2427 PatchedName.endswith("d") || PatchedName.endswith("q"))) {
2428 unsigned CCIdx = PatchedName.drop_back().back() == 'u' ? 2 : 1;
2429 unsigned ComparisonCode = StringSwitch<unsigned>(
2430 PatchedName.slice(5, PatchedName.size() - CCIdx))
2431 .Case("lt", 0x0)
2432 .Case("le", 0x1)
2433 .Case("gt", 0x2)
2434 .Case("ge", 0x3)
2435 .Case("eq", 0x4)
2436 .Case("neq", 0x5)
2437 .Case("false", 0x6)
2438 .Case("true", 0x7)
2439 .Default(~0U);
2440 if (ComparisonCode != ~0U) {
2441 Operands.push_back(X86Operand::CreateToken("vpcom", NameLoc));
2443 const MCExpr *ImmOp = MCConstantExpr::create(ComparisonCode,
2444 getParser().getContext());
2445 Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
2447 PatchedName = PatchedName.substr(PatchedName.size() - CCIdx);
2452 // Determine whether this is an instruction prefix.
2453 // FIXME:
2454 // Enhance the robustness of prefix handling; for example, the following
2455 // forms are currently tolerated:
2456 // repz repnz <insn> ; GAS errors for the use of two similar prefixes
2457 // lock addq %rax, %rbx ; Destination operand must be of memory type
2458 // xacquire <insn> ; xacquire must be accompanied by 'lock'
2459 bool isPrefix = StringSwitch<bool>(Name)
2460 .Cases("rex64", "data32", "data16", true)
2461 .Cases("xacquire", "xrelease", true)
2462 .Cases("acquire", "release", isParsingIntelSyntax())
2463 .Default(false);
2465 auto isLockRepeatNtPrefix = [](StringRef N) {
2466 return StringSwitch<bool>(N)
2467 .Cases("lock", "rep", "repe", "repz", "repne", "repnz", "notrack", true)
2468 .Default(false);
2471 bool CurlyAsEndOfStatement = false;
2473 unsigned Flags = X86::IP_NO_PREFIX;
2474 while (isLockRepeatNtPrefix(Name.lower())) {
2475 unsigned Prefix =
2476 StringSwitch<unsigned>(Name)
2477 .Cases("lock", "lock", X86::IP_HAS_LOCK)
2478 .Cases("rep", "repe", "repz", X86::IP_HAS_REPEAT)
2479 .Cases("repne", "repnz", X86::IP_HAS_REPEAT_NE)
2480 .Cases("notrack", "notrack", X86::IP_HAS_NOTRACK)
2481 .Default(X86::IP_NO_PREFIX); // Invalid prefix (impossible)
2482 Flags |= Prefix;
2483 if (getLexer().is(AsmToken::EndOfStatement)) {
2484 // We don't have a real instruction with the given prefix;
2485 // let's use the prefix as the instruction.
2486 // TODO: there could be several prefixes one after another
2487 Flags = X86::IP_NO_PREFIX;
2488 break;
2490 Name = Parser.getTok().getString();
2491 Parser.Lex(); // eat the prefix
2492 // Hack: we could have something like "rep # some comment" or
2493 // "lock; cmpxchg16b $1" or "lock\0A\09incl" or "lock/incl"
2494 while (Name.startswith(";") || Name.startswith("\n") ||
2495 Name.startswith("#") || Name.startswith("\t") ||
2496 Name.startswith("/")) {
2497 Name = Parser.getTok().getString();
2498 Parser.Lex(); // go to next prefix or instr
2502 if (Flags)
2503 PatchedName = Name;
2505 // Hacks to handle 'data16' and 'data32'
2506 if (PatchedName == "data16" && is16BitMode()) {
2507 return Error(NameLoc, "redundant data16 prefix");
2509 if (PatchedName == "data32") {
2510 if (is32BitMode())
2511 return Error(NameLoc, "redundant data32 prefix");
2512 if (is64BitMode())
2513 return Error(NameLoc, "'data32' is not supported in 64-bit mode");
2514 // Hack: use 'data16' for the table lookup.
2515 PatchedName = "data16";
2518 Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
2520 // This does the actual operand parsing. Don't parse any more if we have a
2521 // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
2522 // just want to parse the "lock" as the first instruction and the "incl" as
2523 // the next one.
2524 if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {
2525 // Parse '*' modifier.
2526 if (getLexer().is(AsmToken::Star))
2527 Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
2529 // Read the operands.
2530 while(1) {
2531 if (std::unique_ptr<X86Operand> Op = ParseOperand()) {
2532 Operands.push_back(std::move(Op));
2533 if (HandleAVX512Operand(Operands, *Operands.back()))
2534 return true;
2535 } else {
2536 return true;
2538 // check for comma and eat it
2539 if (getLexer().is(AsmToken::Comma))
2540 Parser.Lex();
2541 else
2542 break;
2545 // In MS inline asm curly braces mark the beginning/end of a block,
2546 // therefore they should be interpreted as end of statement
2547 CurlyAsEndOfStatement =
2548 isParsingIntelSyntax() && isParsingInlineAsm() &&
2549 (getLexer().is(AsmToken::LCurly) || getLexer().is(AsmToken::RCurly));
2550 if (getLexer().isNot(AsmToken::EndOfStatement) && !CurlyAsEndOfStatement)
2551 return TokError("unexpected token in argument list");
2554 // Consume the EndOfStatement or the prefix separator Slash
2555 if (getLexer().is(AsmToken::EndOfStatement) ||
2556 (isPrefix && getLexer().is(AsmToken::Slash)))
2557 Parser.Lex();
2558 else if (CurlyAsEndOfStatement)
2559 // Add an actual EndOfStatement before the curly brace
2560 Info.AsmRewrites->emplace_back(AOK_EndOfStatement,
2561 getLexer().getTok().getLoc(), 0);
2563 // This is for gas compatibility and cannot be done in the .td files.
2564 // Add a "p" suffix to certain floating point instructions with no arguments.
2565 // For example: fsub --> fsubp
2566 bool IsFp =
2567 Name == "fsub" || Name == "fdiv" || Name == "fsubr" || Name == "fdivr";
2568 if (IsFp && Operands.size() == 1) {
2569 const char *Repl = StringSwitch<const char *>(Name)
2570 .Case("fsub", "fsubp")
2571 .Case("fdiv", "fdivp")
2572 .Case("fsubr", "fsubrp")
2573 .Case("fdivr", "fdivrp");
2574 static_cast<X86Operand &>(*Operands[0]).setTokenValue(Repl);
2577 // Moving a 32 or 16 bit value into a segment register has the same
2578 // behavior. Modify such instructions to always take the shorter form.
2579 if ((Name == "mov" || Name == "movw" || Name == "movl") &&
2580 (Operands.size() == 3)) {
2581 X86Operand &Op1 = (X86Operand &)*Operands[1];
2582 X86Operand &Op2 = (X86Operand &)*Operands[2];
2583 SMLoc Loc = Op1.getEndLoc();
2584 if (Op1.isReg() && Op2.isReg() &&
2585 X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
2586 Op2.getReg()) &&
2587 (X86MCRegisterClasses[X86::GR16RegClassID].contains(Op1.getReg()) ||
2588 X86MCRegisterClasses[X86::GR32RegClassID].contains(Op1.getReg()))) {
2589 // Change instruction name to match new instruction.
2590 if (Name != "mov" && Name[3] == (is16BitMode() ? 'l' : 'w')) {
2591 Name = is16BitMode() ? "movw" : "movl";
2592 Operands[0] = X86Operand::CreateToken(Name, NameLoc);
2594 // Select the correct equivalent 16-/32-bit source register.
2595 unsigned Reg =
2596 getX86SubSuperRegisterOrZero(Op1.getReg(), is16BitMode() ? 16 : 32);
2597 Operands[1] = X86Operand::CreateReg(Reg, Loc, Loc);
2601 // This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" ->
2602 // "outb %al, %dx". Out doesn't take a memory form, but this is a widely
2603 // documented form in various unofficial manuals, so a lot of code uses it.
2604 if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||
2605 Name == "outl" || Name == "outsl" || Name == "out" || Name == "outs") &&
2606 Operands.size() == 3) {
2607 X86Operand &Op = (X86Operand &)*Operands.back();
2608 if (Op.isDXReg())
2609 Operands.back() = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
2610 Op.getEndLoc());
2612 // Same hack for "in[s]?[bwl]? (%dx), %al" -> "inb %dx, %al".
2613 if ((Name == "inb" || Name == "insb" || Name == "inw" || Name == "insw" ||
2614 Name == "inl" || Name == "insl" || Name == "in" || Name == "ins") &&
2615 Operands.size() == 3) {
2616 X86Operand &Op = (X86Operand &)*Operands[1];
2617 if (Op.isDXReg())
2618 Operands[1] = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
2619 Op.getEndLoc());
2622 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 2> TmpOperands;
2623 bool HadVerifyError = false;
2625 // Append default arguments to "ins[bwld]"
2626 if (Name.startswith("ins") &&
2627 (Operands.size() == 1 || Operands.size() == 3) &&
2628 (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd" ||
2629 Name == "ins")) {
2631 AddDefaultSrcDestOperands(TmpOperands,
2632 X86Operand::CreateReg(X86::DX, NameLoc, NameLoc),
2633 DefaultMemDIOperand(NameLoc));
2634 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
2637 // Append default arguments to "outs[bwld]"
2638 if (Name.startswith("outs") &&
2639 (Operands.size() == 1 || Operands.size() == 3) &&
2640 (Name == "outsb" || Name == "outsw" || Name == "outsl" ||
2641 Name == "outsd" || Name == "outs")) {
2642 AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
2643 X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
2644 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
2647 // Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate
2648 // values of $SIREG according to the mode. It would be nice if this
2649 // could be achieved with InstAlias in the tables.
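  // For illustration: a bare "lodsb" becomes "lodsb (%rsi)" in 64-bit mode,
  // or "lodsb (%esi)" / "lodsb (%si)" in 32-/16-bit mode respectively.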
2650 if (Name.startswith("lods") &&
2651 (Operands.size() == 1 || Operands.size() == 2) &&
2652 (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
2653 Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
2654 TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
2655 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
2658 // Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate
2659 // values of $DIREG according to the mode. It would be nice if this
2660 // could be achieved with InstAlias in the tables.
2661 if (Name.startswith("stos") &&
2662 (Operands.size() == 1 || Operands.size() == 2) &&
2663 (Name == "stos" || Name == "stosb" || Name == "stosw" ||
2664 Name == "stosl" || Name == "stosd" || Name == "stosq")) {
2665 TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
2666 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
2669 // Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate
2670 // values of $DIREG according to the mode. It would be nice if this
2671 // could be achieved with InstAlias in the tables.
2672 if (Name.startswith("scas") &&
2673 (Operands.size() == 1 || Operands.size() == 2) &&
2674 (Name == "scas" || Name == "scasb" || Name == "scasw" ||
2675 Name == "scasl" || Name == "scasd" || Name == "scasq")) {
2676 TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
2677 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
2680 // Add default SI and DI operands to "cmps[bwlq]".
2681 if (Name.startswith("cmps") &&
2682 (Operands.size() == 1 || Operands.size() == 3) &&
2683 (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
2684 Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
2685 AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
2686 DefaultMemSIOperand(NameLoc));
2687 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
2690 // Add default SI and DI operands to "movs[bwlq]".
2691 if (((Name.startswith("movs") &&
2692 (Name == "movs" || Name == "movsb" || Name == "movsw" ||
2693 Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
2694 (Name.startswith("smov") &&
2695 (Name == "smov" || Name == "smovb" || Name == "smovw" ||
2696 Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
2697 (Operands.size() == 1 || Operands.size() == 3)) {
2698 if (Name == "movsd" && Operands.size() == 1 && !isParsingIntelSyntax())
2699 Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
2700 AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
2701 DefaultMemDIOperand(NameLoc));
2702 HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
2705 // Check if we encountered an error for one of the string instructions
2706 if (HadVerifyError) {
2707 return HadVerifyError;
2710 // FIXME: Hack to recognize s{hr,ar,hl} $1, <op>. Canonicalize to
2711 // "shift <op>".
2712 if ((Name.startswith("shr") || Name.startswith("sar") ||
2713 Name.startswith("shl") || Name.startswith("sal") ||
2714 Name.startswith("rcl") || Name.startswith("rcr") ||
2715 Name.startswith("rol") || Name.startswith("ror")) &&
2716 Operands.size() == 3) {
2717 if (isParsingIntelSyntax()) {
2718 // Intel syntax
2719 X86Operand &Op1 = static_cast<X86Operand &>(*Operands[2]);
2720 if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
2721 cast<MCConstantExpr>(Op1.getImm())->getValue() == 1)
2722 Operands.pop_back();
2723 } else {
2724 X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
2725 if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
2726 cast<MCConstantExpr>(Op1.getImm())->getValue() == 1)
2727 Operands.erase(Operands.begin() + 1);
2731 // Transforms "int $3" into "int3" as a size optimization. We can't write an
2732 // InstAlias with an immediate operand yet.
2733 if (Name == "int" && Operands.size() == 2) {
2734 X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
2735 if (Op1.isImm())
2736 if (auto *CE = dyn_cast<MCConstantExpr>(Op1.getImm()))
2737 if (CE->getValue() == 3) {
2738 Operands.erase(Operands.begin() + 1);
2739 static_cast<X86Operand &>(*Operands[0]).setTokenValue("int3");
2743 // Transforms "xlat mem8" into "xlatb"
2744 if ((Name == "xlat" || Name == "xlatb") && Operands.size() == 2) {
2745 X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
2746 if (Op1.isMem8()) {
2747 Warning(Op1.getStartLoc(), "memory operand is only for determining the "
2748 "size, (R|E)BX will be used for the location");
2749 Operands.pop_back();
2750 static_cast<X86Operand &>(*Operands[0]).setTokenValue("xlatb");
2754 if (Flags)
2755 Operands.push_back(X86Operand::CreatePrefix(Flags, NameLoc, NameLoc));
2756 return false;
2759 bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
2760 return false;
2763 bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
2764 const MCRegisterInfo *MRI = getContext().getRegisterInfo();
2766 switch (Inst.getOpcode()) {
2767 case X86::VGATHERDPDYrm:
2768 case X86::VGATHERDPDrm:
2769 case X86::VGATHERDPSYrm:
2770 case X86::VGATHERDPSrm:
2771 case X86::VGATHERQPDYrm:
2772 case X86::VGATHERQPDrm:
2773 case X86::VGATHERQPSYrm:
2774 case X86::VGATHERQPSrm:
2775 case X86::VPGATHERDDYrm:
2776 case X86::VPGATHERDDrm:
2777 case X86::VPGATHERDQYrm:
2778 case X86::VPGATHERDQrm:
2779 case X86::VPGATHERQDYrm:
2780 case X86::VPGATHERQDrm:
2781 case X86::VPGATHERQQYrm:
2782 case X86::VPGATHERQQrm: {
2783 unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
2784 unsigned Mask = MRI->getEncodingValue(Inst.getOperand(1).getReg());
2785 unsigned Index =
2786 MRI->getEncodingValue(Inst.getOperand(3 + X86::AddrIndexReg).getReg());
2787 if (Dest == Mask || Dest == Index || Mask == Index)
2788 return Warning(Ops[0]->getStartLoc(), "mask, index, and destination "
2789 "registers should be distinct");
2790 break;
2792 case X86::VGATHERDPDZ128rm:
2793 case X86::VGATHERDPDZ256rm:
2794 case X86::VGATHERDPDZrm:
2795 case X86::VGATHERDPSZ128rm:
2796 case X86::VGATHERDPSZ256rm:
2797 case X86::VGATHERDPSZrm:
2798 case X86::VGATHERQPDZ128rm:
2799 case X86::VGATHERQPDZ256rm:
2800 case X86::VGATHERQPDZrm:
2801 case X86::VGATHERQPSZ128rm:
2802 case X86::VGATHERQPSZ256rm:
2803 case X86::VGATHERQPSZrm:
2804 case X86::VPGATHERDDZ128rm:
2805 case X86::VPGATHERDDZ256rm:
2806 case X86::VPGATHERDDZrm:
2807 case X86::VPGATHERDQZ128rm:
2808 case X86::VPGATHERDQZ256rm:
2809 case X86::VPGATHERDQZrm:
2810 case X86::VPGATHERQDZ128rm:
2811 case X86::VPGATHERQDZ256rm:
2812 case X86::VPGATHERQDZrm:
2813 case X86::VPGATHERQQZ128rm:
2814 case X86::VPGATHERQQZ256rm:
2815 case X86::VPGATHERQQZrm: {
2816 unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
2817 unsigned Index =
2818 MRI->getEncodingValue(Inst.getOperand(4 + X86::AddrIndexReg).getReg());
2819 if (Dest == Index)
2820 return Warning(Ops[0]->getStartLoc(), "index and destination registers "
2821 "should be distinct");
2822 break;
2824 case X86::V4FMADDPSrm:
2825 case X86::V4FMADDPSrmk:
2826 case X86::V4FMADDPSrmkz:
2827 case X86::V4FMADDSSrm:
2828 case X86::V4FMADDSSrmk:
2829 case X86::V4FMADDSSrmkz:
2830 case X86::V4FNMADDPSrm:
2831 case X86::V4FNMADDPSrmk:
2832 case X86::V4FNMADDPSrmkz:
2833 case X86::V4FNMADDSSrm:
2834 case X86::V4FNMADDSSrmk:
2835 case X86::V4FNMADDSSrmkz:
2836 case X86::VP4DPWSSDSrm:
2837 case X86::VP4DPWSSDSrmk:
2838 case X86::VP4DPWSSDSrmkz:
2839 case X86::VP4DPWSSDrm:
2840 case X86::VP4DPWSSDrmk:
2841 case X86::VP4DPWSSDrmkz: {
2842 unsigned Src2 = Inst.getOperand(Inst.getNumOperands() -
2843 X86::AddrNumOperands - 1).getReg();
2844 unsigned Src2Enc = MRI->getEncodingValue(Src2);
2845 if (Src2Enc % 4 != 0) {
2846 StringRef RegName = X86IntelInstPrinter::getRegisterName(Src2);
2847 unsigned GroupStart = (Src2Enc / 4) * 4;
2848 unsigned GroupEnd = GroupStart + 3;
2849 return Warning(Ops[0]->getStartLoc(),
2850 "source register '" + RegName + "' implicitly denotes '" +
2851 RegName.take_front(3) + Twine(GroupStart) + "' to '" +
2852 RegName.take_front(3) + Twine(GroupEnd) +
2853 "' source group");
2855 break;
2859 return false;
2862 static const char *getSubtargetFeatureName(uint64_t Val);
2864 void X86AsmParser::EmitInstruction(MCInst &Inst, OperandVector &Operands,
2865 MCStreamer &Out) {
2866 Instrumentation->InstrumentAndEmitInstruction(
2867 Inst, Operands, getContext(), MII, Out);
2870 bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
2871 OperandVector &Operands,
2872 MCStreamer &Out, uint64_t &ErrorInfo,
2873 bool MatchingInlineAsm) {
2874 if (isParsingIntelSyntax())
2875 return MatchAndEmitIntelInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
2876 MatchingInlineAsm);
2877 return MatchAndEmitATTInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
2878 MatchingInlineAsm);
2881 void X86AsmParser::MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op,
2882 OperandVector &Operands, MCStreamer &Out,
2883 bool MatchingInlineAsm) {
2884 // FIXME: This should be replaced with a real .td file alias mechanism.
2885 // Also, MatchInstructionImpl should actually *do* the EmitInstruction
2886 // call.
2887 const char *Repl = StringSwitch<const char *>(Op.getToken())
2888 .Case("finit", "fninit")
2889 .Case("fsave", "fnsave")
2890 .Case("fstcw", "fnstcw")
2891 .Case("fstcww", "fnstcw")
2892 .Case("fstenv", "fnstenv")
2893 .Case("fstsw", "fnstsw")
2894 .Case("fstsww", "fnstsw")
2895 .Case("fclex", "fnclex")
2896 .Default(nullptr);
2897 if (Repl) {
2898 MCInst Inst;
2899 Inst.setOpcode(X86::WAIT);
2900 Inst.setLoc(IDLoc);
2901 if (!MatchingInlineAsm)
2902 EmitInstruction(Inst, Operands, Out);
2903 Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
2907 bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc, uint64_t ErrorInfo,
2908 bool MatchingInlineAsm) {
2909 assert(ErrorInfo && "Unknown missing feature!");
2910 SmallString<126> Msg;
2911 raw_svector_ostream OS(Msg);
2912 OS << "instruction requires:";
2913 uint64_t Mask = 1;
2914 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
2915 if (ErrorInfo & Mask)
2916 OS << ' ' << getSubtargetFeatureName(ErrorInfo & Mask);
2917 Mask <<= 1;
2919 return Error(IDLoc, OS.str(), SMRange(), MatchingInlineAsm);
2922 static unsigned getPrefixes(OperandVector &Operands) {
2923 unsigned Result = 0;
2924 X86Operand &Prefix = static_cast<X86Operand &>(*Operands.back());
2925 if (Prefix.isPrefix()) {
2926 Result = Prefix.getPrefix();
2927 Operands.pop_back();
2929 return Result;
2932 bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
2933 OperandVector &Operands,
2934 MCStreamer &Out,
2935 uint64_t &ErrorInfo,
2936 bool MatchingInlineAsm) {
2937 assert(!Operands.empty() && "Unexpected empty operand list!");
2938 X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
2939 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
2940 SMRange EmptyRange = None;
2942 // First, handle aliases that expand to multiple instructions.
2943 MatchFPUWaitAlias(IDLoc, Op, Operands, Out, MatchingInlineAsm);
2945 bool WasOriginallyInvalidOperand = false;
2946 unsigned Prefixes = getPrefixes(Operands);
2948 MCInst Inst;
2950 if (Prefixes)
2951 Inst.setFlags(Prefixes);
2953 // First, try a direct match.
2954 switch (MatchInstruction(Operands, Inst, ErrorInfo, MatchingInlineAsm,
2955 isParsingIntelSyntax())) {
2956 default: llvm_unreachable("Unexpected match result!");
2957 case Match_Success:
2958 if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
2959 return true;
2960 // Some instructions need post-processing to, for example, tweak which
2961 // encoding is selected. Loop on it while changes happen so the
2962 // individual transformations can chain off each other.
2963 if (!MatchingInlineAsm)
2964 while (processInstruction(Inst, Operands))
2967 Inst.setLoc(IDLoc);
2968 if (!MatchingInlineAsm)
2969 EmitInstruction(Inst, Operands, Out);
2970 Opcode = Inst.getOpcode();
2971 return false;
2972 case Match_MissingFeature:
2973 return ErrorMissingFeature(IDLoc, ErrorInfo, MatchingInlineAsm);
2974 case Match_InvalidOperand:
2975 WasOriginallyInvalidOperand = true;
2976 break;
2977 case Match_MnemonicFail:
2978 break;
2981 // FIXME: Ideally, we would only attempt suffix matches for things which are
2982 // valid prefixes, and we could just infer the right unambiguous
2983 // type. However, that requires substantially more matcher support than the
2984 // following hack.
2986 // Change the operand to point to a temporary token.
2987 StringRef Base = Op.getToken();
2988 SmallString<16> Tmp;
2989 Tmp += Base;
2990 Tmp += ' ';
2991 Op.setTokenValue(Tmp);
2993 // If this instruction starts with an 'f', then it is a floating point stack
2994 // instruction. These come in up to three forms for 32-bit, 64-bit, and
2995 // 80-bit floating point, which use the suffixes s,l,t respectively.
2997 // Otherwise, we assume that this may be an integer instruction, which comes
2998 // in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
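  // For illustration: a suffix-less "mov $1, (%rax)" is retried here as movb,
  // movw, movl and movq; if more than one form matches, the ambiguity is
  // diagnosed below.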
2999 const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
3001 // Check for the various suffix matches.
3002 uint64_t ErrorInfoIgnore;
3003 uint64_t ErrorInfoMissingFeature = 0; // Init suppresses compiler warnings.
3004 unsigned Match[4];
3006 for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I) {
3007 Tmp.back() = Suffixes[I];
3008 Match[I] = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
3009 MatchingInlineAsm, isParsingIntelSyntax());
3010 // If this returned as a missing feature failure, remember that.
3011 if (Match[I] == Match_MissingFeature)
3012 ErrorInfoMissingFeature = ErrorInfoIgnore;
3015 // Restore the old token.
3016 Op.setTokenValue(Base);
3018 // If exactly one matched, then we treat that as a successful match (and the
3019 // instruction will already have been filled in correctly, since the failing
3020 // matches won't have modified it).
3021 unsigned NumSuccessfulMatches =
3022 std::count(std::begin(Match), std::end(Match), Match_Success);
3023 if (NumSuccessfulMatches == 1) {
3024 Inst.setLoc(IDLoc);
3025 if (!MatchingInlineAsm)
3026 EmitInstruction(Inst, Operands, Out);
3027 Opcode = Inst.getOpcode();
3028 return false;
3031 // Otherwise, the match failed, try to produce a decent error message.
3033 // If we had multiple suffix matches, then identify this as an ambiguous
3034 // match.
3035 if (NumSuccessfulMatches > 1) {
3036 char MatchChars[4];
3037 unsigned NumMatches = 0;
3038 for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I)
3039 if (Match[I] == Match_Success)
3040 MatchChars[NumMatches++] = Suffixes[I];
3042 SmallString<126> Msg;
3043 raw_svector_ostream OS(Msg);
3044 OS << "ambiguous instructions require an explicit suffix (could be ";
3045 for (unsigned i = 0; i != NumMatches; ++i) {
3046 if (i != 0)
3047 OS << ", ";
3048 if (i + 1 == NumMatches)
3049 OS << "or ";
3050 OS << "'" << Base << MatchChars[i] << "'";
3052 OS << ")";
3053 Error(IDLoc, OS.str(), EmptyRange, MatchingInlineAsm);
3054 return true;
3057 // Okay, we know that none of the variants matched successfully.
3059 // If all of the instructions reported an invalid mnemonic, then the original
3060 // mnemonic was invalid.
3061 if (std::count(std::begin(Match), std::end(Match), Match_MnemonicFail) == 4) {
3062 if (!WasOriginallyInvalidOperand) {
3063 return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
3064 Op.getLocRange(), MatchingInlineAsm);
3067 // Recover location info for the operand if we know which was the problem.
3068 if (ErrorInfo != ~0ULL) {
3069 if (ErrorInfo >= Operands.size())
3070 return Error(IDLoc, "too few operands for instruction", EmptyRange,
3071 MatchingInlineAsm);
3073 X86Operand &Operand = (X86Operand &)*Operands[ErrorInfo];
3074 if (Operand.getStartLoc().isValid()) {
3075 SMRange OperandRange = Operand.getLocRange();
3076 return Error(Operand.getStartLoc(), "invalid operand for instruction",
3077 OperandRange, MatchingInlineAsm);
3081 return Error(IDLoc, "invalid operand for instruction", EmptyRange,
3082 MatchingInlineAsm);
3085 // If one instruction matched with a missing feature, report this as a
3086 // missing feature.
3087 if (std::count(std::begin(Match), std::end(Match),
3088 Match_MissingFeature) == 1) {
3089 ErrorInfo = ErrorInfoMissingFeature;
3090 return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeature,
3091 MatchingInlineAsm);
3094 // If one instruction matched with an invalid operand, report this as an
3095 // operand failure.
3096 if (std::count(std::begin(Match), std::end(Match),
3097 Match_InvalidOperand) == 1) {
3098 return Error(IDLoc, "invalid operand for instruction", EmptyRange,
3099 MatchingInlineAsm);
3102 // If all of these were an outright failure, report it in a useless way.
3103 Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
3104 EmptyRange, MatchingInlineAsm);
3105 return true;
3108 bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
3109 OperandVector &Operands,
3110 MCStreamer &Out,
3111 uint64_t &ErrorInfo,
3112 bool MatchingInlineAsm) {
3113 assert(!Operands.empty() && "Unexpected empty operand list!");
3114 X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
3115 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3116 StringRef Mnemonic = Op.getToken();
3117 SMRange EmptyRange = None;
3118 StringRef Base = Op.getToken();
3119 unsigned Prefixes = getPrefixes(Operands);
3121 // First, handle aliases that expand to multiple instructions.
3122 MatchFPUWaitAlias(IDLoc, Op, Operands, Out, MatchingInlineAsm);
3124 MCInst Inst;
3126 if (Prefixes)
3127 Inst.setFlags(Prefixes);
3129 // Find one unsized memory operand, if present.
3130 X86Operand *UnsizedMemOp = nullptr;
3131 for (const auto &Op : Operands) {
3132 X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
3133 if (X86Op->isMemUnsized()) {
3134 UnsizedMemOp = X86Op;
3135 // If we have found an unqualified memory operand,
3136 // break. IA allows only one memory operand.
3137 break;
3141 // Allow some instructions to have implicitly pointer-sized operands. This is
3142 // compatible with gas.
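  // For illustration: "call [rax]" in 64-bit mode is treated as
  // "call qword ptr [rax]".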
3143 if (UnsizedMemOp) {
3144 static const char *const PtrSizedInstrs[] = {"call", "jmp", "push"};
3145 for (const char *Instr : PtrSizedInstrs) {
3146 if (Mnemonic == Instr) {
3147 UnsizedMemOp->Mem.Size = getPointerWidth();
3148 break;
3153 SmallVector<unsigned, 8> Match;
3154 uint64_t ErrorInfoMissingFeature = 0;
3156 // If an unsized push has an immediate operand, we should default to the
3157 // pointer size for the operand size.
3158 if (Mnemonic == "push" && Operands.size() == 2) {
3159 auto *X86Op = static_cast<X86Operand *>(Operands[1].get());
3160 if (X86Op->isImm()) {
3161 // If it's not a constant, fall through and let the remainder take care of it.
3162 const auto *CE = dyn_cast<MCConstantExpr>(X86Op->getImm());
3163 unsigned Size = getPointerWidth();
3164 if (CE &&
3165 (isIntN(Size, CE->getValue()) || isUIntN(Size, CE->getValue()))) {
3166 SmallString<16> Tmp;
3167 Tmp += Base;
3168 Tmp += (is64BitMode())
3169 ? "q"
3170 : (is32BitMode()) ? "l" : (is16BitMode()) ? "w" : " ";
3171 Op.setTokenValue(Tmp);
3172 // Do match in ATT mode to allow explicit suffix usage.
3173 Match.push_back(MatchInstruction(Operands, Inst, ErrorInfo,
3174 MatchingInlineAsm,
3175 false /*isParsingIntelSyntax()*/));
3176 Op.setTokenValue(Base);
3181 // If an unsized memory operand is present, try to match with each memory
3182 // operand size. In Intel assembly, the size is not part of the instruction
3183 // mnemonic.
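  // For illustration: an unsized "inc [rax]" is tried with each candidate
  // operand size; if several sizes yield different opcodes, the ambiguity is
  // reported below.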
3184 if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
3185 static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
3186 for (unsigned Size : MopSizes) {
3187 UnsizedMemOp->Mem.Size = Size;
3188 uint64_t ErrorInfoIgnore;
3189 unsigned LastOpcode = Inst.getOpcode();
3190 unsigned M = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
3191 MatchingInlineAsm, isParsingIntelSyntax());
3192 if (Match.empty() || LastOpcode != Inst.getOpcode())
3193 Match.push_back(M);
3195 // If this returned as a missing feature failure, remember that.
3196 if (Match.back() == Match_MissingFeature)
3197 ErrorInfoMissingFeature = ErrorInfoIgnore;
3200 // Restore the size of the unsized memory operand if we modified it.
3201 UnsizedMemOp->Mem.Size = 0;
3204 // If we haven't matched anything yet, this is not a basic integer or FPU
3205 // operation. There shouldn't be any ambiguity in our mnemonic table, so try
3206 // matching with the unsized operand.
3207 if (Match.empty()) {
3208 Match.push_back(MatchInstruction(
3209 Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax()));
3210 // If this returned as a missing feature failure, remember that.
3211 if (Match.back() == Match_MissingFeature)
3212 ErrorInfoMissingFeature = ErrorInfo;
3215 // Restore the size of the unsized memory operand if we modified it.
3216 if (UnsizedMemOp)
3217 UnsizedMemOp->Mem.Size = 0;
3219 // If it's a bad mnemonic, all results will be the same.
3220 if (Match.back() == Match_MnemonicFail) {
3221 return Error(IDLoc, "invalid instruction mnemonic '" + Mnemonic + "'",
3222 Op.getLocRange(), MatchingInlineAsm);
3225 unsigned NumSuccessfulMatches =
3226 std::count(std::begin(Match), std::end(Match), Match_Success);
3228 // If matching was ambiguous and we had size information from the frontend,
3229 // try again with that. This handles cases like "movzx eax, m8/m16".
3230 if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
3231 UnsizedMemOp->getMemFrontendSize()) {
3232 UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
3233 unsigned M = MatchInstruction(
3234 Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax());
3235 if (M == Match_Success)
3236 NumSuccessfulMatches = 1;
3238 // Add a rewrite that encodes the size information we used from the
3239 // frontend.
3240 InstInfo->AsmRewrites->emplace_back(
3241 AOK_SizeDirective, UnsizedMemOp->getStartLoc(),
3242 /*Len=*/0, UnsizedMemOp->getMemFrontendSize());
3245 // If exactly one matched, then we treat that as a successful match (and the
3246 // instruction will already have been filled in correctly, since the failing
3247 // matches won't have modified it).
3248 if (NumSuccessfulMatches == 1) {
3249 if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
3250 return true;
3251 // Some instructions need post-processing to, for example, tweak which
3252 // encoding is selected. Loop on it while changes happen so the individual
3253 // transformations can chain off each other.
3254 if (!MatchingInlineAsm)
3255 while (processInstruction(Inst, Operands))
3257 Inst.setLoc(IDLoc);
3258 if (!MatchingInlineAsm)
3259 EmitInstruction(Inst, Operands, Out);
3260 Opcode = Inst.getOpcode();
3261 return false;
3262 } else if (NumSuccessfulMatches > 1) {
3263 assert(UnsizedMemOp &&
3264 "multiple matches only possible with unsized memory operands");
3265 return Error(UnsizedMemOp->getStartLoc(),
3266 "ambiguous operand size for instruction '" + Mnemonic + "\'",
3267 UnsizedMemOp->getLocRange());
3270 // If one instruction matched with a missing feature, report this as a
3271 // missing feature.
3272 if (std::count(std::begin(Match), std::end(Match),
3273 Match_MissingFeature) == 1) {
3274 ErrorInfo = ErrorInfoMissingFeature;
3275 return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeature,
3276 MatchingInlineAsm);
3279 // If one instruction matched with an invalid operand, report this as an
3280 // operand failure.
3281 if (std::count(std::begin(Match), std::end(Match),
3282 Match_InvalidOperand) == 1) {
3283 return Error(IDLoc, "invalid operand for instruction", EmptyRange,
3284 MatchingInlineAsm);
3287 // If all of these were an outright failure, report it in a useless way.
3288 return Error(IDLoc, "unknown instruction mnemonic", EmptyRange,
3289 MatchingInlineAsm);
3292 bool X86AsmParser::OmitRegisterFromClobberLists(unsigned RegNo) {
3293 return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo);
bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
  MCAsmParser &Parser = getParser();
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal.startswith(".code"))
    return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
  else if (IDVal.startswith(".att_syntax")) {
    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      if (Parser.getTok().getString() == "prefix")
        Parser.Lex();
      else if (Parser.getTok().getString() == "noprefix")
        return Error(DirectiveID.getLoc(), "'.att_syntax noprefix' is not "
                                           "supported: registers must have a "
                                           "'%' prefix in .att_syntax");
    }
    getParser().setAssemblerDialect(0);
    return false;
  } else if (IDVal.startswith(".intel_syntax")) {
    getParser().setAssemblerDialect(1);
    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      if (Parser.getTok().getString() == "noprefix")
        Parser.Lex();
      else if (Parser.getTok().getString() == "prefix")
        return Error(DirectiveID.getLoc(), "'.intel_syntax prefix' is not "
                                           "supported: registers must not have "
                                           "a '%' prefix in .intel_syntax");
    }
    return false;
  } else if (IDVal == ".even")
    return parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_proc")
    return parseDirectiveFPOProc(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_setframe")
    return parseDirectiveFPOSetFrame(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_pushreg")
    return parseDirectiveFPOPushReg(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_stackalloc")
    return parseDirectiveFPOStackAlloc(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_stackalign")
    return parseDirectiveFPOStackAlign(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_endprologue")
    return parseDirectiveFPOEndPrologue(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_endproc")
    return parseDirectiveFPOEndProc(DirectiveID.getLoc());

  return true;
}
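// Added commentary (illustrative only, not part of the original source):
// sample directive lines accepted by the dispatch above:
//   .intel_syntax noprefix
//   .code32
//   .even
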
/// parseDirectiveEven
///  ::= .even
bool X86AsmParser::parseDirectiveEven(SMLoc L) {
  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return false;

  const MCSection *Section = getStreamer().getCurrentSectionOnly();
  if (!Section) {
    getStreamer().InitSections(false);
    Section = getStreamer().getCurrentSectionOnly();
  }
  if (Section->UseCodeAlign())
    getStreamer().EmitCodeAlignment(2, 0);
  else
    getStreamer().EmitValueToAlignment(2, 0, 1, 0);
  return false;
}
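// Added commentary (not in the original source): in sections that use code
// alignment, ".even" pads to a 2-byte boundary with code fill via
// EmitCodeAlignment; in other sections it pads with single zero bytes via
// EmitValueToAlignment(/*Alignment=*/2, /*Value=*/0, /*ValueSize=*/1,
// /*MaxBytesToEmit=*/0).
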
/// ParseDirectiveCode
///  ::= .code16 | .code32 | .code64
bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Code16GCC = false;
  if (IDVal == ".code16") {
    Parser.Lex();
    if (!is16BitMode()) {
      SwitchMode(X86::Mode16Bit);
      getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
    }
  } else if (IDVal == ".code16gcc") {
    // .code16gcc parses as if in 32-bit mode, but emits code in 16-bit mode.
    Parser.Lex();
    Code16GCC = true;
    if (!is16BitMode()) {
      SwitchMode(X86::Mode16Bit);
      getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
    }
  } else if (IDVal == ".code32") {
    Parser.Lex();
    if (!is32BitMode()) {
      SwitchMode(X86::Mode32Bit);
      getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
    }
  } else if (IDVal == ".code64") {
    Parser.Lex();
    if (!is64BitMode()) {
      SwitchMode(X86::Mode64Bit);
      getParser().getStreamer().EmitAssemblerFlag(MCAF_Code64);
    }
  } else {
    Error(L, "unknown directive " + IDVal);
    return false;
  }

  return false;
}
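// Added illustrative usage (not part of the original source), showing the
// .code16gcc behavior described in the comment above:
//   .code16gcc
//   movl %esp, %ebp   # written with 32-bit syntax, but assembled for 16-bit
//                     # mode, so operand/address-size prefixes are added as
//                     # needed by the encoder
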
// .cv_fpo_proc foo
bool X86AsmParser::parseDirectiveFPOProc(SMLoc L) {
  MCAsmParser &Parser = getParser();
  StringRef ProcName;
  int64_t ParamsSize;
  if (Parser.parseIdentifier(ProcName))
    return Parser.TokError("expected symbol name");
  if (Parser.parseIntToken(ParamsSize, "expected parameter byte count"))
    return true;
  if (!isUIntN(32, ParamsSize))
    return Parser.TokError("parameters size out of range");
  if (Parser.parseEOL("unexpected tokens"))
    return addErrorSuffix(" in '.cv_fpo_proc' directive");
  MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
  return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
}
// .cv_fpo_setframe ebp
bool X86AsmParser::parseDirectiveFPOSetFrame(SMLoc L) {
  MCAsmParser &Parser = getParser();
  unsigned Reg;
  SMLoc DummyLoc;
  if (ParseRegister(Reg, DummyLoc, DummyLoc) ||
      Parser.parseEOL("unexpected tokens"))
    return addErrorSuffix(" in '.cv_fpo_setframe' directive");
  return getTargetStreamer().emitFPOSetFrame(Reg, L);
}
// .cv_fpo_pushreg ebx
bool X86AsmParser::parseDirectiveFPOPushReg(SMLoc L) {
  MCAsmParser &Parser = getParser();
  unsigned Reg;
  SMLoc DummyLoc;
  if (ParseRegister(Reg, DummyLoc, DummyLoc) ||
      Parser.parseEOL("unexpected tokens"))
    return addErrorSuffix(" in '.cv_fpo_pushreg' directive");
  return getTargetStreamer().emitFPOPushReg(Reg, L);
}
// .cv_fpo_stackalloc 20
bool X86AsmParser::parseDirectiveFPOStackAlloc(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t Offset;
  if (Parser.parseIntToken(Offset, "expected offset") ||
      Parser.parseEOL("unexpected tokens"))
    return addErrorSuffix(" in '.cv_fpo_stackalloc' directive");
  return getTargetStreamer().emitFPOStackAlloc(Offset, L);
}
// .cv_fpo_stackalign 8
bool X86AsmParser::parseDirectiveFPOStackAlign(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t Offset;
  if (Parser.parseIntToken(Offset, "expected offset") ||
      Parser.parseEOL("unexpected tokens"))
    return addErrorSuffix(" in '.cv_fpo_stackalign' directive");
  return getTargetStreamer().emitFPOStackAlign(Offset, L);
}
// .cv_fpo_endprologue
bool X86AsmParser::parseDirectiveFPOEndPrologue(SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (Parser.parseEOL("unexpected tokens"))
    return addErrorSuffix(" in '.cv_fpo_endprologue' directive");
  return getTargetStreamer().emitFPOEndPrologue(L);
}
// .cv_fpo_endproc
bool X86AsmParser::parseDirectiveFPOEndProc(SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (Parser.parseEOL("unexpected tokens"))
    return addErrorSuffix(" in '.cv_fpo_endproc' directive");
  return getTargetStreamer().emitFPOEndProc(L);
}
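// Added commentary (illustrative only, not part of the original source): a
// CodeView FPO directive sequence accepted by the parsers above, using the
// operand forms shown in their header comments:
//   .cv_fpo_proc _func 4
//   .cv_fpo_pushreg ebx
//   .cv_fpo_stackalloc 20
//   .cv_fpo_endprologue
//   ...
//   .cv_fpo_endproc
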
// Force static initialization.
extern "C" void LLVMInitializeX86AsmParser() {
  RegisterMCAsmParser<X86AsmParser> X(getTheX86_32Target());
  RegisterMCAsmParser<X86AsmParser> Y(getTheX86_64Target());
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#define GET_SUBTARGET_FEATURE_NAME
#include "X86GenAsmMatcher.inc"