//===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmLexer.h"
#include "llvm/MC/MCAsmParser.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetAsmParser.h"
using namespace llvm;

namespace {
struct X86Operand;

class X86ATTAsmParser : public TargetAsmParser {
  MCAsmParser &Parser;

private:
  bool MatchInstruction(const StringRef &Name,
                        SmallVectorImpl<X86Operand> &Operands,
                        MCInst &Inst);

  MCAsmParser &getParser() const { return Parser; }

  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }

  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  bool ParseRegister(X86Operand &Op);

  bool ParseOperand(X86Operand &Op);

  bool ParseMemOperand(X86Operand &Op);

  /// @name Auto-generated Match Functions
  /// {

  bool MatchRegisterName(const StringRef &Name, unsigned &RegNo);

  /// }

public:
  X86ATTAsmParser(const Target &T, MCAsmParser &_Parser)
    : TargetAsmParser(T), Parser(_Parser) {}

  virtual bool ParseInstruction(const StringRef &Name, MCInst &Inst);
};

} // end anonymous namespace

namespace {

/// X86Operand - Instances of this class represent a parsed X86 machine
/// instruction operand.
struct X86Operand {
  enum {
    Register,
    Immediate,
    Memory
  } Kind;

  union {
    struct {
      unsigned RegNo;
    } Reg;

    struct {
      MCValue Val;
    } Imm;

    struct {
      unsigned SegReg;
      MCValue Disp;
      unsigned BaseReg;
      unsigned IndexReg;
      unsigned Scale;
    } Mem;
  };

  unsigned getReg() const {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  const MCValue &getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCValue &getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  unsigned getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  unsigned getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  unsigned getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }

  static X86Operand CreateReg(unsigned RegNo) {
    X86Operand Res;
    Res.Kind = Register;
    Res.Reg.RegNo = RegNo;
    return Res;
  }
  static X86Operand CreateImm(MCValue Val) {
    X86Operand Res;
    Res.Kind = Immediate;
    Res.Imm.Val = Val;
    return Res;
  }
  static X86Operand CreateMem(unsigned SegReg, MCValue Disp, unsigned BaseReg,
                              unsigned IndexReg, unsigned Scale) {
    // We should never just have a displacement, that would be an immediate.
    assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
    assert((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8) &&
           "Invalid scale!");

    X86Operand Res;
    Res.Kind = Memory;
    Res.Mem.SegReg   = SegReg;
    Res.Mem.Disp     = Disp;
    Res.Mem.BaseReg  = BaseReg;
    Res.Mem.IndexReg = IndexReg;
    Res.Mem.Scale    = Scale;
    return Res;
  }
};

} // end anonymous namespace.

bool X86ATTAsmParser::ParseRegister(X86Operand &Op) {
  const AsmToken &Tok = getLexer().getTok();
  assert(Tok.is(AsmToken::Register) && "Invalid token kind!");

  // FIXME: Validate register for the current architecture; we have to do
  // validation later, so maybe there is no need for this here.
  unsigned RegNo;
  assert(Tok.getString().startswith("%") && "Invalid register name!");
  if (MatchRegisterName(Tok.getString().substr(1), RegNo))
    return Error(Tok.getLoc(), "invalid register name");

  Op = X86Operand::CreateReg(RegNo);
  getLexer().Lex(); // Eat register token.

  return false;
}
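
/// ParseOperand - Parse an instruction operand by dispatching on the current
/// token: a register token, a '$'-prefixed immediate, a '*' marking an
/// indirect call/jump target (e.g. "*%eax"), or anything else as a memory
/// reference.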
bool X86ATTAsmParser::ParseOperand(X86Operand &Op) {
  switch (getLexer().getKind()) {
  default:
    return ParseMemOperand(Op);
  case AsmToken::Register:
    // FIXME: if a segment register, this could either be just the seg reg, or
    // the start of a memory operand.
    return ParseRegister(Op);
  case AsmToken::Dollar: {
    getLexer().Lex(); // Eat the '$'.
    MCValue Val;
    if (getParser().ParseRelocatableExpression(Val))
      return true;
    Op = X86Operand::CreateImm(Val);
    return false;
  }
  case AsmToken::Star:
    getLexer().Lex(); // Eat the star.

    if (getLexer().is(AsmToken::Register)) {
      if (ParseRegister(Op))
        return true;
    } else if (ParseMemOperand(Op))
      return true;

    // FIXME: Note the '*' in the operand for use by the matcher.
    return false;
  }
}

/// ParseMemOperand: segment: disp(basereg, indexreg, scale)
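/// For example: "4(%ebp)", "(%eax,%ebx,4)", "foo(,%ebx,8)". A bare
/// displacement with no parentheses is treated as an immediate, and segment
/// register prefixes are not handled yet (see the FIXME below).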
bool X86ATTAsmParser::ParseMemOperand(X86Operand &Op) {
  // FIXME: If SegReg ':' (e.g. %gs:), eat and remember.
  unsigned SegReg = 0;

  // We have to disambiguate a parenthesized expression "(4+5)" from the start
  // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The
  // only way to do this without lookahead is to eat the ( and see what is
  // after it.
  MCValue Disp = MCValue::get(0, 0, 0);
  if (getLexer().isNot(AsmToken::LParen)) {
    if (getParser().ParseRelocatableExpression(Disp)) return true;

    // After parsing the base expression we could either have a parenthesized
    // memory address or not. If not, return now. If so, eat the (.
    if (getLexer().isNot(AsmToken::LParen)) {
      // Unless we have a segment register, treat this as an immediate.
      if (SegReg)
        Op = X86Operand::CreateMem(SegReg, Disp, 0, 0, 1);
      else
        Op = X86Operand::CreateImm(Disp);
      return false;
    }

    // Eat the '('.
    getLexer().Lex();
  } else {
    // Okay, we have a '('. We don't know yet if this is an expression or a
    // memory operand, so we have to eat the ( to see beyond it.
    getLexer().Lex(); // Eat the '('.

    if (getLexer().is(AsmToken::Register) || getLexer().is(AsmToken::Comma)) {
      // Nothing to do here, fall into the code below with the '(' part of the
      // memory operand consumed.
    } else {
      // It must be a parenthesized expression, parse it now.
      if (getParser().ParseParenRelocatableExpression(Disp))
        return true;

      // After parsing the base expression we could either have a parenthesized
      // memory address or not. If not, return now. If so, eat the (.
      if (getLexer().isNot(AsmToken::LParen)) {
        // Unless we have a segment register, treat this as an immediate.
        if (SegReg)
          Op = X86Operand::CreateMem(SegReg, Disp, 0, 0, 1);
        else
          Op = X86Operand::CreateImm(Disp);
        return false;
      }

      // Eat the '('.
      getLexer().Lex();
    }
  }

  // If we reached here, then we just ate the ( of the memory operand. Process
  // the rest of the memory operand.
  unsigned BaseReg = 0, IndexReg = 0, Scale = 1;

  if (getLexer().is(AsmToken::Register)) {
    if (ParseRegister(Op))
      return true;
    BaseReg = Op.getReg();
  }

  if (getLexer().is(AsmToken::Comma)) {
    getLexer().Lex(); // Eat the comma.

    // Following the comma we should have either an index register, or a scale
    // value. We don't support the latter form, but we want to parse it
    // correctly.
    //
    // Note that even though it would be completely consistent to support
    // syntax like "1(%eax,,1)", the assembler doesn't.
    if (getLexer().is(AsmToken::Register)) {
      if (ParseRegister(Op))
        return true;
      IndexReg = Op.getReg();

      if (getLexer().isNot(AsmToken::RParen)) {
        // Parse the scale amount:
        //  ::= ',' [scale-expression]
        if (getLexer().isNot(AsmToken::Comma))
          return true;
        getLexer().Lex(); // Eat the comma.

        if (getLexer().isNot(AsmToken::RParen)) {
          SMLoc Loc = getLexer().getTok().getLoc();

          int64_t ScaleVal;
          if (getParser().ParseAbsoluteExpression(ScaleVal))
            return true;

          // Validate the scale amount.
          if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8)
            return Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
          Scale = (unsigned)ScaleVal;
        }
      }
    } else if (getLexer().isNot(AsmToken::RParen)) {
      // Otherwise we have the unsupported form of a scale amount without an
      // index register.
      SMLoc Loc = getLexer().getTok().getLoc();

      int64_t Value;
      if (getParser().ParseAbsoluteExpression(Value))
        return true;

      return Error(Loc, "cannot have scale factor without index register");
    }
  }

  // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
  if (getLexer().isNot(AsmToken::RParen))
    return Error(getLexer().getTok().getLoc(),
                 "unexpected token in memory operand");
  getLexer().Lex(); // Eat the ')'.

  Op = X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale);
  return false;
}
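
/// ParseInstruction - Parse the comma-separated operand list of a statement
/// whose mnemonic (Name) has already been consumed, e.g. the "8(%ebp), %eax"
/// in "movl 8(%ebp), %eax", and then try to match the result to an MCInst.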
bool X86ATTAsmParser::ParseInstruction(const StringRef &Name, MCInst &Inst) {
  SmallVector<X86Operand, 3> Operands;

  SMLoc Loc = getLexer().getTok().getLoc();
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    Operands.push_back(X86Operand());
    if (ParseOperand(Operands.back()))
      return true;

    while (getLexer().is(AsmToken::Comma)) {
      getLexer().Lex(); // Eat the comma.

      // Parse and remember the operand.
      Operands.push_back(X86Operand());
      if (ParseOperand(Operands.back()))
        return true;
    }
  }

  if (!MatchInstruction(Name, Operands, Inst))
    return false;

  // FIXME: We should give nicer diagnostics about the exact failure.

  // FIXME: For now we just treat unrecognized instructions as "warnings".
  Warning(Loc, "unrecognized instruction");
  return false;
}

// Force static initialization.
extern "C" void LLVMInitializeX86AsmParser() {
  RegisterAsmParser<X86ATTAsmParser> X(TheX86_32Target);
  RegisterAsmParser<X86ATTAsmParser> Y(TheX86_64Target);
}

// FIXME: These should come from tblgen?

static bool
Match_X86_Op_REG(const X86Operand &Op, MCOperand *MCOps, unsigned NumOps) {
  assert(NumOps == 1 && "Invalid number of ops!");

  // FIXME: Match correct registers.
  if (Op.Kind != X86Operand::Register)
    return true;

  MCOps[0] = MCOperand::CreateReg(Op.getReg());
  return false;
}

static bool
Match_X86_Op_IMM(const X86Operand &Op, MCOperand *MCOps, unsigned NumOps) {
  assert(NumOps == 1 && "Invalid number of ops!");

  // FIXME: We need to check widths.
  if (Op.Kind != X86Operand::Immediate)
    return true;

  MCOps[0] = MCOperand::CreateMCValue(Op.getImm());
  return false;
}

static bool Match_X86_Op_LMEM(const X86Operand &Op,
                              MCOperand *MCOps, unsigned NumMCOps) {
  assert(NumMCOps == 4 && "Invalid number of ops!");

  if (Op.Kind != X86Operand::Memory)
    return true;

  MCOps[0] = MCOperand::CreateReg(Op.getMemBaseReg());
  MCOps[1] = MCOperand::CreateImm(Op.getMemScale());
  MCOps[2] = MCOperand::CreateReg(Op.getMemIndexReg());
  MCOps[3] = MCOperand::CreateMCValue(Op.getMemDisp());

  return false;
}

static bool Match_X86_Op_MEM(const X86Operand &Op,
                             MCOperand *MCOps, unsigned NumMCOps) {
  assert(NumMCOps == 5 && "Invalid number of ops!");

  if (Match_X86_Op_LMEM(Op, MCOps, 4))
    return true;

  MCOps[4] = MCOperand::CreateReg(Op.getMemSegReg());
  return false;
}
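
// For illustration: given a memory operand such as "8(%ebx,%esi,4)", the
// LMEM helper above emits four MCOperands in the order (base, scale, index,
// displacement); the MEM helper appends the segment register as a fifth.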

#define REG(name) \
  static bool Match_X86_Op_##name(const X86Operand &Op, \
                                  MCOperand *MCOps, \
                                  unsigned NumMCOps) { \
    return Match_X86_Op_REG(Op, MCOps, NumMCOps); \
  }

#define IMM(name) \
  static bool Match_X86_Op_##name(const X86Operand &Op, \
                                  MCOperand *MCOps, \
                                  unsigned NumMCOps) { \
    return Match_X86_Op_IMM(Op, MCOps, NumMCOps); \
  }

#define LMEM(name) \
  static bool Match_X86_Op_##name(const X86Operand &Op, \
                                  MCOperand *MCOps, \
                                  unsigned NumMCOps) { \
    return Match_X86_Op_LMEM(Op, MCOps, NumMCOps); \
  }

#define MEM(name) \
  static bool Match_X86_Op_##name(const X86Operand &Op, \
                                  MCOperand *MCOps, \
                                  unsigned NumMCOps) { \
    return Match_X86_Op_MEM(Op, MCOps, NumMCOps); \
  }

#define DUMMY(name) \
  static bool Match_X86_Op_##name(const X86Operand &Op, \
                                  MCOperand *MCOps, \
                                  unsigned NumMCOps) { \
    return true; \
  }

#include "X86GenAsmMatcher.inc"