//===-- X86InstrControl.td - Control Flow Instructions -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 jump, return, call, and related instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
//  Control Flow Instructions.
//

// Return instructions.
//
// The X86retglue return instructions are variadic because we may add ST0 and
// ST1 arguments when returning values on the x87 stack.
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, FPForm = SpecialFP, SchedRW = [WriteJumpLd] in {
  def RET32  : I   <0xC3, RawFrm, (outs), (ins variable_ops),
                    "ret{l}", []>, OpSize32, Requires<[Not64BitMode]>;
  def RET64  : I   <0xC3, RawFrm, (outs), (ins variable_ops),
                    "ret{q}", []>, OpSize32, Requires<[In64BitMode]>;
  def RET16  : I   <0xC3, RawFrm, (outs), (ins),
                    "ret{w}", []>, OpSize16;
  def RETI32 : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
                    "ret{l}\t$amt", []>, OpSize32, Requires<[Not64BitMode]>;
  def RETI64 : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
                    "ret{q}\t$amt", []>, OpSize32, Requires<[In64BitMode]>;
  def RETI16 : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
                    "ret{w}\t$amt", []>, OpSize16;
  def LRET32 : I   <0xCB, RawFrm, (outs), (ins),
                    "{l}ret{l|f}", []>, OpSize32;
  def LRET64 : RI  <0xCB, RawFrm, (outs), (ins),
                    "{l}ret{|f}q", []>, Requires<[In64BitMode]>;
  def LRET16 : I   <0xCB, RawFrm, (outs), (ins),
                    "{l}ret{w|f}", []>, OpSize16;
  def LRETI32 : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
                     "{l}ret{l|f}\t$amt", []>, OpSize32;
  def LRETI64 : RIi16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
                      "{l}ret{|f}q\t$amt", []>, Requires<[In64BitMode]>;
  def LRETI16 : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
                     "{l}ret{w|f}\t$amt", []>, OpSize16;

  // The machine return-from-interrupt instruction. Sometimes we also need to
  // perform a post-epilogue stack adjustment, so codegen emits the pseudo
  // form, which expands to include an SP adjustment when necessary.
  def IRET16 : I   <0xcf, RawFrm, (outs), (ins), "iret{w}", []>,
               OpSize16;
  def IRET32 : I   <0xcf, RawFrm, (outs), (ins), "iret{l|d}", []>, OpSize32;
  def IRET64 : RI  <0xcf, RawFrm, (outs), (ins), "iretq", []>, Requires<[In64BitMode]>;
  let isCodeGenOnly = 1 in
  def IRET : PseudoI<(outs), (ins i32imm:$adj), [(X86iret timm:$adj)]>;
  def RET  : PseudoI<(outs), (ins i32imm:$adj, variable_ops), [(X86retglue timm:$adj)]>;
}
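
// Note: the IRET and RET pseudos above are expanded late (in the target's
// pseudo-expansion pass) into the concrete IRET*/RET*/RETI* encodings; a
// nonzero $adj becomes the immediate of the RETI forms where possible.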

// Unconditional branches.
let isBarrier = 1, isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in {
  def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
                       "jmp\t$dst", [(br bb:$dst)]>;
  let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
    def JMP_2 : Ii16PCRel<0xE9, RawFrm, (outs), (ins brtarget16:$dst),
                          "jmp\t$dst", []>, OpSize16;
    def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget32:$dst),
                          "jmp\t$dst", []>, OpSize32;
  }
}

// Conditional Branches.
let isBranch = 1, isTerminator = 1, Uses = [EFLAGS], SchedRW = [WriteJump],
    isCodeGenOnly = 1, ForceDisassemble = 1 in {
  def JCC_1 : Ii8PCRel <0x70, AddCCFrm, (outs),
                        (ins brtarget8:$dst, ccode:$cond),
                        "j${cond}\t$dst",
                        [(X86brcond bb:$dst, timm:$cond, EFLAGS)]>;
  let hasSideEffects = 0 in {
    def JCC_2 : Ii16PCRel<0x80, AddCCFrm, (outs),
                          (ins brtarget16:$dst, ccode:$cond),
                          "j${cond}\t$dst", []>, OpSize16, TB;
    def JCC_4 : Ii32PCRel<0x80, AddCCFrm, (outs),
                          (ins brtarget32:$dst, ccode:$cond),
                          "j${cond}\t$dst", []>, TB, OpSize32;
  }
}
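
// AddCCFrm folds the condition code into the opcode: 0x70 + cc for the rel8
// forms and 0x0F 0x80 + cc for the rel16/rel32 forms.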

// jcx/jecx/jrcx instructions.
let isBranch = 1, isTerminator = 1, hasSideEffects = 0, SchedRW = [WriteJump] in {
  // These are the 32-bit versions of this instruction for the asmparser. In
  // 32-bit mode, the form with the address-size prefix is jcxz and the
  // unprefixed form is jecxz.
  let Uses = [CX] in
    def JCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
                        "jcxz\t$dst", []>, AdSize16, Requires<[Not64BitMode]>;
  let Uses = [ECX] in
    def JECXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
                         "jecxz\t$dst", []>, AdSize32;

  let Uses = [RCX] in
    def JRCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
                         "jrcxz\t$dst", []>, AdSize64, Requires<[In64BitMode]>;
}

// Indirect branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP16r : I<0xFF, MRM4r, (outs), (ins GR16:$dst), "jmp{w}\t{*}$dst",
                 [(brind GR16:$dst)]>, Requires<[Not64BitMode]>,
                 OpSize16, Sched<[WriteJump]>;
  def JMP16m : I<0xFF, MRM4m, (outs), (ins i16mem:$dst), "jmp{w}\t{*}$dst",
                 [(brind (loadi16 addr:$dst))]>, Requires<[Not64BitMode]>,
                 OpSize16, Sched<[WriteJumpLd]>;

  def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
                 [(brind GR32:$dst)]>, Requires<[Not64BitMode]>,
                 OpSize32, Sched<[WriteJump]>;
  def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
                 [(brind (loadi32 addr:$dst))]>, Requires<[Not64BitMode]>,
                 OpSize32, Sched<[WriteJumpLd]>;

  def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                 [(brind GR64:$dst)]>, Requires<[In64BitMode]>,
                 Sched<[WriteJump]>;
  def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                 [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>,
                 Sched<[WriteJumpLd]>;

  // Win64 wants indirect jumps leaving the function to have a REX_W prefix.
  // These are switched from TAILJMPr/m64_REX in MCInstLower.
  let isCodeGenOnly = 1, hasREX_W = 1 in {
    def JMP64r_REX : I<0xFF, MRM4r, (outs), (ins GR64:$dst),
                       "rex64 jmp{q}\t{*}$dst", []>, Sched<[WriteJump]>;
    let mayLoad = 1 in
    def JMP64m_REX : I<0xFF, MRM4m, (outs), (ins i64mem:$dst),
                       "rex64 jmp{q}\t{*}$dst", []>, Sched<[WriteJumpLd]>;
  }

  // Non-tracking jumps for IBT, use with caution.
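  // The NOTRACK (0x3E) prefix tells CET indirect-branch tracking not to
  // require an ENDBR instruction at the destination of this jump.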
  let isCodeGenOnly = 1 in {
    def JMP16r_NT : I<0xFF, MRM4r, (outs), (ins GR16:$dst), "jmp{w}\t{*}$dst",
                      [(X86NoTrackBrind GR16:$dst)]>, Requires<[Not64BitMode]>,
                      OpSize16, Sched<[WriteJump]>, NOTRACK;
    def JMP16m_NT : I<0xFF, MRM4m, (outs), (ins i16mem:$dst), "jmp{w}\t{*}$dst",
                      [(X86NoTrackBrind (loadi16 addr:$dst))]>,
                      Requires<[Not64BitMode]>, OpSize16, Sched<[WriteJumpLd]>,
                      NOTRACK;

    def JMP32r_NT : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
                      [(X86NoTrackBrind GR32:$dst)]>, Requires<[Not64BitMode]>,
                      OpSize32, Sched<[WriteJump]>, NOTRACK;
    def JMP32m_NT : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
                      [(X86NoTrackBrind (loadi32 addr:$dst))]>,
                      Requires<[Not64BitMode]>, OpSize32, Sched<[WriteJumpLd]>,
                      NOTRACK;

    def JMP64r_NT : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                      [(X86NoTrackBrind GR64:$dst)]>, Requires<[In64BitMode]>,
                      Sched<[WriteJump]>, NOTRACK;
    def JMP64m_NT : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                      [(X86NoTrackBrind (loadi64 addr:$dst))]>,
                      Requires<[In64BitMode]>, Sched<[WriteJumpLd]>, NOTRACK;
  }
}

let Predicates = [Not64BitMode], AsmVariantName = "att" in {
  def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs),
                         (ins i32imm:$off, i16imm:$seg),
                         "ljmp{l}\t$seg, $off", []>,
                         OpSize32, Sched<[WriteJump]>;
  def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs),
                         (ins i16imm:$off, i16imm:$seg),
                         "ljmp{w}\t$seg, $off", []>,
                         OpSize16, Sched<[WriteJump]>;
}

def FARJMP64m : RI<0xFF, MRM5m, (outs), (ins opaquemem:$dst),
                   "ljmp{q}\t{*}$dst", []>, Sched<[WriteJump]>, Requires<[In64BitMode]>;

def FARJMP32m : I<0xFF, MRM5m, (outs), (ins opaquemem:$dst),
                  "{l}jmp{l}\t{*}$dst", []>, OpSize32, Sched<[WriteJumpLd]>;
let AsmVariantName = "att" in
def FARJMP16m : I<0xFF, MRM5m, (outs), (ins opaquemem:$dst),
                  "ljmp{w}\t{*}$dst", []>, OpSize16, Sched<[WriteJumpLd]>;

def JMPABS64i : Ii64<0xA1, RawFrm, (outs), (ins i64imm:$dst), "jmpabs\t$dst", []>,
                ExplicitREX2Prefix, Requires<[In64BitMode]>, Sched<[WriteJumpLd]>;
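
// Note: JMPABS is an APX instruction: a direct jump to a 64-bit absolute
// target. The mandatory REX2 prefix distinguishes it from the legacy 0xA1
// (MOV moffs) encoding.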

// Loop instructions
let isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in {
  def LOOP   : Ii8PCRel<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", []>;
  def LOOPE  : Ii8PCRel<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", []>;
  def LOOPNE : Ii8PCRel<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", []>;
}

//===----------------------------------------------------------------------===//
//  Call Instructions...
//
let isCall = 1 in
  // All calls clobber the non-callee saved registers. ESP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Uses = [ESP, SSP] in {
    def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
                                (outs), (ins i32imm_brtarget:$dst),
                                "call{l}\t$dst", []>, OpSize32,
                                Requires<[Not64BitMode]>, Sched<[WriteJump]>;
    let hasSideEffects = 0 in
      def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
                                  (outs), (ins i16imm_brtarget:$dst),
                                  "call{w}\t$dst", []>, OpSize16,
                                  Requires<[Not64BitMode]>, Sched<[WriteJump]>;
    def CALL16r : I<0xFF, MRM2r, (outs), (ins GR16:$dst),
                    "call{w}\t{*}$dst", [(X86call GR16:$dst)]>,
                    OpSize16, Requires<[Not64BitMode]>, Sched<[WriteJump]>;
    def CALL16m : I<0xFF, MRM2m, (outs), (ins i16mem:$dst),
                    "call{w}\t{*}$dst", [(X86call (loadi16 addr:$dst))]>,
                    OpSize16, Requires<[Not64BitMode,FavorMemIndirectCall]>,
                    Sched<[WriteJumpLd]>;
    def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst),
                    "call{l}\t{*}$dst", [(X86call GR32:$dst)]>, OpSize32,
                    Requires<[Not64BitMode,NotUseIndirectThunkCalls]>,
                    Sched<[WriteJump]>;
    def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst),
                    "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))]>,
                    OpSize32,
                    Requires<[Not64BitMode,FavorMemIndirectCall,
                              NotUseIndirectThunkCalls]>,
                    Sched<[WriteJumpLd]>;

    // Non-tracking calls for IBT, use with caution.
    let isCodeGenOnly = 1 in {
      def CALL16r_NT : I<0xFF, MRM2r, (outs), (ins GR16:$dst),
                         "call{w}\t{*}$dst", [(X86NoTrackCall GR16:$dst)]>,
                         OpSize16, Requires<[Not64BitMode]>, Sched<[WriteJump]>, NOTRACK;
      def CALL16m_NT : I<0xFF, MRM2m, (outs), (ins i16mem:$dst),
                         "call{w}\t{*}$dst", [(X86NoTrackCall (loadi16 addr:$dst))]>,
                         OpSize16, Requires<[Not64BitMode,FavorMemIndirectCall]>,
                         Sched<[WriteJumpLd]>, NOTRACK;
      def CALL32r_NT : I<0xFF, MRM2r, (outs), (ins GR32:$dst),
                         "call{l}\t{*}$dst", [(X86NoTrackCall GR32:$dst)]>,
                         OpSize32, Requires<[Not64BitMode]>, Sched<[WriteJump]>, NOTRACK;
      def CALL32m_NT : I<0xFF, MRM2m, (outs), (ins i32mem:$dst),
                         "call{l}\t{*}$dst", [(X86NoTrackCall (loadi32 addr:$dst))]>,
                         OpSize32, Requires<[Not64BitMode,FavorMemIndirectCall]>,
                         Sched<[WriteJumpLd]>, NOTRACK;
    }

    let Predicates = [Not64BitMode], AsmVariantName = "att" in {
      def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs),
                              (ins i32imm:$off, i16imm:$seg),
                              "lcall{l}\t$seg, $off", []>,
                              OpSize32, Sched<[WriteJump]>;
      def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs),
                              (ins i16imm:$off, i16imm:$seg),
                              "lcall{w}\t$seg, $off", []>,
                              OpSize16, Sched<[WriteJump]>;
    }

    def FARCALL32m : I<0xFF, MRM3m, (outs), (ins opaquemem:$dst),
                       "{l}call{l}\t{*}$dst", []>, OpSize32, Sched<[WriteJumpLd]>;
    def FARCALL16m : I<0xFF, MRM3m, (outs), (ins opaquemem:$dst),
                       "lcall{w}\t{*}$dst", []>, OpSize16, Sched<[WriteJumpLd]>;
  }

// Tail call stuff.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
    isCodeGenOnly = 1, Uses = [ESP, SSP] in {
  def TCRETURNdi : PseudoI<(outs), (ins i32imm_brtarget:$dst, i32imm:$offset),
                           []>, Sched<[WriteJump]>;
  def TCRETURNri : PseudoI<(outs), (ins ptr_rc_tailcall:$dst, i32imm:$offset),
                           []>, Sched<[WriteJump]>;
  let mayLoad = 1 in
  def TCRETURNmi : PseudoI<(outs), (ins i32mem_TC:$dst, i32imm:$offset),
                           []>, Sched<[WriteJumpLd]>;

  def TAILJMPd : PseudoI<(outs), (ins i32imm_brtarget:$dst),
                         []>, Sched<[WriteJump]>;

  def TAILJMPr : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
                         []>, Sched<[WriteJump]>;
  let mayLoad = 1 in
  def TAILJMPm : PseudoI<(outs), (ins i32mem_TC:$dst),
                         []>, Sched<[WriteJumpLd]>;
}
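
// Note: TCRETURN* carry the callee stack-argument delta in $offset; they are
// expanded late into the matching TAILJMP* once the epilogue and any final
// SP adjustment have been emitted.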

// Conditional tail calls are similar to the above, but they are branches
// rather than barriers, and they use EFLAGS.
let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
    isCodeGenOnly = 1, SchedRW = [WriteJump] in
  let Uses = [ESP, EFLAGS, SSP] in {
  def TCRETURNdicc : PseudoI<(outs),
                             (ins i32imm_brtarget:$dst, i32imm:$offset, i32imm:$cond),
                             []>;

  // This gets substituted to a conditional jump instruction in MC lowering.
  def TAILJMPd_CC : PseudoI<(outs), (ins i32imm_brtarget:$dst, i32imm:$cond), []>;
}

//===----------------------------------------------------------------------===//
//  Call Instructions (64-bit)...
//

// RSP is marked as a use to prevent stack-pointer assignments that appear
// immediately before calls from potentially appearing dead. Uses for argument
// registers are added manually.
let isCall = 1, Uses = [RSP, SSP], SchedRW = [WriteJump] in {
  // NOTE: this pattern doesn't match "X86call imm", because we do not know
  // that the offset between an arbitrary immediate and the call will fit in
  // the 32-bit pcrel field that we have.
  def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
                                (outs), (ins i64i32imm_brtarget:$dst),
                                "call{q}\t$dst", []>, OpSize32,
                                Requires<[In64BitMode]>;
  def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst),
                  "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
                  Requires<[In64BitMode,NotUseIndirectThunkCalls]>;
  def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst),
                  "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                  Requires<[In64BitMode,FavorMemIndirectCall,
                            NotUseIndirectThunkCalls]>;
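
  // Note: direct calls to known symbols are instead selected by separate
  // patterns (tglobaladdr/texternalsym -> CALL64pcrel32), sidestepping the
  // X86call-immediate case excluded above.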

  // Non-tracking calls for IBT, use with caution.
  let isCodeGenOnly = 1 in {
    def CALL64r_NT : I<0xFF, MRM2r, (outs), (ins GR64:$dst),
                       "call{q}\t{*}$dst", [(X86NoTrackCall GR64:$dst)]>,
                       Requires<[In64BitMode]>, NOTRACK;
    def CALL64m_NT : I<0xFF, MRM2m, (outs), (ins i64mem:$dst),
                       "call{q}\t{*}$dst",
                       [(X86NoTrackCall (loadi64 addr:$dst))]>,
                       Requires<[In64BitMode,FavorMemIndirectCall]>, NOTRACK;
  }
}

def FARCALL64m : RI<0xFF, MRM3m, (outs), (ins opaquemem:$dst),
                    "lcall{q}\t{*}$dst", []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
    isCodeGenOnly = 1, Uses = [RSP, SSP] in {
  def TCRETURNdi64 : PseudoI<(outs),
                             (ins i64i32imm_brtarget:$dst, i32imm:$offset),
                             []>, Sched<[WriteJump]>;
  def TCRETURNri64 : PseudoI<(outs),
                             (ins ptr_rc_tailcall:$dst, i32imm:$offset),
                             []>, Sched<[WriteJump]>;
  let mayLoad = 1 in
  def TCRETURNmi64 : PseudoI<(outs),
                             (ins i64mem_TC:$dst, i32imm:$offset),
                             []>, Sched<[WriteJumpLd]>;

  def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_brtarget:$dst),
                           []>, Sched<[WriteJump]>;

  def TAILJMPr64 : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
                           []>, Sched<[WriteJump]>;

  let mayLoad = 1 in
  def TAILJMPm64 : PseudoI<(outs), (ins i64mem_TC:$dst),
                           []>, Sched<[WriteJumpLd]>;

  // Win64 wants indirect jumps leaving the function to have a REX_W prefix.
  let hasREX_W = 1 in {
    def TAILJMPr64_REX : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
                                 []>, Sched<[WriteJump]>;

    let mayLoad = 1 in
    def TAILJMPm64_REX : PseudoI<(outs), (ins i64mem_TC:$dst),
                                 []>, Sched<[WriteJumpLd]>;
  }
}

let isPseudo = 1, isCall = 1, isCodeGenOnly = 1,
    Uses = [RSP, SSP],
    usesCustomInserter = 1,
    SchedRW = [WriteJump] in {
  def INDIRECT_THUNK_CALL32 :
    PseudoI<(outs), (ins GR32:$dst), [(X86call GR32:$dst)]>,
            Requires<[Not64BitMode,UseIndirectThunkCalls]>;

  def INDIRECT_THUNK_CALL64 :
    PseudoI<(outs), (ins GR64:$dst), [(X86call GR64:$dst)]>,
            Requires<[In64BitMode,UseIndirectThunkCalls]>;

  // Indirect thunk variant of indirect tail calls.
  let isTerminator = 1, isReturn = 1, isBarrier = 1 in {
    def INDIRECT_THUNK_TCRETURN64 :
      PseudoI<(outs), (ins GR64:$dst, i32imm:$offset), []>;
    def INDIRECT_THUNK_TCRETURN32 :
      PseudoI<(outs), (ins GR32:$dst, i32imm:$offset), []>;
  }
}
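
// Note: with UseIndirectThunkCalls (e.g. retpoline mitigations), the custom
// inserter routes the target through a thunk such as __llvm_retpoline_r11
// instead of emitting a plain indirect call.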

let isPseudo = 1, isCall = 1, isCodeGenOnly = 1,
    Uses = [RSP, SSP],
    SchedRW = [WriteJump] in {
  def CALL64m_RVMARKER :
    PseudoI<(outs), (ins i64imm:$rvfunc, i64mem:$dst),
            [(X86call_rvmarker tglobaladdr:$rvfunc, (loadi64 addr:$dst))]>,
            Requires<[In64BitMode]>;

  def CALL64r_RVMARKER :
    PseudoI<(outs), (ins i64imm:$rvfunc, GR64:$dst),
            [(X86call_rvmarker tglobaladdr:$rvfunc, GR64:$dst)]>,
            Requires<[In64BitMode]>;

  def CALL64pcrel32_RVMARKER :
    PseudoI<(outs), (ins i64imm:$rvfunc, i64i32imm_brtarget:$dst), []>,
            Requires<[In64BitMode]>;
}
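
// Note: the _RVMARKER forms lower to the call itself followed by the marker
// instruction and a call to the attached function ($rvfunc, e.g.
// objc_retainAutoreleasedReturnValue), as required by ARC's attachedcall
// operand bundles.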

// Conditional tail calls are similar to the above, but they are branches
// rather than barriers, and they use EFLAGS.
let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
    isCodeGenOnly = 1, SchedRW = [WriteJump] in
  let Uses = [RSP, EFLAGS, SSP] in {
  def TCRETURNdi64cc : PseudoI<(outs),
                               (ins i64i32imm_brtarget:$dst, i32imm:$offset,
                                i32imm:$cond), []>;

  // This gets substituted to a conditional jump instruction in MC lowering.
  def TAILJMPd64_CC : PseudoI<(outs),
                              (ins i64i32imm_brtarget:$dst, i32imm:$cond), []>;
}