1 //===-- SystemZInstrInfo.td - General SystemZ instructions ----*- tblgen-*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Stack allocation
//===----------------------------------------------------------------------===//
13 // The callseq_start node requires the hasSideEffects flag, even though these
14 // instructions are noops on SystemZ.
15 let hasNoSchedulingInfo = 1, hasSideEffects = 1 in {
16 def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
17 [(callseq_start timm:$amt1, timm:$amt2)]>;
18 def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
19 [(callseq_end timm:$amt1, timm:$amt2)]>;
22 // Takes as input the value of the stack pointer after a dynamic allocation
23 // has been made. Sets the output to the address of the dynamically-
24 // allocated area itself, skipping the outgoing arguments.
26 // This expands to an LA or LAY instruction. We restrict the offset
27 // to the range of LA and keep the LAY range in reserve for when
28 // the size of the outgoing arguments is added.
29 def ADJDYNALLOC : Pseudo<(outs GR64:$dst), (ins dynalloc12only:$src),
30 [(set GR64:$dst, dynalloc12only:$src)]>;
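// (Illustrative only: after frame lowering this pseudo typically ends up as
// an LA such as "la %r2, <offset>(%r15)", with the offset skipping the
// outgoing-argument area; the register and offset here are hypothetical.)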
33 //===----------------------------------------------------------------------===//
34 // Branch instructions
35 //===----------------------------------------------------------------------===//
37 // Conditional branches.
38 let isBranch = 1, isTerminator = 1, Uses = [CC] in {
39 // It's easier for LLVM to handle these branches in their raw BRC/BRCL form
40 // with the condition-code mask being the first operand. It seems friendlier
41 // to use mnemonic forms like JE and JLH when writing out the assembly though.
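// For example, a BRC whose mask accepts only CC 0 (the "equal" outcome of a
// comparison) is printed as "je", and the corresponding BRCL as "jge".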
42 let isCodeGenOnly = 1 in {
43 // An assembler extended mnemonic for BRC.
44 def BRC : CondBranchRI <"j#", 0xA74, z_br_ccmask>;
45 // An assembler extended mnemonic for BRCL. (The extension is "G"
46 // rather than "L" because "JL" is "Jump if Less".)
47 def BRCL : CondBranchRIL<"jg#", 0xC04>;
48 let isIndirectBranch = 1 in {
49 def BC : CondBranchRX<"b#", 0x47>;
50 def BCR : CondBranchRR<"b#r", 0x07>;
51 def BIC : CondBranchRXY<"bi#", 0xe347>,
52 Requires<[FeatureMiscellaneousExtensions2]>;
56 // Allow using the raw forms directly from the assembler (and occasional
57 // special code generation needs) as well.
58 def BRCAsm : AsmCondBranchRI <"brc", 0xA74>;
59 def BRCLAsm : AsmCondBranchRIL<"brcl", 0xC04>;
60 let isIndirectBranch = 1 in {
61 def BCAsm : AsmCondBranchRX<"bc", 0x47>;
62 def BCRAsm : AsmCondBranchRR<"bcr", 0x07>;
63 def BICAsm : AsmCondBranchRXY<"bic", 0xe347>,
64 Requires<[FeatureMiscellaneousExtensions2]>;
67 // Define AsmParser extended mnemonics for each general condition-code mask
68 // (integer or floating-point)
69 foreach V = [ "E", "NE", "H", "NH", "L", "NL", "HE", "NHE", "LE", "NLE",
70 "Z", "NZ", "P", "NP", "M", "NM", "LH", "NLH", "O", "NO" ] in {
71 def JAsm#V : FixedCondBranchRI <CV<V>, "j#", 0xA74>;
72 def JGAsm#V : FixedCondBranchRIL<CV<V>, "jg#", 0xC04>;
73 let isIndirectBranch = 1 in {
74 def BAsm#V : FixedCondBranchRX <CV<V>, "b#", 0x47>;
75 def BRAsm#V : FixedCondBranchRR <CV<V>, "b#r", 0x07>;
76 def BIAsm#V : FixedCondBranchRXY<CV<V>, "bi#", 0xe347>,
77 Requires<[FeatureMiscellaneousExtensions2]>;
82 // Unconditional branches. These are in fact simply variants of the
83 // conditional branches with the condition mask set to "always".
84 let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
85 def J : FixedCondBranchRI <CondAlways, "j", 0xA74, br>;
86 def JG : FixedCondBranchRIL<CondAlways, "jg", 0xC04>;
87 let isIndirectBranch = 1 in {
88 def B : FixedCondBranchRX<CondAlways, "b", 0x47>;
89 def BR : FixedCondBranchRR<CondAlways, "br", 0x07, brind>;
90 def BI : FixedCondBranchRXY<CondAlways, "bi", 0xe347, brind>,
91 Requires<[FeatureMiscellaneousExtensions2]>;
95 // NOPs. These are again variants of the conditional branches,
96 // with the condition mask set to "never".
97 def NOP : InstAlias<"nop\t$XBD", (BCAsm 0, bdxaddr12only:$XBD), 0>;
98 def NOPR : InstAlias<"nopr\t$R", (BCRAsm 0, GR64:$R), 0>;
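// For example, "nop 0" is accepted and encoded as "bc 0, 0", and "nopr %r7"
// as "bcr 0, %r7" -- branches whose condition mask can never be satisfied.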
100 // Fused compare-and-branch instructions.
102 // These instructions do not use or clobber the condition codes.
103 // We nevertheless pretend that the relative compare-and-branch
104 // instructions clobber CC, so that we can lower them to separate
105 // comparisons and BRCLs if the branch ends up being out of range.
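// For example, an out-of-range "crj" can later be rewritten as a "cr" compare
// followed by a "jg<cond>" (BRCL), and that split sequence does clobber CC.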
106 let isBranch = 1, isTerminator = 1 in {
107 // As for normal branches, we handle these instructions internally in
// their raw CRJ-like form, but use extended mnemonics like CRJE when writing
109 // them out. Using the *Pair multiclasses, we also create the raw forms.
111 defm CRJ : CmpBranchRIEbPair<"crj", 0xEC76, GR32>;
112 defm CGRJ : CmpBranchRIEbPair<"cgrj", 0xEC64, GR64>;
113 defm CIJ : CmpBranchRIEcPair<"cij", 0xEC7E, GR32, imm32sx8>;
114 defm CGIJ : CmpBranchRIEcPair<"cgij", 0xEC7C, GR64, imm64sx8>;
115 defm CLRJ : CmpBranchRIEbPair<"clrj", 0xEC77, GR32>;
116 defm CLGRJ : CmpBranchRIEbPair<"clgrj", 0xEC65, GR64>;
117 defm CLIJ : CmpBranchRIEcPair<"clij", 0xEC7F, GR32, imm32zx8>;
118 defm CLGIJ : CmpBranchRIEcPair<"clgij", 0xEC7D, GR64, imm64zx8>;
120 let isIndirectBranch = 1 in {
121 defm CRB : CmpBranchRRSPair<"crb", 0xECF6, GR32>;
122 defm CGRB : CmpBranchRRSPair<"cgrb", 0xECE4, GR64>;
123 defm CIB : CmpBranchRISPair<"cib", 0xECFE, GR32, imm32sx8>;
124 defm CGIB : CmpBranchRISPair<"cgib", 0xECFC, GR64, imm64sx8>;
125 defm CLRB : CmpBranchRRSPair<"clrb", 0xECF7, GR32>;
126 defm CLGRB : CmpBranchRRSPair<"clgrb", 0xECE5, GR64>;
127 defm CLIB : CmpBranchRISPair<"clib", 0xECFF, GR32, imm32zx8>;
128 defm CLGIB : CmpBranchRISPair<"clgib", 0xECFD, GR64, imm64zx8>;
131 // Define AsmParser mnemonics for each integer condition-code mask.
132 foreach V = [ "E", "H", "L", "HE", "LE", "LH",
133 "NE", "NH", "NL", "NHE", "NLE", "NLH" ] in {
135 def CRJAsm#V : FixedCmpBranchRIEb<ICV<V>, "crj", 0xEC76, GR32>;
136 def CGRJAsm#V : FixedCmpBranchRIEb<ICV<V>, "cgrj", 0xEC64, GR64>;
def CIJAsm#V : FixedCmpBranchRIEc<ICV<V>, "cij", 0xEC7E, GR32, imm32sx8>;
def CGIJAsm#V : FixedCmpBranchRIEc<ICV<V>, "cgij", 0xEC7C, GR64, imm64sx8>;
141 def CLRJAsm#V : FixedCmpBranchRIEb<ICV<V>, "clrj", 0xEC77, GR32>;
142 def CLGRJAsm#V : FixedCmpBranchRIEb<ICV<V>, "clgrj", 0xEC65, GR64>;
def CLIJAsm#V : FixedCmpBranchRIEc<ICV<V>, "clij", 0xEC7F, GR32, imm32zx8>;
def CLGIJAsm#V : FixedCmpBranchRIEc<ICV<V>, "clgij", 0xEC7D, GR64, imm64zx8>;
148 let isIndirectBranch = 1 in {
149 def CRBAsm#V : FixedCmpBranchRRS<ICV<V>, "crb", 0xECF6, GR32>;
150 def CGRBAsm#V : FixedCmpBranchRRS<ICV<V>, "cgrb", 0xECE4, GR64>;
def CIBAsm#V : FixedCmpBranchRIS<ICV<V>, "cib", 0xECFE, GR32, imm32sx8>;
def CGIBAsm#V : FixedCmpBranchRIS<ICV<V>, "cgib", 0xECFC, GR64, imm64sx8>;
155 def CLRBAsm#V : FixedCmpBranchRRS<ICV<V>, "clrb", 0xECF7, GR32>;
156 def CLGRBAsm#V : FixedCmpBranchRRS<ICV<V>, "clgrb", 0xECE5, GR64>;
def CLIBAsm#V : FixedCmpBranchRIS<ICV<V>, "clib", 0xECFF, GR32, imm32zx8>;
def CLGIBAsm#V : FixedCmpBranchRIS<ICV<V>, "clgib", 0xECFD, GR64, imm64zx8>;
165 // Decrement a register and branch if it is nonzero. These don't clobber CC,
166 // but we might need to split long relative branches into sequences that do.
167 let isBranch = 1, isTerminator = 1 in {
169 def BRCT : BranchUnaryRI<"brct", 0xA76, GR32>;
170 def BRCTG : BranchUnaryRI<"brctg", 0xA77, GR64>;
172 // This doesn't need to clobber CC since we never need to split it.
173 def BRCTH : BranchUnaryRIL<"brcth", 0xCC6, GRH32>,
174 Requires<[FeatureHighWord]>;
def BCT : BranchUnaryRX<"bct", 0x46, GR32>;
177 def BCTR : BranchUnaryRR<"bctr", 0x06, GR32>;
178 def BCTG : BranchUnaryRXY<"bctg", 0xE346, GR64>;
179 def BCTGR : BranchUnaryRRE<"bctgr", 0xB946, GR64>;
182 let isBranch = 1, isTerminator = 1 in {
184 def BRXH : BranchBinaryRSI<"brxh", 0x84, GR32>;
185 def BRXLE : BranchBinaryRSI<"brxle", 0x85, GR32>;
186 def BRXHG : BranchBinaryRIEe<"brxhg", 0xEC44, GR64>;
187 def BRXLG : BranchBinaryRIEe<"brxlg", 0xEC45, GR64>;
189 def BXH : BranchBinaryRS<"bxh", 0x86, GR32>;
190 def BXLE : BranchBinaryRS<"bxle", 0x87, GR32>;
191 def BXHG : BranchBinaryRSY<"bxhg", 0xEB44, GR64>;
192 def BXLEG : BranchBinaryRSY<"bxleg", 0xEB45, GR64>;
//===----------------------------------------------------------------------===//
// Trap instructions
//===----------------------------------------------------------------------===//
199 // Unconditional trap.
200 let hasCtrlDep = 1, hasSideEffects = 1 in
201 def Trap : Alias<4, (outs), (ins), [(trap)]>;
204 let hasCtrlDep = 1, Uses = [CC], hasSideEffects = 1 in
205 def CondTrap : Alias<4, (outs), (ins cond4:$valid, cond4:$R1), []>;
207 // Fused compare-and-trap instructions.
208 let hasCtrlDep = 1, hasSideEffects = 1 in {
209 // These patterns work the same way as for compare-and-branch.
210 defm CRT : CmpBranchRRFcPair<"crt", 0xB972, GR32>;
211 defm CGRT : CmpBranchRRFcPair<"cgrt", 0xB960, GR64>;
212 defm CLRT : CmpBranchRRFcPair<"clrt", 0xB973, GR32>;
213 defm CLGRT : CmpBranchRRFcPair<"clgrt", 0xB961, GR64>;
214 defm CIT : CmpBranchRIEaPair<"cit", 0xEC72, GR32, imm32sx16>;
215 defm CGIT : CmpBranchRIEaPair<"cgit", 0xEC70, GR64, imm64sx16>;
216 defm CLFIT : CmpBranchRIEaPair<"clfit", 0xEC73, GR32, imm32zx16>;
217 defm CLGIT : CmpBranchRIEaPair<"clgit", 0xEC71, GR64, imm64zx16>;
218 let Predicates = [FeatureMiscellaneousExtensions] in {
219 defm CLT : CmpBranchRSYbPair<"clt", 0xEB23, GR32>;
220 defm CLGT : CmpBranchRSYbPair<"clgt", 0xEB2B, GR64>;
223 foreach V = [ "E", "H", "L", "HE", "LE", "LH",
224 "NE", "NH", "NL", "NHE", "NLE", "NLH" ] in {
225 def CRTAsm#V : FixedCmpBranchRRFc<ICV<V>, "crt", 0xB972, GR32>;
226 def CGRTAsm#V : FixedCmpBranchRRFc<ICV<V>, "cgrt", 0xB960, GR64>;
227 def CLRTAsm#V : FixedCmpBranchRRFc<ICV<V>, "clrt", 0xB973, GR32>;
228 def CLGRTAsm#V : FixedCmpBranchRRFc<ICV<V>, "clgrt", 0xB961, GR64>;
def CITAsm#V : FixedCmpBranchRIEa<ICV<V>, "cit", 0xEC72, GR32, imm32sx16>;
def CGITAsm#V : FixedCmpBranchRIEa<ICV<V>, "cgit", 0xEC70, GR64, imm64sx16>;
def CLFITAsm#V : FixedCmpBranchRIEa<ICV<V>, "clfit", 0xEC73, GR32, imm32zx16>;
def CLGITAsm#V : FixedCmpBranchRIEa<ICV<V>, "clgit", 0xEC71, GR64, imm64zx16>;
237 let Predicates = [FeatureMiscellaneousExtensions] in {
238 def CLTAsm#V : FixedCmpBranchRSYb<ICV<V>, "clt", 0xEB23, GR32>;
239 def CLGTAsm#V : FixedCmpBranchRSYb<ICV<V>, "clgt", 0xEB2B, GR64>;
244 //===----------------------------------------------------------------------===//
245 // Call and return instructions
246 //===----------------------------------------------------------------------===//
248 // Define the general form of the call instructions for the asm parser.
249 // These instructions don't hard-code %r14 as the return address register.
250 let isCall = 1, Defs = [CC] in {
251 def BRAS : CallRI <"bras", 0xA75>;
252 def BRASL : CallRIL<"brasl", 0xC05>;
253 def BAS : CallRX <"bas", 0x4D>;
254 def BASR : CallRR <"basr", 0x0D>;
258 let isCall = 1, Defs = [R14D, CC], Uses = [FPC] in {
259 def CallBRASL : Alias<6, (outs), (ins pcrel32:$I2, variable_ops),
260 [(z_call pcrel32:$I2)]>;
261 def CallBASR : Alias<2, (outs), (ins ADDR64:$R2, variable_ops),
262 [(z_call ADDR64:$R2)]>;
265 // TLS calls. These will be lowered into a call to __tls_get_offset,
266 // with an extra relocation specifying the TLS symbol.
267 let isCall = 1, Defs = [R14D, CC] in {
268 def TLS_GDCALL : Alias<6, (outs), (ins tlssym:$I2, variable_ops),
269 [(z_tls_gdcall tglobaltlsaddr:$I2)]>;
270 def TLS_LDCALL : Alias<6, (outs), (ins tlssym:$I2, variable_ops),
271 [(z_tls_ldcall tglobaltlsaddr:$I2)]>;
274 // Sibling calls. Indirect sibling calls must be via R1, since R2 upwards
275 // are argument registers and since branching to R0 is a no-op.
276 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
277 def CallJG : Alias<6, (outs), (ins pcrel32:$I2),
278 [(z_sibcall pcrel32:$I2)]>;
280 def CallBR : Alias<2, (outs), (ins), [(z_sibcall R1D)]>;
283 // Conditional sibling calls.
284 let CCMaskFirst = 1, isCall = 1, isTerminator = 1, isReturn = 1 in {
def CallBRCL : Alias<6, (outs), (ins cond4:$valid, cond4:$R1, pcrel32:$I2), []>;
288 def CallBCR : Alias<2, (outs), (ins cond4:$valid, cond4:$R1), []>;
291 // Fused compare and conditional sibling calls.
292 let isCall = 1, isTerminator = 1, isReturn = 1, Uses = [R1D] in {
293 def CRBCall : Alias<6, (outs), (ins GR32:$R1, GR32:$R2, cond4:$M3), []>;
294 def CGRBCall : Alias<6, (outs), (ins GR64:$R1, GR64:$R2, cond4:$M3), []>;
295 def CIBCall : Alias<6, (outs), (ins GR32:$R1, imm32sx8:$I2, cond4:$M3), []>;
296 def CGIBCall : Alias<6, (outs), (ins GR64:$R1, imm64sx8:$I2, cond4:$M3), []>;
297 def CLRBCall : Alias<6, (outs), (ins GR32:$R1, GR32:$R2, cond4:$M3), []>;
298 def CLGRBCall : Alias<6, (outs), (ins GR64:$R1, GR64:$R2, cond4:$M3), []>;
299 def CLIBCall : Alias<6, (outs), (ins GR32:$R1, imm32zx8:$I2, cond4:$M3), []>;
300 def CLGIBCall : Alias<6, (outs), (ins GR64:$R1, imm64zx8:$I2, cond4:$M3), []>;
303 // A return instruction (br %r14).
304 let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
305 def Return : Alias<2, (outs), (ins), [(z_retflag)]>;
307 // A conditional return instruction (bcr <cond>, %r14).
let isReturn = 1, isTerminator = 1, hasCtrlDep = 1, CCMaskFirst = 1,
    Uses = [CC] in
309 def CondReturn : Alias<2, (outs), (ins cond4:$valid, cond4:$R1), []>;
311 // Fused compare and conditional returns.
312 let isReturn = 1, isTerminator = 1, hasCtrlDep = 1 in {
313 def CRBReturn : Alias<6, (outs), (ins GR32:$R1, GR32:$R2, cond4:$M3), []>;
314 def CGRBReturn : Alias<6, (outs), (ins GR64:$R1, GR64:$R2, cond4:$M3), []>;
315 def CIBReturn : Alias<6, (outs), (ins GR32:$R1, imm32sx8:$I2, cond4:$M3), []>;
316 def CGIBReturn : Alias<6, (outs), (ins GR64:$R1, imm64sx8:$I2, cond4:$M3), []>;
317 def CLRBReturn : Alias<6, (outs), (ins GR32:$R1, GR32:$R2, cond4:$M3), []>;
318 def CLGRBReturn : Alias<6, (outs), (ins GR64:$R1, GR64:$R2, cond4:$M3), []>;
319 def CLIBReturn : Alias<6, (outs), (ins GR32:$R1, imm32zx8:$I2, cond4:$M3), []>;
320 def CLGIBReturn : Alias<6, (outs), (ins GR64:$R1, imm64zx8:$I2, cond4:$M3), []>;
323 //===----------------------------------------------------------------------===//
324 // Select instructions
325 //===----------------------------------------------------------------------===//
327 def Select32 : SelectWrapper<i32, GR32>,
328 Requires<[FeatureNoLoadStoreOnCond]>;
329 def Select64 : SelectWrapper<i64, GR64>,
330 Requires<[FeatureNoLoadStoreOnCond]>;
332 // We don't define 32-bit Mux stores if we don't have STOCFH, because the
333 // low-only STOC should then always be used if possible.
334 defm CondStore8Mux : CondStores<GRX32, nonvolatile_truncstorei8,
335 nonvolatile_anyextloadi8, bdxaddr20only>,
336 Requires<[FeatureHighWord]>;
337 defm CondStore16Mux : CondStores<GRX32, nonvolatile_truncstorei16,
338 nonvolatile_anyextloadi16, bdxaddr20only>,
339 Requires<[FeatureHighWord]>;
340 defm CondStore32Mux : CondStores<GRX32, simple_store,
341 simple_load, bdxaddr20only>,
342 Requires<[FeatureLoadStoreOnCond2]>;
343 defm CondStore8 : CondStores<GR32, nonvolatile_truncstorei8,
344 nonvolatile_anyextloadi8, bdxaddr20only>;
345 defm CondStore16 : CondStores<GR32, nonvolatile_truncstorei16,
346 nonvolatile_anyextloadi16, bdxaddr20only>;
347 defm CondStore32 : CondStores<GR32, simple_store,
348 simple_load, bdxaddr20only>;
350 defm : CondStores64<CondStore8, CondStore8Inv, nonvolatile_truncstorei8,
351 nonvolatile_anyextloadi8, bdxaddr20only>;
352 defm : CondStores64<CondStore16, CondStore16Inv, nonvolatile_truncstorei16,
353 nonvolatile_anyextloadi16, bdxaddr20only>;
354 defm : CondStores64<CondStore32, CondStore32Inv, nonvolatile_truncstorei32,
355 nonvolatile_anyextloadi32, bdxaddr20only>;
356 defm CondStore64 : CondStores<GR64, simple_store,
357 simple_load, bdxaddr20only>;
//===----------------------------------------------------------------------===//
// Move instructions
//===----------------------------------------------------------------------===//
364 def LR : UnaryRR <"lr", 0x18, null_frag, GR32, GR32>;
365 def LGR : UnaryRRE<"lgr", 0xB904, null_frag, GR64, GR64>;
367 let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
368 def LTR : UnaryRR <"ltr", 0x12, null_frag, GR32, GR32>;
369 def LTGR : UnaryRRE<"ltgr", 0xB902, null_frag, GR64, GR64>;
372 let usesCustomInserter = 1, hasNoSchedulingInfo = 1 in
373 def PAIR128 : Pseudo<(outs GR128:$dst), (ins GR64:$hi, GR64:$lo), []>;
376 let isAsCheapAsAMove = 1, isMoveImm = 1, isReMaterializable = 1 in {
377 // 16-bit sign-extended immediates. LHIMux expands to LHI or IIHF,
// depending on the choice of register.
379 def LHIMux : UnaryRIPseudo<bitconvert, GRX32, imm32sx16>,
380 Requires<[FeatureHighWord]>;
381 def LHI : UnaryRI<"lhi", 0xA78, bitconvert, GR32, imm32sx16>;
382 def LGHI : UnaryRI<"lghi", 0xA79, bitconvert, GR64, imm64sx16>;
384 // Other 16-bit immediates.
385 def LLILL : UnaryRI<"llill", 0xA5F, bitconvert, GR64, imm64ll16>;
386 def LLILH : UnaryRI<"llilh", 0xA5E, bitconvert, GR64, imm64lh16>;
387 def LLIHL : UnaryRI<"llihl", 0xA5D, bitconvert, GR64, imm64hl16>;
388 def LLIHH : UnaryRI<"llihh", 0xA5C, bitconvert, GR64, imm64hh16>;
390 // 32-bit immediates.
391 def LGFI : UnaryRIL<"lgfi", 0xC01, bitconvert, GR64, imm64sx32>;
392 def LLILF : UnaryRIL<"llilf", 0xC0F, bitconvert, GR64, imm64lf32>;
393 def LLIHF : UnaryRIL<"llihf", 0xC0E, bitconvert, GR64, imm64hf32>;
397 let canFoldAsLoad = 1, SimpleBDXLoad = 1, mayLoad = 1 in {
398 // Expands to L, LY or LFH, depending on the choice of register.
399 def LMux : UnaryRXYPseudo<"l", load, GRX32, 4>,
400 Requires<[FeatureHighWord]>;
401 defm L : UnaryRXPair<"l", 0x58, 0xE358, load, GR32, 4>;
402 def LFH : UnaryRXY<"lfh", 0xE3CA, load, GRH32, 4>,
403 Requires<[FeatureHighWord]>;
404 def LG : UnaryRXY<"lg", 0xE304, load, GR64, 8>;
406 // These instructions are split after register allocation, so we don't
407 // want a custom inserter.
408 let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
409 def L128 : Pseudo<(outs GR128:$dst), (ins bdxaddr20only128:$src),
410 [(set GR128:$dst, (load bdxaddr20only128:$src))]>;
413 let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
414 def LT : UnaryRXY<"lt", 0xE312, load, GR32, 4>;
415 def LTG : UnaryRXY<"ltg", 0xE302, load, GR64, 8>;
418 let canFoldAsLoad = 1 in {
419 def LRL : UnaryRILPC<"lrl", 0xC4D, aligned_load, GR32>;
420 def LGRL : UnaryRILPC<"lgrl", 0xC48, aligned_load, GR64>;
423 // Load and zero rightmost byte.
424 let Predicates = [FeatureLoadAndZeroRightmostByte] in {
425 def LZRF : UnaryRXY<"lzrf", 0xE33B, null_frag, GR32, 4>;
426 def LZRG : UnaryRXY<"lzrg", 0xE32A, null_frag, GR64, 8>;
427 def : Pat<(and (i32 (load bdxaddr20only:$src)), 0xffffff00),
428 (LZRF bdxaddr20only:$src)>;
429 def : Pat<(and (i64 (load bdxaddr20only:$src)), 0xffffffffffffff00),
430 (LZRG bdxaddr20only:$src)>;
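// For example, "(i32 (load addr)) & 0xffffff00" thus selects to a single
// "lzrf" rather than a load followed by a separate AND.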
434 let Predicates = [FeatureLoadAndTrap], hasSideEffects = 1 in {
435 def LAT : UnaryRXY<"lat", 0xE39F, null_frag, GR32, 4>;
436 def LFHAT : UnaryRXY<"lfhat", 0xE3C8, null_frag, GRH32, 4>;
437 def LGAT : UnaryRXY<"lgat", 0xE385, null_frag, GR64, 8>;
441 let SimpleBDXStore = 1, mayStore = 1 in {
442 // Expands to ST, STY or STFH, depending on the choice of register.
443 def STMux : StoreRXYPseudo<store, GRX32, 4>,
444 Requires<[FeatureHighWord]>;
445 defm ST : StoreRXPair<"st", 0x50, 0xE350, store, GR32, 4>;
446 def STFH : StoreRXY<"stfh", 0xE3CB, store, GRH32, 4>,
447 Requires<[FeatureHighWord]>;
448 def STG : StoreRXY<"stg", 0xE324, store, GR64, 8>;
450 // These instructions are split after register allocation, so we don't
451 // want a custom inserter.
452 let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
453 def ST128 : Pseudo<(outs), (ins GR128:$src, bdxaddr20only128:$dst),
454 [(store GR128:$src, bdxaddr20only128:$dst)]>;
457 def STRL : StoreRILPC<"strl", 0xC4F, aligned_store, GR32>;
458 def STGRL : StoreRILPC<"stgrl", 0xC4B, aligned_store, GR64>;
460 // 8-bit immediate stores to 8-bit fields.
461 defm MVI : StoreSIPair<"mvi", 0x92, 0xEB52, truncstorei8, imm32zx8trunc>;
463 // 16-bit immediate stores to 16-, 32- or 64-bit fields.
464 def MVHHI : StoreSIL<"mvhhi", 0xE544, truncstorei16, imm32sx16trunc>;
465 def MVHI : StoreSIL<"mvhi", 0xE54C, store, imm32sx16>;
466 def MVGHI : StoreSIL<"mvghi", 0xE548, store, imm64sx16>;
468 // Memory-to-memory moves.
469 let mayLoad = 1, mayStore = 1 in
470 defm MVC : MemorySS<"mvc", 0xD2, z_mvc, z_mvc_loop>;
471 let mayLoad = 1, mayStore = 1, Defs = [CC] in {
472 def MVCL : SideEffectBinaryMemMemRR<"mvcl", 0x0E, GR128, GR128>;
473 def MVCLE : SideEffectTernaryMemMemRS<"mvcle", 0xA8, GR128, GR128>;
474 def MVCLU : SideEffectTernaryMemMemRSY<"mvclu", 0xEB8E, GR128, GR128>;
478 let Predicates = [FeatureMiscellaneousExtensions3],
479 mayLoad = 1, mayStore = 1, Uses = [R0L] in
480 def MVCRL : SideEffectBinarySSE<"mvcrl", 0xE50A>;
483 let mayLoad = 1, mayStore = 1, Defs = [CC] in
484 defm MVST : StringRRE<"mvst", 0xB255, z_stpcpy>;
486 //===----------------------------------------------------------------------===//
487 // Conditional move instructions
488 //===----------------------------------------------------------------------===//
490 let Predicates = [FeatureMiscellaneousExtensions3], Uses = [CC] in {
492 let isCommutable = 1 in {
493 // Expands to SELR or SELFHR or a branch-and-move sequence,
494 // depending on the choice of registers.
495 def SELRMux : CondBinaryRRFaPseudo<GRX32, GRX32, GRX32>;
496 defm SELFHR : CondBinaryRRFaPair<"selfhr", 0xB9C0, GRH32, GRH32, GRH32>;
497 defm SELR : CondBinaryRRFaPair<"selr", 0xB9F0, GR32, GR32, GR32>;
498 defm SELGR : CondBinaryRRFaPair<"selgr", 0xB9E3, GR64, GR64, GR64>;
501 // Define AsmParser extended mnemonics for each general condition-code mask.
502 foreach V = [ "E", "NE", "H", "NH", "L", "NL", "HE", "NHE", "LE", "NLE",
503 "Z", "NZ", "P", "NP", "M", "NM", "LH", "NLH", "O", "NO" ] in {
def SELRAsm#V : FixedCondBinaryRRFa<CV<V>, "selr", 0xB9F0, GR32, GR32, GR32>;
506 def SELFHRAsm#V : FixedCondBinaryRRFa<CV<V>, "selfhr", 0xB9C0,
507 GRH32, GRH32, GRH32>;
def SELGRAsm#V : FixedCondBinaryRRFa<CV<V>, "selgr", 0xB9E3, GR64, GR64, GR64>;
513 let Predicates = [FeatureLoadStoreOnCond2], Uses = [CC] in {
514 // Load immediate on condition. Matched via DAG pattern and created
515 // by the PeepholeOptimizer via FoldImmediate.
517 // Expands to LOCHI or LOCHHI, depending on the choice of register.
518 def LOCHIMux : CondBinaryRIEPseudo<GRX32, imm32sx16>;
519 defm LOCHHI : CondBinaryRIEPair<"lochhi", 0xEC4E, GRH32, imm32sx16>;
520 defm LOCHI : CondBinaryRIEPair<"lochi", 0xEC42, GR32, imm32sx16>;
521 defm LOCGHI : CondBinaryRIEPair<"locghi", 0xEC46, GR64, imm64sx16>;
523 // Move register on condition. Matched via DAG pattern and
524 // created by early if-conversion.
525 let isCommutable = 1 in {
526 // Expands to LOCR or LOCFHR or a branch-and-move sequence,
527 // depending on the choice of registers.
528 def LOCRMux : CondBinaryRRFPseudo<GRX32, GRX32>;
529 defm LOCFHR : CondBinaryRRFPair<"locfhr", 0xB9E0, GRH32, GRH32>;
532 // Load on condition. Matched via DAG pattern.
533 // Expands to LOC or LOCFH, depending on the choice of register.
534 def LOCMux : CondUnaryRSYPseudo<simple_load, GRX32, 4>;
535 defm LOCFH : CondUnaryRSYPair<"locfh", 0xEBE0, simple_load, GRH32, 4>;
537 // Store on condition. Expanded from CondStore* pseudos.
538 // Expands to STOC or STOCFH, depending on the choice of register.
539 def STOCMux : CondStoreRSYPseudo<GRX32, 4>;
540 defm STOCFH : CondStoreRSYPair<"stocfh", 0xEBE1, GRH32, 4>;
542 // Define AsmParser extended mnemonics for each general condition-code mask.
543 foreach V = [ "E", "NE", "H", "NH", "L", "NL", "HE", "NHE", "LE", "NLE",
544 "Z", "NZ", "P", "NP", "M", "NM", "LH", "NLH", "O", "NO" ] in {
def LOCHIAsm#V : FixedCondBinaryRIE<CV<V>, "lochi", 0xEC42, GR32, imm32sx16>;
def LOCGHIAsm#V : FixedCondBinaryRIE<CV<V>, "locghi", 0xEC46, GR64, imm64sx16>;
def LOCHHIAsm#V : FixedCondBinaryRIE<CV<V>, "lochhi", 0xEC4E, GRH32, imm32sx16>;
551 def LOCFHRAsm#V : FixedCondBinaryRRF<CV<V>, "locfhr", 0xB9E0, GRH32, GRH32>;
552 def LOCFHAsm#V : FixedCondUnaryRSY<CV<V>, "locfh", 0xEBE0, GRH32, 4>;
553 def STOCFHAsm#V : FixedCondStoreRSY<CV<V>, "stocfh", 0xEBE1, GRH32, 4>;
557 let Predicates = [FeatureLoadStoreOnCond], Uses = [CC] in {
558 // Move register on condition. Matched via DAG pattern and
559 // created by early if-conversion.
560 let isCommutable = 1 in {
561 defm LOCR : CondBinaryRRFPair<"locr", 0xB9F2, GR32, GR32>;
562 defm LOCGR : CondBinaryRRFPair<"locgr", 0xB9E2, GR64, GR64>;
565 // Load on condition. Matched via DAG pattern.
566 defm LOC : CondUnaryRSYPair<"loc", 0xEBF2, simple_load, GR32, 4>;
567 defm LOCG : CondUnaryRSYPair<"locg", 0xEBE2, simple_load, GR64, 8>;
569 // Store on condition. Expanded from CondStore* pseudos.
570 defm STOC : CondStoreRSYPair<"stoc", 0xEBF3, GR32, 4>;
571 defm STOCG : CondStoreRSYPair<"stocg", 0xEBE3, GR64, 8>;
573 // Define AsmParser extended mnemonics for each general condition-code mask.
574 foreach V = [ "E", "NE", "H", "NH", "L", "NL", "HE", "NHE", "LE", "NLE",
575 "Z", "NZ", "P", "NP", "M", "NM", "LH", "NLH", "O", "NO" ] in {
576 def LOCRAsm#V : FixedCondBinaryRRF<CV<V>, "locr", 0xB9F2, GR32, GR32>;
577 def LOCGRAsm#V : FixedCondBinaryRRF<CV<V>, "locgr", 0xB9E2, GR64, GR64>;
578 def LOCAsm#V : FixedCondUnaryRSY<CV<V>, "loc", 0xEBF2, GR32, 4>;
579 def LOCGAsm#V : FixedCondUnaryRSY<CV<V>, "locg", 0xEBE2, GR64, 8>;
580 def STOCAsm#V : FixedCondStoreRSY<CV<V>, "stoc", 0xEBF3, GR32, 4>;
581 def STOCGAsm#V : FixedCondStoreRSY<CV<V>, "stocg", 0xEBE3, GR64, 8>;
//===----------------------------------------------------------------------===//
// Sign extensions
//===----------------------------------------------------------------------===//
// Note that putting these before zero extensions means that we will prefer
589 // them for anyextload*. There's not really much to choose between the two
590 // either way, but signed-extending loads have a short LH and a long LHY,
591 // while zero-extending loads have only the long LLH.
593 //===----------------------------------------------------------------------===//
595 // 32-bit extensions from registers.
596 def LBR : UnaryRRE<"lbr", 0xB926, sext8, GR32, GR32>;
597 def LHR : UnaryRRE<"lhr", 0xB927, sext16, GR32, GR32>;
599 // 64-bit extensions from registers.
600 def LGBR : UnaryRRE<"lgbr", 0xB906, sext8, GR64, GR64>;
601 def LGHR : UnaryRRE<"lghr", 0xB907, sext16, GR64, GR64>;
602 def LGFR : UnaryRRE<"lgfr", 0xB914, sext32, GR64, GR32>;
604 let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in
605 def LTGFR : UnaryRRE<"ltgfr", 0xB912, null_frag, GR64, GR32>;
607 // Match 32-to-64-bit sign extensions in which the source is already
608 // in a 64-bit register.
609 def : Pat<(sext_inreg GR64:$src, i32),
610 (LGFR (EXTRACT_SUBREG GR64:$src, subreg_l32))>;
612 // 32-bit extensions from 8-bit memory. LBMux expands to LB or LBH,
613 // depending on the choice of register.
614 def LBMux : UnaryRXYPseudo<"lb", asextloadi8, GRX32, 1>,
615 Requires<[FeatureHighWord]>;
616 def LB : UnaryRXY<"lb", 0xE376, asextloadi8, GR32, 1>;
617 def LBH : UnaryRXY<"lbh", 0xE3C0, asextloadi8, GRH32, 1>,
618 Requires<[FeatureHighWord]>;
620 // 32-bit extensions from 16-bit memory. LHMux expands to LH or LHH,
621 // depending on the choice of register.
622 def LHMux : UnaryRXYPseudo<"lh", asextloadi16, GRX32, 2>,
623 Requires<[FeatureHighWord]>;
624 defm LH : UnaryRXPair<"lh", 0x48, 0xE378, asextloadi16, GR32, 2>;
625 def LHH : UnaryRXY<"lhh", 0xE3C4, asextloadi16, GRH32, 2>,
626 Requires<[FeatureHighWord]>;
627 def LHRL : UnaryRILPC<"lhrl", 0xC45, aligned_asextloadi16, GR32>;
629 // 64-bit extensions from memory.
630 def LGB : UnaryRXY<"lgb", 0xE377, asextloadi8, GR64, 1>;
631 def LGH : UnaryRXY<"lgh", 0xE315, asextloadi16, GR64, 2>;
632 def LGF : UnaryRXY<"lgf", 0xE314, asextloadi32, GR64, 4>;
633 def LGHRL : UnaryRILPC<"lghrl", 0xC44, aligned_asextloadi16, GR64>;
634 def LGFRL : UnaryRILPC<"lgfrl", 0xC4C, aligned_asextloadi32, GR64>;
635 let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in
636 def LTGF : UnaryRXY<"ltgf", 0xE332, asextloadi32, GR64, 4>;
//===----------------------------------------------------------------------===//
// Zero extensions
//===----------------------------------------------------------------------===//
642 // 32-bit extensions from registers.
644 // Expands to LLCR or RISB[LH]G, depending on the choice of registers.
645 def LLCRMux : UnaryRRPseudo<"llcr", zext8, GRX32, GRX32>,
646 Requires<[FeatureHighWord]>;
647 def LLCR : UnaryRRE<"llcr", 0xB994, zext8, GR32, GR32>;
648 // Expands to LLHR or RISB[LH]G, depending on the choice of registers.
649 def LLHRMux : UnaryRRPseudo<"llhr", zext16, GRX32, GRX32>,
650 Requires<[FeatureHighWord]>;
651 def LLHR : UnaryRRE<"llhr", 0xB995, zext16, GR32, GR32>;
653 // 64-bit extensions from registers.
654 def LLGCR : UnaryRRE<"llgcr", 0xB984, zext8, GR64, GR64>;
655 def LLGHR : UnaryRRE<"llghr", 0xB985, zext16, GR64, GR64>;
656 def LLGFR : UnaryRRE<"llgfr", 0xB916, zext32, GR64, GR32>;
658 // Match 32-to-64-bit zero extensions in which the source is already
659 // in a 64-bit register.
660 def : Pat<(and GR64:$src, 0xffffffff),
661 (LLGFR (EXTRACT_SUBREG GR64:$src, subreg_l32))>;
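// For example, (zext (trunc i64:$x)) is normally canonicalized by the DAG
// combiner to (and i64:$x, 0xffffffff) and so ends up using this pattern.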
663 // 32-bit extensions from 8-bit memory. LLCMux expands to LLC or LLCH,
664 // depending on the choice of register.
665 def LLCMux : UnaryRXYPseudo<"llc", azextloadi8, GRX32, 1>,
666 Requires<[FeatureHighWord]>;
667 def LLC : UnaryRXY<"llc", 0xE394, azextloadi8, GR32, 1>;
668 def LLCH : UnaryRXY<"llch", 0xE3C2, azextloadi8, GRH32, 1>,
669 Requires<[FeatureHighWord]>;
671 // 32-bit extensions from 16-bit memory. LLHMux expands to LLH or LLHH,
672 // depending on the choice of register.
673 def LLHMux : UnaryRXYPseudo<"llh", azextloadi16, GRX32, 2>,
674 Requires<[FeatureHighWord]>;
675 def LLH : UnaryRXY<"llh", 0xE395, azextloadi16, GR32, 2>;
676 def LLHH : UnaryRXY<"llhh", 0xE3C6, azextloadi16, GRH32, 2>,
677 Requires<[FeatureHighWord]>;
678 def LLHRL : UnaryRILPC<"llhrl", 0xC42, aligned_azextloadi16, GR32>;
680 // 64-bit extensions from memory.
681 def LLGC : UnaryRXY<"llgc", 0xE390, azextloadi8, GR64, 1>;
682 def LLGH : UnaryRXY<"llgh", 0xE391, azextloadi16, GR64, 2>;
683 def LLGF : UnaryRXY<"llgf", 0xE316, azextloadi32, GR64, 4>;
684 def LLGHRL : UnaryRILPC<"llghrl", 0xC46, aligned_azextloadi16, GR64>;
685 def LLGFRL : UnaryRILPC<"llgfrl", 0xC4E, aligned_azextloadi32, GR64>;
687 // 31-to-64-bit zero extensions.
688 def LLGTR : UnaryRRE<"llgtr", 0xB917, null_frag, GR64, GR64>;
689 def LLGT : UnaryRXY<"llgt", 0xE317, null_frag, GR64, 4>;
def : Pat<(and GR64:$src, 0x7fffffff), (LLGTR GR64:$src)>;
692 def : Pat<(and (i64 (azextloadi32 bdxaddr20only:$src)), 0x7fffffff),
693 (LLGT bdxaddr20only:$src)>;
695 // Load and zero rightmost byte.
696 let Predicates = [FeatureLoadAndZeroRightmostByte] in {
697 def LLZRGF : UnaryRXY<"llzrgf", 0xE33A, null_frag, GR64, 4>;
698 def : Pat<(and (i64 (azextloadi32 bdxaddr20only:$src)), 0xffffff00),
699 (LLZRGF bdxaddr20only:$src)>;
703 let Predicates = [FeatureLoadAndTrap], hasSideEffects = 1 in {
704 def LLGFAT : UnaryRXY<"llgfat", 0xE39D, null_frag, GR64, 4>;
705 def LLGTAT : UnaryRXY<"llgtat", 0xE39C, null_frag, GR64, 4>;
708 // Extend GR64s to GR128s.
709 let usesCustomInserter = 1, hasNoSchedulingInfo = 1 in
710 def ZEXT128 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>;
//===----------------------------------------------------------------------===//
// Any extensions
//===----------------------------------------------------------------------===//
716 // Use subregs to populate the "don't care" bits in a 32-bit to 64-bit anyext.
717 def : Pat<(i64 (anyext GR32:$src)),
718 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32)>;
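// No instruction is needed for the extension itself; the INSERT_SUBREG just
// places the GR32 value in the low half of an otherwise undefined GR64.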
720 // Extend GR64s to GR128s.
721 let usesCustomInserter = 1, hasNoSchedulingInfo = 1 in
722 def AEXT128 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>;
//===----------------------------------------------------------------------===//
// Truncations
//===----------------------------------------------------------------------===//
728 // Truncations of 64-bit registers to 32-bit registers.
729 def : Pat<(i32 (trunc GR64:$src)),
730 (EXTRACT_SUBREG GR64:$src, subreg_l32)>;
732 // Truncations of 32-bit registers to 8-bit memory. STCMux expands to
733 // STC, STCY or STCH, depending on the choice of register.
734 def STCMux : StoreRXYPseudo<truncstorei8, GRX32, 1>,
735 Requires<[FeatureHighWord]>;
736 defm STC : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR32, 1>;
737 def STCH : StoreRXY<"stch", 0xE3C3, truncstorei8, GRH32, 1>,
738 Requires<[FeatureHighWord]>;
740 // Truncations of 32-bit registers to 16-bit memory. STHMux expands to
741 // STH, STHY or STHH, depending on the choice of register.
742 def STHMux : StoreRXYPseudo<truncstorei16, GRX32, 1>,
743 Requires<[FeatureHighWord]>;
744 defm STH : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR32, 2>;
745 def STHH : StoreRXY<"sthh", 0xE3C7, truncstorei16, GRH32, 2>,
746 Requires<[FeatureHighWord]>;
747 def STHRL : StoreRILPC<"sthrl", 0xC47, aligned_truncstorei16, GR32>;
749 // Truncations of 64-bit registers to memory.
750 defm : StoreGR64Pair<STC, STCY, truncstorei8>;
751 defm : StoreGR64Pair<STH, STHY, truncstorei16>;
752 def : StoreGR64PC<STHRL, aligned_truncstorei16>;
753 defm : StoreGR64Pair<ST, STY, truncstorei32>;
754 def : StoreGR64PC<STRL, aligned_truncstorei32>;
756 // Store characters under mask -- not (yet) used for codegen.
757 defm STCM : StoreBinaryRSPair<"stcm", 0xBE, 0xEB2D, GR32, 0>;
758 def STCMH : StoreBinaryRSY<"stcmh", 0xEB2C, GRH32, 0>;
760 //===----------------------------------------------------------------------===//
761 // Multi-register moves
762 //===----------------------------------------------------------------------===//
764 // Multi-register loads.
765 defm LM : LoadMultipleRSPair<"lm", 0x98, 0xEB98, GR32>;
766 def LMG : LoadMultipleRSY<"lmg", 0xEB04, GR64>;
767 def LMH : LoadMultipleRSY<"lmh", 0xEB96, GRH32>;
768 def LMD : LoadMultipleSSe<"lmd", 0xEF, GR64>;
770 // Multi-register stores.
771 defm STM : StoreMultipleRSPair<"stm", 0x90, 0xEB90, GR32>;
772 def STMG : StoreMultipleRSY<"stmg", 0xEB24, GR64>;
773 def STMH : StoreMultipleRSY<"stmh", 0xEB26, GRH32>;
//===----------------------------------------------------------------------===//
// Byte swaps
//===----------------------------------------------------------------------===//
779 // Byte-swapping register moves.
780 def LRVR : UnaryRRE<"lrvr", 0xB91F, bswap, GR32, GR32>;
781 def LRVGR : UnaryRRE<"lrvgr", 0xB90F, bswap, GR64, GR64>;
783 // Byte-swapping loads.
784 def LRVH : UnaryRXY<"lrvh", 0xE31F, z_loadbswap16, GR32, 2>;
785 def LRV : UnaryRXY<"lrv", 0xE31E, z_loadbswap32, GR32, 4>;
786 def LRVG : UnaryRXY<"lrvg", 0xE30F, z_loadbswap64, GR64, 8>;
788 // Byte-swapping stores.
789 def STRVH : StoreRXY<"strvh", 0xE33F, z_storebswap16, GR32, 2>;
790 def STRV : StoreRXY<"strv", 0xE33E, z_storebswap32, GR32, 4>;
791 def STRVG : StoreRXY<"strvg", 0xE32F, z_storebswap64, GR64, 8>;
793 // Byte-swapping memory-to-memory moves.
794 let mayLoad = 1, mayStore = 1 in
795 def MVCIN : SideEffectBinarySSa<"mvcin", 0xE8>;
797 //===----------------------------------------------------------------------===//
798 // Load address instructions
799 //===----------------------------------------------------------------------===//
801 // Load BDX-style addresses.
802 let isAsCheapAsAMove = 1, isReMaterializable = 1 in
803 defm LA : LoadAddressRXPair<"la", 0x41, 0xE371, bitconvert>;
805 // Load a PC-relative address. There's no version of this instruction
806 // with a 16-bit offset, so there's no relaxation.
807 let isAsCheapAsAMove = 1, isMoveImm = 1, isReMaterializable = 1 in
808 def LARL : LoadAddressRIL<"larl", 0xC00, bitconvert>;
810 // Load the Global Offset Table address. This will be lowered into a
// larl $R1, _GLOBAL_OFFSET_TABLE_ instruction.
813 def GOT : Alias<6, (outs GR64:$R1), (ins),
814 [(set GR64:$R1, (global_offset_table))]>;
816 //===----------------------------------------------------------------------===//
817 // Absolute and Negation
818 //===----------------------------------------------------------------------===//
821 let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
822 def LPR : UnaryRR <"lpr", 0x10, z_iabs, GR32, GR32>;
823 def LPGR : UnaryRRE<"lpgr", 0xB900, z_iabs, GR64, GR64>;
825 let CCValues = 0xE, CompareZeroCCMask = 0xE in
826 def LPGFR : UnaryRRE<"lpgfr", 0xB910, null_frag, GR64, GR32>;
828 def : Pat<(z_iabs32 GR32:$src), (LPR GR32:$src)>;
829 def : Pat<(z_iabs64 GR64:$src), (LPGR GR64:$src)>;
830 defm : SXU<z_iabs, LPGFR>;
831 defm : SXU<z_iabs64, LPGFR>;
834 let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
835 def LNR : UnaryRR <"lnr", 0x11, z_inegabs, GR32, GR32>;
836 def LNGR : UnaryRRE<"lngr", 0xB901, z_inegabs, GR64, GR64>;
838 let CCValues = 0xE, CompareZeroCCMask = 0xE in
839 def LNGFR : UnaryRRE<"lngfr", 0xB911, null_frag, GR64, GR32>;
841 def : Pat<(z_inegabs32 GR32:$src), (LNR GR32:$src)>;
842 def : Pat<(z_inegabs64 GR64:$src), (LNGR GR64:$src)>;
843 defm : SXU<z_inegabs, LNGFR>;
844 defm : SXU<z_inegabs64, LNGFR>;
847 let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
848 def LCR : UnaryRR <"lcr", 0x13, ineg, GR32, GR32>;
849 def LCGR : UnaryRRE<"lcgr", 0xB903, ineg, GR64, GR64>;
851 let CCValues = 0xE, CompareZeroCCMask = 0xE in
852 def LCGFR : UnaryRRE<"lcgfr", 0xB913, null_frag, GR64, GR32>;
854 defm : SXU<ineg, LCGFR>;
//===----------------------------------------------------------------------===//
// Insertion
//===----------------------------------------------------------------------===//
860 let isCodeGenOnly = 1 in
861 defm IC32 : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR32, azextloadi8, 1>;
862 defm IC : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR64, azextloadi8, 1>;
864 defm : InsertMem<"inserti8", IC32, GR32, azextloadi8, bdxaddr12pair>;
865 defm : InsertMem<"inserti8", IC32Y, GR32, azextloadi8, bdxaddr20pair>;
867 defm : InsertMem<"inserti8", IC, GR64, azextloadi8, bdxaddr12pair>;
868 defm : InsertMem<"inserti8", ICY, GR64, azextloadi8, bdxaddr20pair>;
870 // Insert characters under mask -- not (yet) used for codegen.
872 defm ICM : TernaryRSPair<"icm", 0xBF, 0xEB81, GR32, 0>;
873 def ICMH : TernaryRSY<"icmh", 0xEB80, GRH32, 0>;
876 // Insertions of a 16-bit immediate, leaving other bits unaffected.
877 // We don't have or_as_insert equivalents of these operations because
878 // OI is available instead.
880 // IIxMux expands to II[LH]x, depending on the choice of register.
881 def IILMux : BinaryRIPseudo<insertll, GRX32, imm32ll16>,
882 Requires<[FeatureHighWord]>;
883 def IIHMux : BinaryRIPseudo<insertlh, GRX32, imm32lh16>,
884 Requires<[FeatureHighWord]>;
885 def IILL : BinaryRI<"iill", 0xA53, insertll, GR32, imm32ll16>;
886 def IILH : BinaryRI<"iilh", 0xA52, insertlh, GR32, imm32lh16>;
887 def IIHL : BinaryRI<"iihl", 0xA51, insertll, GRH32, imm32ll16>;
888 def IIHH : BinaryRI<"iihh", 0xA50, insertlh, GRH32, imm32lh16>;
889 def IILL64 : BinaryAliasRI<insertll, GR64, imm64ll16>;
890 def IILH64 : BinaryAliasRI<insertlh, GR64, imm64lh16>;
891 def IIHL64 : BinaryAliasRI<inserthl, GR64, imm64hl16>;
892 def IIHH64 : BinaryAliasRI<inserthh, GR64, imm64hh16>;
894 // ...likewise for 32-bit immediates. For GR32s this is a general
895 // full-width move. (We use IILF rather than something like LLILF
// for 32-bit moves because IILF leaves the upper 32 bits of the
// register unchanged.)
898 let isAsCheapAsAMove = 1, isMoveImm = 1, isReMaterializable = 1 in {
899 def IIFMux : UnaryRIPseudo<bitconvert, GRX32, uimm32>,
900 Requires<[FeatureHighWord]>;
901 def IILF : UnaryRIL<"iilf", 0xC09, bitconvert, GR32, uimm32>;
902 def IIHF : UnaryRIL<"iihf", 0xC08, bitconvert, GRH32, uimm32>;
904 def IILF64 : BinaryAliasRIL<insertlf, GR64, imm64lf32>;
905 def IIHF64 : BinaryAliasRIL<inserthf, GR64, imm64hf32>;
907 // An alternative model of inserthf, with the first operand being
908 // a zero-extended value.
909 def : Pat<(or (zext32 GR32:$src), imm64hf32:$imm),
(IIHF64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32),
        imm64hf32:$imm)>;
//===----------------------------------------------------------------------===//
// Addition
//===----------------------------------------------------------------------===//
917 // Addition producing a signed overflow flag.
918 let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
919 // Addition of a register.
920 let isCommutable = 1 in {
921 defm AR : BinaryRRAndK<"ar", 0x1A, 0xB9F8, z_sadd, GR32, GR32>;
922 defm AGR : BinaryRREAndK<"agr", 0xB908, 0xB9E8, z_sadd, GR64, GR64>;
924 def AGFR : BinaryRRE<"agfr", 0xB918, null_frag, GR64, GR32>;
926 // Addition to a high register.
927 def AHHHR : BinaryRRFa<"ahhhr", 0xB9C8, null_frag, GRH32, GRH32, GRH32>,
928 Requires<[FeatureHighWord]>;
929 def AHHLR : BinaryRRFa<"ahhlr", 0xB9D8, null_frag, GRH32, GRH32, GR32>,
930 Requires<[FeatureHighWord]>;
932 // Addition of signed 16-bit immediates.
933 defm AHIMux : BinaryRIAndKPseudo<"ahimux", z_sadd, GRX32, imm32sx16>;
934 defm AHI : BinaryRIAndK<"ahi", 0xA7A, 0xECD8, z_sadd, GR32, imm32sx16>;
935 defm AGHI : BinaryRIAndK<"aghi", 0xA7B, 0xECD9, z_sadd, GR64, imm64sx16>;
937 // Addition of signed 32-bit immediates.
938 def AFIMux : BinaryRIPseudo<z_sadd, GRX32, simm32>,
939 Requires<[FeatureHighWord]>;
940 def AFI : BinaryRIL<"afi", 0xC29, z_sadd, GR32, simm32>;
941 def AIH : BinaryRIL<"aih", 0xCC8, z_sadd, GRH32, simm32>,
942 Requires<[FeatureHighWord]>;
943 def AGFI : BinaryRIL<"agfi", 0xC28, z_sadd, GR64, imm64sx32>;
945 // Addition of memory.
946 defm AH : BinaryRXPair<"ah", 0x4A, 0xE37A, z_sadd, GR32, asextloadi16, 2>;
947 defm A : BinaryRXPairAndPseudo<"a", 0x5A, 0xE35A, z_sadd, GR32, load, 4>;
948 def AGH : BinaryRXY<"agh", 0xE338, z_sadd, GR64, asextloadi16, 2>,
949 Requires<[FeatureMiscellaneousExtensions2]>;
950 def AGF : BinaryRXY<"agf", 0xE318, z_sadd, GR64, asextloadi32, 4>;
951 defm AG : BinaryRXYAndPseudo<"ag", 0xE308, z_sadd, GR64, load, 8>;
953 // Addition to memory.
954 def ASI : BinarySIY<"asi", 0xEB6A, add, imm32sx8>;
955 def AGSI : BinarySIY<"agsi", 0xEB7A, add, imm64sx8>;
957 defm : SXB<z_sadd, GR64, AGFR>;
959 // Addition producing a carry.
961 // Addition of a register.
962 let isCommutable = 1 in {
963 defm ALR : BinaryRRAndK<"alr", 0x1E, 0xB9FA, z_uadd, GR32, GR32>;
964 defm ALGR : BinaryRREAndK<"algr", 0xB90A, 0xB9EA, z_uadd, GR64, GR64>;
966 def ALGFR : BinaryRRE<"algfr", 0xB91A, null_frag, GR64, GR32>;
968 // Addition to a high register.
969 def ALHHHR : BinaryRRFa<"alhhhr", 0xB9CA, null_frag, GRH32, GRH32, GRH32>,
970 Requires<[FeatureHighWord]>;
971 def ALHHLR : BinaryRRFa<"alhhlr", 0xB9DA, null_frag, GRH32, GRH32, GR32>,
972 Requires<[FeatureHighWord]>;
974 // Addition of signed 16-bit immediates.
975 def ALHSIK : BinaryRIE<"alhsik", 0xECDA, z_uadd, GR32, imm32sx16>,
976 Requires<[FeatureDistinctOps]>;
977 def ALGHSIK : BinaryRIE<"alghsik", 0xECDB, z_uadd, GR64, imm64sx16>,
978 Requires<[FeatureDistinctOps]>;
980 // Addition of unsigned 32-bit immediates.
981 def ALFI : BinaryRIL<"alfi", 0xC2B, z_uadd, GR32, uimm32>;
982 def ALGFI : BinaryRIL<"algfi", 0xC2A, z_uadd, GR64, imm64zx32>;
984 // Addition of signed 32-bit immediates.
985 def ALSIH : BinaryRIL<"alsih", 0xCCA, null_frag, GRH32, simm32>,
986 Requires<[FeatureHighWord]>;
988 // Addition of memory.
989 defm AL : BinaryRXPairAndPseudo<"al", 0x5E, 0xE35E, z_uadd, GR32, load, 4>;
990 def ALGF : BinaryRXY<"algf", 0xE31A, z_uadd, GR64, azextloadi32, 4>;
991 defm ALG : BinaryRXYAndPseudo<"alg", 0xE30A, z_uadd, GR64, load, 8>;
993 // Addition to memory.
994 def ALSI : BinarySIY<"alsi", 0xEB6E, null_frag, imm32sx8>;
995 def ALGSI : BinarySIY<"algsi", 0xEB7E, null_frag, imm64sx8>;
997 defm : ZXB<z_uadd, GR64, ALGFR>;
999 // Addition producing and using a carry.
1000 let Defs = [CC], Uses = [CC] in {
1001 // Addition of a register.
1002 def ALCR : BinaryRRE<"alcr", 0xB998, z_addcarry, GR32, GR32>;
1003 def ALCGR : BinaryRRE<"alcgr", 0xB988, z_addcarry, GR64, GR64>;
1005 // Addition of memory.
1006 def ALC : BinaryRXY<"alc", 0xE398, z_addcarry, GR32, load, 4>;
1007 def ALCG : BinaryRXY<"alcg", 0xE388, z_addcarry, GR64, load, 8>;
1010 // Addition that does not modify the condition code.
1011 def ALSIHN : BinaryRIL<"alsihn", 0xCCB, null_frag, GRH32, simm32>,
1012 Requires<[FeatureHighWord]>;
//===----------------------------------------------------------------------===//
// Subtraction
//===----------------------------------------------------------------------===//
1019 // Subtraction producing a signed overflow flag.
1020 let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
1021 // Subtraction of a register.
1022 defm SR : BinaryRRAndK<"sr", 0x1B, 0xB9F9, z_ssub, GR32, GR32>;
1023 def SGFR : BinaryRRE<"sgfr", 0xB919, null_frag, GR64, GR32>;
1024 defm SGR : BinaryRREAndK<"sgr", 0xB909, 0xB9E9, z_ssub, GR64, GR64>;
1026 // Subtraction from a high register.
1027 def SHHHR : BinaryRRFa<"shhhr", 0xB9C9, null_frag, GRH32, GRH32, GRH32>,
1028 Requires<[FeatureHighWord]>;
1029 def SHHLR : BinaryRRFa<"shhlr", 0xB9D9, null_frag, GRH32, GRH32, GR32>,
1030 Requires<[FeatureHighWord]>;
1032 // Subtraction of memory.
1033 defm SH : BinaryRXPair<"sh", 0x4B, 0xE37B, z_ssub, GR32, asextloadi16, 2>;
1034 defm S : BinaryRXPairAndPseudo<"s", 0x5B, 0xE35B, z_ssub, GR32, load, 4>;
1035 def SGH : BinaryRXY<"sgh", 0xE339, z_ssub, GR64, asextloadi16, 2>,
1036 Requires<[FeatureMiscellaneousExtensions2]>;
1037 def SGF : BinaryRXY<"sgf", 0xE319, z_ssub, GR64, asextloadi32, 4>;
1038 defm SG : BinaryRXYAndPseudo<"sg", 0xE309, z_ssub, GR64, load, 8>;
1040 defm : SXB<z_ssub, GR64, SGFR>;
1042 // Subtracting an immediate is the same as adding the negated immediate.
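// For example, a 32-bit subtraction of the constant 1 becomes an AHI (or
// AHIMux) with immediate -1.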
1043 let AddedComplexity = 1 in {
1044 def : Pat<(z_ssub GR32:$src1, imm32sx16n:$src2),
1045 (AHIMux GR32:$src1, imm32sx16n:$src2)>,
1046 Requires<[FeatureHighWord]>;
1047 def : Pat<(z_ssub GR32:$src1, simm32n:$src2),
1048 (AFIMux GR32:$src1, simm32n:$src2)>,
1049 Requires<[FeatureHighWord]>;
1050 def : Pat<(z_ssub GR32:$src1, imm32sx16n:$src2),
1051 (AHI GR32:$src1, imm32sx16n:$src2)>;
1052 def : Pat<(z_ssub GR32:$src1, simm32n:$src2),
1053 (AFI GR32:$src1, simm32n:$src2)>;
1054 def : Pat<(z_ssub GR64:$src1, imm64sx16n:$src2),
1055 (AGHI GR64:$src1, imm64sx16n:$src2)>;
1056 def : Pat<(z_ssub GR64:$src1, imm64sx32n:$src2),
1057 (AGFI GR64:$src1, imm64sx32n:$src2)>;
1060 // And vice versa in one special case, where we need to load a
1061 // constant into a register in any case, but the negated constant
1062 // requires fewer instructions to load.
1063 def : Pat<(z_saddo GR64:$src1, imm64lh16n:$src2),
1064 (SGR GR64:$src1, (LLILH imm64lh16n:$src2))>;
1065 def : Pat<(z_saddo GR64:$src1, imm64lf32n:$src2),
1066 (SGR GR64:$src1, (LLILF imm64lf32n:$src2))>;
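// For example (hypothetical constant), adding -0x90000000 would need a
// multi-instruction materialization, but the negated value 0x90000000 is a
// single LLILH 0x9000, so we emit LLILH followed by SGR.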
1068 // Subtraction producing a carry.
1069 let Defs = [CC] in {
1070 // Subtraction of a register.
1071 defm SLR : BinaryRRAndK<"slr", 0x1F, 0xB9FB, z_usub, GR32, GR32>;
1072 def SLGFR : BinaryRRE<"slgfr", 0xB91B, null_frag, GR64, GR32>;
1073 defm SLGR : BinaryRREAndK<"slgr", 0xB90B, 0xB9EB, z_usub, GR64, GR64>;
1075 // Subtraction from a high register.
1076 def SLHHHR : BinaryRRFa<"slhhhr", 0xB9CB, null_frag, GRH32, GRH32, GRH32>,
1077 Requires<[FeatureHighWord]>;
1078 def SLHHLR : BinaryRRFa<"slhhlr", 0xB9DB, null_frag, GRH32, GRH32, GR32>,
1079 Requires<[FeatureHighWord]>;
1081 // Subtraction of unsigned 32-bit immediates.
1082 def SLFI : BinaryRIL<"slfi", 0xC25, z_usub, GR32, uimm32>;
1083 def SLGFI : BinaryRIL<"slgfi", 0xC24, z_usub, GR64, imm64zx32>;
1085 // Subtraction of memory.
1086 defm SL : BinaryRXPairAndPseudo<"sl", 0x5F, 0xE35F, z_usub, GR32, load, 4>;
1087 def SLGF : BinaryRXY<"slgf", 0xE31B, z_usub, GR64, azextloadi32, 4>;
1088 defm SLG : BinaryRXYAndPseudo<"slg", 0xE30B, z_usub, GR64, load, 8>;
1090 defm : ZXB<z_usub, GR64, SLGFR>;
1092 // Subtracting an immediate is the same as adding the negated immediate.
1093 let AddedComplexity = 1 in {
1094 def : Pat<(z_usub GR32:$src1, imm32sx16n:$src2),
1095 (ALHSIK GR32:$src1, imm32sx16n:$src2)>,
1096 Requires<[FeatureDistinctOps]>;
1097 def : Pat<(z_usub GR64:$src1, imm64sx16n:$src2),
1098 (ALGHSIK GR64:$src1, imm64sx16n:$src2)>,
1099 Requires<[FeatureDistinctOps]>;
1102 // And vice versa in one special case (but we prefer addition).
1103 def : Pat<(add GR64:$src1, imm64zx32n:$src2),
1104 (SLGFI GR64:$src1, imm64zx32n:$src2)>;
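// For example (hypothetical constant), "x + (-4294967295)" is out of range
// for AGFI's signed 32-bit immediate, but the negated value 4294967295 fits
// SLGFI's unsigned 32-bit field.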
1106 // Subtraction producing and using a carry.
1107 let Defs = [CC], Uses = [CC] in {
1108 // Subtraction of a register.
1109 def SLBR : BinaryRRE<"slbr", 0xB999, z_subcarry, GR32, GR32>;
1110 def SLBGR : BinaryRRE<"slbgr", 0xB989, z_subcarry, GR64, GR64>;
1112 // Subtraction of memory.
1113 def SLB : BinaryRXY<"slb", 0xE399, z_subcarry, GR32, load, 4>;
1114 def SLBG : BinaryRXY<"slbg", 0xE389, z_subcarry, GR64, load, 8>;
//===----------------------------------------------------------------------===//
// AND
//===----------------------------------------------------------------------===//
1122 let Defs = [CC] in {
1123 // ANDs of a register.
1124 let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1125 defm NR : BinaryRRAndK<"nr", 0x14, 0xB9F4, and, GR32, GR32>;
1126 defm NGR : BinaryRREAndK<"ngr", 0xB980, 0xB9E4, and, GR64, GR64>;
1129 let isConvertibleToThreeAddress = 1 in {
1130 // ANDs of a 16-bit immediate, leaving other bits unaffected.
1131 // The CC result only reflects the 16-bit field, not the full register.
1133 // NIxMux expands to NI[LH]x, depending on the choice of register.
1134 def NILMux : BinaryRIPseudo<and, GRX32, imm32ll16c>,
1135 Requires<[FeatureHighWord]>;
1136 def NIHMux : BinaryRIPseudo<and, GRX32, imm32lh16c>,
1137 Requires<[FeatureHighWord]>;
1138 def NILL : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>;
1139 def NILH : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>;
1140 def NIHL : BinaryRI<"nihl", 0xA55, and, GRH32, imm32ll16c>;
1141 def NIHH : BinaryRI<"nihh", 0xA54, and, GRH32, imm32lh16c>;
1142 def NILL64 : BinaryAliasRI<and, GR64, imm64ll16c>;
1143 def NILH64 : BinaryAliasRI<and, GR64, imm64lh16c>;
1144 def NIHL64 : BinaryAliasRI<and, GR64, imm64hl16c>;
1145 def NIHH64 : BinaryAliasRI<and, GR64, imm64hh16c>;
1147 // ANDs of a 32-bit immediate, leaving other bits unaffected.
1148 // The CC result only reflects the 32-bit field, which means we can
1149 // use it as a zero indicator for i32 operations but not otherwise.
1150 let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1151 // Expands to NILF or NIHF, depending on the choice of register.
1152 def NIFMux : BinaryRIPseudo<and, GRX32, uimm32>,
1153 Requires<[FeatureHighWord]>;
1154 def NILF : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>;
1155 def NIHF : BinaryRIL<"nihf", 0xC0A, and, GRH32, uimm32>;
1157 def NILF64 : BinaryAliasRIL<and, GR64, imm64lf32c>;
1158 def NIHF64 : BinaryAliasRIL<and, GR64, imm64hf32c>;
1162 let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1163 defm N : BinaryRXPairAndPseudo<"n", 0x54, 0xE354, and, GR32, load, 4>;
1164 defm NG : BinaryRXYAndPseudo<"ng", 0xE380, and, GR64, load, 8>;
1168 defm NI : BinarySIPair<"ni", 0x94, 0xEB54, null_frag, imm32zx8>;
1171 let mayLoad = 1, mayStore = 1 in
1172 defm NC : MemorySS<"nc", 0xD4, z_nc, z_nc_loop>;
1174 defm : RMWIByte<and, bdaddr12pair, NI>;
1175 defm : RMWIByte<and, bdaddr20pair, NIY>;
//===----------------------------------------------------------------------===//
// OR
//===----------------------------------------------------------------------===//
1181 let Defs = [CC] in {
1182 // ORs of a register.
1183 let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1184 defm OR : BinaryRRAndK<"or", 0x16, 0xB9F6, or, GR32, GR32>;
1185 defm OGR : BinaryRREAndK<"ogr", 0xB981, 0xB9E6, or, GR64, GR64>;
1188 // ORs of a 16-bit immediate, leaving other bits unaffected.
1189 // The CC result only reflects the 16-bit field, not the full register.
1191 // OIxMux expands to OI[LH]x, depending on the choice of register.
1192 def OILMux : BinaryRIPseudo<or, GRX32, imm32ll16>,
1193 Requires<[FeatureHighWord]>;
1194 def OIHMux : BinaryRIPseudo<or, GRX32, imm32lh16>,
1195 Requires<[FeatureHighWord]>;
1196 def OILL : BinaryRI<"oill", 0xA5B, or, GR32, imm32ll16>;
1197 def OILH : BinaryRI<"oilh", 0xA5A, or, GR32, imm32lh16>;
1198 def OIHL : BinaryRI<"oihl", 0xA59, or, GRH32, imm32ll16>;
1199 def OIHH : BinaryRI<"oihh", 0xA58, or, GRH32, imm32lh16>;
1200 def OILL64 : BinaryAliasRI<or, GR64, imm64ll16>;
1201 def OILH64 : BinaryAliasRI<or, GR64, imm64lh16>;
1202 def OIHL64 : BinaryAliasRI<or, GR64, imm64hl16>;
1203 def OIHH64 : BinaryAliasRI<or, GR64, imm64hh16>;
1205 // ORs of a 32-bit immediate, leaving other bits unaffected.
1206 // The CC result only reflects the 32-bit field, which means we can
1207 // use it as a zero indicator for i32 operations but not otherwise.
1208 let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1209 // Expands to OILF or OIHF, depending on the choice of register.
1210 def OIFMux : BinaryRIPseudo<or, GRX32, uimm32>,
1211 Requires<[FeatureHighWord]>;
1212 def OILF : BinaryRIL<"oilf", 0xC0D, or, GR32, uimm32>;
1213 def OIHF : BinaryRIL<"oihf", 0xC0C, or, GRH32, uimm32>;
1215 def OILF64 : BinaryAliasRIL<or, GR64, imm64lf32>;
1216 def OIHF64 : BinaryAliasRIL<or, GR64, imm64hf32>;
1219 let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1220 defm O : BinaryRXPairAndPseudo<"o", 0x56, 0xE356, or, GR32, load, 4>;
1221 defm OG : BinaryRXYAndPseudo<"og", 0xE381, or, GR64, load, 8>;
1225 defm OI : BinarySIPair<"oi", 0x96, 0xEB56, null_frag, imm32zx8>;
1228 let mayLoad = 1, mayStore = 1 in
1229 defm OC : MemorySS<"oc", 0xD6, z_oc, z_oc_loop>;
1231 defm : RMWIByte<or, bdaddr12pair, OI>;
1232 defm : RMWIByte<or, bdaddr20pair, OIY>;
//===----------------------------------------------------------------------===//
// XOR
//===----------------------------------------------------------------------===//
1238 let Defs = [CC] in {
1239 // XORs of a register.
1240 let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1241 defm XR : BinaryRRAndK<"xr", 0x17, 0xB9F7, xor, GR32, GR32>;
1242 defm XGR : BinaryRREAndK<"xgr", 0xB982, 0xB9E7, xor, GR64, GR64>;
1245 // XORs of a 32-bit immediate, leaving other bits unaffected.
1246 // The CC result only reflects the 32-bit field, which means we can
1247 // use it as a zero indicator for i32 operations but not otherwise.
1248 let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1249 // Expands to XILF or XIHF, depending on the choice of register.
1250 def XIFMux : BinaryRIPseudo<xor, GRX32, uimm32>,
1251 Requires<[FeatureHighWord]>;
1252 def XILF : BinaryRIL<"xilf", 0xC07, xor, GR32, uimm32>;
1253 def XIHF : BinaryRIL<"xihf", 0xC06, xor, GRH32, uimm32>;
1255 def XILF64 : BinaryAliasRIL<xor, GR64, imm64lf32>;
1256 def XIHF64 : BinaryAliasRIL<xor, GR64, imm64hf32>;
1259 let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
defm X : BinaryRXPairAndPseudo<"x", 0x57, 0xE357, xor, GR32, load, 4>;
1261 defm XG : BinaryRXYAndPseudo<"xg", 0xE382, xor, GR64, load, 8>;
1265 defm XI : BinarySIPair<"xi", 0x97, 0xEB57, null_frag, imm32zx8>;
1268 let mayLoad = 1, mayStore = 1 in
1269 defm XC : MemorySS<"xc", 0xD7, z_xc, z_xc_loop>;
1271 defm : RMWIByte<xor, bdaddr12pair, XI>;
1272 defm : RMWIByte<xor, bdaddr20pair, XIY>;
1274 //===----------------------------------------------------------------------===//
1275 // Combined logical operations
1276 //===----------------------------------------------------------------------===//
let Predicates = [FeatureMiscellaneousExtensions3], Defs = [CC] in {
1280 // AND with complement.
1281 let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1282 def NCRK : BinaryRRFa<"ncrk", 0xB9F5, andc, GR32, GR32, GR32>;
1283 def NCGRK : BinaryRRFa<"ncgrk", 0xB9E5, andc, GR64, GR64, GR64>;
1286 // OR with complement.
1287 let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1288 def OCRK : BinaryRRFa<"ocrk", 0xB975, orc, GR32, GR32, GR32>;
1289 def OCGRK : BinaryRRFa<"ocgrk", 0xB965, orc, GR64, GR64, GR64>;
1293 let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1294 def NNRK : BinaryRRFa<"nnrk", 0xB974, nand, GR32, GR32, GR32>;
1295 def NNGRK : BinaryRRFa<"nngrk", 0xB964, nand, GR64, GR64, GR64>;
1299 let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1300 def NORK : BinaryRRFa<"nork", 0xB976, nor, GR32, GR32, GR32>;
1301 def NOGRK : BinaryRRFa<"nogrk", 0xB966, nor, GR64, GR64, GR64>;
1305 let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
1306 def NXRK : BinaryRRFa<"nxrk", 0xB977, nxor, GR32, GR32, GR32>;
1307 def NXGRK : BinaryRRFa<"nxgrk", 0xB967, nxor, GR64, GR64, GR64>;
//===----------------------------------------------------------------------===//
// Multiplication
//===----------------------------------------------------------------------===//
1315 // Multiplication of a register, setting the condition code. We prefer these
1316 // over MS(G)R if available, even though we cannot use the condition code,
1317 // since they are three-operand instructions.
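// For example, with the three-operand form "msrkc %r1, %r2, %r3" the product
// can go to a fresh register without clobbering either source, whereas the
// two-operand MSR/MSGR would first need a copy of the operand being overwritten.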
1318 let Predicates = [FeatureMiscellaneousExtensions2],
1319 Defs = [CC], isCommutable = 1 in {
1320 def MSRKC : BinaryRRFa<"msrkc", 0xB9FD, mul, GR32, GR32, GR32>;
1321 def MSGRKC : BinaryRRFa<"msgrkc", 0xB9ED, mul, GR64, GR64, GR64>;
1324 // Multiplication of a register.
1325 let isCommutable = 1 in {
1326 def MSR : BinaryRRE<"msr", 0xB252, mul, GR32, GR32>;
1327 def MSGR : BinaryRRE<"msgr", 0xB90C, mul, GR64, GR64>;
1329 def MSGFR : BinaryRRE<"msgfr", 0xB91C, null_frag, GR64, GR32>;
1330 defm : SXB<mul, GR64, MSGFR>;
1332 // Multiplication of a signed 16-bit immediate.
1333 def MHI : BinaryRI<"mhi", 0xA7C, mul, GR32, imm32sx16>;
1334 def MGHI : BinaryRI<"mghi", 0xA7D, mul, GR64, imm64sx16>;
1336 // Multiplication of a signed 32-bit immediate.
1337 def MSFI : BinaryRIL<"msfi", 0xC21, mul, GR32, simm32>;
1338 def MSGFI : BinaryRIL<"msgfi", 0xC20, mul, GR64, imm64sx32>;
1340 // Multiplication of memory.
1341 defm MH : BinaryRXPair<"mh", 0x4C, 0xE37C, mul, GR32, asextloadi16, 2>;
1342 defm MS : BinaryRXPair<"ms", 0x71, 0xE351, mul, GR32, load, 4>;
1343 def MGH : BinaryRXY<"mgh", 0xE33C, mul, GR64, asextloadi16, 2>,
1344 Requires<[FeatureMiscellaneousExtensions2]>;
1345 def MSGF : BinaryRXY<"msgf", 0xE31C, mul, GR64, asextloadi32, 4>;
1346 def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load, 8>;
1348 // Multiplication of memory, setting the condition code.
1349 let Predicates = [FeatureMiscellaneousExtensions2], Defs = [CC] in {
1350 def MSC : BinaryRXY<"msc", 0xE353, null_frag, GR32, load, 4>;
1351 def MSGC : BinaryRXY<"msgc", 0xE383, null_frag, GR64, load, 8>;
1354 // Multiplication of a register, producing two results.
1355 def MR : BinaryRR <"mr", 0x1C, null_frag, GR128, GR32>;
1356 def MGRK : BinaryRRFa<"mgrk", 0xB9EC, null_frag, GR128, GR64, GR64>,
1357 Requires<[FeatureMiscellaneousExtensions2]>;
1358 def MLR : BinaryRRE<"mlr", 0xB996, null_frag, GR128, GR32>;
1359 def MLGR : BinaryRRE<"mlgr", 0xB986, null_frag, GR128, GR64>;
1361 def : Pat<(z_smul_lohi GR64:$src1, GR64:$src2),
1362 (MGRK GR64:$src1, GR64:$src2)>;
1363 def : Pat<(z_umul_lohi GR64:$src1, GR64:$src2),
1364 (MLGR (AEXT128 GR64:$src1), GR64:$src2)>;
1366 // Multiplication of memory, producing two results.
1367 def M : BinaryRX <"m", 0x5C, null_frag, GR128, load, 4>;
1368 def MFY : BinaryRXY<"mfy", 0xE35C, null_frag, GR128, load, 4>;
1369 def MG : BinaryRXY<"mg", 0xE384, null_frag, GR128, load, 8>,
1370 Requires<[FeatureMiscellaneousExtensions2]>;
1371 def ML : BinaryRXY<"ml", 0xE396, null_frag, GR128, load, 4>;
1372 def MLG : BinaryRXY<"mlg", 0xE386, null_frag, GR128, load, 8>;
1374 def : Pat<(z_smul_lohi GR64:$src1, (i64 (load bdxaddr20only:$src2))),
1375 (MG (AEXT128 GR64:$src1), bdxaddr20only:$src2)>;
1376 def : Pat<(z_umul_lohi GR64:$src1, (i64 (load bdxaddr20only:$src2))),
1377 (MLG (AEXT128 GR64:$src1), bdxaddr20only:$src2)>;
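// A note on the AEXT128 wrapping above: MLGR, MG and MLG take one multiplicand
// implicitly from the low half of their GR128 pair operand, so the patterns
// place $src1 there; the 128-bit product comes back with its high part in the
// first (even) register of the pair and its low part in the second (odd) one.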
1379 //===----------------------------------------------------------------------===//
1380 // Division and remainder
1381 //===----------------------------------------------------------------------===//
1383 let hasSideEffects = 1 in { // Do not speculatively execute.
1384 // Division and remainder, from registers.
1385 def DR : BinaryRR <"dr", 0x1D, null_frag, GR128, GR32>;
1386 def DSGFR : BinaryRRE<"dsgfr", 0xB91D, null_frag, GR128, GR32>;
1387 def DSGR : BinaryRRE<"dsgr", 0xB90D, null_frag, GR128, GR64>;
1388 def DLR : BinaryRRE<"dlr", 0xB997, null_frag, GR128, GR32>;
1389 def DLGR : BinaryRRE<"dlgr", 0xB987, null_frag, GR128, GR64>;
1391 // Division and remainder, from memory.
1392 def D : BinaryRX <"d", 0x5D, null_frag, GR128, load, 4>;
1393 def DSGF : BinaryRXY<"dsgf", 0xE31D, null_frag, GR128, load, 4>;
1394 def DSG : BinaryRXY<"dsg", 0xE30D, null_frag, GR128, load, 8>;
1395 def DL : BinaryRXY<"dl", 0xE397, null_frag, GR128, load, 4>;
1396 def DLG : BinaryRXY<"dlg", 0xE387, null_frag, GR128, load, 8>;
1398 def : Pat<(z_sdivrem GR64:$src1, GR32:$src2),
1399 (DSGFR (AEXT128 GR64:$src1), GR32:$src2)>;
1400 def : Pat<(z_sdivrem GR64:$src1, (i32 (load bdxaddr20only:$src2))),
1401 (DSGF (AEXT128 GR64:$src1), bdxaddr20only:$src2)>;
1402 def : Pat<(z_sdivrem GR64:$src1, GR64:$src2),
1403 (DSGR (AEXT128 GR64:$src1), GR64:$src2)>;
1404 def : Pat<(z_sdivrem GR64:$src1, (i64 (load bdxaddr20only:$src2))),
1405 (DSG (AEXT128 GR64:$src1), bdxaddr20only:$src2)>;
1407 def : Pat<(z_udivrem GR32:$src1, GR32:$src2),
1408 (DLR (ZEXT128 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src1,
1409 subreg_l32)), GR32:$src2)>;
1410 def : Pat<(z_udivrem GR32:$src1, (i32 (load bdxaddr20only:$src2))),
1411 (DL (ZEXT128 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src1,
1412 subreg_l32)), bdxaddr20only:$src2)>;
1413 def : Pat<(z_udivrem GR64:$src1, GR64:$src2),
1414 (DLGR (ZEXT128 GR64:$src1), GR64:$src2)>;
1415 def : Pat<(z_udivrem GR64:$src1, (i64 (load bdxaddr20only:$src2))),
1416 (DLG (ZEXT128 GR64:$src1), bdxaddr20only:$src2)>;
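// A note on the divide patterns: the dividend is first widened into a GR128
// pair (AEXT128 for the signed forms, ZEXT128 for the unsigned ones), and the
// instructions leave the remainder in the first (even) register of the pair
// and the quotient in the second (odd) one, which is how z_sdivrem/z_udivrem
// recover both results from a single operation.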
1418 //===----------------------------------------------------------------------===//
1419 // Shifts and rotates
1420 //===----------------------------------------------------------------------===//
1422 // Logical shift left.
1423 defm SLL : BinaryRSAndK<"sll", 0x89, 0xEBDF, shiftop<shl>, GR32>;
1424 def SLLG : BinaryRSY<"sllg", 0xEB0D, shiftop<shl>, GR64>;
1425 def SLDL : BinaryRS<"sldl", 0x8D, null_frag, GR128>;
1427 // Arithmetic shift left.
1428 let Defs = [CC] in {
1429 defm SLA : BinaryRSAndK<"sla", 0x8B, 0xEBDD, null_frag, GR32>;
1430 def SLAG : BinaryRSY<"slag", 0xEB0B, null_frag, GR64>;
1431 def SLDA : BinaryRS<"slda", 0x8F, null_frag, GR128>;
1434 // Logical shift right.
1435 defm SRL : BinaryRSAndK<"srl", 0x88, 0xEBDE, shiftop<srl>, GR32>;
1436 def SRLG : BinaryRSY<"srlg", 0xEB0C, shiftop<srl>, GR64>;
1437 def SRDL : BinaryRS<"srdl", 0x8C, null_frag, GR128>;
1439 // Arithmetic shift right.
1440 let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
1441 defm SRA : BinaryRSAndK<"sra", 0x8A, 0xEBDC, shiftop<sra>, GR32>;
1442 def SRAG : BinaryRSY<"srag", 0xEB0A, shiftop<sra>, GR64>;
1443 def SRDA : BinaryRS<"srda", 0x8E, null_frag, GR128>;
1447 def RLL : BinaryRSY<"rll", 0xEB1D, shiftop<rotl>, GR32>;
1448 def RLLG : BinaryRSY<"rllg", 0xEB1C, shiftop<rotl>, GR64>;
1450 // Rotate second operand left and insert selected bits into first operand.
1451 // These can act like 32-bit operands provided that the constant start and
1452 // end bits (operands 2 and 3) are in the range [32, 64).
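// As a rough illustration, an extract such as ((x >> 4) & 0xff) on a 32-bit
// value can be done by one RISBG32: rotate amount 60 (a right rotate by 4)
// with selected bit range 56..63, all of which lies within [32, 64).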
1453 let Defs = [CC] in {
1454 let isCodeGenOnly = 1 in
1455 def RISBG32 : RotateSelectRIEf<"risbg", 0xEC55, GR32, GR32>;
1456 let CCValues = 0xE, CompareZeroCCMask = 0xE in
1457 def RISBG : RotateSelectRIEf<"risbg", 0xEC55, GR64, GR64>;
1460 // On zEC12 we have a variant of RISBG that does not set CC.
1461 let Predicates = [FeatureMiscellaneousExtensions] in
1462 def RISBGN : RotateSelectRIEf<"risbgn", 0xEC59, GR64, GR64>;
1464 // Forms of RISBG that only affect one word of the destination register.
1465 // They do not set CC.
1466 let Predicates = [FeatureHighWord] in {
1467 def RISBMux : RotateSelectRIEfPseudo<GRX32, GRX32>;
1468 def RISBLL : RotateSelectAliasRIEf<GR32, GR32>;
1469 def RISBLH : RotateSelectAliasRIEf<GR32, GRH32>;
1470 def RISBHL : RotateSelectAliasRIEf<GRH32, GR32>;
1471 def RISBHH : RotateSelectAliasRIEf<GRH32, GRH32>;
1472 def RISBLG : RotateSelectRIEf<"risblg", 0xEC51, GR32, GR64>;
1473 def RISBHG : RotateSelectRIEf<"risbhg", 0xEC5D, GRH32, GR64>;
1476 // Rotate second operand left and perform a logical operation with selected
1477 // bits of the first operand. The CC result only describes the selected bits,
1478 // so it is not useful for a full comparison against zero.
1479 let Defs = [CC] in {
1480 def RNSBG : RotateSelectRIEf<"rnsbg", 0xEC54, GR64, GR64>;
1481 def ROSBG : RotateSelectRIEf<"rosbg", 0xEC56, GR64, GR64>;
1482 def RXSBG : RotateSelectRIEf<"rxsbg", 0xEC57, GR64, GR64>;
1485 //===----------------------------------------------------------------------===//
1486 // Comparison
1487 //===----------------------------------------------------------------------===//
1489 // Signed comparisons. We put these before the unsigned comparisons because
1490 // some of the signed forms have COMPARE AND BRANCH equivalents whereas none
1491 // of the unsigned forms do.
1492 let Defs = [CC], CCValues = 0xE in {
1493 // Comparison with a register.
1494 def CR : CompareRR <"cr", 0x19, z_scmp, GR32, GR32>;
1495 def CGFR : CompareRRE<"cgfr", 0xB930, null_frag, GR64, GR32>;
1496 def CGR : CompareRRE<"cgr", 0xB920, z_scmp, GR64, GR64>;
1498 // Comparison with a high register.
1499 def CHHR : CompareRRE<"chhr", 0xB9CD, null_frag, GRH32, GRH32>,
1500 Requires<[FeatureHighWord]>;
1501 def CHLR : CompareRRE<"chlr", 0xB9DD, null_frag, GRH32, GR32>,
1502 Requires<[FeatureHighWord]>;
1504 // Comparison with a signed 16-bit immediate. CHIMux expands to CHI or CIH,
1505 // depending on the choice of register.
1506 def CHIMux : CompareRIPseudo<z_scmp, GRX32, imm32sx16>,
1507 Requires<[FeatureHighWord]>;
1508 def CHI : CompareRI<"chi", 0xA7E, z_scmp, GR32, imm32sx16>;
1509 def CGHI : CompareRI<"cghi", 0xA7F, z_scmp, GR64, imm64sx16>;
1511 // Comparison with a signed 32-bit immediate. CFIMux expands to CFI or CIH,
1512 // depending on the choice of register.
1513 def CFIMux : CompareRIPseudo<z_scmp, GRX32, simm32>,
1514 Requires<[FeatureHighWord]>;
1515 def CFI : CompareRIL<"cfi", 0xC2D, z_scmp, GR32, simm32>;
1516 def CIH : CompareRIL<"cih", 0xCCD, z_scmp, GRH32, simm32>,
1517 Requires<[FeatureHighWord]>;
1518 def CGFI : CompareRIL<"cgfi", 0xC2C, z_scmp, GR64, imm64sx32>;
1520 // Comparison with memory.
1521 defm CH : CompareRXPair<"ch", 0x49, 0xE379, z_scmp, GR32, asextloadi16, 2>;
1522 def CMux : CompareRXYPseudo<z_scmp, GRX32, load, 4>,
1523 Requires<[FeatureHighWord]>;
1524 defm C : CompareRXPair<"c", 0x59, 0xE359, z_scmp, GR32, load, 4>;
1525 def CHF : CompareRXY<"chf", 0xE3CD, z_scmp, GRH32, load, 4>,
1526 Requires<[FeatureHighWord]>;
1527 def CGH : CompareRXY<"cgh", 0xE334, z_scmp, GR64, asextloadi16, 2>;
1528 def CGF : CompareRXY<"cgf", 0xE330, z_scmp, GR64, asextloadi32, 4>;
1529 def CG : CompareRXY<"cg", 0xE320, z_scmp, GR64, load, 8>;
1530 def CHRL : CompareRILPC<"chrl", 0xC65, z_scmp, GR32, aligned_asextloadi16>;
1531 def CRL : CompareRILPC<"crl", 0xC6D, z_scmp, GR32, aligned_load>;
1532 def CGHRL : CompareRILPC<"cghrl", 0xC64, z_scmp, GR64, aligned_asextloadi16>;
1533 def CGFRL : CompareRILPC<"cgfrl", 0xC6C, z_scmp, GR64, aligned_asextloadi32>;
1534 def CGRL : CompareRILPC<"cgrl", 0xC68, z_scmp, GR64, aligned_load>;
1536 // Comparison between memory and a signed 16-bit immediate.
1537 def CHHSI : CompareSIL<"chhsi", 0xE554, z_scmp, asextloadi16, imm32sx16>;
1538 def CHSI : CompareSIL<"chsi", 0xE55C, z_scmp, load, imm32sx16>;
1539 def CGHSI : CompareSIL<"cghsi", 0xE558, z_scmp, load, imm64sx16>;
1541 defm : SXB<z_scmp, GR64, CGFR>;
1543 // Unsigned comparisons.
1544 let Defs = [CC], CCValues = 0xE, IsLogical = 1 in {
1545 // Comparison with a register.
1546 def CLR : CompareRR <"clr", 0x15, z_ucmp, GR32, GR32>;
1547 def CLGFR : CompareRRE<"clgfr", 0xB931, null_frag, GR64, GR32>;
1548 def CLGR : CompareRRE<"clgr", 0xB921, z_ucmp, GR64, GR64>;
1550 // Comparison with a high register.
1551 def CLHHR : CompareRRE<"clhhr", 0xB9CF, null_frag, GRH32, GRH32>,
1552 Requires<[FeatureHighWord]>;
1553 def CLHLR : CompareRRE<"clhlr", 0xB9DF, null_frag, GRH32, GR32>,
1554 Requires<[FeatureHighWord]>;
1556 // Comparison with an unsigned 32-bit immediate. CLFIMux expands to CLFI
1557 // or CLIH, depending on the choice of register.
1558 def CLFIMux : CompareRIPseudo<z_ucmp, GRX32, uimm32>,
1559 Requires<[FeatureHighWord]>;
1560 def CLFI : CompareRIL<"clfi", 0xC2F, z_ucmp, GR32, uimm32>;
1561 def CLIH : CompareRIL<"clih", 0xCCF, z_ucmp, GRH32, uimm32>,
1562 Requires<[FeatureHighWord]>;
1563 def CLGFI : CompareRIL<"clgfi", 0xC2E, z_ucmp, GR64, imm64zx32>;
1565 // Comparison with memory.
1566 def CLMux : CompareRXYPseudo<z_ucmp, GRX32, load, 4>,
1567 Requires<[FeatureHighWord]>;
1568 defm CL : CompareRXPair<"cl", 0x55, 0xE355, z_ucmp, GR32, load, 4>;
1569 def CLHF : CompareRXY<"clhf", 0xE3CF, z_ucmp, GRH32, load, 4>,
1570 Requires<[FeatureHighWord]>;
1571 def CLGF : CompareRXY<"clgf", 0xE331, z_ucmp, GR64, azextloadi32, 4>;
1572 def CLG : CompareRXY<"clg", 0xE321, z_ucmp, GR64, load, 8>;
1573 def CLHRL : CompareRILPC<"clhrl", 0xC67, z_ucmp, GR32,
1574 aligned_azextloadi16>;
1575 def CLRL : CompareRILPC<"clrl", 0xC6F, z_ucmp, GR32,
1577 def CLGHRL : CompareRILPC<"clghrl", 0xC66, z_ucmp, GR64,
1578 aligned_azextloadi16>;
1579 def CLGFRL : CompareRILPC<"clgfrl", 0xC6E, z_ucmp, GR64,
1580 aligned_azextloadi32>;
1581 def CLGRL : CompareRILPC<"clgrl", 0xC6A, z_ucmp, GR64,
1584 // Comparison between memory and an unsigned 8-bit immediate.
1585 defm CLI : CompareSIPair<"cli", 0x95, 0xEB55, z_ucmp, azextloadi8, imm32zx8>;
1587 // Comparison between memory and an unsigned 16-bit immediate.
1588 def CLHHSI : CompareSIL<"clhhsi", 0xE555, z_ucmp, azextloadi16, imm32zx16>;
1589 def CLFHSI : CompareSIL<"clfhsi", 0xE55D, z_ucmp, load, imm32zx16>;
1590 def CLGHSI : CompareSIL<"clghsi", 0xE559, z_ucmp, load, imm64zx16>;
1592 defm : ZXB<z_ucmp, GR64, CLGFR>;
1594 // Memory-to-memory comparison.
1595 let mayLoad = 1, Defs = [CC] in {
1596 defm CLC : CompareMemorySS<"clc", 0xD5, z_clc, z_clc_loop>;
1597 def CLCL : SideEffectBinaryMemMemRR<"clcl", 0x0F, GR128, GR128>;
1598 def CLCLE : SideEffectTernaryMemMemRS<"clcle", 0xA9, GR128, GR128>;
1599 def CLCLU : SideEffectTernaryMemMemRSY<"clclu", 0xEB8F, GR128, GR128>;
1602 // String comparison.
1603 let mayLoad = 1, Defs = [CC] in
1604 defm CLST : StringRRE<"clst", 0xB25D, z_strcmp>;
1607 let Defs = [CC] in {
1608 // TMxMux expands to TM[LH]x, depending on the choice of register.
1609 def TMLMux : CompareRIPseudo<z_tm_reg, GRX32, imm32ll16>,
1610 Requires<[FeatureHighWord]>;
1611 def TMHMux : CompareRIPseudo<z_tm_reg, GRX32, imm32lh16>,
1612 Requires<[FeatureHighWord]>;
1613 def TMLL : CompareRI<"tmll", 0xA71, z_tm_reg, GR32, imm32ll16>;
1614 def TMLH : CompareRI<"tmlh", 0xA70, z_tm_reg, GR32, imm32lh16>;
1615 def TMHL : CompareRI<"tmhl", 0xA73, z_tm_reg, GRH32, imm32ll16>;
1616 def TMHH : CompareRI<"tmhh", 0xA72, z_tm_reg, GRH32, imm32lh16>;
1618 def TMLL64 : CompareAliasRI<z_tm_reg, GR64, imm64ll16>;
1619 def TMLH64 : CompareAliasRI<z_tm_reg, GR64, imm64lh16>;
1620 def TMHL64 : CompareAliasRI<z_tm_reg, GR64, imm64hl16>;
1621 def TMHH64 : CompareAliasRI<z_tm_reg, GR64, imm64hh16>;
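// The *64 aliases above let a 64-bit test-under-mask whose constant fits a
// single 16-bit field (imm64ll16/lh16/hl16/hh16) be encoded with the matching
// TMLL/TMLH/TMHL/TMHH form on the corresponding halfword of the register.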
1623 defm TM : CompareSIPair<"tm", 0x91, 0xEB51, z_tm_mem, anyextloadi8, imm32zx8>;
1626 def TML : InstAlias<"tml\t$R, $I", (TMLL GR32:$R, imm32ll16:$I), 0>;
1627 def TMH : InstAlias<"tmh\t$R, $I", (TMLH GR32:$R, imm32lh16:$I), 0>;
1629 // Compare logical characters under mask -- not (yet) used for codegen.
1630 let Defs = [CC] in {
1631 defm CLM : CompareRSPair<"clm", 0xBD, 0xEB21, GR32, 0>;
1632 def CLMH : CompareRSY<"clmh", 0xEB20, GRH32, 0>;
1635 //===----------------------------------------------------------------------===//
1636 // Prefetch and execution hint
1637 //===----------------------------------------------------------------------===//
1639 let mayLoad = 1, mayStore = 1 in {
1640 def PFD : PrefetchRXY<"pfd", 0xE336, z_prefetch>;
1641 def PFDRL : PrefetchRILPC<"pfdrl", 0xC62, z_prefetch>;
1644 let Predicates = [FeatureExecutionHint], hasSideEffects = 1 in {
1645 // Branch Prediction Preload
1646 def BPP : BranchPreloadSMI<"bpp", 0xC7>;
1647 def BPRP : BranchPreloadMII<"bprp", 0xC5>;
1649 // Next Instruction Access Intent
1650 def NIAI : SideEffectBinaryIE<"niai", 0xB2FA, imm32zx4, imm32zx4>;
1653 //===----------------------------------------------------------------------===//
1654 // Atomic operations
1655 //===----------------------------------------------------------------------===//
1657 // A serialization instruction that acts as a barrier for all memory
1658 // accesses, which expands to "bcr 14, 0".
1659 let hasSideEffects = 1 in
1660 def Serialize : Alias<2, (outs), (ins), []>;
1662 // A pseudo instruction that serves as a compiler barrier.
1663 let hasSideEffects = 1, hasNoSchedulingInfo = 1 in
1664 def MemBarrier : Pseudo<(outs), (ins), [(z_membarrier)]>;
1666 let Predicates = [FeatureInterlockedAccess1], Defs = [CC] in {
1667 def LAA : LoadAndOpRSY<"laa", 0xEBF8, atomic_load_add_32, GR32>;
1668 def LAAG : LoadAndOpRSY<"laag", 0xEBE8, atomic_load_add_64, GR64>;
1669 def LAAL : LoadAndOpRSY<"laal", 0xEBFA, null_frag, GR32>;
1670 def LAALG : LoadAndOpRSY<"laalg", 0xEBEA, null_frag, GR64>;
1671 def LAN : LoadAndOpRSY<"lan", 0xEBF4, atomic_load_and_32, GR32>;
1672 def LANG : LoadAndOpRSY<"lang", 0xEBE4, atomic_load_and_64, GR64>;
1673 def LAO : LoadAndOpRSY<"lao", 0xEBF6, atomic_load_or_32, GR32>;
1674 def LAOG : LoadAndOpRSY<"laog", 0xEBE6, atomic_load_or_64, GR64>;
1675 def LAX : LoadAndOpRSY<"lax", 0xEBF7, atomic_load_xor_32, GR32>;
1676 def LAXG : LoadAndOpRSY<"laxg", 0xEBE7, atomic_load_xor_64, GR64>;
1679 def ATOMIC_SWAPW : AtomicLoadWBinaryReg<z_atomic_swapw>;
1680 def ATOMIC_SWAP_32 : AtomicLoadBinaryReg32<atomic_swap_32>;
1681 def ATOMIC_SWAP_64 : AtomicLoadBinaryReg64<atomic_swap_64>;
1683 def ATOMIC_LOADW_AR : AtomicLoadWBinaryReg<z_atomic_loadw_add>;
1684 def ATOMIC_LOADW_AFI : AtomicLoadWBinaryImm<z_atomic_loadw_add, simm32>;
1685 let Predicates = [FeatureNoInterlockedAccess1] in {
1686 def ATOMIC_LOAD_AR : AtomicLoadBinaryReg32<atomic_load_add_32>;
1687 def ATOMIC_LOAD_AHI : AtomicLoadBinaryImm32<atomic_load_add_32, imm32sx16>;
1688 def ATOMIC_LOAD_AFI : AtomicLoadBinaryImm32<atomic_load_add_32, simm32>;
1689 def ATOMIC_LOAD_AGR : AtomicLoadBinaryReg64<atomic_load_add_64>;
1690 def ATOMIC_LOAD_AGHI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx16>;
1691 def ATOMIC_LOAD_AGFI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx32>;
1694 def ATOMIC_LOADW_SR : AtomicLoadWBinaryReg<z_atomic_loadw_sub>;
1695 def ATOMIC_LOAD_SR : AtomicLoadBinaryReg32<atomic_load_sub_32>;
1696 def ATOMIC_LOAD_SGR : AtomicLoadBinaryReg64<atomic_load_sub_64>;
1698 def ATOMIC_LOADW_NR : AtomicLoadWBinaryReg<z_atomic_loadw_and>;
1699 def ATOMIC_LOADW_NILH : AtomicLoadWBinaryImm<z_atomic_loadw_and, imm32lh16c>;
1700 let Predicates = [FeatureNoInterlockedAccess1] in {
1701 def ATOMIC_LOAD_NR : AtomicLoadBinaryReg32<atomic_load_and_32>;
1702 def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm32<atomic_load_and_32,
1704 def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm32<atomic_load_and_32,
1706 def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
1707 def ATOMIC_LOAD_NGR : AtomicLoadBinaryReg64<atomic_load_and_64>;
1708 def ATOMIC_LOAD_NILL64 : AtomicLoadBinaryImm64<atomic_load_and_64,
1710 def ATOMIC_LOAD_NILH64 : AtomicLoadBinaryImm64<atomic_load_and_64,
1712 def ATOMIC_LOAD_NIHL64 : AtomicLoadBinaryImm64<atomic_load_and_64,
1714 def ATOMIC_LOAD_NIHH64 : AtomicLoadBinaryImm64<atomic_load_and_64,
1716 def ATOMIC_LOAD_NILF64 : AtomicLoadBinaryImm64<atomic_load_and_64,
1718 def ATOMIC_LOAD_NIHF64 : AtomicLoadBinaryImm64<atomic_load_and_64,
1722 def ATOMIC_LOADW_OR : AtomicLoadWBinaryReg<z_atomic_loadw_or>;
1723 def ATOMIC_LOADW_OILH : AtomicLoadWBinaryImm<z_atomic_loadw_or, imm32lh16>;
1724 let Predicates = [FeatureNoInterlockedAccess1] in {
1725 def ATOMIC_LOAD_OR : AtomicLoadBinaryReg32<atomic_load_or_32>;
1726 def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
1727 def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
1728 def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
1729 def ATOMIC_LOAD_OGR : AtomicLoadBinaryReg64<atomic_load_or_64>;
1730 def ATOMIC_LOAD_OILL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
1731 def ATOMIC_LOAD_OILH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
1732 def ATOMIC_LOAD_OIHL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
1733 def ATOMIC_LOAD_OIHH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
1734 def ATOMIC_LOAD_OILF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
1735 def ATOMIC_LOAD_OIHF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
1738 def ATOMIC_LOADW_XR : AtomicLoadWBinaryReg<z_atomic_loadw_xor>;
1739 def ATOMIC_LOADW_XILF : AtomicLoadWBinaryImm<z_atomic_loadw_xor, uimm32>;
1740 let Predicates = [FeatureNoInterlockedAccess1] in {
1741 def ATOMIC_LOAD_XR : AtomicLoadBinaryReg32<atomic_load_xor_32>;
1742 def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
1743 def ATOMIC_LOAD_XGR : AtomicLoadBinaryReg64<atomic_load_xor_64>;
1744 def ATOMIC_LOAD_XILF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
1745 def ATOMIC_LOAD_XIHF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
1748 def ATOMIC_LOADW_NRi : AtomicLoadWBinaryReg<z_atomic_loadw_nand>;
1749 def ATOMIC_LOADW_NILHi : AtomicLoadWBinaryImm<z_atomic_loadw_nand,
1751 def ATOMIC_LOAD_NRi : AtomicLoadBinaryReg32<atomic_load_nand_32>;
1752 def ATOMIC_LOAD_NILLi : AtomicLoadBinaryImm32<atomic_load_nand_32,
1754 def ATOMIC_LOAD_NILHi : AtomicLoadBinaryImm32<atomic_load_nand_32,
1756 def ATOMIC_LOAD_NILFi : AtomicLoadBinaryImm32<atomic_load_nand_32, uimm32>;
1757 def ATOMIC_LOAD_NGRi : AtomicLoadBinaryReg64<atomic_load_nand_64>;
1758 def ATOMIC_LOAD_NILL64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
1760 def ATOMIC_LOAD_NILH64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
1762 def ATOMIC_LOAD_NIHL64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
1764 def ATOMIC_LOAD_NIHH64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
1766 def ATOMIC_LOAD_NILF64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
1768 def ATOMIC_LOAD_NIHF64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
1771 def ATOMIC_LOADW_MIN : AtomicLoadWBinaryReg<z_atomic_loadw_min>;
1772 def ATOMIC_LOAD_MIN_32 : AtomicLoadBinaryReg32<atomic_load_min_32>;
1773 def ATOMIC_LOAD_MIN_64 : AtomicLoadBinaryReg64<atomic_load_min_64>;
1775 def ATOMIC_LOADW_MAX : AtomicLoadWBinaryReg<z_atomic_loadw_max>;
1776 def ATOMIC_LOAD_MAX_32 : AtomicLoadBinaryReg32<atomic_load_max_32>;
1777 def ATOMIC_LOAD_MAX_64 : AtomicLoadBinaryReg64<atomic_load_max_64>;
1779 def ATOMIC_LOADW_UMIN : AtomicLoadWBinaryReg<z_atomic_loadw_umin>;
1780 def ATOMIC_LOAD_UMIN_32 : AtomicLoadBinaryReg32<atomic_load_umin_32>;
1781 def ATOMIC_LOAD_UMIN_64 : AtomicLoadBinaryReg64<atomic_load_umin_64>;
1783 def ATOMIC_LOADW_UMAX : AtomicLoadWBinaryReg<z_atomic_loadw_umax>;
1784 def ATOMIC_LOAD_UMAX_32 : AtomicLoadBinaryReg32<atomic_load_umax_32>;
1785 def ATOMIC_LOAD_UMAX_64 : AtomicLoadBinaryReg64<atomic_load_umax_64>;
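// The pseudo below handles compare-and-swap on a sub-word field: $addr is the
// containing aligned word, and $bitshift, $negbitshift and $bitsize describe
// where the narrow value sits within it, so that the custom inserter (in C++,
// outside this file) can build a CS loop that replaces only those bits.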
1787 def ATOMIC_CMP_SWAPW
1788 : Pseudo<(outs GR32:$dst), (ins bdaddr20only:$addr, GR32:$cmp, GR32:$swap,
1789 ADDR32:$bitshift, ADDR32:$negbitshift,
1792 (z_atomic_cmp_swapw bdaddr20only:$addr, GR32:$cmp, GR32:$swap,
1793 ADDR32:$bitshift, ADDR32:$negbitshift,
1794 uimm32:$bitsize))]> {
1798 let usesCustomInserter = 1;
1799 let hasNoSchedulingInfo = 1;
1803 let mayLoad = 1, Defs = [CC] in
1804 def TS : StoreInherentS<"ts", 0x9300, null_frag, 1>;
1806 // Compare and swap.
1807 let Defs = [CC] in {
1808 defm CS : CmpSwapRSPair<"cs", 0xBA, 0xEB14, z_atomic_cmp_swap, GR32>;
1809 def CSG : CmpSwapRSY<"csg", 0xEB30, z_atomic_cmp_swap, GR64>;
1812 // Compare double and swap.
1813 let Defs = [CC] in {
1814 defm CDS : CmpSwapRSPair<"cds", 0xBB, 0xEB31, null_frag, GR128>;
1815 def CDSG : CmpSwapRSY<"cdsg", 0xEB3E, z_atomic_cmp_swap_128, GR128>;
1818 // Compare and swap and store.
1819 let Uses = [R0L, R1D], Defs = [CC], mayStore = 1, mayLoad = 1 in
1820 def CSST : SideEffectTernarySSF<"csst", 0xC82, GR64>;
1822 // Perform locked operation.
1823 let Uses = [R0L, R1D], Defs = [CC], mayStore = 1, mayLoad = 1 in
1824 def PLO : SideEffectQuaternarySSe<"plo", 0xEE, GR64>;
1826 // Load/store pair from/to quadword.
1827 def LPQ : UnaryRXY<"lpq", 0xE38F, z_atomic_load_128, GR128, 16>;
1828 def STPQ : StoreRXY<"stpq", 0xE38E, z_atomic_store_128, GR128, 16>;
1830 // Load pair disjoint.
1831 let Predicates = [FeatureInterlockedAccess1], Defs = [CC] in {
1832 def LPD : BinarySSF<"lpd", 0xC84, GR128>;
1833 def LPDG : BinarySSF<"lpdg", 0xC85, GR128>;
1836 //===----------------------------------------------------------------------===//
1837 // Translate and convert
1838 //===----------------------------------------------------------------------===//
1840 let mayLoad = 1, mayStore = 1 in
1841 def TR : SideEffectBinarySSa<"tr", 0xDC>;
1843 let mayLoad = 1, Defs = [CC, R0L, R1D] in {
1844 def TRT : SideEffectBinarySSa<"trt", 0xDD>;
1845 def TRTR : SideEffectBinarySSa<"trtr", 0xD0>;
1848 let mayLoad = 1, mayStore = 1, Uses = [R0L] in
1849 def TRE : SideEffectBinaryMemMemRRE<"tre", 0xB2A5, GR128, GR64>;
1851 let mayLoad = 1, Uses = [R1D], Defs = [CC] in {
1852 defm TRTE : BinaryMemRRFcOpt<"trte", 0xB9BF, GR128, GR64>;
1853 defm TRTRE : BinaryMemRRFcOpt<"trtre", 0xB9BD, GR128, GR64>;
1856 let mayLoad = 1, mayStore = 1, Uses = [R0L, R1D], Defs = [CC] in {
1857 defm TROO : SideEffectTernaryMemMemRRFcOpt<"troo", 0xB993, GR128, GR64>;
1858 defm TROT : SideEffectTernaryMemMemRRFcOpt<"trot", 0xB992, GR128, GR64>;
1859 defm TRTO : SideEffectTernaryMemMemRRFcOpt<"trto", 0xB991, GR128, GR64>;
1860 defm TRTT : SideEffectTernaryMemMemRRFcOpt<"trtt", 0xB990, GR128, GR64>;
1863 let mayLoad = 1, mayStore = 1, Defs = [CC] in {
1864 defm CU12 : SideEffectTernaryMemMemRRFcOpt<"cu12", 0xB2A7, GR128, GR128>;
1865 defm CU14 : SideEffectTernaryMemMemRRFcOpt<"cu14", 0xB9B0, GR128, GR128>;
1866 defm CU21 : SideEffectTernaryMemMemRRFcOpt<"cu21", 0xB2A6, GR128, GR128>;
1867 defm CU24 : SideEffectTernaryMemMemRRFcOpt<"cu24", 0xB9B1, GR128, GR128>;
1868 def CU41 : SideEffectBinaryMemMemRRE<"cu41", 0xB9B2, GR128, GR128>;
1869 def CU42 : SideEffectBinaryMemMemRRE<"cu42", 0xB9B3, GR128, GR128>;
1871 let isAsmParserOnly = 1 in {
1872 defm CUUTF : SideEffectTernaryMemMemRRFcOpt<"cuutf", 0xB2A6, GR128, GR128>;
1873 defm CUTFU : SideEffectTernaryMemMemRRFcOpt<"cutfu", 0xB2A7, GR128, GR128>;
1877 //===----------------------------------------------------------------------===//
1878 // Message-security assist
1879 //===----------------------------------------------------------------------===//
1881 let mayLoad = 1, mayStore = 1, Uses = [R0L, R1D], Defs = [CC] in {
1882 def KM : SideEffectBinaryMemMemRRE<"km", 0xB92E, GR128, GR128>;
1883 def KMC : SideEffectBinaryMemMemRRE<"kmc", 0xB92F, GR128, GR128>;
1885 def KIMD : SideEffectBinaryMemRRE<"kimd", 0xB93E, GR64, GR128>;
1886 def KLMD : SideEffectBinaryMemRRE<"klmd", 0xB93F, GR64, GR128>;
1887 def KMAC : SideEffectBinaryMemRRE<"kmac", 0xB91E, GR64, GR128>;
1889 let Predicates = [FeatureMessageSecurityAssist4] in {
1890 def KMF : SideEffectBinaryMemMemRRE<"kmf", 0xB92A, GR128, GR128>;
1891 def KMO : SideEffectBinaryMemMemRRE<"kmo", 0xB92B, GR128, GR128>;
1892 def KMCTR : SideEffectTernaryMemMemMemRRFb<"kmctr", 0xB92D,
1893 GR128, GR128, GR128>;
1894 def PCC : SideEffectInherentRRE<"pcc", 0xB92C>;
1897 let Predicates = [FeatureMessageSecurityAssist5] in
1898 def PPNO : SideEffectBinaryMemMemRRE<"ppno", 0xB93C, GR128, GR128>;
1899 let Predicates = [FeatureMessageSecurityAssist7], isAsmParserOnly = 1 in
1900 def PRNO : SideEffectBinaryMemMemRRE<"prno", 0xB93C, GR128, GR128>;
1902 let Predicates = [FeatureMessageSecurityAssist8] in
1903 def KMA : SideEffectTernaryMemMemMemRRFb<"kma", 0xB929,
1904 GR128, GR128, GR128>;
1906 let Predicates = [FeatureMessageSecurityAssist9] in
1907 def KDSA : SideEffectBinaryMemRRE<"kdsa", 0xB93A, GR64, GR128>;
1910 //===----------------------------------------------------------------------===//
1911 // Guarded storage
1912 //===----------------------------------------------------------------------===//
1914 // These instructions use and/or modify the guarded storage control
1915 // registers, which we do not otherwise model, so they should have
1916 // hasSideEffects set.
1917 let Predicates = [FeatureGuardedStorage], hasSideEffects = 1 in {
1918 def LGG : UnaryRXY<"lgg", 0xE34C, null_frag, GR64, 8>;
1919 def LLGFSG : UnaryRXY<"llgfsg", 0xE348, null_frag, GR64, 4>;
1922 def LGSC : SideEffectBinaryRXY<"lgsc", 0xE34D, GR64>;
1924 def STGSC : SideEffectBinaryRXY<"stgsc", 0xE349, GR64>;
1927 //===----------------------------------------------------------------------===//
1928 // Decimal arithmetic
1929 //===----------------------------------------------------------------------===//
1931 defm CVB : BinaryRXPair<"cvb",0x4F, 0xE306, null_frag, GR32, load, 4>;
1932 def CVBG : BinaryRXY<"cvbg", 0xE30E, null_frag, GR64, load, 8>;
1934 defm CVD : StoreRXPair<"cvd", 0x4E, 0xE326, null_frag, GR32, 4>;
1935 def CVDG : StoreRXY<"cvdg", 0xE32E, null_frag, GR64, 8>;
1937 let mayLoad = 1, mayStore = 1 in {
1938 def MVN : SideEffectBinarySSa<"mvn", 0xD1>;
1939 def MVZ : SideEffectBinarySSa<"mvz", 0xD3>;
1940 def MVO : SideEffectBinarySSb<"mvo", 0xF1>;
1942 def PACK : SideEffectBinarySSb<"pack", 0xF2>;
1943 def PKA : SideEffectBinarySSf<"pka", 0xE9>;
1944 def PKU : SideEffectBinarySSf<"pku", 0xE1>;
1945 def UNPK : SideEffectBinarySSb<"unpk", 0xF3>;
1946 let Defs = [CC] in {
1947 def UNPKA : SideEffectBinarySSa<"unpka", 0xEA>;
1948 def UNPKU : SideEffectBinarySSa<"unpku", 0xE2>;
1952 let mayLoad = 1, mayStore = 1 in {
1953 let Defs = [CC] in {
1954 def AP : SideEffectBinarySSb<"ap", 0xFA>;
1955 def SP : SideEffectBinarySSb<"sp", 0xFB>;
1956 def ZAP : SideEffectBinarySSb<"zap", 0xF8>;
1957 def SRP : SideEffectTernarySSc<"srp", 0xF0>;
1959 def MP : SideEffectBinarySSb<"mp", 0xFC>;
1960 def DP : SideEffectBinarySSb<"dp", 0xFD>;
1961 let Defs = [CC] in {
1962 def ED : SideEffectBinarySSa<"ed", 0xDE>;
1963 def EDMK : SideEffectBinarySSa<"edmk", 0xDF>;
1967 let Defs = [CC] in {
1968 def CP : CompareSSb<"cp", 0xF9>;
1969 def TP : TestRSL<"tp", 0xEBC0>;
1972 //===----------------------------------------------------------------------===//
1973 // Access registers
1974 //===----------------------------------------------------------------------===//
1976 // Read a 32-bit access register into a GR32. As with all GR32 operations,
1977 // the upper 32 bits of the enclosing GR64 remain unchanged, which is useful
1978 // when a 64-bit address is stored in a pair of access registers.
1979 def EAR : UnaryRRE<"ear", 0xB24F, null_frag, GR32, AR32>;
1981 // Set access register.
1982 def SAR : UnaryRRE<"sar", 0xB24E, null_frag, AR32, GR32>;
1984 // Copy access register.
1985 def CPYA : UnaryRRE<"cpya", 0xB24D, null_frag, AR32, AR32>;
1987 // Load address extended.
1988 defm LAE : LoadAddressRXPair<"lae", 0x51, 0xE375, null_frag>;
1990 // Load access multiple.
1991 defm LAM : LoadMultipleRSPair<"lam", 0x9A, 0xEB9A, AR32>;
1993 // Store access multiple.
1994 defm STAM : StoreMultipleRSPair<"stam", 0x9B, 0xEB9B, AR32>;
1996 //===----------------------------------------------------------------------===//
1997 // Program mask and addressing mode
1998 //===----------------------------------------------------------------------===//
2000 // Extract CC and program mask into a register. CC ends up in bits 29 and 28.
2002 def IPM : InherentRRE<"ipm", 0xB222, GR32, z_ipm>;
2004 // Set CC and program mask from a register.
2005 let hasSideEffects = 1, Defs = [CC] in
2006 def SPM : SideEffectUnaryRR<"spm", 0x04, GR32>;
2008 // Branch and link - like BAS, but also extracts CC and program mask.
2009 let isCall = 1, Uses = [CC], Defs = [CC] in {
2010 def BAL : CallRX<"bal", 0x45>;
2011 def BALR : CallRR<"balr", 0x05>;
2014 // Test addressing mode.
2016 def TAM : SideEffectInherentE<"tam", 0x010B>;
2018 // Set addressing mode.
2019 let hasSideEffects = 1 in {
2020 def SAM24 : SideEffectInherentE<"sam24", 0x010C>;
2021 def SAM31 : SideEffectInherentE<"sam31", 0x010D>;
2022 def SAM64 : SideEffectInherentE<"sam64", 0x010E>;
2025 // Branch and set mode. Not really a call, but also sets an output register.
2026 let isBranch = 1, isTerminator = 1, isBarrier = 1 in
2027 def BSM : CallRR<"bsm", 0x0B>;
2029 // Branch and save and set mode.
2030 let isCall = 1, Defs = [CC] in
2031 def BASSM : CallRR<"bassm", 0x0C>;
2033 //===----------------------------------------------------------------------===//
2034 // Transactional execution
2035 //===----------------------------------------------------------------------===//
2037 let hasSideEffects = 1, Predicates = [FeatureTransactionalExecution] in {
2038 // Transaction Begin
2039 let mayStore = 1, usesCustomInserter = 1, Defs = [CC] in {
2040 def TBEGIN : TestBinarySIL<"tbegin", 0xE560, z_tbegin, imm32zx16>;
2041 let hasNoSchedulingInfo = 1 in
2042 def TBEGIN_nofloat : TestBinarySILPseudo<z_tbegin_nofloat, imm32zx16>;
2043 def TBEGINC : SideEffectBinarySIL<"tbeginc", 0xE561,
2044 int_s390_tbeginc, imm32zx16>;
2049 def TEND : TestInherentS<"tend", 0xB2F8, z_tend>;
2051 // Transaction Abort
2052 let isTerminator = 1, isBarrier = 1, mayStore = 1,
2053 hasSideEffects = 1 in
2054 def TABORT : SideEffectAddressS<"tabort", 0xB2FC, int_s390_tabort>;
2056 // Nontransactional Store
2057 def NTSTG : StoreRXY<"ntstg", 0xE325, int_s390_ntstg, GR64, 8>;
2059 // Extract Transaction Nesting Depth
2060 def ETND : InherentRRE<"etnd", 0xB2EC, GR32, int_s390_etnd>;
2063 //===----------------------------------------------------------------------===//
2064 // Processor assist
2065 //===----------------------------------------------------------------------===//
2067 let Predicates = [FeatureProcessorAssist] in {
2068 let hasSideEffects = 1 in
2069 def PPA : SideEffectTernaryRRFc<"ppa", 0xB2E8, GR64, GR64, imm32zx4>;
2070 def : Pat<(int_s390_ppa_txassist GR32:$src),
2071 (PPA (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32),
2075 //===----------------------------------------------------------------------===//
2076 // Miscellaneous Instructions.
2077 //===----------------------------------------------------------------------===//
2079 // Find leftmost one, AKA count leading zeros. The instruction actually
2080 // returns a pair of GR64s, the first giving the number of leading zeros
2081 // and the second giving a copy of the source with the leftmost one bit
2082 // cleared. We only use the first result here.
2084 def FLOGR : UnaryRRE<"flogr", 0xB983, null_frag, GR128, GR64>;
2085 def : Pat<(i64 (ctlz GR64:$src)),
2086 (EXTRACT_SUBREG (FLOGR GR64:$src), subreg_h64)>;
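// For example, FLOGR on the value 0x10000 (a 64-bit operand with 47 leading
// zeros) leaves 47 in the first register of the result pair, which is exactly
// the ctlz value, so the pattern extracts subreg_h64 and drops the second result.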
2088 // Population count. Counts bits set per byte or doubleword.
2089 let Predicates = [FeatureMiscellaneousExtensions3] in {
2091 def POPCNTOpt : BinaryRRFc<"popcnt", 0xB9E1, GR64, GR64>;
2092 def : Pat<(ctpop GR64:$src), (POPCNTOpt GR64:$src, 8)>;
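// The constant 8 here is the M3 operand: with miscellaneous-extensions-3 it
// requests a single population count of the whole doubleword, which is why
// this form can match ctpop directly, unlike the per-byte POPCNT below.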
2094 let Predicates = [FeaturePopulationCount], Defs = [CC] in
2095 def POPCNT : UnaryRRE<"popcnt", 0xB9E1, z_popcnt, GR64, GR64>;
2097 // Search a block of memory for a character.
2098 let mayLoad = 1, Defs = [CC] in
2099 defm SRST : StringRRE<"srst", 0xB25E, z_search_string>;
2100 let mayLoad = 1, Defs = [CC], Uses = [R0L] in
2101 def SRSTU : SideEffectBinaryMemMemRRE<"srstu", 0xB9BE, GR64, GR64>;
2103 // Compare until substring equal.
2104 let mayLoad = 1, Defs = [CC], Uses = [R0L, R1L] in
2105 def CUSE : SideEffectBinaryMemMemRRE<"cuse", 0xB257, GR128, GR128>;
2107 // Compare and form codeword.
2108 let mayLoad = 1, Defs = [CC, R1D, R2D, R3D], Uses = [R1D, R2D, R3D] in
2109 def CFC : SideEffectAddressS<"cfc", 0xB21A, null_frag>;
2112 let mayLoad = 1, mayStore = 1, Defs = [CC, R0D, R1D, R2D, R3D, R5D],
2113 Uses = [R0D, R1D, R2D, R3D, R4D, R5D] in
2114 def UPT : SideEffectInherentE<"upt", 0x0102>;
2117 let mayLoad = 1, Defs = [CC] in
2118 def CKSM : SideEffectBinaryMemMemRRE<"cksm", 0xB241, GR64, GR128>;
2120 // Compression call.
2121 let mayLoad = 1, mayStore = 1, Defs = [CC, R1D], Uses = [R0L, R1D] in
2122 def CMPSC : SideEffectBinaryMemMemRRE<"cmpsc", 0xB263, GR128, GR128>;
2125 let Predicates = [FeatureEnhancedSort],
2126 mayLoad = 1, mayStore = 1, Defs = [CC], Uses = [R0L, R1D] in
2127 def SORTL : SideEffectBinaryMemMemRRE<"sortl", 0xB938, GR128, GR128>;
2129 // Deflate conversion call.
2130 let Predicates = [FeatureDeflateConversion],
2131 mayLoad = 1, mayStore = 1, Defs = [CC], Uses = [R0L, R1D] in
2132 def DFLTCC : SideEffectTernaryMemMemRRFa<"dfltcc", 0xB939,
2133 GR128, GR128, GR64>;
2136 let hasSideEffects = 1 in {
2137 def EX : SideEffectBinaryRX<"ex", 0x44, GR64>;
2138 def EXRL : SideEffectBinaryRILPC<"exrl", 0xC60, GR64>;
2141 //===----------------------------------------------------------------------===//
2142 // .insn directive instructions
2143 //===----------------------------------------------------------------------===//
2145 let isCodeGenOnly = 1, hasSideEffects = 1 in {
2146 def InsnE : DirectiveInsnE<(outs), (ins imm64zx16:$enc), ".insn e,$enc", []>;
2147 def InsnRI : DirectiveInsnRI<(outs), (ins imm64zx32:$enc, AnyReg:$R1,
2149 ".insn ri,$enc,$R1,$I2", []>;
2150 def InsnRIE : DirectiveInsnRIE<(outs), (ins imm64zx48:$enc, AnyReg:$R1,
2151 AnyReg:$R3, brtarget16:$I2),
2152 ".insn rie,$enc,$R1,$R3,$I2", []>;
2153 def InsnRIL : DirectiveInsnRIL<(outs), (ins imm64zx48:$enc, AnyReg:$R1,
2155 ".insn ril,$enc,$R1,$I2", []>;
2156 def InsnRILU : DirectiveInsnRIL<(outs), (ins imm64zx48:$enc, AnyReg:$R1,
2158 ".insn rilu,$enc,$R1,$I2", []>;
2159 def InsnRIS : DirectiveInsnRIS<(outs),
2160 (ins imm64zx48:$enc, AnyReg:$R1,
2161 imm32sx8:$I2, imm32zx4:$M3,
2163 ".insn ris,$enc,$R1,$I2,$M3,$BD4", []>;
2164 def InsnRR : DirectiveInsnRR<(outs),
2165 (ins imm64zx16:$enc, AnyReg:$R1, AnyReg:$R2),
2166 ".insn rr,$enc,$R1,$R2", []>;
2167 def InsnRRE : DirectiveInsnRRE<(outs), (ins imm64zx32:$enc,
2168 AnyReg:$R1, AnyReg:$R2),
2169 ".insn rre,$enc,$R1,$R2", []>;
2170 def InsnRRF : DirectiveInsnRRF<(outs),
2171 (ins imm64zx32:$enc, AnyReg:$R1, AnyReg:$R2,
2172 AnyReg:$R3, imm32zx4:$M4),
2173 ".insn rrf,$enc,$R1,$R2,$R3,$M4", []>;
2174 def InsnRRS : DirectiveInsnRRS<(outs),
2175 (ins imm64zx48:$enc, AnyReg:$R1,
2176 AnyReg:$R2, imm32zx4:$M3,
2178 ".insn rrs,$enc,$R1,$R2,$M3,$BD4", []>;
2179 def InsnRS : DirectiveInsnRS<(outs),
2180 (ins imm64zx32:$enc, AnyReg:$R1,
2181 AnyReg:$R3, bdaddr12only:$BD2),
2182 ".insn rs,$enc,$R1,$R3,$BD2", []>;
2183 def InsnRSE : DirectiveInsnRSE<(outs),
2184 (ins imm64zx48:$enc, AnyReg:$R1,
2185 AnyReg:$R3, bdaddr12only:$BD2),
2186 ".insn rse,$enc,$R1,$R3,$BD2", []>;
2187 def InsnRSI : DirectiveInsnRSI<(outs),
2188 (ins imm64zx48:$enc, AnyReg:$R1,
2189 AnyReg:$R3, brtarget16:$RI2),
2190 ".insn rsi,$enc,$R1,$R3,$RI2", []>;
2191 def InsnRSY : DirectiveInsnRSY<(outs),
2192 (ins imm64zx48:$enc, AnyReg:$R1,
2193 AnyReg:$R3, bdaddr20only:$BD2),
2194 ".insn rsy,$enc,$R1,$R3,$BD2", []>;
2195 def InsnRX : DirectiveInsnRX<(outs), (ins imm64zx32:$enc, AnyReg:$R1,
2196 bdxaddr12only:$XBD2),
2197 ".insn rx,$enc,$R1,$XBD2", []>;
2198 def InsnRXE : DirectiveInsnRXE<(outs), (ins imm64zx48:$enc, AnyReg:$R1,
2199 bdxaddr12only:$XBD2),
2200 ".insn rxe,$enc,$R1,$XBD2", []>;
2201 def InsnRXF : DirectiveInsnRXF<(outs),
2202 (ins imm64zx48:$enc, AnyReg:$R1,
2203 AnyReg:$R3, bdxaddr12only:$XBD2),
2204 ".insn rxf,$enc,$R1,$R3,$XBD2", []>;
2205 def InsnRXY : DirectiveInsnRXY<(outs), (ins imm64zx48:$enc, AnyReg:$R1,
2206 bdxaddr20only:$XBD2),
2207 ".insn rxy,$enc,$R1,$XBD2", []>;
2208 def InsnS : DirectiveInsnS<(outs),
2209 (ins imm64zx32:$enc, bdaddr12only:$BD2),
2210 ".insn s,$enc,$BD2", []>;
2211 def InsnSI : DirectiveInsnSI<(outs),
2212 (ins imm64zx32:$enc, bdaddr12only:$BD1,
2214 ".insn si,$enc,$BD1,$I2", []>;
2215 def InsnSIY : DirectiveInsnSIY<(outs),
2216 (ins imm64zx48:$enc,
2217 bdaddr20only:$BD1, imm32zx8:$I2),
2218 ".insn siy,$enc,$BD1,$I2", []>;
2219 def InsnSIL : DirectiveInsnSIL<(outs),
2220 (ins imm64zx48:$enc, bdaddr12only:$BD1,
2222 ".insn sil,$enc,$BD1,$I2", []>;
2223 def InsnSS : DirectiveInsnSS<(outs),
2224 (ins imm64zx48:$enc, bdraddr12only:$RBD1,
2225 bdaddr12only:$BD2, AnyReg:$R3),
2226 ".insn ss,$enc,$RBD1,$BD2,$R3", []>;
2227 def InsnSSE : DirectiveInsnSSE<(outs),
2228 (ins imm64zx48:$enc,
2229 bdaddr12only:$BD1,bdaddr12only:$BD2),
2230 ".insn sse,$enc,$BD1,$BD2", []>;
2231 def InsnSSF : DirectiveInsnSSF<(outs),
2232 (ins imm64zx48:$enc, bdaddr12only:$BD1,
2233 bdaddr12only:$BD2, AnyReg:$R3),
2234 ".insn ssf,$enc,$BD1,$BD2,$R3", []>;
2237 //===----------------------------------------------------------------------===//
2238 // Peepholes.
2239 //===----------------------------------------------------------------------===//
2241 // Avoid generating 2 XOR instructions. (xor (and x, y), y) is
2242 // equivalent to (and (xor x, -1), y)
2243 def : Pat<(and (xor GR64:$x, (i64 -1)), GR64:$y),
2244 (XGR GR64:$y, (NGR GR64:$y, GR64:$x))>;
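// Checking the identity bit by bit: (x & y) ^ y clears exactly those bits of
// y that are also set in x, i.e. it computes y & ~x, so the NGR/XGR pair
// above implements the AND-with-complement without materializing a -1 constant.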
2246 // Shift/rotate instructions only use the last 6 bits of the second operand
2247 // register, so we can safely use NILL (16 fewer bits than NILF) to only AND the
2248 // low 16 bits of the shift amount.
2249 // Complexity is added so that we match this before we match NILF on the AND
2250 // operation.
2251 let AddedComplexity = 4 in {
2252 def : Pat<(shl GR32:$val, (and GR32:$shift, imm32zx16trunc:$imm)),
2253 (SLL GR32:$val, (NILL GR32:$shift, imm32zx16trunc:$imm), 0)>;
2255 def : Pat<(sra GR32:$val, (and GR32:$shift, imm32zx16trunc:$imm)),
2256 (SRA GR32:$val, (NILL GR32:$shift, imm32zx16trunc:$imm), 0)>;
2258 def : Pat<(srl GR32:$val, (and GR32:$shift, imm32zx16trunc:$imm)),
2259 (SRL GR32:$val, (NILL GR32:$shift, imm32zx16trunc:$imm), 0)>;
2261 def : Pat<(shl GR64:$val, (and GR32:$shift, imm32zx16trunc:$imm)),
2262 (SLLG GR64:$val, (NILL GR32:$shift, imm32zx16trunc:$imm), 0)>;
2264 def : Pat<(sra GR64:$val, (and GR32:$shift, imm32zx16trunc:$imm)),
2265 (SRAG GR64:$val, (NILL GR32:$shift, imm32zx16trunc:$imm), 0)>;
2267 def : Pat<(srl GR64:$val, (and GR32:$shift, imm32zx16trunc:$imm)),
2268 (SRLG GR64:$val, (NILL GR32:$shift, imm32zx16trunc:$imm), 0)>;
2270 def : Pat<(rotl GR32:$val, (and GR32:$shift, imm32zx16trunc:$imm)),
2271 (RLL GR32:$val, (NILL GR32:$shift, imm32zx16trunc:$imm), 0)>;
2273 def : Pat<(rotl GR64:$val, (and GR32:$shift, imm32zx16trunc:$imm)),
2274 (RLLG GR64:$val, (NILL GR32:$shift, imm32zx16trunc:$imm), 0)>;
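// A note on the patterns above: since the hardware already ignores all but the
// low 6 bits of the shift amount, keeping just the low 16 bits with NILL is
// safe, and its 4-byte RI encoding is preferred over the 6-byte RIL of NILF.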
2277 // Substitute (x*64-s) with (-s), since shift/rotate instructions only
2278 // use the last 6 bits of the second operand register (making it modulo 64).
2279 let AddedComplexity = 4 in {
2280 def : Pat<(shl GR64:$val, (sub imm32mod64, GR32:$shift)),
2281 (SLLG GR64:$val, (LCR GR32:$shift), 0)>;
2283 def : Pat<(sra GR64:$val, (sub imm32mod64, GR32:$shift)),
2284 (SRAG GR64:$val, (LCR GR32:$shift), 0)>;
2286 def : Pat<(srl GR64:$val, (sub imm32mod64, GR32:$shift)),
2287 (SRLG GR64:$val, (LCR GR32:$shift), 0)>;
2289 def : Pat<(rotl GR64:$val, (sub imm32mod64, GR32:$shift)),
2290 (RLLG GR64:$val, (LCR GR32:$shift), 0)>;
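// A quick sanity check of the substitution: for a shift amount s = 10,
// (64 - s) is 54 and (-s) mod 64 is also 54, so replacing the subtraction
// with LCR (load complement) yields the same effective amount.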
2293 // Peepholes for turning scalar operations into block operations.
2294 defm : BlockLoadStore<anyextloadi8, i32, MVCSequence, NCSequence, OCSequence,
2296 defm : BlockLoadStore<anyextloadi16, i32, MVCSequence, NCSequence, OCSequence,
2298 defm : BlockLoadStore<load, i32, MVCSequence, NCSequence, OCSequence,
2300 defm : BlockLoadStore<anyextloadi8, i64, MVCSequence, NCSequence,
2301 OCSequence, XCSequence, 1>;
2302 defm : BlockLoadStore<anyextloadi16, i64, MVCSequence, NCSequence, OCSequence,
2304 defm : BlockLoadStore<anyextloadi32, i64, MVCSequence, NCSequence, OCSequence,
2306 defm : BlockLoadStore<load, i64, MVCSequence, NCSequence, OCSequence,